[v7 PATCH 1/2] hung_task: Refactor detection logic and atomicise detection count

Aaron Tomlin posted 2 patches 2 weeks ago
There is a newer version of this series
[v7 PATCH 1/2] hung_task: Refactor detection logic and atomicise detection count
Posted by Aaron Tomlin 2 weeks ago
The check_hung_task() function currently conflates two distinct
responsibilities: validating whether a task is hung and handling the
subsequent reporting (printing warnings, triggering panics, or
tracepoints).

This patch refactors the logic by introducing hung_task_info(), a
function dedicated solely to reporting. The actual detection check,
task_is_hung(), is hoisted into the primary loop within
check_hung_uninterruptible_tasks(). This separation clearly decouples
the mechanism of detection from the policy of reporting.

Furthermore, to facilitate future support for concurrent hung task
detection, the global sysctl_hung_task_detect_count variable is
converted from unsigned long to atomic_long_t. Consequently, the
counting logic is updated to accumulate the number of hung tasks locally
(this_round_count) during the iteration. The global counter is then
updated atomically via atomic_long_cmpxchg_relaxed() once the loop
concludes, rather than incrementally during the scan.

These changes are strictly preparatory and introduce no functional
change to the system's runtime behaviour.

Signed-off-by: Aaron Tomlin <atomlin@atomlin.com>
---
 kernel/hung_task.c | 58 ++++++++++++++++++++++++++--------------------
 1 file changed, 33 insertions(+), 25 deletions(-)

diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index d2254c91450b..df10830ed9ef 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -36,7 +36,7 @@ static int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
 /*
  * Total number of tasks detected as hung since boot:
  */
-static unsigned long __read_mostly sysctl_hung_task_detect_count;
+static atomic_long_t sysctl_hung_task_detect_count = ATOMIC_LONG_INIT(0);
 
 /*
  * Limit number of tasks checked in a batch.
@@ -223,31 +223,29 @@ static inline void debug_show_blocker(struct task_struct *task, unsigned long ti
 }
 #endif
 
-static void check_hung_task(struct task_struct *t, unsigned long timeout,
-		unsigned long prev_detect_count)
+/**
+ * hung_task_info - Print diagnostic details for a hung task
+ * @t: Pointer to the detected hung task.
+ * @timeout: Timeout threshold for detecting hung tasks
+ * @this_round_count: Count of hung tasks detected in the current iteration
+ *
+ * Print structured information about the specified hung task, if warnings
+ * are enabled or if the panic batch threshold is exceeded.
+ */
+static void hung_task_info(struct task_struct *t, unsigned long timeout,
+			   unsigned long this_round_count)
 {
-	unsigned long total_hung_task;
-
-	if (!task_is_hung(t, timeout))
-		return;
-
-	/*
-	 * This counter tracks the total number of tasks detected as hung
-	 * since boot.
-	 */
-	sysctl_hung_task_detect_count++;
-
-	total_hung_task = sysctl_hung_task_detect_count - prev_detect_count;
 	trace_sched_process_hang(t);
 
-	if (sysctl_hung_task_panic && total_hung_task >= sysctl_hung_task_panic) {
+	if (sysctl_hung_task_panic && this_round_count >= sysctl_hung_task_panic) {
 		console_verbose();
 		hung_task_call_panic = true;
 	}
 
 	/*
-	 * Ok, the task did not get scheduled for more than 2 minutes,
-	 * complain:
+	 * The given task did not get scheduled for more than
+	 * CONFIG_DEFAULT_HUNG_TASK_TIMEOUT. Therefore, complain
+	 * accordingly
 	 */
 	if (sysctl_hung_task_warnings || hung_task_call_panic) {
 		if (sysctl_hung_task_warnings > 0)
@@ -297,18 +295,18 @@ static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
 
 /*
  * Check whether a TASK_UNINTERRUPTIBLE does not get woken up for
- * a really long time (120 seconds). If that happens, print out
- * a warning.
+ * a really long time. If that happens, print out a warning.
  */
 static void check_hung_uninterruptible_tasks(unsigned long timeout)
 {
 	int max_count = sysctl_hung_task_check_count;
 	unsigned long last_break = jiffies;
 	struct task_struct *g, *t;
-	unsigned long prev_detect_count = sysctl_hung_task_detect_count;
+	unsigned long total_count, this_round_count;
 	int need_warning = sysctl_hung_task_warnings;
 	unsigned long si_mask = hung_task_si_mask;
 
+	total_count = atomic_long_read(&sysctl_hung_task_detect_count);
 	/*
 	 * If the system crashed already then all bets are off,
 	 * do not report extra hung tasks:
@@ -316,10 +314,9 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 	if (test_taint(TAINT_DIE) || did_panic)
 		return;
 
-
+	this_round_count = 0;
 	rcu_read_lock();
 	for_each_process_thread(g, t) {
-
 		if (!max_count--)
 			goto unlock;
 		if (time_after(jiffies, last_break + HUNG_TASK_LOCK_BREAK)) {
@@ -328,14 +325,25 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 			last_break = jiffies;
 		}
 
-		check_hung_task(t, timeout, prev_detect_count);
+		if (task_is_hung(t, timeout)) {
+			this_round_count++;
+			hung_task_info(t, timeout, this_round_count);
+		}
 	}
  unlock:
 	rcu_read_unlock();
 
-	if (!(sysctl_hung_task_detect_count - prev_detect_count))
+	if (!this_round_count)
 		return;
 
+	/*
+	 * This counter tracks the total number of tasks detected as hung
+	 * since boot.
+	 */
+	atomic_long_cmpxchg_relaxed(&sysctl_hung_task_detect_count,
+				    total_count, total_count +
+				    this_round_count);
+
 	if (need_warning || hung_task_call_panic) {
 		si_mask |= SYS_INFO_LOCKS;
 
-- 
2.51.0
Re: [v7 PATCH 1/2] hung_task: Refactor detection logic and atomicise detection count
Posted by Lance Yang 5 days, 15 hours ago

On 2026/1/25 21:58, Aaron Tomlin wrote:
> The check_hung_task() function currently conflates two distinct
> responsibilities: validating whether a task is hung and handling the
> subsequent reporting (printing warnings, triggering panics, or
> tracepoints).
> 
> This patch refactors the logic by introducing hung_task_info(), a
> function dedicated solely to reporting. The actual detection check,
> task_is_hung(), is hoisted into the primary loop within
> check_hung_uninterruptible_tasks(). This separation clearly decouples
> the mechanism of detection from the policy of reporting.
> 
> Furthermore, to facilitate future support for concurrent hung task
> detection, the global sysctl_hung_task_detect_count variable is
> converted from unsigned long to atomic_long_t. Consequently, the
> counting logic is updated to accumulate the number of hung tasks locally
> (this_round_count) during the iteration. The global counter is then
> updated atomically via atomic_long_cmpxchg_relaxed() once the loop
> concludes, rather than incrementally during the scan.
> 
> These changes are strictly preparatory and introduce no functional
> change to the system's runtime behaviour.
> 
> Signed-off-by: Aaron Tomlin <atomlin@atomlin.com>
> ---
>   kernel/hung_task.c | 58 ++++++++++++++++++++++++++--------------------
>   1 file changed, 33 insertions(+), 25 deletions(-)
> 
> diff --git a/kernel/hung_task.c b/kernel/hung_task.c
> index d2254c91450b..df10830ed9ef 100644
> --- a/kernel/hung_task.c
> +++ b/kernel/hung_task.c
> @@ -36,7 +36,7 @@ static int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
>   /*
>    * Total number of tasks detected as hung since boot:
>    */
> -static unsigned long __read_mostly sysctl_hung_task_detect_count;
> +static atomic_long_t sysctl_hung_task_detect_count = ATOMIC_LONG_INIT(0);
>   
>   /*
>    * Limit number of tasks checked in a batch.
> @@ -223,31 +223,29 @@ static inline void debug_show_blocker(struct task_struct *task, unsigned long ti
>   }
>   #endif
>   
> -static void check_hung_task(struct task_struct *t, unsigned long timeout,
> -		unsigned long prev_detect_count)
> +/**
> + * hung_task_info - Print diagnostic details for a hung task
> + * @t: Pointer to the detected hung task.
> + * @timeout: Timeout threshold for detecting hung tasks
> + * @this_round_count: Count of hung tasks detected in the current iteration
> + *
> + * Print structured information about the specified hung task, if warnings
> + * are enabled or if the panic batch threshold is exceeded.
> + */
> +static void hung_task_info(struct task_struct *t, unsigned long timeout,
> +			   unsigned long this_round_count)
>   {
> -	unsigned long total_hung_task;
> -
> -	if (!task_is_hung(t, timeout))
> -		return;
> -
> -	/*
> -	 * This counter tracks the total number of tasks detected as hung
> -	 * since boot.
> -	 */
> -	sysctl_hung_task_detect_count++;

Previously, the global detect count updated immediately when a hung task
was found. BUT now, it only updates after the full scan finishes ...

Ideally, the count should update as soon as possible, so that userspace
can react in time :)

For example, by migrating critical containers away from the node before
the situation gets worse - something we already do.

Cheers,
Lance
Re: [v7 PATCH 1/2] hung_task: Refactor detection logic and atomicise detection count
Posted by Lance Yang 5 days, 15 hours ago

On 2026/2/3 11:05, Lance Yang wrote:
> 
> 
> On 2026/1/25 21:58, Aaron Tomlin wrote:
>> The check_hung_task() function currently conflates two distinct
>> responsibilities: validating whether a task is hung and handling the
>> subsequent reporting (printing warnings, triggering panics, or
>> tracepoints).
>>
>> This patch refactors the logic by introducing hung_task_info(), a
>> function dedicated solely to reporting. The actual detection check,
>> task_is_hung(), is hoisted into the primary loop within
>> check_hung_uninterruptible_tasks(). This separation clearly decouples
>> the mechanism of detection from the policy of reporting.
>>
>> Furthermore, to facilitate future support for concurrent hung task
>> detection, the global sysctl_hung_task_detect_count variable is
>> converted from unsigned long to atomic_long_t. Consequently, the
>> counting logic is updated to accumulate the number of hung tasks locally
>> (this_round_count) during the iteration. The global counter is then
>> updated atomically via atomic_long_cmpxchg_relaxed() once the loop
>> concludes, rather than incrementally during the scan.
>>
>> These changes are strictly preparatory and introduce no functional
>> change to the system's runtime behaviour.
>>
>> Signed-off-by: Aaron Tomlin <atomlin@atomlin.com>
>> ---
>>   kernel/hung_task.c | 58 ++++++++++++++++++++++++++--------------------
>>   1 file changed, 33 insertions(+), 25 deletions(-)
>>
>> diff --git a/kernel/hung_task.c b/kernel/hung_task.c
>> index d2254c91450b..df10830ed9ef 100644
>> --- a/kernel/hung_task.c
>> +++ b/kernel/hung_task.c
>> @@ -36,7 +36,7 @@ static int __read_mostly 
>> sysctl_hung_task_check_count = PID_MAX_LIMIT;
>>   /*
>>    * Total number of tasks detected as hung since boot:
>>    */
>> -static unsigned long __read_mostly sysctl_hung_task_detect_count;
>> +static atomic_long_t sysctl_hung_task_detect_count = 
>> ATOMIC_LONG_INIT(0);
>>   /*
>>    * Limit number of tasks checked in a batch.
>> @@ -223,31 +223,29 @@ static inline void debug_show_blocker(struct 
>> task_struct *task, unsigned long ti
>>   }
>>   #endif
>> -static void check_hung_task(struct task_struct *t, unsigned long 
>> timeout,
>> -        unsigned long prev_detect_count)
>> +/**
>> + * hung_task_info - Print diagnostic details for a hung task
>> + * @t: Pointer to the detected hung task.
>> + * @timeout: Timeout threshold for detecting hung tasks
>> + * @this_round_count: Count of hung tasks detected in the current 
>> iteration
>> + *
>> + * Print structured information about the specified hung task, if 
>> warnings
>> + * are enabled or if the panic batch threshold is exceeded.
>> + */
>> +static void hung_task_info(struct task_struct *t, unsigned long timeout,
>> +               unsigned long this_round_count)
>>   {
>> -    unsigned long total_hung_task;
>> -
>> -    if (!task_is_hung(t, timeout))
>> -        return;
>> -
>> -    /*
>> -     * This counter tracks the total number of tasks detected as hung
>> -     * since boot.
>> -     */
>> -    sysctl_hung_task_detect_count++;
> 
> Previously, the global detect count updated immediately when a hung task
> was found. BUT now, it only updates after the full scan finishes ...
> 
> Ideally, the count should update as soon as possible, so that userspace
> can react in time :)
> 
> For example, by migrating critical containers away from the node before
> the situation gets worse - something we already do.

Sorry, I should have said that earlier - just realized it ...

Re: [v7 PATCH 1/2] hung_task: Refactor detection logic and atomicise detection count
Posted by Petr Mladek 5 days, 9 hours ago
On Tue 2026-02-03 11:08:33, Lance Yang wrote:
> On 2026/2/3 11:05, Lance Yang wrote:
> > On 2026/1/25 21:58, Aaron Tomlin wrote:
> > > The check_hung_task() function currently conflates two distinct
> > > responsibilities: validating whether a task is hung and handling the
> > > subsequent reporting (printing warnings, triggering panics, or
> > > tracepoints).
> > > 
> > > This patch refactors the logic by introducing hung_task_info(), a
> > > function dedicated solely to reporting. The actual detection check,
> > > task_is_hung(), is hoisted into the primary loop within
> > > check_hung_uninterruptible_tasks(). This separation clearly decouples
> > > the mechanism of detection from the policy of reporting.
> > > 
> > > Furthermore, to facilitate future support for concurrent hung task
> > > detection, the global sysctl_hung_task_detect_count variable is
> > > converted from unsigned long to atomic_long_t. Consequently, the
> > > counting logic is updated to accumulate the number of hung tasks locally
> > > (this_round_count) during the iteration. The global counter is then
> > > updated atomically via atomic_long_cmpxchg_relaxed() once the loop
> > > concludes, rather than incrementally during the scan.
> > > 
> > > These changes are strictly preparatory and introduce no functional
> > > change to the system's runtime behaviour.
> > > 
> > > Signed-off-by: Aaron Tomlin <atomlin@atomlin.com>
> > > ---
> > >   kernel/hung_task.c | 58 ++++++++++++++++++++++++++--------------------
> > >   1 file changed, 33 insertions(+), 25 deletions(-)
> > > 
> > > diff --git a/kernel/hung_task.c b/kernel/hung_task.c
> > > index d2254c91450b..df10830ed9ef 100644
> > > --- a/kernel/hung_task.c
> > > +++ b/kernel/hung_task.c
> > > @@ -36,7 +36,7 @@ static int __read_mostly
> > > sysctl_hung_task_check_count = PID_MAX_LIMIT;
> > >   /*
> > >    * Total number of tasks detected as hung since boot:
> > >    */
> > > -static unsigned long __read_mostly sysctl_hung_task_detect_count;
> > > +static atomic_long_t sysctl_hung_task_detect_count =
> > > ATOMIC_LONG_INIT(0);
> > >   /*
> > >    * Limit number of tasks checked in a batch.
> > > @@ -223,31 +223,29 @@ static inline void debug_show_blocker(struct
> > > task_struct *task, unsigned long ti
> > >   }
> > >   #endif
> > > -static void check_hung_task(struct task_struct *t, unsigned long
> > > timeout,
> > > -        unsigned long prev_detect_count)
> > > +/**
> > > + * hung_task_info - Print diagnostic details for a hung task
> > > + * @t: Pointer to the detected hung task.
> > > + * @timeout: Timeout threshold for detecting hung tasks
> > > + * @this_round_count: Count of hung tasks detected in the current
> > > iteration
> > > + *
> > > + * Print structured information about the specified hung task, if
> > > warnings
> > > + * are enabled or if the panic batch threshold is exceeded.
> > > + */
> > > +static void hung_task_info(struct task_struct *t, unsigned long timeout,
> > > +               unsigned long this_round_count)
> > >   {
> > > -    unsigned long total_hung_task;
> > > -
> > > -    if (!task_is_hung(t, timeout))
> > > -        return;
> > > -
> > > -    /*
> > > -     * This counter tracks the total number of tasks detected as hung
> > > -     * since boot.
> > > -     */
> > > -    sysctl_hung_task_detect_count++;
> > 
> > Previously, the global detect count updated immediately when a hung task
> > was found. BUT now, it only updates after the full scan finishes ...
> > 
> > Ideally, the count should update as soon as possible, so that userspace
> > can react in time :)
> > 
> > For example, by migrating critical containers away from the node before
> > the situation gets worse - something we already do.
> 
> Sorry, I should have said that earlier - just realized it ...

Better late than sorry ;-)

That said, is the delay really critical? I guess that the userspace
checks the counter in regular intervals (seconds or tens of seconds).
Or is there any way to get a notification immediately?

Anyway, I thought how the counting and barriers might work when
we update the global counter immediately. And I came up with
the following:

diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 350093de0535..8bc043fbe89c 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -302,15 +302,10 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 	int max_count = sysctl_hung_task_check_count;
 	unsigned long last_break = jiffies;
 	struct task_struct *g, *t;
-	unsigned long total_count, this_round_count;
+	unsigned long this_round_count;
 	int need_warning = sysctl_hung_task_warnings;
 	unsigned long si_mask = hung_task_si_mask;
 
-	/*
-	 * The counter might get reset. Remember the initial value.
-	 * Acquire prevents reordering task checks before this point.
-	 */
-	total_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
 	/*
 	 * If the system crashed already then all bets are off,
 	 * do not report extra hung tasks:
@@ -330,6 +325,13 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 		}
 
 		if (task_is_hung(t, timeout)) {
+			/*
+			 * Increment the global counter so that userspace could
+			 * start migrating tasks ASAP. But count the current
+			 * round separately because userspace could reset
+			 * the global counter at any time.
+			 */
+			atomic_long_inc(&sysctl_hung_task_detect_count);
 			this_round_count++;
 			hung_task_info(t, timeout, this_round_count);
 		}
@@ -340,15 +342,6 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 	if (!this_round_count)
 		return;
 
-	/*
-	 * Do not count this round when the global counter has been reset
-	 * during this check. Release ensures we see all hang details
-	 * recorded during the scan.
-	 */
-	atomic_long_cmpxchg_release(&sysctl_hung_task_detect_count,
-				    total_count, total_count +
-				    this_round_count);
-
 	if (need_warning || hung_task_call_panic) {
 		si_mask |= SYS_INFO_LOCKS;
 

I am not sure if the comment above the increment is needed.
Well, it might help anyone understand the motivation without
digging through the git history.

Best Regards,
Petr
Re: [v7 PATCH 1/2] hung_task: Refactor detection logic and atomicise detection count
Posted by Aaron Tomlin 4 days, 4 hours ago
On Tue, Feb 03, 2026 at 10:03:52AM +0100, Petr Mladek wrote:
> On Tue 2026-02-03 11:08:33, Lance Yang wrote:
> > On 2026/2/3 11:05, Lance Yang wrote:
> > > On 2026/1/25 21:58, Aaron Tomlin wrote:
> > > > The check_hung_task() function currently conflates two distinct
> > > > responsibilities: validating whether a task is hung and handling the
> > > > subsequent reporting (printing warnings, triggering panics, or
> > > > tracepoints).
> > > > 
> > > > This patch refactors the logic by introducing hung_task_info(), a
> > > > function dedicated solely to reporting. The actual detection check,
> > > > task_is_hung(), is hoisted into the primary loop within
> > > > check_hung_uninterruptible_tasks(). This separation clearly decouples
> > > > the mechanism of detection from the policy of reporting.
> > > > 
> > > > Furthermore, to facilitate future support for concurrent hung task
> > > > detection, the global sysctl_hung_task_detect_count variable is
> > > > converted from unsigned long to atomic_long_t. Consequently, the
> > > > counting logic is updated to accumulate the number of hung tasks locally
> > > > (this_round_count) during the iteration. The global counter is then
> > > > updated atomically via atomic_long_cmpxchg_relaxed() once the loop
> > > > concludes, rather than incrementally during the scan.
> > > > 
> > > > These changes are strictly preparatory and introduce no functional
> > > > change to the system's runtime behaviour.
> > > > 
> > > > Signed-off-by: Aaron Tomlin <atomlin@atomlin.com>
> > > > ---
> > > >   kernel/hung_task.c | 58 ++++++++++++++++++++++++++--------------------
> > > >   1 file changed, 33 insertions(+), 25 deletions(-)
> > > > 
> > > > diff --git a/kernel/hung_task.c b/kernel/hung_task.c
> > > > index d2254c91450b..df10830ed9ef 100644
> > > > --- a/kernel/hung_task.c
> > > > +++ b/kernel/hung_task.c
> > > > @@ -36,7 +36,7 @@ static int __read_mostly
> > > > sysctl_hung_task_check_count = PID_MAX_LIMIT;
> > > >   /*
> > > >    * Total number of tasks detected as hung since boot:
> > > >    */
> > > > -static unsigned long __read_mostly sysctl_hung_task_detect_count;
> > > > +static atomic_long_t sysctl_hung_task_detect_count =
> > > > ATOMIC_LONG_INIT(0);
> > > >   /*
> > > >    * Limit number of tasks checked in a batch.
> > > > @@ -223,31 +223,29 @@ static inline void debug_show_blocker(struct
> > > > task_struct *task, unsigned long ti
> > > >   }
> > > >   #endif
> > > > -static void check_hung_task(struct task_struct *t, unsigned long
> > > > timeout,
> > > > -        unsigned long prev_detect_count)
> > > > +/**
> > > > + * hung_task_info - Print diagnostic details for a hung task
> > > > + * @t: Pointer to the detected hung task.
> > > > + * @timeout: Timeout threshold for detecting hung tasks
> > > > + * @this_round_count: Count of hung tasks detected in the current
> > > > iteration
> > > > + *
> > > > + * Print structured information about the specified hung task, if
> > > > warnings
> > > > + * are enabled or if the panic batch threshold is exceeded.
> > > > + */
> > > > +static void hung_task_info(struct task_struct *t, unsigned long timeout,
> > > > +               unsigned long this_round_count)
> > > >   {
> > > > -    unsigned long total_hung_task;
> > > > -
> > > > -    if (!task_is_hung(t, timeout))
> > > > -        return;
> > > > -
> > > > -    /*
> > > > -     * This counter tracks the total number of tasks detected as hung
> > > > -     * since boot.
> > > > -     */
> > > > -    sysctl_hung_task_detect_count++;
> > > 
> > > Previously, the global detect count updated immediately when a hung task
> > > was found. BUT now, it only updates after the full scan finishes ...
> > > 
> > > Ideally, the count should update as soon as possible, so that userspace
> > > can react in time :)
> > > 
> > > For example, by migrating critical containers away from the node before
> > > the situation gets worse - something we already do.
> > 
> > Sorry, I should have said that earlier - just realized it ...
> 
> Better late then sorry ;-)
> 
> That said, is the delay really critical? I guess that the userspace
> checks the counter in regular intervals (seconds or tens of seconds).
> Or is there any way to get a notification immediately?
> 
> Anyway, I thought how the counting and barriers might work when
> we update the global counter immediately. And I came up with
> the following:
> 
> diff --git a/kernel/hung_task.c b/kernel/hung_task.c
> index 350093de0535..8bc043fbe89c 100644
> --- a/kernel/hung_task.c
> +++ b/kernel/hung_task.c
> @@ -302,15 +302,10 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
>  	int max_count = sysctl_hung_task_check_count;
>  	unsigned long last_break = jiffies;
>  	struct task_struct *g, *t;
> -	unsigned long total_count, this_round_count;
> +	unsigned long this_round_count;
>  	int need_warning = sysctl_hung_task_warnings;
>  	unsigned long si_mask = hung_task_si_mask;
>  
> -	/*
> -	 * The counter might get reset. Remember the initial value.
> -	 * Acquire prevents reordering task checks before this point.
> -	 */
> -	total_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
>  	/*
>  	 * If the system crashed already then all bets are off,
>  	 * do not report extra hung tasks:
> @@ -330,6 +325,13 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
>  		}
>  
>  		if (task_is_hung(t, timeout)) {
> +			/*
> +			 * Increment the global counter so that userspace could
> +			 * start migrating tasks ASAP. But count the current
> +			 * round separately because userspace could reset
> +			 * the global counter at any time.
> +			 */
> +			atomic_long_inc(&sysctl_hung_task_detect_count);
>  			this_round_count++;
>  			hung_task_info(t, timeout, this_round_count);
>  		}
> @@ -340,15 +342,6 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
>  	if (!this_round_count)
>  		return;
>  
> -	/*
> -	 * Do not count this round when the global counter has been reset
> -	 * during this check. Release ensures we see all hang details
> -	 * recorded during the scan.
> -	 */
> -	atomic_long_cmpxchg_release(&sysctl_hung_task_detect_count,
> -				    total_count, total_count +
> -				    this_round_count);
> -
>  	if (need_warning || hung_task_call_panic) {
>  		si_mask |= SYS_INFO_LOCKS;
>  
> 
> I am not sure of the comment above the increment is needed.
> Well, it might help anyone to understand the motivation without
> digging in the git log history.

Hi Petr,

Agreed.

By moving to a "relaxed" atomic_long_inc(), one will now rely on the
atomicity of the individual operation. If a user resets the counter
(writes 0) concurrently:

        CPU 0                               CPU 1
khungtaskd increments (1)
                                        User resets (0)
khungtaskd increments (1)


In the above, the counter reflects 1. This is acceptable behavior for a
"live" counter. The strict protection against "lost updates" required by
the batch calculation (i.e., old + new) is not required for a simple atomic
increment.


Kind regards,
-- 
Aaron Tomlin
Re: [v7 PATCH 1/2] hung_task: Refactor detection logic and atomicise detection count
Posted by Lance Yang 5 days, 7 hours ago

On 2026/2/3 17:03, Petr Mladek wrote:
> On Tue 2026-02-03 11:08:33, Lance Yang wrote:
>> On 2026/2/3 11:05, Lance Yang wrote:
>>> On 2026/1/25 21:58, Aaron Tomlin wrote:
>>>> The check_hung_task() function currently conflates two distinct
>>>> responsibilities: validating whether a task is hung and handling the
>>>> subsequent reporting (printing warnings, triggering panics, or
>>>> tracepoints).
>>>>
>>>> This patch refactors the logic by introducing hung_task_info(), a
>>>> function dedicated solely to reporting. The actual detection check,
>>>> task_is_hung(), is hoisted into the primary loop within
>>>> check_hung_uninterruptible_tasks(). This separation clearly decouples
>>>> the mechanism of detection from the policy of reporting.
>>>>
>>>> Furthermore, to facilitate future support for concurrent hung task
>>>> detection, the global sysctl_hung_task_detect_count variable is
>>>> converted from unsigned long to atomic_long_t. Consequently, the
>>>> counting logic is updated to accumulate the number of hung tasks locally
>>>> (this_round_count) during the iteration. The global counter is then
>>>> updated atomically via atomic_long_cmpxchg_relaxed() once the loop
>>>> concludes, rather than incrementally during the scan.
>>>>
>>>> These changes are strictly preparatory and introduce no functional
>>>> change to the system's runtime behaviour.
>>>>
>>>> Signed-off-by: Aaron Tomlin <atomlin@atomlin.com>
>>>> ---
>>>>    kernel/hung_task.c | 58 ++++++++++++++++++++++++++--------------------
>>>>    1 file changed, 33 insertions(+), 25 deletions(-)
>>>>
>>>> diff --git a/kernel/hung_task.c b/kernel/hung_task.c
>>>> index d2254c91450b..df10830ed9ef 100644
>>>> --- a/kernel/hung_task.c
>>>> +++ b/kernel/hung_task.c
>>>> @@ -36,7 +36,7 @@ static int __read_mostly
>>>> sysctl_hung_task_check_count = PID_MAX_LIMIT;
>>>>    /*
>>>>     * Total number of tasks detected as hung since boot:
>>>>     */
>>>> -static unsigned long __read_mostly sysctl_hung_task_detect_count;
>>>> +static atomic_long_t sysctl_hung_task_detect_count =
>>>> ATOMIC_LONG_INIT(0);
>>>>    /*
>>>>     * Limit number of tasks checked in a batch.
>>>> @@ -223,31 +223,29 @@ static inline void debug_show_blocker(struct
>>>> task_struct *task, unsigned long ti
>>>>    }
>>>>    #endif
>>>> -static void check_hung_task(struct task_struct *t, unsigned long
>>>> timeout,
>>>> -        unsigned long prev_detect_count)
>>>> +/**
>>>> + * hung_task_info - Print diagnostic details for a hung task
>>>> + * @t: Pointer to the detected hung task.
>>>> + * @timeout: Timeout threshold for detecting hung tasks
>>>> + * @this_round_count: Count of hung tasks detected in the current
>>>> iteration
>>>> + *
>>>> + * Print structured information about the specified hung task, if
>>>> warnings
>>>> + * are enabled or if the panic batch threshold is exceeded.
>>>> + */
>>>> +static void hung_task_info(struct task_struct *t, unsigned long timeout,
>>>> +               unsigned long this_round_count)
>>>>    {
>>>> -    unsigned long total_hung_task;
>>>> -
>>>> -    if (!task_is_hung(t, timeout))
>>>> -        return;
>>>> -
>>>> -    /*
>>>> -     * This counter tracks the total number of tasks detected as hung
>>>> -     * since boot.
>>>> -     */
>>>> -    sysctl_hung_task_detect_count++;
>>>
>>> Previously, the global detect count updated immediately when a hung task
>>> was found. BUT now, it only updates after the full scan finishes ...
>>>
>>> Ideally, the count should update as soon as possible, so that userspace
>>> can react in time :)
>>>
>>> For example, by migrating critical containers away from the node before
>>> the situation gets worse - something we already do.
>>
>> Sorry, I should have said that earlier - just realized it ...
> 
> Better late then sorry ;-)

;P

> 
> That said, is the delay really critical? I guess that the userspace
> checks the counter in regular intervals (seconds or tens of seconds).
> Or is there any way to get a notification immediately?

Just rely on polling the counter every 0.x seconds.

I don't think that the full scan would take many seconds, but reporting
(e.g. pr_err) could be slow somehow ...

> 
> Anyway, I thought how the counting and barriers might work when
> we update the global counter immediately. And I came up with
> the following:

Nice! That should be doing the right thing.

> 
> diff --git a/kernel/hung_task.c b/kernel/hung_task.c
> index 350093de0535..8bc043fbe89c 100644
> --- a/kernel/hung_task.c
> +++ b/kernel/hung_task.c
> @@ -302,15 +302,10 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
>   	int max_count = sysctl_hung_task_check_count;
>   	unsigned long last_break = jiffies;
>   	struct task_struct *g, *t;
> -	unsigned long total_count, this_round_count;
> +	unsigned long this_round_count;
>   	int need_warning = sysctl_hung_task_warnings;
>   	unsigned long si_mask = hung_task_si_mask;
>   
> -	/*
> -	 * The counter might get reset. Remember the initial value.
> -	 * Acquire prevents reordering task checks before this point.
> -	 */
> -	total_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
>   	/*
>   	 * If the system crashed already then all bets are off,
>   	 * do not report extra hung tasks:
> @@ -330,6 +325,13 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
>   		}
>   
>   		if (task_is_hung(t, timeout)) {
> +			/*
> +			 * Increment the global counter so that userspace could
> +			 * start migrating tasks ASAP. But count the current
> +			 * round separately because userspace could reset
> +			 * the global counter at any time.
> +			 */
> +			atomic_long_inc(&sysctl_hung_task_detect_count);


Atomic increment with relaxed ordering, which is good enough and works 
well, IIUC.


>   			this_round_count++;
>   			hung_task_info(t, timeout, this_round_count);
>   		}
> @@ -340,15 +342,6 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
>   	if (!this_round_count)
>   		return;
>   
> -	/*
> -	 * Do not count this round when the global counter has been reset
> -	 * during this check. Release ensures we see all hang details
> -	 * recorded during the scan.
> -	 */
> -	atomic_long_cmpxchg_release(&sysctl_hung_task_detect_count,
> -				    total_count, total_count +
> -				    this_round_count);
> -
>   	if (need_warning || hung_task_call_panic) {
>   		si_mask |= SYS_INFO_LOCKS;
>   
> 
> I am not sure of the comment above the increment is needed.
> Well, it might help anyone to understand the motivation without
> digging in the git log history.

Looks good to me. Could you send it as a follow-up patch?

Cheers,
Lance
[PATCH] hung_task: Increment the global counter immediately
Posted by Petr Mladek 4 days, 7 hours ago
A recent change made it possible to reset the global counter of hung tasks
via the sysctl interface. A potential race with the regular check has been
resolved by updating the global counter only once at the end of the check.

However, the hung task check can take a significant amount of time,
particularly when task information is being dumped to slow serial
consoles. Some users monitor this global counter to trigger immediate
migration of critical containers. Delaying the increment until the
full check completes postpones these high-priority rescue operations.

Update the global counter as soon as a hung task is detected. Since
the value is read asynchronously, a relaxed atomic operation is
sufficient.

Reported-by: Lance Yang <lance.yang@linux.dev>
Closes: https://lore.kernel.org/r/f239e00f-4282-408d-b172-0f9885f4b01b@linux.dev
Signed-off-by: Petr Mladek <pmladek@suse.com>
---
This is a followup patch for
https://lore.kernel.org/r/20260125135848.3356585-1-atomlin@atomlin.com

Note that I could not use commit IDs because the original
patchset is not in a stable tree yet. In fact, it seems
that it is not even in linux-next at the moment.

Best Regards,
Petr

 kernel/hung_task.c | 23 ++++++++---------------
 1 file changed, 8 insertions(+), 15 deletions(-)

diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 350093de0535..8bc043fbe89c 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -302,15 +302,10 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 	int max_count = sysctl_hung_task_check_count;
 	unsigned long last_break = jiffies;
 	struct task_struct *g, *t;
-	unsigned long total_count, this_round_count;
+	unsigned long this_round_count;
 	int need_warning = sysctl_hung_task_warnings;
 	unsigned long si_mask = hung_task_si_mask;
 
-	/*
-	 * The counter might get reset. Remember the initial value.
-	 * Acquire prevents reordering task checks before this point.
-	 */
-	total_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
 	/*
 	 * If the system crashed already then all bets are off,
 	 * do not report extra hung tasks:
@@ -330,6 +325,13 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 		}
 
 		if (task_is_hung(t, timeout)) {
+			/*
+			 * Increment the global counter so that userspace could
+			 * start migrating tasks ASAP. But count the current
+			 * round separately because userspace could reset
+			 * the global counter at any time.
+			 */
+			atomic_long_inc(&sysctl_hung_task_detect_count);
 			this_round_count++;
 			hung_task_info(t, timeout, this_round_count);
 		}
@@ -340,15 +342,6 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 	if (!this_round_count)
 		return;
 
-	/*
-	 * Do not count this round when the global counter has been reset
-	 * during this check. Release ensures we see all hang details
-	 * recorded during the scan.
-	 */
-	atomic_long_cmpxchg_release(&sysctl_hung_task_detect_count,
-				    total_count, total_count +
-				    this_round_count);
-
 	if (need_warning || hung_task_call_panic) {
 		si_mask |= SYS_INFO_LOCKS;
 
-- 
2.52.0
Re: [PATCH] hung_task: Increment the global counter immediately
Posted by Andrew Morton 4 days ago
On Wed, 4 Feb 2026 12:04:54 +0100 Petr Mladek <pmladek@suse.com> wrote:

> A recent change allowed to reset the global counter of hung tasks using
> the sysctl interface. A potential race with the regular check has been
> solved by updating the global counter only once at the end of the check.
> 
> However, the hung task check can take a significant amount of time,
> particularly when task information is being dumped to slow serial
> consoles. Some users monitor this global counter to trigger immediate
> migration of critical containers. Delaying the increment until the
> full check completes postpones these high-priority rescue operations.
> 
> Update the global counter as soon as a hung task is detected. Since
> the value is read asynchronously, a relaxed atomic operation is
> sufficient.
> 
> Reported-by: Lance Yang <lance.yang@linux.dev>
> Closes: https://lore.kernel.org/r/f239e00f-4282-408d-b172-0f9885f4b01b@linux.dev
> Signed-off-by: Petr Mladek <pmladek@suse.com>
> ---
> This is a followup patch for
> https://lore.kernel.org/r/20260125135848.3356585-1-atomlin@atomlin.com
> 
> Note that I could not use commit IDs because the original
> patchset is not in a stable tree yet. In fact, it seems
> that it is not even in linux-next at the moment.

Yes, I've gone into "fixes and trivial stuff only" mode, as we're at -rc8.

Aaron, please incorporate Petr's fix into v8 and resend towards the end
of the merge window?

Thanks.
Re: [PATCH] hung_task: Increment the global counter immediately
Posted by Aaron Tomlin 1 day, 21 hours ago
On Wed, Feb 04, 2026 at 10:05:49AM -0800, Andrew Morton wrote:
> On Wed, 4 Feb 2026 12:04:54 +0100 Petr Mladek <pmladek@suse.com> wrote:
> 
> > A recent change allowed to reset the global counter of hung tasks using
> > the sysctl interface. A potential race with the regular check has been
> > solved by updating the global counter only once at the end of the check.
> > 
> > However, the hung task check can take a significant amount of time,
> > particularly when task information is being dumped to slow serial
> > consoles. Some users monitor this global counter to trigger immediate
> > migration of critical containers. Delaying the increment until the
> > full check completes postpones these high-priority rescue operations.
> > 
> > Update the global counter as soon as a hung task is detected. Since
> > the value is read asynchronously, a relaxed atomic operation is
> > sufficient.
> > 
> > Reported-by: Lance Yang <lance.yang@linux.dev>
> > Closes: https://lore.kernel.org/r/f239e00f-4282-408d-b172-0f9885f4b01b@linux.dev
> > Signed-off-by: Petr Mladek <pmladek@suse.com>
> > ---
> > This is a followup patch for
> > https://lore.kernel.org/r/20260125135848.3356585-1-atomlin@atomlin.com
> > 
> > Note that I could not use commit IDs because the original
> > patchset is not in a stable tree yet. In fact, it seems
> > that it is not even in linux-next at the moment.
> 
> Yes, I've gone into "fixes and trivial stuff only" mode, as we're at -rc8.
> 
> Aaron, please incorporate Petr's fix into v8 and resend towards the end
> of the merge window?
> 
> Thanks.

Hi Andrew,

Absolutely.

Kind regards,
-- 
Aaron Tomlin
Re: [PATCH] hung_task: Increment the global counter immediately
Posted by Lance Yang 1 day, 12 hours ago

On 2026/2/7 04:54, Aaron Tomlin wrote:
> On Wed, Feb 04, 2026 at 10:05:49AM -0800, Andrew Morton wrote:
>> On Wed, 4 Feb 2026 12:04:54 +0100 Petr Mladek <pmladek@suse.com> wrote:
>>
>>> A recent change allowed to reset the global counter of hung tasks using
>>> the sysctl interface. A potential race with the regular check has been
>>> solved by updating the global counter only once at the end of the check.
>>>
>>> However, the hung task check can take a significant amount of time,
>>> particularly when task information is being dumped to slow serial
>>> consoles. Some users monitor this global counter to trigger immediate
>>> migration of critical containers. Delaying the increment until the
>>> full check completes postpones these high-priority rescue operations.
>>>
>>> Update the global counter as soon as a hung task is detected. Since
>>> the value is read asynchronously, a relaxed atomic operation is
>>> sufficient.
>>>
>>> Reported-by: Lance Yang <lance.yang@linux.dev>
>>> Closes: https://lore.kernel.org/r/f239e00f-4282-408d-b172-0f9885f4b01b@linux.dev
>>> Signed-off-by: Petr Mladek <pmladek@suse.com>
>>> ---
>>> This is a followup patch for
>>> https://lore.kernel.org/r/20260125135848.3356585-1-atomlin@atomlin.com
>>>
>>> Note that I could not use commit IDs because the original
>>> patchset is not in a stable tree yet. In fact, it seems
>>> that it is not even in linux-next at the moment.
>>
>> Yes, I've gone into "fixes and trivial stuff only" mode, as we're at -rc8.
>>
>> Aaron, please incorporate Petr's fix into v8 and resend towards the end
>> of the merge window?
>>
>> Thanks.
> 
> Hi Andrew,
> 
> Absolutely.

Don't forget to credit Petr - just saying :)
Re: [PATCH] hung_task: Increment the global counter immediately
Posted by Aaron Tomlin 4 days, 4 hours ago
On Wed, Feb 04, 2026 at 12:04:54PM +0100, Petr Mladek wrote:
> A recent change allowed to reset the global counter of hung tasks using
> the sysctl interface. A potential race with the regular check has been
> solved by updating the global counter only once at the end of the check.
> 
> However, the hung task check can take a significant amount of time,
> particularly when task information is being dumped to slow serial
> consoles. Some users monitor this global counter to trigger immediate
> migration of critical containers. Delaying the increment until the
> full check completes postpones these high-priority rescue operations.
> 
> Update the global counter as soon as a hung task is detected. Since
> the value is read asynchronously, a relaxed atomic operation is
> sufficient.
> 
> Reported-by: Lance Yang <lance.yang@linux.dev>
> Closes: https://lore.kernel.org/r/f239e00f-4282-408d-b172-0f9885f4b01b@linux.dev
> Signed-off-by: Petr Mladek <pmladek@suse.com>
> ---
> This is a followup patch for
> https://lore.kernel.org/r/20260125135848.3356585-1-atomlin@atomlin.com
> 
> Note that I could not use commit IDs because the original
> patchset is not in a stable tree yet. In fact, it seems
> that it is not even in linux-next at the moment.
> 
> Best Regards,
> Petr
> 
>  kernel/hung_task.c | 23 ++++++++---------------
>  1 file changed, 8 insertions(+), 15 deletions(-)
> 
> diff --git a/kernel/hung_task.c b/kernel/hung_task.c
> index 350093de0535..8bc043fbe89c 100644
> --- a/kernel/hung_task.c
> +++ b/kernel/hung_task.c
> @@ -302,15 +302,10 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
>  	int max_count = sysctl_hung_task_check_count;
>  	unsigned long last_break = jiffies;
>  	struct task_struct *g, *t;
> -	unsigned long total_count, this_round_count;
> +	unsigned long this_round_count;
>  	int need_warning = sysctl_hung_task_warnings;
>  	unsigned long si_mask = hung_task_si_mask;
>  
> -	/*
> -	 * The counter might get reset. Remember the initial value.
> -	 * Acquire prevents reordering task checks before this point.
> -	 */
> -	total_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
>  	/*
>  	 * If the system crashed already then all bets are off,
>  	 * do not report extra hung tasks:
> @@ -330,6 +325,13 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
>  		}
>  
>  		if (task_is_hung(t, timeout)) {
> +			/*
> +			 * Increment the global counter so that userspace could
> +			 * start migrating tasks ASAP. But count the current
> +			 * round separately because userspace could reset
> +			 * the global counter at any time.
> +			 */
> +			atomic_long_inc(&sysctl_hung_task_detect_count);
>  			this_round_count++;
>  			hung_task_info(t, timeout, this_round_count);
>  		}
> @@ -340,15 +342,6 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
>  	if (!this_round_count)
>  		return;
>  
> -	/*
> -	 * Do not count this round when the global counter has been reset
> -	 * during this check. Release ensures we see all hang details
> -	 * recorded during the scan.
> -	 */
> -	atomic_long_cmpxchg_release(&sysctl_hung_task_detect_count,
> -				    total_count, total_count +
> -				    this_round_count);
> -
>  	if (need_warning || hung_task_call_panic) {
>  		si_mask |= SYS_INFO_LOCKS;
>  
> -- 
> 2.52.0
> 

Agreed.
This is correct given the architectural shift from "Batched" to "Immediate"
updates.

Reviewed-by: Aaron Tomlin <atomlin@atomlin.com>

-- 
Aaron Tomlin
Re: [PATCH] hung_task: Increment the global counter immediately
Posted by Lance Yang 4 days, 7 hours ago

On 2026/2/4 19:04, Petr Mladek wrote:
> A recent change allowed to reset the global counter of hung tasks using
> the sysctl interface. A potential race with the regular check has been
> solved by updating the global counter only once at the end of the check.
> 
> However, the hung task check can take a significant amount of time,
> particularly when task information is being dumped to slow serial
> consoles. Some users monitor this global counter to trigger immediate
> migration of critical containers. Delaying the increment until the
> full check completes postpones these high-priority rescue operations.
> 
> Update the global counter as soon as a hung task is detected. Since
> the value is read asynchronously, a relaxed atomic operation is
> sufficient.
> 
> Reported-by: Lance Yang <lance.yang@linux.dev>
> Closes: https://lore.kernel.org/r/f239e00f-4282-408d-b172-0f9885f4b01b@linux.dev
> Signed-off-by: Petr Mladek <pmladek@suse.com>
> ---

Cool! Looks good to me:

Reviewed-by: Lance Yang <lance.yang@linux.dev>

> This is a followup patch for
> https://lore.kernel.org/r/20260125135848.3356585-1-atomlin@atomlin.com
> 
> Note that I could not use commit IDs because the original
> patchset is not in a stable tree yet. In fact, it seems
> that it is not even in linux-next at the moment.
> 
> Best Regards,
> Petr
> 
>   kernel/hung_task.c | 23 ++++++++---------------
>   1 file changed, 8 insertions(+), 15 deletions(-)
> 
> diff --git a/kernel/hung_task.c b/kernel/hung_task.c
> index 350093de0535..8bc043fbe89c 100644
> --- a/kernel/hung_task.c
> +++ b/kernel/hung_task.c
> @@ -302,15 +302,10 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
>   	int max_count = sysctl_hung_task_check_count;
>   	unsigned long last_break = jiffies;
>   	struct task_struct *g, *t;
> -	unsigned long total_count, this_round_count;
> +	unsigned long this_round_count;
>   	int need_warning = sysctl_hung_task_warnings;
>   	unsigned long si_mask = hung_task_si_mask;
>   
> -	/*
> -	 * The counter might get reset. Remember the initial value.
> -	 * Acquire prevents reordering task checks before this point.
> -	 */
> -	total_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
>   	/*
>   	 * If the system crashed already then all bets are off,
>   	 * do not report extra hung tasks:
> @@ -330,6 +325,13 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
>   		}
>   
>   		if (task_is_hung(t, timeout)) {
> +			/*
> +			 * Increment the global counter so that userspace could
> +			 * start migrating tasks ASAP. But count the current
> +			 * round separately because userspace could reset
> +			 * the global counter at any time.
> +			 */
> +			atomic_long_inc(&sysctl_hung_task_detect_count);
>   			this_round_count++;
>   			hung_task_info(t, timeout, this_round_count);
>   		}
> @@ -340,15 +342,6 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
>   	if (!this_round_count)
>   		return;
>   
> -	/*
> -	 * Do not count this round when the global counter has been reset
> -	 * during this check. Release ensures we see all hang details
> -	 * recorded during the scan.
> -	 */
> -	atomic_long_cmpxchg_release(&sysctl_hung_task_detect_count,
> -				    total_count, total_count +
> -				    this_round_count);
> -
>   	if (need_warning || hung_task_call_panic) {
>   		si_mask |= SYS_INFO_LOCKS;
>
Re: [v7 PATCH 1/2] hung_task: Refactor detection logic and atomicise detection count
Posted by Petr Mladek 6 days, 5 hours ago
On Sun 2026-01-25 08:58:47, Aaron Tomlin wrote:
> The check_hung_task() function currently conflates two distinct
> responsibilities: validating whether a task is hung and handling the
> subsequent reporting (printing warnings, triggering panics, or
> tracepoints).
> 
> This patch refactors the logic by introducing hung_task_info(), a
> function dedicated solely to reporting. The actual detection check,
> task_is_hung(), is hoisted into the primary loop within
> check_hung_uninterruptible_tasks(). This separation clearly decouples
> the mechanism of detection from the policy of reporting.
> 
> Furthermore, to facilitate future support for concurrent hung task
> detection, the global sysctl_hung_task_detect_count variable is
> converted from unsigned long to atomic_long_t. Consequently, the
> counting logic is updated to accumulate the number of hung tasks locally
> (this_round_count) during the iteration. The global counter is then
> updated atomically via atomic_long_cmpxchg_relaxed() once the loop
> concludes, rather than incrementally during the scan.
> 
> These changes are strictly preparatory and introduce no functional
> change to the system's runtime behaviour.
> 
> Signed-off-by: Aaron Tomlin <atomlin@atomlin.com>

LGTM. Feel free to use:

Reviewed-by: Petr Mladek <pmladek@suse.com>

Best Regards,
Petr
Re: [v7 PATCH 1/2] hung_task: Refactor detection logic and atomicise detection count
Posted by Masami Hiramatsu (Google) 6 days, 12 hours ago
On Sun, 25 Jan 2026 08:58:47 -0500
Aaron Tomlin <atomlin@atomlin.com> wrote:

> The check_hung_task() function currently conflates two distinct
> responsibilities: validating whether a task is hung and handling the
> subsequent reporting (printing warnings, triggering panics, or
> tracepoints).
> 
> This patch refactors the logic by introducing hung_task_info(), a
> function dedicated solely to reporting. The actual detection check,
> task_is_hung(), is hoisted into the primary loop within
> check_hung_uninterruptible_tasks(). This separation clearly decouples
> the mechanism of detection from the policy of reporting.
> 
> Furthermore, to facilitate future support for concurrent hung task
> detection, the global sysctl_hung_task_detect_count variable is
> converted from unsigned long to atomic_long_t. Consequently, the
> counting logic is updated to accumulate the number of hung tasks locally
> (this_round_count) during the iteration. The global counter is then
> updated atomically via atomic_long_cmpxchg_relaxed() once the loop
> concludes, rather than incrementally during the scan.
> 
> These changes are strictly preparatory and introduce no functional
> change to the system's runtime behaviour.
> 
> Signed-off-by: Aaron Tomlin <atomlin@atomlin.com>

Looks good to me.

Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>

Thanks,

> ---
>  kernel/hung_task.c | 58 ++++++++++++++++++++++++++--------------------
>  1 file changed, 33 insertions(+), 25 deletions(-)
> 
> diff --git a/kernel/hung_task.c b/kernel/hung_task.c
> index d2254c91450b..df10830ed9ef 100644
> --- a/kernel/hung_task.c
> +++ b/kernel/hung_task.c
> @@ -36,7 +36,7 @@ static int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
>  /*
>   * Total number of tasks detected as hung since boot:
>   */
> -static unsigned long __read_mostly sysctl_hung_task_detect_count;
> +static atomic_long_t sysctl_hung_task_detect_count = ATOMIC_LONG_INIT(0);
>  
>  /*
>   * Limit number of tasks checked in a batch.
> @@ -223,31 +223,29 @@ static inline void debug_show_blocker(struct task_struct *task, unsigned long ti
>  }
>  #endif
>  
> -static void check_hung_task(struct task_struct *t, unsigned long timeout,
> -		unsigned long prev_detect_count)
> +/**
> + * hung_task_info - Print diagnostic details for a hung task
> + * @t: Pointer to the detected hung task.
> + * @timeout: Timeout threshold for detecting hung tasks
> + * @this_round_count: Count of hung tasks detected in the current iteration
> + *
> + * Print structured information about the specified hung task, if warnings
> + * are enabled or if the panic batch threshold is exceeded.
> + */
> +static void hung_task_info(struct task_struct *t, unsigned long timeout,
> +			   unsigned long this_round_count)
>  {
> -	unsigned long total_hung_task;
> -
> -	if (!task_is_hung(t, timeout))
> -		return;
> -
> -	/*
> -	 * This counter tracks the total number of tasks detected as hung
> -	 * since boot.
> -	 */
> -	sysctl_hung_task_detect_count++;
> -
> -	total_hung_task = sysctl_hung_task_detect_count - prev_detect_count;
>  	trace_sched_process_hang(t);
>  
> -	if (sysctl_hung_task_panic && total_hung_task >= sysctl_hung_task_panic) {
> +	if (sysctl_hung_task_panic && this_round_count >= sysctl_hung_task_panic) {
>  		console_verbose();
>  		hung_task_call_panic = true;
>  	}
>  
>  	/*
> -	 * Ok, the task did not get scheduled for more than 2 minutes,
> -	 * complain:
> +	 * The given task did not get scheduled for more than
> +	 * CONFIG_DEFAULT_HUNG_TASK_TIMEOUT. Therefore, complain
> +	 * accordingly
>  	 */
>  	if (sysctl_hung_task_warnings || hung_task_call_panic) {
>  		if (sysctl_hung_task_warnings > 0)
> @@ -297,18 +295,18 @@ static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
>  
>  /*
>   * Check whether a TASK_UNINTERRUPTIBLE does not get woken up for
> - * a really long time (120 seconds). If that happens, print out
> - * a warning.
> + * a really long time. If that happens, print out a warning.
>   */
>  static void check_hung_uninterruptible_tasks(unsigned long timeout)
>  {
>  	int max_count = sysctl_hung_task_check_count;
>  	unsigned long last_break = jiffies;
>  	struct task_struct *g, *t;
> -	unsigned long prev_detect_count = sysctl_hung_task_detect_count;
> +	unsigned long total_count, this_round_count;
>  	int need_warning = sysctl_hung_task_warnings;
>  	unsigned long si_mask = hung_task_si_mask;
>  
> +	total_count = atomic_long_read(&sysctl_hung_task_detect_count);
>  	/*
>  	 * If the system crashed already then all bets are off,
>  	 * do not report extra hung tasks:
> @@ -316,10 +314,9 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
>  	if (test_taint(TAINT_DIE) || did_panic)
>  		return;
>  
> -
> +	this_round_count = 0;
>  	rcu_read_lock();
>  	for_each_process_thread(g, t) {
> -
>  		if (!max_count--)
>  			goto unlock;
>  		if (time_after(jiffies, last_break + HUNG_TASK_LOCK_BREAK)) {
> @@ -328,14 +325,25 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
>  			last_break = jiffies;
>  		}
>  
> -		check_hung_task(t, timeout, prev_detect_count);
> +		if (task_is_hung(t, timeout)) {
> +			this_round_count++;
> +			hung_task_info(t, timeout, this_round_count);
> +		}
>  	}
>   unlock:
>  	rcu_read_unlock();
>  
> -	if (!(sysctl_hung_task_detect_count - prev_detect_count))
> +	if (!this_round_count)
>  		return;
>  
> +	/*
> +	 * This counter tracks the total number of tasks detected as hung
> +	 * since boot.
> +	 */
> +	atomic_long_cmpxchg_relaxed(&sysctl_hung_task_detect_count,
> +				    total_count, total_count +
> +				    this_round_count);
> +
>  	if (need_warning || hung_task_call_panic) {
>  		si_mask |= SYS_INFO_LOCKS;
>  
> -- 
> 2.51.0
> 


-- 
Masami Hiramatsu (Google) <mhiramat@kernel.org>