Introduce support for writing to /proc/sys/kernel/hung_task_detect_count.
Writing a value of zero to this file atomically resets the counter of
detected hung tasks. This grants system administrators the ability to
clear the cumulative diagnostic history after resolving an incident,
simplifying monitoring without requiring a system restart.
Signed-off-by: Aaron Tomlin <atomlin@atomlin.com>
---
Documentation/admin-guide/sysctl/kernel.rst | 3 +-
kernel/hung_task.c | 82 ++++++++++++++++++---
2 files changed, 73 insertions(+), 12 deletions(-)
diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
index 239da22c4e28..68da4235225a 100644
--- a/Documentation/admin-guide/sysctl/kernel.rst
+++ b/Documentation/admin-guide/sysctl/kernel.rst
@@ -418,7 +418,8 @@ hung_task_detect_count
======================
Indicates the total number of tasks that have been detected as hung since
-the system boot.
+the system boot or since the counter was reset. The counter is zeroed when
+a value of 0 is written.
This file shows up if ``CONFIG_DETECT_HUNG_TASK`` is enabled.
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 00c3296fd692..3bc72a4e4032 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -17,6 +17,7 @@
#include <linux/export.h>
#include <linux/panic_notifier.h>
#include <linux/sysctl.h>
+#include <linux/atomic.h>
#include <linux/suspend.h>
#include <linux/utsname.h>
#include <linux/sched/signal.h>
@@ -36,7 +37,7 @@ static int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
/*
* Total number of tasks detected as hung since boot:
*/
-static unsigned long __read_mostly sysctl_hung_task_detect_count;
+static atomic_long_t sysctl_hung_task_detect_count = ATOMIC_LONG_INIT(0);
/*
* Limit number of tasks checked in a batch.
@@ -246,20 +247,26 @@ static inline void hung_task_diagnostics(struct task_struct *t)
}
static void check_hung_task(struct task_struct *t, unsigned long timeout,
- unsigned long prev_detect_count)
+ unsigned long prev_detect_count)
{
- unsigned long total_hung_task;
+ unsigned long total_hung_task, cur_detect_count;
if (!task_is_hung(t, timeout))
return;
/*
* This counter tracks the total number of tasks detected as hung
- * since boot.
+ * since boot. If a reset occurred during the scan, we treat the
+ * current count as the new delta to avoid an underflow error.
+ * Ensure hang details are globally visible before the counter
+ * update.
*/
- sysctl_hung_task_detect_count++;
+ cur_detect_count = atomic_long_inc_return_release(&sysctl_hung_task_detect_count);
+ if (cur_detect_count >= prev_detect_count)
+ total_hung_task = cur_detect_count - prev_detect_count;
+ else
+ total_hung_task = cur_detect_count;
- total_hung_task = sysctl_hung_task_detect_count - prev_detect_count;
trace_sched_process_hang(t);
if (sysctl_hung_task_panic && total_hung_task >= sysctl_hung_task_panic) {
@@ -318,10 +325,12 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
int max_count = sysctl_hung_task_check_count;
unsigned long last_break = jiffies;
struct task_struct *g, *t;
- unsigned long prev_detect_count = sysctl_hung_task_detect_count;
+ unsigned long cur_detect_count, prev_detect_count, delta;
int need_warning = sysctl_hung_task_warnings;
unsigned long si_mask = hung_task_si_mask;
+ /* Acquire prevents reordering task checks before this point. */
+ prev_detect_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
/*
* If the system crashed already then all bets are off,
* do not report extra hung tasks:
@@ -346,7 +355,14 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
unlock:
rcu_read_unlock();
- if (!(sysctl_hung_task_detect_count - prev_detect_count))
+ /* Ensures we see all hang details recorded during the scan. */
+ cur_detect_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
+ if (cur_detect_count < prev_detect_count)
+ delta = cur_detect_count;
+ else
+ delta = cur_detect_count - prev_detect_count;
+
+ if (!delta)
return;
if (need_warning || hung_task_call_panic) {
@@ -371,6 +387,51 @@ static long hung_timeout_jiffies(unsigned long last_checked,
}
#ifdef CONFIG_SYSCTL
+
+/**
+ * proc_dohung_task_detect_count - proc handler for hung_task_detect_count
+ * @table: Pointer to the struct ctl_table definition for this proc entry
+ * @dir: Flag indicating the operation
+ * @buffer: User space buffer for data transfer
+ * @lenp: Pointer to the length of the data being transferred
+ * @ppos: Pointer to the current file offset
+ *
+ * This handler is used for reading the current hung task detection count
+ * and for resetting it to zero when a write operation is performed using a
+ * zero value only. Returns 0 on success or a negative error code on
+ * failure.
+ */
+static int proc_dohung_task_detect_count(const struct ctl_table *table, int dir,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ unsigned long detect_count;
+ struct ctl_table proxy_table;
+ int err;
+
+ proxy_table = *table;
+ proxy_table.data = &detect_count;
+
+ if (SYSCTL_KERN_TO_USER(dir)) {
+ detect_count = atomic_long_read(&sysctl_hung_task_detect_count);
+
+ return proc_doulongvec_minmax(&proxy_table, dir, buffer, lenp, ppos);
+ }
+
+ err = proc_doulongvec_minmax(&proxy_table, dir, buffer, lenp, ppos);
+ if (err < 0)
+ return err;
+
+ if (SYSCTL_USER_TO_KERN(dir)) {
+ /* The only valid value for clearing is zero. */
+ if (detect_count)
+ return -EINVAL;
+ atomic_long_set(&sysctl_hung_task_detect_count, 0);
+ }
+
+ *ppos += *lenp;
+ return err;
+}
+
/*
* Process updating of timeout sysctl
*/
@@ -451,10 +512,9 @@ static const struct ctl_table hung_task_sysctls[] = {
},
{
.procname = "hung_task_detect_count",
- .data = &sysctl_hung_task_detect_count,
.maxlen = sizeof(unsigned long),
- .mode = 0444,
- .proc_handler = proc_doulongvec_minmax,
+ .mode = 0644,
+ .proc_handler = proc_dohung_task_detect_count,
},
{
.procname = "hung_task_sys_info",
--
2.51.0
On Tue 2025-12-30 19:41:25, Aaron Tomlin wrote:
> Introduce support for writing to /proc/sys/kernel/hung_task_detect_count.
>
> Writing a value of zero to this file atomically resets the counter of
> detected hung tasks. This grants system administrators the ability to
> clear the cumulative diagnostic history after resolving an incident,
> simplifying monitoring without requiring a system restart.
> --- a/kernel/hung_task.c
> +++ b/kernel/hung_task.c
> @@ -36,7 +37,7 @@ static int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
> /*
> * Total number of tasks detected as hung since boot:
> */
> -static unsigned long __read_mostly sysctl_hung_task_detect_count;
> +static atomic_long_t sysctl_hung_task_detect_count = ATOMIC_LONG_INIT(0);
>
> /*
> * Limit number of tasks checked in a batch.
> @@ -246,20 +247,26 @@ static inline void hung_task_diagnostics(struct task_struct *t)
> }
>
> static void check_hung_task(struct task_struct *t, unsigned long timeout,
> - unsigned long prev_detect_count)
> + unsigned long prev_detect_count)
> {
> - unsigned long total_hung_task;
> + unsigned long total_hung_task, cur_detect_count;
>
> if (!task_is_hung(t, timeout))
> return;
>
> /*
> * This counter tracks the total number of tasks detected as hung
> - * since boot.
> + * since boot. If a reset occurred during the scan, we treat the
> + * current count as the new delta to avoid an underflow error.
> + * Ensure hang details are globally visible before the counter
> + * update.
> */
> - sysctl_hung_task_detect_count++;
> + cur_detect_count = atomic_long_inc_return_release(&sysctl_hung_task_detect_count);
The _release() feels a bit weird here because the counter might
get incremented more times during one scan.
IMHO, it should be perfectly fine to use the _relaxed version here
because it is in the middle of the acquire/release, see below.
The important thing here is that the load/modify/store operation
is done atomically.
> + if (cur_detect_count >= prev_detect_count)
> + total_hung_task = cur_detect_count - prev_detect_count;
> + else
> + total_hung_task = cur_detect_count;
>
> - total_hung_task = sysctl_hung_task_detect_count - prev_detect_count;
> trace_sched_process_hang(t);
>
> if (sysctl_hung_task_panic && total_hung_task >= sysctl_hung_task_panic) {
> @@ -318,10 +325,12 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
> int max_count = sysctl_hung_task_check_count;
> unsigned long last_break = jiffies;
> struct task_struct *g, *t;
> - unsigned long prev_detect_count = sysctl_hung_task_detect_count;
> + unsigned long cur_detect_count, prev_detect_count, delta;
> int need_warning = sysctl_hung_task_warnings;
> unsigned long si_mask = hung_task_si_mask;
>
> + /* Acquire prevents reordering task checks before this point. */
> + prev_detect_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
This value is read before the scan started => _acquire
semantic/barrier fits here.
> /*
> * If the system crashed already then all bets are off,
> * do not report extra hung tasks:
> @@ -346,7 +355,14 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
> unlock:
> rcu_read_unlock();
>
> - if (!(sysctl_hung_task_detect_count - prev_detect_count))
> + /* Ensures we see all hang details recorded during the scan. */
> + cur_detect_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
This value is read at the end of the scan => _release
semantic/barrier should be here.
> + if (cur_detect_count < prev_detect_count)
> + delta = cur_detect_count;
> + else
> + delta = cur_detect_count - prev_detect_count;
> +
> + if (!delta)
> return;
>
> if (need_warning || hung_task_call_panic) {
Otherwise, I do not have anything more to add. I agree with the other
proposals, for example:
+ remove 1st patch
+ split 2nd patch into two
+ changes in the sysctl code proposed by Joel
Best Regards,
Petr
On Thu, Jan 08, 2026 at 03:41:28PM +0100, Petr Mladek wrote:
> The _release() feels a bit weird here because the counter might
> get incremented more times during one scan.
>
> IMHO, it should be perfectly fine to use the _relaxed version here
> because it is in the middle of the acquire/release, see below.
> The important thing here is that the load/modify/store operation
> is done atomically.
Hi Petr,
Thank you for your review and the detailed feedback. I agree with your
assessment regarding the counter update within check_hung_task(). The
_release semantics are indeed unnecessary in that specific context; I shall
switch to atomic_long_inc_return_relaxed() as the atomicity of the
operation suffices there.
> > -	if (!(sysctl_hung_task_detect_count - prev_detect_count))
> > +	/* Ensures we see all hang details recorded during the scan. */
> > +	cur_detect_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
>
> This value is read at the end of the scan => _release
> semantic/barrier should be here.
Since we are performing a read/load operation here to capture the current
state, atomic_long_read_acquire() is the correct primitive. It ensures that
if we observe the updated counter, we are also guaranteed to observe any
associated data (such as the hang details) that were "published" by the
writer before the counter was updated. This approach is preferable to
inserting a full memory barrier (smp_mb()), as it provides the necessary
ordering guarantees with less overhead.
> Otherwise, I do not have anything more to add. I agree with the other
> proposals, for example:
>
>   + remove 1st patch
>   + split 2nd patch into two
>   + changes in the sysctl code proposed by Joel
I entirely agree with your other points regarding the patch structure. I
shall discard the first patch, split the second into distinct logical
changes, and incorporate Joel's suggestions for the sysctl code. I will
prepare a follow-up patch series incorporating these changes.
Kind regards,
--
Aaron Tomlin
On 2026/1/8 22:41, Petr Mladek wrote:
> On Tue 2025-12-30 19:41:25, Aaron Tomlin wrote:
>> Introduce support for writing to /proc/sys/kernel/hung_task_detect_count.
>>
>> Writing a value of zero to this file atomically resets the counter of
>> detected hung tasks. This grants system administrators the ability to
>> clear the cumulative diagnostic history after resolving an incident,
>> simplifying monitoring without requiring a system restart.
>
>> --- a/kernel/hung_task.c
>> +++ b/kernel/hung_task.c
>> @@ -36,7 +37,7 @@ static int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
>> /*
>> * Total number of tasks detected as hung since boot:
>> */
>> -static unsigned long __read_mostly sysctl_hung_task_detect_count;
>> +static atomic_long_t sysctl_hung_task_detect_count = ATOMIC_LONG_INIT(0);
>>
>> /*
>> * Limit number of tasks checked in a batch.
>> @@ -246,20 +247,26 @@ static inline void hung_task_diagnostics(struct task_struct *t)
>> }
>>
>> static void check_hung_task(struct task_struct *t, unsigned long timeout,
>> - unsigned long prev_detect_count)
>> + unsigned long prev_detect_count)
>> {
>> - unsigned long total_hung_task;
>> + unsigned long total_hung_task, cur_detect_count;
>>
>> if (!task_is_hung(t, timeout))
>> return;
>>
>> /*
>> * This counter tracks the total number of tasks detected as hung
>> - * since boot.
>> + * since boot. If a reset occurred during the scan, we treat the
>> + * current count as the new delta to avoid an underflow error.
>> + * Ensure hang details are globally visible before the counter
>> + * update.
>> */
>> - sysctl_hung_task_detect_count++;
>> + cur_detect_count = atomic_long_inc_return_release(&sysctl_hung_task_detect_count);
>
> The _release() feels a bit weird here because the counter might
> get incremented more times during one scan.
>
> IMHO, it should be perfectly fine to use the _relaxed version here
> because it is in the middle of the acquire/release, see below.
> The important thing here is that the load/modify/store operation
> is done atomically.
Right, we only need atomicity here, not the ordering guarantee :)
>
>> + if (cur_detect_count >= prev_detect_count)
>> + total_hung_task = cur_detect_count - prev_detect_count;
>> + else
>> + total_hung_task = cur_detect_count;
>>
>> - total_hung_task = sysctl_hung_task_detect_count - prev_detect_count;
>> trace_sched_process_hang(t);
>>
>> if (sysctl_hung_task_panic && total_hung_task >= sysctl_hung_task_panic) {
>> @@ -318,10 +325,12 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
>> int max_count = sysctl_hung_task_check_count;
>> unsigned long last_break = jiffies;
>> struct task_struct *g, *t;
>> - unsigned long prev_detect_count = sysctl_hung_task_detect_count;
>> + unsigned long cur_detect_count, prev_detect_count, delta;
>> int need_warning = sysctl_hung_task_warnings;
>> unsigned long si_mask = hung_task_si_mask;
>>
>> + /* Acquire prevents reordering task checks before this point. */
>> + prev_detect_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
>
> This value is read before the scan started => _acquire
> semantic/barrier fits here.
>
>> /*
>> * If the system crashed already then all bets are off,
>> * do not report extra hung tasks:
>> @@ -346,7 +355,14 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
>> unlock:
>> rcu_read_unlock();
>>
>> - if (!(sysctl_hung_task_detect_count - prev_detect_count))
>> + /* Ensures we see all hang details recorded during the scan. */
>> + cur_detect_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
>
> This value is read at the end of the scan => _release
> semantic/barrier should be here.
Seems like _acquire is still correct here, because it is a load.
_release semantics apply to stores, while _acquire on a load
ensures subsequent memory accesses are not reordered before it.
Or smp_mb()?
In the same thread, atomic operations on the same variable are not
reordered with respect to each other, even the _relaxed variant
preserves program order for that variable, IIRC.
So the increment will always complete before the final read in
program order, and the read will see the updated value (unless
another CPU resets it concurrently, which is a logical race, not
a reordering issue).
So, it would be:
prev = atomic_long_read_acquire(&counter); // scan start
...
cur = atomic_long_inc_return_relaxed(&counter); // during scan
...
cur = atomic_long_read_acquire(&counter); // scan end
The first _acquire ensures no task-checking code is reordered
before the start read, the middle increment is just atomic
without extra barriers, and the final _acquire makes sure we
observe all hang details before computing the delta.
That said, I also see the value in using _release or smp_mb()
here to pair with the _acquire at the start. Making the
ordering semantics clearer to readers.
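For anyone following along, the generic pairing we are talking about is
roughly the userspace C11 sketch below (illustrative only, not kernel code;
hang_details just stands in for whatever data is published before the
counter is bumped): if the acquire load sees the release increment, it is
also guaranteed to see the data written before it.

#include <pthread.h>
#include <stdatomic.h>
#include <assert.h>
#include <stdio.h>

static long hang_details;	/* stand-in for data published before the bump */
static atomic_long counter;	/* stand-in for sysctl_hung_task_detect_count */

static void *writer(void *arg)
{
	hang_details = 42;						/* publish the data...  */
	atomic_fetch_add_explicit(&counter, 1, memory_order_release);	/* ...then bump counter */
	return NULL;
}

static void *reader(void *arg)
{
	/* If this acquire load observes the bump, hang_details == 42 is guaranteed. */
	if (atomic_load_explicit(&counter, memory_order_acquire))
		assert(hang_details == 42);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	printf("counter=%ld\n", atomic_load(&counter));
	return 0;
}

Build with "gcc -O2 -pthread"; the assertion can never fire.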
Cheers,
Lance
>
>> + if (cur_detect_count < prev_detect_count)
>> + delta = cur_detect_count;
>> + else
>> + delta = cur_detect_count - prev_detect_count;
>> +
>> + if (!delta)
>> return;
>>
>> if (need_warning || hung_task_call_panic) {
>
> Otherwise, I do not have anything more to add. I agree with the other
> proposals, for example:
>
> + remove 1st patch
> + split 2nd patch into two
> + changes in the sysctl code proposed by Joel
>
> Best Regards,
> Petr
On Fri 2026-01-09 21:50:20, Lance Yang wrote:
>
>
> On 2026/1/8 22:41, Petr Mladek wrote:
> > On Tue 2025-12-30 19:41:25, Aaron Tomlin wrote:
> > > Introduce support for writing to /proc/sys/kernel/hung_task_detect_count.
> > >
> > > Writing a value of zero to this file atomically resets the counter of
> > > detected hung tasks. This grants system administrators the ability to
> > > clear the cumulative diagnostic history after resolving an incident,
> > > simplifying monitoring without requiring a system restart.
> >
> > > --- a/kernel/hung_task.c
> > > +++ b/kernel/hung_task.c
> > > @@ -346,7 +355,14 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
> > > unlock:
> > > rcu_read_unlock();
> > > - if (!(sysctl_hung_task_detect_count - prev_detect_count))
> > > + /* Ensures we see all hang details recorded during the scan. */
> > > + cur_detect_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
> >
> > This value is read at the end of the scan => _release
> > semantic/barrier should be here.
>
> Seems like _acquire is still correct here, because it is a load.
>
> _release semantics apply to stores, while _acquire on a load
> ensures subsequent memory accesses are not reordered before it.
Right!
> Or smp_mb()?
>
> In the same thread, atomic operations on the same variable are not
> reordered with respect to each other, even the _relaxed variant
> preserves program order for that variable, IIRC.
>
> So the increment will always complete before the final read in
> program order, and the read will see the updated value (unless
> another CPU resets it concurrently, which is a logical race, not
> a reordering issue).
>
> So, it would be:
>
> prev = atomic_long_read_acquire(&counter); // scan start
> ...
> cur = atomic_long_inc_return_relaxed(&counter); // during scan
> ...
> cur = atomic_long_read_acquire(&counter); // scan end
>
> The first _acquire ensures no task-checking code is reordered
> before the start read, the middle increment is just atomic
> without extra barriers, and the final _acquire makes sure we
> observe all hang details before computing the delta.
The acquire/relaxed/acquire semantic looks weird. The problem is
that we do not use the counter as a lock.
I thought about a sane approach and the following came to my
mind:
From c28b74c35d653f527aa9017c32630ad08180fb4e Mon Sep 17 00:00:00 2001
From: Petr Mladek <pmladek@suse.com>
Date: Mon, 12 Jan 2026 14:00:52 +0100
Subject: [POC] hung_task: Update the global counter using a proper
acquire/release semantic
The global counter of hung tasks might get reset when the check
is in progress. Also the number of hung tasks detected in the current
round is important to decide whether panic() is needed or not.
Handle races by:
1. Remember the total counter at the beginning of the check.
2. Count the current round in a local variable.
3. Update the total counter only when the value has not been modified
during the check.
Note that this is only compile tested.
Signed-off-by: Petr Mladek <pmladek@suse.com>
---
kernel/hung_task.c | 53 +++++++++++++++++-----------------------------
1 file changed, 20 insertions(+), 33 deletions(-)
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 3bc72a4e4032..c939cd3d8a2c 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -246,30 +246,12 @@ static inline void hung_task_diagnostics(struct task_struct *t)
pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\" disables this message.\n");
}
-static void check_hung_task(struct task_struct *t, unsigned long timeout,
- unsigned long prev_detect_count)
+static void hung_task_info(struct task_struct *t, unsigned long timeout,
+ unsigned long this_round_count)
{
- unsigned long total_hung_task, cur_detect_count;
-
- if (!task_is_hung(t, timeout))
- return;
-
- /*
- * This counter tracks the total number of tasks detected as hung
- * since boot. If a reset occurred during the scan, we treat the
- * current count as the new delta to avoid an underflow error.
- * Ensure hang details are globally visible before the counter
- * update.
- */
- cur_detect_count = atomic_long_inc_return_release(&sysctl_hung_task_detect_count);
- if (cur_detect_count >= prev_detect_count)
- total_hung_task = cur_detect_count - prev_detect_count;
- else
- total_hung_task = cur_detect_count;
-
trace_sched_process_hang(t);
- if (sysctl_hung_task_panic && total_hung_task >= sysctl_hung_task_panic) {
+ if (sysctl_hung_task_panic && this_round_count >= sysctl_hung_task_panic) {
console_verbose();
hung_task_call_panic = true;
}
@@ -325,12 +307,13 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
int max_count = sysctl_hung_task_check_count;
unsigned long last_break = jiffies;
struct task_struct *g, *t;
- unsigned long cur_detect_count, prev_detect_count, delta;
+ unsigned long total_count, this_round_count;
int need_warning = sysctl_hung_task_warnings;
unsigned long si_mask = hung_task_si_mask;
- /* Acquire prevents reordering task checks before this point. */
- prev_detect_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
+ /* The counter might get reset. Remember the initial value. */
+ total_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
+
/*
* If the system crashed already then all bets are off,
* do not report extra hung tasks:
@@ -339,6 +322,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
return;
+ this_round_count = 0UL;
rcu_read_lock();
for_each_process_thread(g, t) {
@@ -350,21 +334,24 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
last_break = jiffies;
}
- check_hung_task(t, timeout, prev_detect_count);
+ if (task_is_hung(t, timeout)) {
+ this_round_count++;
+ hung_task_info(t, timeout, this_round_count);
+ }
}
unlock:
rcu_read_unlock();
- /* Ensures we see all hang details recorded during the scan. */
- cur_detect_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
- if (cur_detect_count < prev_detect_count)
- delta = cur_detect_count;
- else
- delta = cur_detect_count - prev_detect_count;
-
- if (!delta)
+ if (!this_round_count)
return;
+ /*
+ * Do not count this round when the global counter has been reset
+ * during this check.
+ */
+ atomic_long_cmpxchg_release(&sysctl_hung_task_detect_count, total_count,
+ total_count + this_round_count);
+
if (need_warning || hung_task_call_panic) {
si_mask |= SYS_INFO_LOCKS;
--
2.52.0
It is just a POC. Feel free to do the refactoring another way.
Best Regards,
Petr
On 2026/1/12 21:13, Petr Mladek wrote:
> On Fri 2026-01-09 21:50:20, Lance Yang wrote:
>>
>>
>> On 2026/1/8 22:41, Petr Mladek wrote:
>>> On Tue 2025-12-30 19:41:25, Aaron Tomlin wrote:
>>>> Introduce support for writing to /proc/sys/kernel/hung_task_detect_count.
>>>>
>>>> Writing a value of zero to this file atomically resets the counter of
>>>> detected hung tasks. This grants system administrators the ability to
>>>> clear the cumulative diagnostic history after resolving an incident,
>>>> simplifying monitoring without requiring a system restart.
>>>
>>>> --- a/kernel/hung_task.c
>>>> +++ b/kernel/hung_task.c
>>>> @@ -346,7 +355,14 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
>>>> unlock:
>>>> rcu_read_unlock();
>>>> - if (!(sysctl_hung_task_detect_count - prev_detect_count))
>>>> + /* Ensures we see all hang details recorded during the scan. */
>>>> + cur_detect_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
>>>
>>> This value is read at the end of the scan => _release
>>> semantic/barrier should be here.
>>
>> Seems like _acquire is still correct here, because it is a load.
>>
>> _release semantics apply to stores, while _acquire on a load
>> ensures subsequent memory accesses are not reordered before it.
>
> Right!
>
>> Or smp_mb()?
>>
>> In the same thread, atomic operations on the same variable are not
>> reordered with respect to each other, even the _relaxed variant
>> preserves program order for that variable, IIRC.
>>
>> So the increment will always complete before the final read in
>> program order, and the read will see the updated value (unless
>> another CPU resets it concurrently, which is a logical race, not
>> a reordering issue).
>>
>> So, it would be:
>>
>> prev = atomic_long_read_acquire(&counter); // scan start
>> ...
>> cur = atomic_long_inc_return_relaxed(&counter); // during scan
>> ...
>> cur = atomic_long_read_acquire(&counter); // scan end
>>
>> The first _acquire ensures no task-checking code is reordered
>> before the start read, the middle increment is just atomic
>> without extra barriers, and the final _acquire makes sure we
>> observe all hang details before computing the delta.
>
> The acquire/relaxed/acquire semantic looks weird. The problem is
> that we do not use the counter as a lock.
>
> I thought about a sane approach and the following came to my
> mind:
>
> From c28b74c35d653f527aa9017c32630ad08180fb4e Mon Sep 17 00:00:00 2001
> From: Petr Mladek <pmladek@suse.com>
> Date: Mon, 12 Jan 2026 14:00:52 +0100
> Subject: [POC] hung_task: Update the global counter using a proper
> acquire/release semantic
>
> The global counter of hung tasks might get reset when the check
> is in progress. Also the number of hung tasks detected in the current
> round is important to decide whether panic() is needed or not.
>
> Handle races by:
>
> 1. Remember the total counter at the beginning of the check.
> 2. Count the current round in a local variable.
> 3. Update the total counter only when the value has not been modified
> during the check.
Cool!
>
> Note that this is only compile tested.
>
> Signed-off-by: Petr Mladek <pmladek@suse.com>
> ---
> kernel/hung_task.c | 53 +++++++++++++++++-----------------------------
> 1 file changed, 20 insertions(+), 33 deletions(-)
>
> diff --git a/kernel/hung_task.c b/kernel/hung_task.c
> index 3bc72a4e4032..c939cd3d8a2c 100644
> --- a/kernel/hung_task.c
> +++ b/kernel/hung_task.c
> @@ -246,30 +246,12 @@ static inline void hung_task_diagnostics(struct task_struct *t)
> pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\" disables this message.\n");
> }
>
> -static void check_hung_task(struct task_struct *t, unsigned long timeout,
> - unsigned long prev_detect_count)
> +static void hung_task_info(struct task_struct *t, unsigned long timeout,
> + unsigned long this_round_count)
> {
> - unsigned long total_hung_task, cur_detect_count;
> -
> - if (!task_is_hung(t, timeout))
> - return;
> -
> - /*
> - * This counter tracks the total number of tasks detected as hung
> - * since boot. If a reset occurred during the scan, we treat the
> - * current count as the new delta to avoid an underflow error.
> - * Ensure hang details are globally visible before the counter
> - * update.
> - */
> - cur_detect_count = atomic_long_inc_return_release(&sysctl_hung_task_detect_count);
> - if (cur_detect_count >= prev_detect_count)
> - total_hung_task = cur_detect_count - prev_detect_count;
> - else
> - total_hung_task = cur_detect_count;
> -
> trace_sched_process_hang(t);
>
> - if (sysctl_hung_task_panic && total_hung_task >= sysctl_hung_task_panic) {
> + if (sysctl_hung_task_panic && this_round_count >= sysctl_hung_task_panic) {
> console_verbose();
> hung_task_call_panic = true;
> }
> @@ -325,12 +307,13 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
> int max_count = sysctl_hung_task_check_count;
> unsigned long last_break = jiffies;
> struct task_struct *g, *t;
> - unsigned long cur_detect_count, prev_detect_count, delta;
> + unsigned long total_count, this_round_count;
> int need_warning = sysctl_hung_task_warnings;
> unsigned long si_mask = hung_task_si_mask;
>
> - /* Acquire prevents reordering task checks before this point. */
> - prev_detect_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
> + /* The counter might get reset. Remember the initial value. */
> + total_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
> +
> /*
> * If the system crashed already then all bets are off,
> * do not report extra hung tasks:
> @@ -339,6 +322,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
> return;
>
>
> + this_round_count = 0UL;
> rcu_read_lock();
> for_each_process_thread(g, t) {
>
> @@ -350,21 +334,24 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
> last_break = jiffies;
> }
>
> - check_hung_task(t, timeout, prev_detect_count);
> + if (task_is_hung(t, timeout)) {
> + this_round_count++;
> + hung_task_info(t, timeout, this_round_count);
> + }
> }
> unlock:
> rcu_read_unlock();
>
> - /* Ensures we see all hang details recorded during the scan. */
> - cur_detect_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
> - if (cur_detect_count < prev_detect_count)
> - delta = cur_detect_count;
> - else
> - delta = cur_detect_count - prev_detect_count;
> -
> - if (!delta)
> + if (!this_round_count)
> return;
>
> + /*
> + * Do not count this round when the global counter has been reset
> + * during this check.
> + */
> + atomic_long_cmpxchg_release(&sysctl_hung_task_detect_count, total_count,
> + total_count + this_round_count);
> +
> if (need_warning || hung_task_call_panic) {
> si_mask |= SYS_INFO_LOCKS;
>
In general, the POC makes a lot of sense ;)
The acquire/release pairing is now straightforward:
- _read_acquire at start
- local counting (no atomics)
- _cmpxchg_release at end
And yeah, if cmpxchg fails, we simply do not count this round,
which is reasonable behavior.
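Putting it in one place, the resulting flow would be roughly the following
(condensed from the POC diff above; hung_task_info() and task_is_hung() are
the POC's helpers, and the batching/lock-break logic is omitted):

	unsigned long total_count, this_round_count = 0;

	/* Snapshot the counter; a concurrent sysctl write may reset it to 0. */
	total_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);

	rcu_read_lock();
	for_each_process_thread(g, t) {
		if (task_is_hung(t, timeout)) {
			this_round_count++;
			hung_task_info(t, timeout, this_round_count);
		}
	}
	rcu_read_unlock();

	if (!this_round_count)
		return;

	/*
	 * Publish this round only if nobody reset the counter meanwhile;
	 * on a concurrent reset the cmpxchg fails and the round is dropped.
	 */
	atomic_long_cmpxchg_release(&sysctl_hung_task_detect_count, total_count,
				    total_count + this_round_count);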
So, Aaron, can you take care of refactoring/testing based on
Petr's approach?
Cheers,
Lance
On Mon, Jan 12, 2026 at 10:43:35PM +0800, Lance Yang wrote:
> In general, the POC makes a lot of sense ;)
>
> The acquire/release pairing is now straightforward:
> - _read_acquire at start
> - local counting (no atomics)
> - _cmpxchg_release at end
>
> And yeah, if cmpxchg fails, we simply do not count this round,
> which is reasonable behavior.
>
> So, Aaron, can you take care of refactoring/testing based on
> Petr's approach?
Hi Lance,
Sure!
--
Aaron Tomlin
On Tue, Dec 30, 2025 at 07:41:25PM -0500, Aaron Tomlin wrote:
> Introduce support for writing to /proc/sys/kernel/hung_task_detect_count.
>
> Writing a value of zero to this file atomically resets the counter of
> detected hung tasks. This grants system administrators the ability to
> clear the cumulative diagnostic history after resolving an incident,
> simplifying monitoring without requiring a system restart.
>
> Signed-off-by: Aaron Tomlin <atomlin@atomlin.com>
> ---
> Documentation/admin-guide/sysctl/kernel.rst | 3 +-
> kernel/hung_task.c | 82 ++++++++++++++++++---
> 2 files changed, 73 insertions(+), 12 deletions(-)
>
> diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
> index 239da22c4e28..68da4235225a 100644
> --- a/Documentation/admin-guide/sysctl/kernel.rst
> +++ b/Documentation/admin-guide/sysctl/kernel.rst
> @@ -418,7 +418,8 @@ hung_task_detect_count
> ======================
>
> Indicates the total number of tasks that have been detected as hung since
> -the system boot.
> +the system boot or since the counter was reset. The counter is zeroed when
> +a value of 0 is written.
>
> This file shows up if ``CONFIG_DETECT_HUNG_TASK`` is enabled.
>
> diff --git a/kernel/hung_task.c b/kernel/hung_task.c
> index 00c3296fd692..3bc72a4e4032 100644
> --- a/kernel/hung_task.c
> +++ b/kernel/hung_task.c
> @@ -17,6 +17,7 @@
> #include <linux/export.h>
> #include <linux/panic_notifier.h>
> #include <linux/sysctl.h>
> +#include <linux/atomic.h>
> #include <linux/suspend.h>
> #include <linux/utsname.h>
> #include <linux/sched/signal.h>
> @@ -36,7 +37,7 @@ static int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
> /*
> * Total number of tasks detected as hung since boot:
> */
> -static unsigned long __read_mostly sysctl_hung_task_detect_count;
> +static atomic_long_t sysctl_hung_task_detect_count = ATOMIC_LONG_INIT(0);
>
> /*
> * Limit number of tasks checked in a batch.
> @@ -246,20 +247,26 @@ static inline void hung_task_diagnostics(struct task_struct *t)
> }
>
> static void check_hung_task(struct task_struct *t, unsigned long timeout,
> - unsigned long prev_detect_count)
> + unsigned long prev_detect_count)
> {
> - unsigned long total_hung_task;
> + unsigned long total_hung_task, cur_detect_count;
>
> if (!task_is_hung(t, timeout))
> return;
>
> /*
> * This counter tracks the total number of tasks detected as hung
> - * since boot.
> + * since boot. If a reset occurred during the scan, we treat the
> + * current count as the new delta to avoid an underflow error.
> + * Ensure hang details are globally visible before the counter
> + * update.
> */
> - sysctl_hung_task_detect_count++;
> + cur_detect_count = atomic_long_inc_return_release(&sysctl_hung_task_detect_count);
> + if (cur_detect_count >= prev_detect_count)
> + total_hung_task = cur_detect_count - prev_detect_count;
> + else
> + total_hung_task = cur_detect_count;
>
> - total_hung_task = sysctl_hung_task_detect_count - prev_detect_count;
> trace_sched_process_hang(t);
>
> if (sysctl_hung_task_panic && total_hung_task >= sysctl_hung_task_panic) {
> @@ -318,10 +325,12 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
> int max_count = sysctl_hung_task_check_count;
> unsigned long last_break = jiffies;
> struct task_struct *g, *t;
> - unsigned long prev_detect_count = sysctl_hung_task_detect_count;
> + unsigned long cur_detect_count, prev_detect_count, delta;
> int need_warning = sysctl_hung_task_warnings;
> unsigned long si_mask = hung_task_si_mask;
>
> + /* Acquire prevents reordering task checks before this point. */
> + prev_detect_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
> /*
> * If the system crashed already then all bets are off,
> * do not report extra hung tasks:
> @@ -346,7 +355,14 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
> unlock:
> rcu_read_unlock();
>
> - if (!(sysctl_hung_task_detect_count - prev_detect_count))
> + /* Ensures we see all hang details recorded during the scan. */
> + cur_detect_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
> + if (cur_detect_count < prev_detect_count)
> + delta = cur_detect_count;
> + else
> + delta = cur_detect_count - prev_detect_count;
> +
> + if (!delta)
> return;
>
> if (need_warning || hung_task_call_panic) {
> @@ -371,6 +387,51 @@ static long hung_timeout_jiffies(unsigned long last_checked,
> }
>
> #ifdef CONFIG_SYSCTL
> +
> +/**
> + * proc_dohung_task_detect_count - proc handler for hung_task_detect_count
> + * @table: Pointer to the struct ctl_table definition for this proc entry
> + * @dir: Flag indicating the operation
> + * @buffer: User space buffer for data transfer
> + * @lenp: Pointer to the length of the data being transferred
> + * @ppos: Pointer to the current file offset
> + *
> + * This handler is used for reading the current hung task detection count
> + * and for resetting it to zero when a write operation is performed using a
> + * zero value only. Returns 0 on success or a negative error code on
> + * failure.
> + */
> +static int proc_dohung_task_detect_count(const struct ctl_table *table, int dir,
> + void *buffer, size_t *lenp, loff_t *ppos)
> +{
> + unsigned long detect_count;
> + struct ctl_table proxy_table;
> + int err;
> +
> + proxy_table = *table;
> + proxy_table.data = &detect_count;
> +
> + if (SYSCTL_KERN_TO_USER(dir)) {
> + detect_count = atomic_long_read(&sysctl_hung_task_detect_count);
> +
> + return proc_doulongvec_minmax(&proxy_table, dir, buffer, lenp, ppos);
> + }
Could you do something like this (untested):
if (SYSCTL_KERN_TO_USER(dir))
detect_count = atomic_long_read(&sysctl_hung_task_detect_count);
err = proc_doulongvec_minmax(&proxy_table, dir, buffer, lenp, ppos);
if (err < 0)
return err;
if (SYSCTL_USER_TO_KERN(dir)) {
if (detect_count)
return -EINVAL;
atomic_long_set(&sysctl_hung_task_detect_count, 0);
}
return 0;
> +
> + err = proc_doulongvec_minmax(&proxy_table, dir, buffer, lenp, ppos);
> + if (err < 0)
> + return err;
> +
> + if (SYSCTL_USER_TO_KERN(dir)) {
> + /* The only valid value for clearing is zero. */
> + if (detect_count)
> + return -EINVAL;
> + atomic_long_set(&sysctl_hung_task_detect_count, 0);
> + }
> +
> + *ppos += *lenp;
why do you advance ppos here? It is already advanced when you call
proc_doulongvec_minmax.
> + return err;
> +}
> +
> /*
> * Process updating of timeout sysctl
> */
> @@ -451,10 +512,9 @@ static const struct ctl_table hung_task_sysctls[] = {
> },
> {
> .procname = "hung_task_detect_count",
> - .data = &sysctl_hung_task_detect_count,
> .maxlen = sizeof(unsigned long),
> - .mode = 0444,
> - .proc_handler = proc_doulongvec_minmax,
> + .mode = 0644,
> + .proc_handler = proc_dohung_task_detect_count,
> },
> {
> .procname = "hung_task_sys_info",
> --
> 2.51.0
>
--
Joel Granados
On Tue, Jan 06, 2026 at 12:51:14PM +0100, Joel Granados wrote:
> Could you do something like this (untested):
>
> if (SYSCTL_KERN_TO_USER(dir))
> detect_count = atomic_long_read(&sysctl_hung_task_detect_count);
>
> err = proc_doulongvec_minmax(&proxy_table, dir, buffer, lenp, ppos);
> if (err < 0)
> return err;
>
> if (SYSCTL_USER_TO_KERN(dir)) {
> if (detect_count)
> return -EINVAL;
>
> atomic_long_set(&sysctl_hung_task_detect_count, 0);
> }
>
> return 0;
Fair enough. If the input is malformed or out of bounds, it exits early
with an error.
> why do you advance ppos here? It is already advanced when you call
> proc_doulongvec_minmax.
Acknowledged.
Kind regards,
--
Aaron Tomlin
On Tue, Dec 30, 2025 at 07:41:25PM -0500, Aaron Tomlin wrote:
> Introduce support for writing to /proc/sys/kernel/hung_task_detect_count.
>
> Writing a value of zero to this file atomically resets the counter of
> detected hung tasks. This grants system administrators the ability to
> clear the cumulative diagnostic history after resolving an incident,
> simplifying monitoring without requiring a system restart.
>
> Signed-off-by: Aaron Tomlin <atomlin@atomlin.com>
> ---
> Documentation/admin-guide/sysctl/kernel.rst | 3 +-
> kernel/hung_task.c | 82 ++++++++++++++++++---
> 2 files changed, 73 insertions(+), 12 deletions(-)
>
> diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
> index 239da22c4e28..68da4235225a 100644
> --- a/Documentation/admin-guide/sysctl/kernel.rst
> +++ b/Documentation/admin-guide/sysctl/kernel.rst
> @@ -418,7 +418,8 @@ hung_task_detect_count
> ======================
>
> Indicates the total number of tasks that have been detected as hung since
> -the system boot.
> +the system boot or since the counter was reset. The counter is zeroed when
> +a value of 0 is written.
>
> This file shows up if ``CONFIG_DETECT_HUNG_TASK`` is enabled.
>
> diff --git a/kernel/hung_task.c b/kernel/hung_task.c
> index 00c3296fd692..3bc72a4e4032 100644
> --- a/kernel/hung_task.c
> +++ b/kernel/hung_task.c
> @@ -17,6 +17,7 @@
> #include <linux/export.h>
> #include <linux/panic_notifier.h>
> #include <linux/sysctl.h>
> +#include <linux/atomic.h>
> #include <linux/suspend.h>
> #include <linux/utsname.h>
> #include <linux/sched/signal.h>
> @@ -36,7 +37,7 @@ static int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
> /*
> * Total number of tasks detected as hung since boot:
> */
> -static unsigned long __read_mostly sysctl_hung_task_detect_count;
> +static atomic_long_t sysctl_hung_task_detect_count = ATOMIC_LONG_INIT(0);
>
> /*
> * Limit number of tasks checked in a batch.
> @@ -246,20 +247,26 @@ static inline void hung_task_diagnostics(struct task_struct *t)
> }
>
> static void check_hung_task(struct task_struct *t, unsigned long timeout,
> - unsigned long prev_detect_count)
> + unsigned long prev_detect_count)
> {
> - unsigned long total_hung_task;
> + unsigned long total_hung_task, cur_detect_count;
>
> if (!task_is_hung(t, timeout))
> return;
>
> /*
> * This counter tracks the total number of tasks detected as hung
> - * since boot.
> + * since boot. If a reset occurred during the scan, we treat the
> + * current count as the new delta to avoid an underflow error.
> + * Ensure hang details are globally visible before the counter
> + * update.
> */
> - sysctl_hung_task_detect_count++;
> + cur_detect_count = atomic_long_inc_return_release(&sysctl_hung_task_detect_count);
> + if (cur_detect_count >= prev_detect_count)
> + total_hung_task = cur_detect_count - prev_detect_count;
> + else
> + total_hung_task = cur_detect_count;
>
> - total_hung_task = sysctl_hung_task_detect_count - prev_detect_count;
> trace_sched_process_hang(t);
>
> if (sysctl_hung_task_panic && total_hung_task >= sysctl_hung_task_panic) {
> @@ -318,10 +325,12 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
> int max_count = sysctl_hung_task_check_count;
> unsigned long last_break = jiffies;
> struct task_struct *g, *t;
> - unsigned long prev_detect_count = sysctl_hung_task_detect_count;
> + unsigned long cur_detect_count, prev_detect_count, delta;
> int need_warning = sysctl_hung_task_warnings;
> unsigned long si_mask = hung_task_si_mask;
>
> + /* Acquire prevents reordering task checks before this point. */
> + prev_detect_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
> /*
> * If the system crashed already then all bets are off,
> * do not report extra hung tasks:
> @@ -346,7 +355,14 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
> unlock:
> rcu_read_unlock();
>
> - if (!(sysctl_hung_task_detect_count - prev_detect_count))
> + /* Ensures we see all hang details recorded during the scan. */
> + cur_detect_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
> + if (cur_detect_count < prev_detect_count)
> + delta = cur_detect_count;
> + else
> + delta = cur_detect_count - prev_detect_count;
> +
> + if (!delta)
> return;
>
> if (need_warning || hung_task_call_panic) {
> @@ -371,6 +387,51 @@ static long hung_timeout_jiffies(unsigned long last_checked,
> }
>
> #ifdef CONFIG_SYSCTL
> +
> +/**
> + * proc_dohung_task_detect_count - proc handler for hung_task_detect_count
> + * @table: Pointer to the struct ctl_table definition for this proc entry
> + * @dir: Flag indicating the operation
> + * @buffer: User space buffer for data transfer
> + * @lenp: Pointer to the length of the data being transferred
> + * @ppos: Pointer to the current file offset
> + *
> + * This handler is used for reading the current hung task detection count
> + * and for resetting it to zero when a write operation is performed using a
> + * zero value only. Returns 0 on success or a negative error code on
> + * failure.
> + */
> +static int proc_dohung_task_detect_count(const struct ctl_table *table, int dir,
> + void *buffer, size_t *lenp, loff_t *ppos)
> +{
> + unsigned long detect_count;
> + struct ctl_table proxy_table;
> + int err;
> +
> + proxy_table = *table;
> + proxy_table.data = &detect_count;
> +
> + if (SYSCTL_KERN_TO_USER(dir)) {
> + detect_count = atomic_long_read(&sysctl_hung_task_detect_count);
> +
> + return proc_doulongvec_minmax(&proxy_table, dir, buffer, lenp, ppos);
> + }
> +
> + err = proc_doulongvec_minmax(&proxy_table, dir, buffer, lenp, ppos);
> + if (err < 0)
> + return err;
> +
> + if (SYSCTL_USER_TO_KERN(dir)) {
> + /* The only valid value for clearing is zero. */
> + if (detect_count)
> + return -EINVAL;
> + atomic_long_set(&sysctl_hung_task_detect_count, 0);
> + }
> +
> + *ppos += *lenp;
> + return err;
> +}
> +
> /*
> * Process updating of timeout sysctl
> */
> @@ -451,10 +512,9 @@ static const struct ctl_table hung_task_sysctls[] = {
> },
> {
> .procname = "hung_task_detect_count",
> - .data = &sysctl_hung_task_detect_count,
> .maxlen = sizeof(unsigned long),
> - .mode = 0444,
> - .proc_handler = proc_doulongvec_minmax,
> + .mode = 0644,
> + .proc_handler = proc_dohung_task_detect_count,
> },
> {
> .procname = "hung_task_sys_info",
I don't understand why you need a custom proc_handler here.
Couldn't you just do this:
{
.procname = "hung_task_detect_count",
.data = &sysctl_hung_task_detect_count,
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
.extra1 = 0,
.extra2 = 0,
},
???
What am I missing?
Best
--
Joel Granados
On Fri, Jan 02, 2026 at 12:14:39AM +0100, Joel Granados wrote:
> I don't understand why you need a custom proc_handler here.
> Couldn't you just do this:
> {
> .procname = "hung_task_detect_count",
> .data = &sysctl_hung_task_detect_count,
> .maxlen = sizeof(unsigned long),
> .mode = 0644,
> .proc_handler = proc_doulongvec_minmax,
> .extra1 = 0,
> .extra2 = 0,
> },
>
> ???
>
> What am I missing?
Hi Joel,
The reason for the custom handler is that sysctl_hung_task_detect_count is
now an atomic_long_t.
The generic proc_doulongvec_minmax() handler expects a pointer to a raw
unsigned long and performs standard assignments. If we pointed it directly
at an atomic type, we would lose the atomicity and memory barriers required
for the reset logic to work correctly alongside check_hung_task().
The custom handler acts as a wrapper to ensure we use atomic_long_read()
and atomic_long_set(), while also enforcing the policy that only a value of
'0' is valid for writing.
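For illustration, here is a condensed, untested sketch of that wrapper,
folding in the restructuring Joel suggested elsewhere in the thread (the
SYSCTL_KERN_TO_USER()/SYSCTL_USER_TO_KERN() direction checks are as used in
the patch):

static int proc_dohung_task_detect_count(const struct ctl_table *table, int dir,
					 void *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned long detect_count = 0;
	struct ctl_table proxy_table = *table;	/* shallow copy, data redirected */
	int err;

	proxy_table.data = &detect_count;

	/* Reads go through atomic_long_read(); the generic helper only formats. */
	if (SYSCTL_KERN_TO_USER(dir))
		detect_count = atomic_long_read(&sysctl_hung_task_detect_count);

	err = proc_doulongvec_minmax(&proxy_table, dir, buffer, lenp, ppos);
	if (err < 0)
		return err;

	/* Writes accept only "0" and reset the counter via atomic_long_set(). */
	if (SYSCTL_USER_TO_KERN(dir)) {
		if (detect_count)
			return -EINVAL;
		atomic_long_set(&sysctl_hung_task_detect_count, 0);
	}

	return 0;
}

The proxy table keeps proc_doulongvec_minmax() doing the parsing and
formatting while all accesses to the real counter stay atomic.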
Kind regards,
--
Aaron Tomlin
On Thu, Jan 01, 2026 at 08:24:42PM -0500, Aaron Tomlin wrote:
> On Fri, Jan 02, 2026 at 12:14:39AM +0100, Joel Granados wrote:
> > I don't understand why you need a custom proc_handler here.
> > Couldn't you just do this:
> > {
> > .procname = "hung_task_detect_count",
> > .data = &sysctl_hung_task_detect_count,
> > .maxlen = sizeof(unsigned long),
> > .mode = 0644,
> > .proc_handler = proc_doulongvec_minmax,
> > .extra1 = 0,
> > .extra2 = 0,
> > },
> >
> > ???
> >
> > What am I missing?
>
> Hi Joel,
>
> The reason for the custom handler is that sysctl_hung_task_detect_count is
> now an atomic_long_t.
>
> The generic proc_doulongvec_minmax() handler expects a pointer to a raw
> unsigned long and performs standard assignments. If we pointed it directly
> at an atomic type, we would lose the atomicity and memory barriers required
> for the reset logic to work correctly alongside check_hung_task().
>
> The custom handler acts as a wrapper to ensure we use atomic_long_read()
> and atomic_long_set(), while also enforcing the policy that only a value of
> '0' is valid for writing
It is clear to me that you need a custom handler after changing the type
to an atomic_long_t.
It is not clear to me why you changed it to atomic_long_t. It was
already being modified, read, and written when it was a raw unsigned long.
What has changed that requires atomic_long_t?
Best
--
Joel Granados
On Mon, Jan 05, 2026 at 11:53:08AM +0100, Joel Granados wrote:
> It is clear to me that you need a custom handler after changing the type
> to an atomic_long_t.
>
> It is not clear to me why you changed it to atomic_long_t. It was
> already being modified, read, and written when it was a raw unsigned long.
> What has changed that requires atomic_long_t?
Hi Joel,
Thank you very much for your review and for raising this important
question. I appreciate the opportunity to clarify the reasoning behind the
type change.
The rationale for switching to atomic_long_t is primarily due to the shift
from a Single-Writer model to a Multi-Writer model, alongside the need for
strict memory ordering which standard unsigned long operations cannot
guarantee.
1. Atomicity (precluding the "Lost Update" scenario) In the existing
implementation, sysctl_hung_task_detect_count is effectively read-only
for userspace. There is only one writer: the watchdog thread. By
introducing the ability for an administrator to write '0' (reset), we
create a race condition between:
- The watchdog thread attempting to increment the counter
(detect_count++)
- The administrator attempting to reset the counter (detect_count = 0)
On many architectures, a standard increment is a non-atomic
Read-Modify-Write sequence. Without atomic_long_inc(), we would risk a
"Lost Update" scenario where a reset occurs precisely between the
watchdog's load and store instructions, causing the counter to revert to a
high value immediately after the reset.
2. Memory Ordering (Acquire/Release Semantics) Beyond basic atomicity, the
use of atomic_long_inc_return_release() and atomic_long_read_acquire()
is necessary to enforce correct synchronisation without the overhead of
full memory barriers
- Release semantics (in check_hung_task()): We utilise Release semantics
when incrementing the counter. This guarantees that all memory
operations associated with detecting the hang (the reads inside
task_is_hung()) are globally visible before the counter is updated.
This effectively "publishes" the hang event only after the detection
logic is fully complete
- Acquire semantics (in check_hung_uninterruptible_tasks()): We utilise
Acquire semantics when reading the counter in the summary loop. This
ensures that we do not reorder the loop's subsequent checks to occur
before we have observed the current counter state
If we were to rely on _relaxed atomics or a simple unsigned long, the CPU
could theoretically reorder these operations, potentially leading to
inconsistencies where the summary loop observes a counter increment but
sees stale task state data.
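
To make the lost-update window in point 1 concrete, here is a small
userspace analogue (illustrative only; C11 stdatomic rather than the kernel
API, and the thread names are made up):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_long detect_count;	/* stands in for sysctl_hung_task_detect_count */

static void *watchdog(void *arg)	/* models khungtaskd incrementing the counter */
{
	for (int i = 0; i < 1000000; i++)
		atomic_fetch_add_explicit(&detect_count, 1, memory_order_relaxed);
	return NULL;
}

static void *admin_reset(void *arg)	/* models writing "0" to the sysctl file */
{
	atomic_store_explicit(&detect_count, 0, memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t w, a;

	pthread_create(&w, NULL, watchdog, NULL);
	pthread_create(&a, NULL, admin_reset, NULL);
	pthread_join(w, NULL);
	pthread_join(a, NULL);

	/*
	 * With atomic operations the final value is exactly the number of
	 * increments that landed after the reset in the modification order.
	 * Swap atomic_long for a plain long with ++ and the reset can land
	 * between the watchdog's load and store, i.e. the "Lost Update".
	 */
	printf("detect_count = %ld\n", atomic_load(&detect_count));
	return 0;
}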
I hope this explanation clarifies the necessity of these changes. Please
let me know if you have any further queries.
Kind regards,
--
Aaron Tomlin
On Mon, Jan 05, 2026 at 09:42:08AM -0500, Aaron Tomlin wrote:
> On Mon, Jan 05, 2026 at 11:53:08AM +0100, Joel Granados wrote:
> > It is clear to me that you need a custom handler after changing the type
> > to an atomic_long_t.
> >
> > It is not clear to me why you changed it to atomic_long_t. It was
> > already being modified, read, and written when it was a raw unsigned long.
> > What has changed that requires atomic_long_t?
>
> Hi Joel,
>
> Thank you very much for your review and for raising this important
> question. I appreciate the opportunity to clarify the reasoning behind the
> type change.
>
> The rationale for switching to atomic_long_t is primarily due to the shift
> from a Single-Writer model to a Multi-Writer model, alongside the need for
> strict memory ordering which standard unsigned long operations cannot
> guarantee.
>
> 1. Atomicity (precluding the "Lost Update" scenario) In the existing
>    implementation, sysctl_hung_task_detect_count is effectively read-only
>    for userspace. There is only one writer: the watchdog thread. By
>    introducing the ability for an administrator to write '0' (reset), we
>    create a race condition between:
>
>    - The watchdog thread attempting to increment the counter
>      (detect_count++)
>
>    - The administrator attempting to reset the counter (detect_count = 0)
>
> On many architectures, a standard increment is a non-atomic
> Read-Modify-Write sequence. Without atomic_long_inc(), we would risk a
> "Lost Update" scenario where a reset occurs precisely between the
> watchdog's load and store instructions, causing the counter to revert to a
> high value immediately after the reset.
>
> 2. Memory Ordering (Acquire/Release Semantics) Beyond basic atomicity, the
>    use of atomic_long_inc_return_release() and atomic_long_read_acquire()
>    is necessary to enforce correct synchronisation without the overhead of
>    full memory barriers
>
>    - Release semantics (in check_hung_task()): We utilise Release semantics
>      when incrementing the counter. This guarantees that all memory
>      operations associated with detecting the hang (the reads inside
>      task_is_hung()) are globally visible before the counter is updated.
>      This effectively "publishes" the hang event only after the detection
>      logic is fully complete
>
>    - Acquire semantics (in check_hung_uninterruptible_tasks()): We utilise
>      Acquire semantics when reading the counter in the summary loop. This
>      ensures that we do not reorder the loop's subsequent checks to occur
>      before we have observed the current counter state
>
> If we were to rely on _relaxed atomics or a simple unsigned long, the CPU
> could theoretically reorder these operations, potentially leading to
> inconsistencies where the summary loop observes a counter increment but
> sees stale task state data.
>
> I hope this explanation clarifies the necessity of these changes. Please
> let me know if you have any further queries.
>
> Kind regards,
> --
> Aaron Tomlin
Hey Aaron
Thx for the very clear explanation. Makes sense.
Can you please add this as part of the commit message.
Actually, if you are up for it, it would be nice to split this into two
commits (this will give you an opportunity to add all this nice
explanation from this mail). One that changes the type and is a
preparation commit. And two, the changes to make the variable writable
from user space.
I have more comments on the diff, but I'll "answer" those to the patch
mail.
Thx again.
Best
--
Joel Granados
On Tue, Jan 06, 2026 at 12:36:10PM +0100, Joel Granados wrote:
> Hey Aaron
Hi Joel,
> Thx for the very clear explanation. Makes sense.
My pleasure.
> Can you please add this as part of the commit message.
>
> Actually, if you are up for it, it would be nice to split this into two
> commits (this will give you an opportunity to add all this nice
> explanation from this mail). One that changes the type and is a
> preparation commit. And two, the changes to make the variable writable
> from user space.
Certainly, I will structure the first as a preparatory commit to amend the
type - utilising that opportunity to include the detailed technical
explanation - and the second to implement the user-space write
capabilities.
> I have more comments on the diff, but I'll "answer" those to the patch
> mail.
Thank you.
Kind regards,
--
Aaron Tomlin
On 2025/12/31 08:41, Aaron Tomlin wrote:
> Introduce support for writing to /proc/sys/kernel/hung_task_detect_count.
>
> Writing a value of zero to this file atomically resets the counter of
> detected hung tasks. This grants system administrators the ability to
> clear the cumulative diagnostic history after resolving an incident,
> simplifying monitoring without requiring a system restart.
>
> Signed-off-by: Aaron Tomlin <atomlin@atomlin.com>
> ---
Overall, looks good to me :)
> Documentation/admin-guide/sysctl/kernel.rst | 3 +-
> kernel/hung_task.c | 82 ++++++++++++++++++---
> 2 files changed, 73 insertions(+), 12 deletions(-)
>
> diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
> index 239da22c4e28..68da4235225a 100644
> --- a/Documentation/admin-guide/sysctl/kernel.rst
> +++ b/Documentation/admin-guide/sysctl/kernel.rst
> @@ -418,7 +418,8 @@ hung_task_detect_count
> ======================
>
> Indicates the total number of tasks that have been detected as hung since
> -the system boot.
> +the system boot or since the counter was reset. The counter is zeroed when
> +a value of 0 is written.
>
> This file shows up if ``CONFIG_DETECT_HUNG_TASK`` is enabled.
>
> diff --git a/kernel/hung_task.c b/kernel/hung_task.c
> index 00c3296fd692..3bc72a4e4032 100644
> --- a/kernel/hung_task.c
> +++ b/kernel/hung_task.c
> @@ -17,6 +17,7 @@
> #include <linux/export.h>
> #include <linux/panic_notifier.h>
> #include <linux/sysctl.h>
> +#include <linux/atomic.h>
> #include <linux/suspend.h>
> #include <linux/utsname.h>
> #include <linux/sched/signal.h>
> @@ -36,7 +37,7 @@ static int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
> /*
> * Total number of tasks detected as hung since boot:
> */
> -static unsigned long __read_mostly sysctl_hung_task_detect_count;
> +static atomic_long_t sysctl_hung_task_detect_count = ATOMIC_LONG_INIT(0);
>
> /*
> * Limit number of tasks checked in a batch.
> @@ -246,20 +247,26 @@ static inline void hung_task_diagnostics(struct task_struct *t)
> }
>
> static void check_hung_task(struct task_struct *t, unsigned long timeout,
> - unsigned long prev_detect_count)
> + unsigned long prev_detect_count)
> {
> - unsigned long total_hung_task;
> + unsigned long total_hung_task, cur_detect_count;
>
> if (!task_is_hung(t, timeout))
> return;
>
> /*
> * This counter tracks the total number of tasks detected as hung
> - * since boot.
> + * since boot. If a reset occurred during the scan, we treat the
> + * current count as the new delta to avoid an underflow error.
> + * Ensure hang details are globally visible before the counter
> + * update.
> */
> - sysctl_hung_task_detect_count++;
> + cur_detect_count = atomic_long_inc_return_release(&sysctl_hung_task_detect_count);
> + if (cur_detect_count >= prev_detect_count)
> + total_hung_task = cur_detect_count - prev_detect_count;
> + else
> + total_hung_task = cur_detect_count;
>
> - total_hung_task = sysctl_hung_task_detect_count - prev_detect_count;
> trace_sched_process_hang(t);
>
> if (sysctl_hung_task_panic && total_hung_task >= sysctl_hung_task_panic) {
> @@ -318,10 +325,12 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
> int max_count = sysctl_hung_task_check_count;
> unsigned long last_break = jiffies;
> struct task_struct *g, *t;
> - unsigned long prev_detect_count = sysctl_hung_task_detect_count;
> + unsigned long cur_detect_count, prev_detect_count, delta;
> int need_warning = sysctl_hung_task_warnings;
> unsigned long si_mask = hung_task_si_mask;
>
> + /* Acquire prevents reordering task checks before this point. */
> + prev_detect_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
> /*
> * If the system crashed already then all bets are off,
> * do not report extra hung tasks:
> @@ -346,7 +355,14 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
> unlock:
> rcu_read_unlock();
>
> - if (!(sysctl_hung_task_detect_count - prev_detect_count))
> + /* Ensures we see all hang details recorded during the scan. */
> + cur_detect_count = atomic_long_read_acquire(&sysctl_hung_task_detect_count);
> + if (cur_detect_count < prev_detect_count)
> + delta = cur_detect_count;
> + else
> + delta = cur_detect_count - prev_detect_count;
> +
> + if (!delta)
> return;
Right. The underflow check in both check_hung_task() and
check_hung_uninterruptible_tasks(), which handles a reset occurring
during the scan, looks properly addressed.
>
> if (need_warning || hung_task_call_panic) {
> @@ -371,6 +387,51 @@ static long hung_timeout_jiffies(unsigned long last_checked,
> }
>
> #ifdef CONFIG_SYSCTL
> +
> +/**
> + * proc_dohung_task_detect_count - proc handler for hung_task_detect_count
> + * @table: Pointer to the struct ctl_table definition for this proc entry
> + * @dir: Flag indicating the operation
> + * @buffer: User space buffer for data transfer
> + * @lenp: Pointer to the length of the data being transferred
> + * @ppos: Pointer to the current file offset
> + *
> + * This handler is used for reading the current hung task detection count
> + * and for resetting it to zero when a write operation is performed using a
> + * zero value only. Returns 0 on success or a negative error code on
> + * failure.
> + */
> +static int proc_dohung_task_detect_count(const struct ctl_table *table, int dir,
> + void *buffer, size_t *lenp, loff_t *ppos)
> +{
This proc_handler is probably better left for Petr and Joel to review ;)
Nothing else jumped out at me, so:
Acked-by: Lance Yang <lance.yang@linux.dev>
> + unsigned long detect_count;
> + struct ctl_table proxy_table;
> + int err;
> +
> + proxy_table = *table;
> + proxy_table.data = &detect_count;
> +
> + if (SYSCTL_KERN_TO_USER(dir)) {
> + detect_count = atomic_long_read(&sysctl_hung_task_detect_count);
> +
> + return proc_doulongvec_minmax(&proxy_table, dir, buffer, lenp, ppos);
> + }
> +
> + err = proc_doulongvec_minmax(&proxy_table, dir, buffer, lenp, ppos);
> + if (err < 0)
> + return err;
> +
> + if (SYSCTL_USER_TO_KERN(dir)) {
> + /* The only valid value for clearing is zero. */
> + if (detect_count)
> + return -EINVAL;
> + atomic_long_set(&sysctl_hung_task_detect_count, 0);
> + }
> +
> + *ppos += *lenp;
> + return err;
> +}
> +
> /*
> * Process updating of timeout sysctl
> */
> @@ -451,10 +512,9 @@ static const struct ctl_table hung_task_sysctls[] = {
> },
> {
> .procname = "hung_task_detect_count",
> - .data = &sysctl_hung_task_detect_count,
> .maxlen = sizeof(unsigned long),
> - .mode = 0444,
> - .proc_handler = proc_doulongvec_minmax,
> + .mode = 0644,
> + .proc_handler = proc_dohung_task_detect_count,
> },
> {
> .procname = "hung_task_sys_info",