From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Dump the lock blocker task only if it is not hung: if the blocker
task is also hung, the detector itself will dump it. This avoids
duplicating the same stack dumps when the blocker task is in turn
blocked by another task (and hung).
Suggested-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
---
kernel/hung_task.c | 86 ++++++++++++++++++++++++++++++----------------------
1 file changed, 49 insertions(+), 37 deletions(-)
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index d2432df2b905..52d72beb2233 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -94,9 +94,49 @@ static struct notifier_block panic_block = {
.notifier_call = hung_task_panic,
};
+static bool task_is_hung(struct task_struct *t, unsigned long timeout)
+{
+ unsigned long switch_count = t->nvcsw + t->nivcsw;
+ unsigned int state;
+
+ /*
+ * skip the TASK_KILLABLE tasks -- these can be killed
+ * skip the TASK_IDLE tasks -- those are genuinely idle
+ */
+ state = READ_ONCE(t->__state);
+ if (!(state & TASK_UNINTERRUPTIBLE) ||
+ (state & TASK_WAKEKILL) ||
+ (state & TASK_NOLOAD))
+ return false;
+
+ /*
+ * Ensure the task is not frozen.
+ * Also, skip vfork and any other user process that freezer should skip.
+ */
+ if (unlikely(READ_ONCE(t->__state) & TASK_FROZEN))
+ return false;
+
+ /*
+ * When a freshly created task is scheduled once and changes its state to
+ * TASK_UNINTERRUPTIBLE without ever having been switched out, it
+ * mustn't be checked.
+ */
+ if (unlikely(!switch_count))
+ return false;
+
+ if (switch_count != t->last_switch_count) {
+ t->last_switch_count = switch_count;
+ t->last_switch_time = jiffies;
+ return false;
+ }
+ if (time_is_after_jiffies(t->last_switch_time + timeout * HZ))
+ return false;
+
+ return true;
+}
#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
-static void debug_show_blocker(struct task_struct *task)
+static void debug_show_blocker(struct task_struct *task, unsigned long timeout)
{
struct task_struct *g, *t;
unsigned long owner, blocker, blocker_type;
@@ -153,41 +193,21 @@ static void debug_show_blocker(struct task_struct *task)
task->comm, task->pid, t->comm, t->pid);
break;
}
- sched_show_task(t);
+ /* Avoid duplicated task dump, skip if the task is also hung. */
+ if (!task_is_hung(t, timeout))
+ sched_show_task(t);
return;
}
}
#else
-static inline void debug_show_blocker(struct task_struct *task)
+static inline void debug_show_blocker(struct task_struct *task, unsigned long timeout)
{
}
#endif
static void check_hung_task(struct task_struct *t, unsigned long timeout)
{
- unsigned long switch_count = t->nvcsw + t->nivcsw;
-
- /*
- * Ensure the task is not frozen.
- * Also, skip vfork and any other user process that freezer should skip.
- */
- if (unlikely(READ_ONCE(t->__state) & TASK_FROZEN))
- return;
-
- /*
- * When a freshly created task is scheduled once, changes its state to
- * TASK_UNINTERRUPTIBLE without having ever been switched out once, it
- * musn't be checked.
- */
- if (unlikely(!switch_count))
- return;
-
- if (switch_count != t->last_switch_count) {
- t->last_switch_count = switch_count;
- t->last_switch_time = jiffies;
- return;
- }
- if (time_is_after_jiffies(t->last_switch_time + timeout * HZ))
+ if (!task_is_hung(t, timeout))
return;
/*
@@ -222,7 +242,7 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
" disables this message.\n");
sched_show_task(t);
- debug_show_blocker(t);
+ debug_show_blocker(t, timeout);
hung_task_show_lock = true;
if (sysctl_hung_task_all_cpu_backtrace)
@@ -278,7 +298,6 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
hung_task_show_lock = false;
rcu_read_lock();
for_each_process_thread(g, t) {
- unsigned int state;
if (!max_count--)
goto unlock;
@@ -287,15 +306,8 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
goto unlock;
last_break = jiffies;
}
- /*
- * skip the TASK_KILLABLE tasks -- these can be killed
- * skip the TASK_IDLE tasks -- those are genuinely idle
- */
- state = READ_ONCE(t->__state);
- if ((state & TASK_UNINTERRUPTIBLE) &&
- !(state & TASK_WAKEKILL) &&
- !(state & TASK_NOLOAD))
- check_hung_task(t, timeout);
+
+ check_hung_task(t, timeout);
}
unlock:
rcu_read_unlock();
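
For illustration only (not part of the diff above): a minimal user-space
sketch of the de-duplication rule, using simplified stand-ins for the
kernel's task_struct fields and helpers. It models a chain A -> B -> C
where A and B are hung and C is merely slow: B is not dumped from A's
report (B is hung, so B's own report covers it), and C is dumped exactly
once, from B's report.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's task_struct and helpers. */
struct task {
	const char *comm;
	bool hung;              /* stand-in for task_is_hung(t, timeout) */
	struct task *blocker;   /* holder of the lock this task waits on */
};

static void sched_show_task(const struct task *t)
{
	printf("  stack dump of %s\n", t->comm);
}

/* Models check_hung_task() + debug_show_blocker() after this patch. */
static void check_hung_task(const struct task *t)
{
	if (!t->hung)
		return;
	printf("INFO: task %s blocked for too long\n", t->comm);
	sched_show_task(t);
	/*
	 * Skip the blocker dump when the blocker itself is hung: the
	 * detector's own scan will report it, avoiding a duplicate.
	 */
	if (t->blocker && !t->blocker->hung)
		sched_show_task(t->blocker);
}

int main(void)
{
	struct task c = { "C", false, NULL };   /* slow, but not hung */
	struct task b = { "B", true,  &c };     /* hung, waits on C */
	struct task a = { "A", true,  &b };     /* hung, waits on B */

	check_hung_task(&a);    /* dumps A only; B is hung */
	check_hung_task(&b);    /* dumps B, then C (C is not hung) */
	check_hung_task(&c);    /* not hung: nothing */
	return 0;
}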
On 2025/7/30 17:53, Masami Hiramatsu (Google) wrote:
> From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
>
> Dump the lock blocker task only if it is not hung: if the blocker
> task is also hung, the detector itself will dump it. This avoids
> duplicating the same stack dumps when the blocker task is in turn
> blocked by another task (and hung).
Makes sense to me ;)
>
> Suggested-by: Sergey Senozhatsky <senozhatsky@chromium.org>
> Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> ---
> kernel/hung_task.c | 86 ++++++++++++++++++++++++++++++----------------------
> 1 file changed, 49 insertions(+), 37 deletions(-)
>
> diff --git a/kernel/hung_task.c b/kernel/hung_task.c
> index d2432df2b905..52d72beb2233 100644
> --- a/kernel/hung_task.c
> +++ b/kernel/hung_task.c
> @@ -94,9 +94,49 @@ static struct notifier_block panic_block = {
> .notifier_call = hung_task_panic,
> };
>
> +static bool task_is_hung(struct task_struct *t, unsigned long timeout)
> +{
> + unsigned long switch_count = t->nvcsw + t->nivcsw;
> + unsigned int state;
> +
> + /*
> + * skip the TASK_KILLABLE tasks -- these can be killed
> + * skip the TASK_IDLE tasks -- those are genuinely idle
> + */
> + state = READ_ONCE(t->__state);
> + if (!(state & TASK_UNINTERRUPTIBLE) ||
> + (state & TASK_WAKEKILL) ||
> + (state & TASK_NOLOAD))
> + return false;
> +
> + /*
> + * Ensure the task is not frozen.
> + * Also, skip vfork and any other user process that freezer should skip.
> + */
> + if (unlikely(READ_ONCE(t->__state) & TASK_FROZEN))
> + return false;
Nit: the two separate checks on t->__state could be combined into
a single read and one conditional check ;)
Something like:
state = READ_ONCE(t->__state);
if (!(state & TASK_UNINTERRUPTIBLE) ||
(state & (TASK_WAKEKILL | TASK_NOLOAD | TASK_FROZEN)))
return false;
Otherwise, looks good to me:
Acked-by: Lance Yang <lance.yang@linux.dev>
Thanks,
Lance
> +
> + /*
> + * When a freshly created task is scheduled once and changes its state to
> + * TASK_UNINTERRUPTIBLE without ever having been switched out, it
> + * mustn't be checked.
> + */
> + if (unlikely(!switch_count))
> + return false;
> +
> + if (switch_count != t->last_switch_count) {
> + t->last_switch_count = switch_count;
> + t->last_switch_time = jiffies;
> + return false;
> + }
> + if (time_is_after_jiffies(t->last_switch_time + timeout * HZ))
> + return false;
> +
> + return true;
> +}
>
> #ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
> -static void debug_show_blocker(struct task_struct *task)
> +static void debug_show_blocker(struct task_struct *task, unsigned long timeout)
> {
> struct task_struct *g, *t;
> unsigned long owner, blocker, blocker_type;
> @@ -153,41 +193,21 @@ static void debug_show_blocker(struct task_struct *task)
> task->comm, task->pid, t->comm, t->pid);
> break;
> }
> - sched_show_task(t);
> + /* Avoid duplicated task dump, skip if the task is also hung. */
> + if (!task_is_hung(t, timeout))
> + sched_show_task(t);
> return;
> }
> }
> #else
> -static inline void debug_show_blocker(struct task_struct *task)
> +static inline void debug_show_blocker(struct task_struct *task, unsigned long timeout)
> {
> }
> #endif
>
> static void check_hung_task(struct task_struct *t, unsigned long timeout)
> {
> - unsigned long switch_count = t->nvcsw + t->nivcsw;
> -
> - /*
> - * Ensure the task is not frozen.
> - * Also, skip vfork and any other user process that freezer should skip.
> - */
> - if (unlikely(READ_ONCE(t->__state) & TASK_FROZEN))
> - return;
> -
> - /*
> - * When a freshly created task is scheduled once, changes its state to
> - * TASK_UNINTERRUPTIBLE without having ever been switched out once, it
> - * musn't be checked.
> - */
> - if (unlikely(!switch_count))
> - return;
> -
> - if (switch_count != t->last_switch_count) {
> - t->last_switch_count = switch_count;
> - t->last_switch_time = jiffies;
> - return;
> - }
> - if (time_is_after_jiffies(t->last_switch_time + timeout * HZ))
> + if (!task_is_hung(t, timeout))
> return;
>
> /*
> @@ -222,7 +242,7 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
> pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
> " disables this message.\n");
> sched_show_task(t);
> - debug_show_blocker(t);
> + debug_show_blocker(t, timeout);
> hung_task_show_lock = true;
>
> if (sysctl_hung_task_all_cpu_backtrace)
> @@ -278,7 +298,6 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
> hung_task_show_lock = false;
> rcu_read_lock();
> for_each_process_thread(g, t) {
> - unsigned int state;
>
> if (!max_count--)
> goto unlock;
> @@ -287,15 +306,8 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
> goto unlock;
> last_break = jiffies;
> }
> - /*
> - * skip the TASK_KILLABLE tasks -- these can be killed
> - * skip the TASK_IDLE tasks -- those are genuinely idle
> - */
> - state = READ_ONCE(t->__state);
> - if ((state & TASK_UNINTERRUPTIBLE) &&
> - !(state & TASK_WAKEKILL) &&
> - !(state & TASK_NOLOAD))
> - check_hung_task(t, timeout);
> +
> + check_hung_task(t, timeout);
> }
> unlock:
> rcu_read_unlock();
>
On Wed, 30 Jul 2025 21:46:16 +0800
Lance Yang <lance.yang@linux.dev> wrote:
>
>
> On 2025/7/30 17:53, Masami Hiramatsu (Google) wrote:
> > From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> >
> > Dump the lock blocker task only if it is not hung: if the blocker
> > task is also hung, the detector itself will dump it. This avoids
> > duplicating the same stack dumps when the blocker task is in turn
> > blocked by another task (and hung).
>
> Makes sense to me ;)
>
> >
> > Suggested-by: Sergey Senozhatsky <senozhatsky@chromium.org>
> > Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> > ---
> > kernel/hung_task.c | 86 ++++++++++++++++++++++++++++++----------------------
> > 1 file changed, 49 insertions(+), 37 deletions(-)
> >
> > diff --git a/kernel/hung_task.c b/kernel/hung_task.c
> > index d2432df2b905..52d72beb2233 100644
> > --- a/kernel/hung_task.c
> > +++ b/kernel/hung_task.c
> > @@ -94,9 +94,49 @@ static struct notifier_block panic_block = {
> > .notifier_call = hung_task_panic,
> > };
> >
> > +static bool task_is_hung(struct task_struct *t, unsigned long timeout)
> > +{
> > + unsigned long switch_count = t->nvcsw + t->nivcsw;
> > + unsigned int state;
> > +
> > + /*
> > + * skip the TASK_KILLABLE tasks -- these can be killed
> > + * skip the TASK_IDLE tasks -- those are genuinely idle
> > + */
> > + state = READ_ONCE(t->__state);
> > + if (!(state & TASK_UNINTERRUPTIBLE) ||
> > + (state & TASK_WAKEKILL) ||
> > + (state & TASK_NOLOAD))
> > + return false;
> > +
> > + /*
> > + * Ensure the task is not frozen.
> > + * Also, skip vfork and any other user process that freezer should skip.
> > + */
> > + if (unlikely(READ_ONCE(t->__state) & TASK_FROZEN))
> > + return false;
>
>
> Nit: the two separate checks on t->__state could be combined into
> a single read and one conditional check ;)
>
> Something like:
>
> state = READ_ONCE(t->__state);
>
> if (!(state & TASK_UNINTERRUPTIBLE) ||
> (state & (TASK_WAKEKILL | TASK_NOLOAD | TASK_FROZEN)))
> return false;
Ah, indeed.
>
>
> Otherwise, looks good to me:
> Acked-by: Lance Yang <lance.yang@linux.dev>
Thanks, let me update it (also on the next tree).
Thank you!
>
> Thanks,
> Lance
>
> > +
> > + /*
> > + * When a freshly created task is scheduled once and changes its state to
> > + * TASK_UNINTERRUPTIBLE without ever having been switched out, it
> > + * mustn't be checked.
> > + */
> > + if (unlikely(!switch_count))
> > + return false;
> > +
> > + if (switch_count != t->last_switch_count) {
> > + t->last_switch_count = switch_count;
> > + t->last_switch_time = jiffies;
> > + return false;
> > + }
> > + if (time_is_after_jiffies(t->last_switch_time + timeout * HZ))
> > + return false;
> > +
> > + return true;
> > +}
> >
> > #ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
> > -static void debug_show_blocker(struct task_struct *task)
> > +static void debug_show_blocker(struct task_struct *task, unsigned long timeout)
> > {
> > struct task_struct *g, *t;
> > unsigned long owner, blocker, blocker_type;
> > @@ -153,41 +193,21 @@ static void debug_show_blocker(struct task_struct *task)
> > task->comm, task->pid, t->comm, t->pid);
> > break;
> > }
> > - sched_show_task(t);
> > + /* Avoid duplicated task dump, skip if the task is also hung. */
> > + if (!task_is_hung(t, timeout))
> > + sched_show_task(t);
> > return;
> > }
> > }
> > #else
> > -static inline void debug_show_blocker(struct task_struct *task)
> > +static inline void debug_show_blocker(struct task_struct *task, unsigned long timeout)
> > {
> > }
> > #endif
> >
> > static void check_hung_task(struct task_struct *t, unsigned long timeout)
> > {
> > - unsigned long switch_count = t->nvcsw + t->nivcsw;
> > -
> > - /*
> > - * Ensure the task is not frozen.
> > - * Also, skip vfork and any other user process that freezer should skip.
> > - */
> > - if (unlikely(READ_ONCE(t->__state) & TASK_FROZEN))
> > - return;
> > -
> > - /*
> > - * When a freshly created task is scheduled once, changes its state to
> > - * TASK_UNINTERRUPTIBLE without having ever been switched out once, it
> > - * musn't be checked.
> > - */
> > - if (unlikely(!switch_count))
> > - return;
> > -
> > - if (switch_count != t->last_switch_count) {
> > - t->last_switch_count = switch_count;
> > - t->last_switch_time = jiffies;
> > - return;
> > - }
> > - if (time_is_after_jiffies(t->last_switch_time + timeout * HZ))
> > + if (!task_is_hung(t, timeout))
> > return;
> >
> > /*
> > @@ -222,7 +242,7 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
> > pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
> > " disables this message.\n");
> > sched_show_task(t);
> > - debug_show_blocker(t);
> > + debug_show_blocker(t, timeout);
> > hung_task_show_lock = true;
> >
> > if (sysctl_hung_task_all_cpu_backtrace)
> > @@ -278,7 +298,6 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
> > hung_task_show_lock = false;
> > rcu_read_lock();
> > for_each_process_thread(g, t) {
> > - unsigned int state;
> >
> > if (!max_count--)
> > goto unlock;
> > @@ -287,15 +306,8 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
> > goto unlock;
> > last_break = jiffies;
> > }
> > - /*
> > - * skip the TASK_KILLABLE tasks -- these can be killed
> > - * skip the TASK_IDLE tasks -- those are genuinely idle
> > - */
> > - state = READ_ONCE(t->__state);
> > - if ((state & TASK_UNINTERRUPTIBLE) &&
> > - !(state & TASK_WAKEKILL) &&
> > - !(state & TASK_NOLOAD))
> > - check_hung_task(t, timeout);
> > +
> > + check_hung_task(t, timeout);
> > }
> > unlock:
> > rcu_read_unlock();
> >
>
--
Masami Hiramatsu (Google) <mhiramat@kernel.org>
On (25/07/30 18:53), Masami Hiramatsu (Google) wrote:
> From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
>
> Dump the lock blocker task only if it is not hung: if the blocker
> task is also hung, the detector itself will dump it. This avoids
> duplicating the same stack dumps when the blocker task is in turn
> blocked by another task (and hung).
[..]
> #ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
> -static void debug_show_blocker(struct task_struct *task)
> +static void debug_show_blocker(struct task_struct *task, unsigned long timeout)
> {
> struct task_struct *g, *t;
> unsigned long owner, blocker, blocker_type;
> @@ -153,41 +193,21 @@ static void debug_show_blocker(struct task_struct *task)
> task->comm, task->pid, t->comm, t->pid);
> break;
> }
> - sched_show_task(t);
> + /* Avoid duplicated task dump, skip if the task is also hung. */
> + if (!task_is_hung(t, timeout))
> + sched_show_task(t);
> return;
> }
This patch seems to be against a tree that is significantly
behind the current linux-next; namely, it conflicts with
linux-next commit 77da18de55ac6.
On Wed, 30 Jul 2025 22:28:45 +0900
Sergey Senozhatsky <senozhatsky@chromium.org> wrote:
> On (25/07/30 18:53), Masami Hiramatsu (Google) wrote:
> > From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> >
> > Dump the lock blocker task only if it is not hung: if the blocker
> > task is also hung, the detector itself will dump it. This avoids
> > duplicating the same stack dumps when the blocker task is in turn
> > blocked by another task (and hung).
>
> [..]
>
> > #ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
> > -static void debug_show_blocker(struct task_struct *task)
> > +static void debug_show_blocker(struct task_struct *task, unsigned long timeout)
> > {
> > struct task_struct *g, *t;
> > unsigned long owner, blocker, blocker_type;
> > @@ -153,41 +193,21 @@ static void debug_show_blocker(struct task_struct *task)
> > task->comm, task->pid, t->comm, t->pid);
> > break;
> > }
> > - sched_show_task(t);
> > + /* Avoid duplicated task dump, skip if the task is also hung. */
> > + if (!task_is_hung(t, timeout))
> > + sched_show_task(t);
> > return;
> > }
>
> This patch seems to be against a tree that is significantly
> behind the current linux-next; namely, it conflicts with
> linux-next commit 77da18de55ac6.
Ah, yes. I just used v6.16 for testing. OK, let me update it
against linux-next.
Thank you,
--
Masami Hiramatsu (Google) <mhiramat@kernel.org>