From: Steven Rostedt <rostedt@goodmis.org>
In order to know which registered callback requested a stacktrace for when
the task goes back to user space, add a bitmask for all registered
tracers. The bitmask is the size of a long, which means that on a 32 bit
machine, it can have at most 32 registered tracers, and on 64 bit, it can
have at most 64 registered tracers. This should not be an issue as there
should not be more than 10 (unless BPF can abuse this?).
When a tracer registers with unwind_deferred_init() it will get a bit
number assigned to it. When a tracer requests a stacktrace, it will have
its bit set within the task_struct. When the task returns back to user
space, it will call the callbacks for all the registered tracers where
their bits are set in the task's mask.
When a tracer is removed by unwind_deferred_cancel(), all current tasks
will clear the associated bit, just in case another tracer gets registered
immediately afterward, is assigned the same bit, and then has its callback
called unexpectedly.
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
Changes since v6: https://lore.kernel.org/20250424192612.844558089@goodmis.org
- Have unwind_deferred_request() return positive if already queued.
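
(Illustration only, not part of the patch.) A minimal user-space sketch of
the bit-allocation scheme the changelog describes; find_free_bit() is a
stand-in for the kernel's ffz(), the masks are plain longs rather than the
real atomic bitops, and all names here are made up for the example:

/*
 * Illustration only: a user-space model of the per-tracer bit
 * allocation described above.
 */
#include <stdio.h>
#include <limits.h>

static unsigned long unwind_mask;	/* models the global registered-tracer mask */

/* Stand-in for the kernel's ffz(): find the first zero bit */
static int find_free_bit(unsigned long mask)
{
	for (int bit = 0; bit < (int)(sizeof(mask) * CHAR_BIT); bit++) {
		if (!(mask & (1UL << bit)))
			return bit;
	}
	return -1;	/* all bits taken: would be -EBUSY in the kernel */
}

int main(void)
{
	unsigned long task_mask = 0;	/* models task->unwind_mask */
	int bit_a, bit_b;

	/* "unwind_deferred_init()": each tracer claims its own bit */
	bit_a = find_free_bit(unwind_mask);
	unwind_mask |= 1UL << bit_a;
	bit_b = find_free_bit(unwind_mask);
	unwind_mask |= 1UL << bit_b;

	/* "unwind_deferred_request()": tracer A asks for a stacktrace */
	task_mask |= 1UL << bit_a;

	/* On return to user space: only callbacks with their bit set run */
	for (int bit = 0; bit <= bit_b; bit++) {
		if (task_mask & (1UL << bit))
			printf("calling back tracer with bit %d\n", bit);
	}
	return 0;
}
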
include/linux/sched.h | 1 +
include/linux/unwind_deferred.h | 1 +
kernel/unwind/deferred.c | 46 ++++++++++++++++++++++++++++-----
3 files changed, 41 insertions(+), 7 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a1e1c07cadfb..d3ee0c5405d6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1649,6 +1649,7 @@ struct task_struct {
#ifdef CONFIG_UNWIND_USER
struct unwind_task_info unwind_info;
+ unsigned long unwind_mask;
#endif
/* CPU-specific state of this task: */
diff --git a/include/linux/unwind_deferred.h b/include/linux/unwind_deferred.h
index d36784cae658..719a7cfb3164 100644
--- a/include/linux/unwind_deferred.h
+++ b/include/linux/unwind_deferred.h
@@ -13,6 +13,7 @@ typedef void (*unwind_callback_t)(struct unwind_work *work, struct unwind_stackt
struct unwind_work {
struct list_head list;
unwind_callback_t func;
+ int bit;
};
#ifdef CONFIG_UNWIND_USER
diff --git a/kernel/unwind/deferred.c b/kernel/unwind/deferred.c
index d86ea82a8915..716393dff810 100644
--- a/kernel/unwind/deferred.c
+++ b/kernel/unwind/deferred.c
@@ -26,6 +26,7 @@ static DEFINE_PER_CPU(u64, unwind_ctx_ctr);
/* Guards adding to and reading the list of callbacks */
static DEFINE_MUTEX(callback_mutex);
static LIST_HEAD(callbacks);
+static unsigned long unwind_mask;
/*
* The context cookie is a unique identifier that is assigned to a user
@@ -134,6 +135,7 @@ static void unwind_deferred_task_work(struct callback_head *head)
struct unwind_task_info *info = container_of(head, struct unwind_task_info, work);
struct unwind_stacktrace trace;
struct unwind_work *work;
+ struct task_struct *task = current;
u64 cookie;
if (WARN_ON_ONCE(!info->pending))
@@ -155,7 +157,10 @@ static void unwind_deferred_task_work(struct callback_head *head)
guard(mutex)(&callback_mutex);
list_for_each_entry(work, &callbacks, list) {
- work->func(work, &trace, cookie);
+ if (task->unwind_mask & (1UL << work->bit)) {
+ work->func(work, &trace, cookie);
+ clear_bit(work->bit, &current->unwind_mask);
+ }
}
barrier();
/* If another task work is pending, reuse the cookie and stack trace */
@@ -193,9 +198,12 @@ static int unwind_deferred_request_nmi(struct unwind_work *work, u64 *cookie)
*cookie = info->nmi_cookie;
}
- if (info->pending)
+ if (current->unwind_mask & (1UL << work->bit))
return 1;
+ if (info->pending)
+ goto out;
+
ret = task_work_add(current, &info->work, TWA_NMI_CURRENT);
if (ret) {
if (inited_cookie)
@@ -204,8 +212,8 @@ static int unwind_deferred_request_nmi(struct unwind_work *work, u64 *cookie)
}
info->pending = 1;
-
- return 0;
+out:
+ return test_and_set_bit(work->bit, &current->unwind_mask);
}
/*
@@ -245,14 +253,18 @@ int unwind_deferred_request(struct unwind_work *work, u64 *cookie)
*cookie = get_cookie(info);
+ /* This is already queued */
+ if (current->unwind_mask & (1UL << work->bit))
+ return 1;
+
/* callback already pending? */
pending = READ_ONCE(info->pending);
if (pending)
- return 1;
+ goto out;
/* Claim the work unless an NMI just now swooped in to do so. */
if (!try_cmpxchg(&info->pending, &pending, 1))
- return 1;
+ goto out;
/* The work has been claimed, now schedule it. */
ret = task_work_add(current, &info->work, TWA_RESUME);
@@ -261,16 +273,27 @@ int unwind_deferred_request(struct unwind_work *work, u64 *cookie)
return ret;
}
- return 0;
+ out:
+ return test_and_set_bit(work->bit, &current->unwind_mask);
}
void unwind_deferred_cancel(struct unwind_work *work)
{
+ struct task_struct *g, *t;
+
if (!work)
return;
guard(mutex)(&callback_mutex);
list_del(&work->list);
+
+ clear_bit(work->bit, &unwind_mask);
+
+ guard(rcu)();
+ /* Clear this bit from all threads */
+ for_each_process_thread(g, t) {
+ clear_bit(work->bit, &t->unwind_mask);
+ }
}
int unwind_deferred_init(struct unwind_work *work, unwind_callback_t func)
@@ -278,6 +301,14 @@ int unwind_deferred_init(struct unwind_work *work, unwind_callback_t func)
memset(work, 0, sizeof(*work));
guard(mutex)(&callback_mutex);
+
+ /* See if there's a bit in the mask available */
+ if (unwind_mask == ~0UL)
+ return -EBUSY;
+
+ work->bit = ffz(unwind_mask);
+ unwind_mask |= 1UL << work->bit;
+
list_add(&work->list, &callbacks);
work->func = func;
return 0;
@@ -289,6 +320,7 @@ void unwind_task_init(struct task_struct *task)
memset(info, 0, sizeof(*info));
init_task_work(&info->work, unwind_deferred_task_work);
+ task->unwind_mask = 0;
}
void unwind_task_free(struct task_struct *task)
--
2.47.2
On Fri, 02 May 2025 12:47:57 -0400
Steven Rostedt <rostedt@goodmis.org> wrote:
> @@ -134,6 +135,7 @@ static void unwind_deferred_task_work(struct callback_head *head)
> struct unwind_task_info *info = container_of(head, struct unwind_task_info, work);
> struct unwind_stacktrace trace;
> struct unwind_work *work;
> + struct task_struct *task = current;
> u64 cookie;
>
> if (WARN_ON_ONCE(!info->pending))
> @@ -155,7 +157,10 @@ static void unwind_deferred_task_work(struct callback_head *head)
>
> guard(mutex)(&callback_mutex);
> list_for_each_entry(work, &callbacks, list) {
> - work->func(work, &trace, cookie);
> + if (task->unwind_mask & (1UL << work->bit)) {
> + work->func(work, &trace, cookie);
> + clear_bit(work->bit, &current->unwind_mask);
> + }
> }
> barrier();
> /* If another task work is pending, reuse the cookie and stack trace */
So testing this code I hit a livelock. I enabled the flag that asks for a
user space callback after every event, and also enabled all events, on a
kernel that had irq_disable as an event, and the system hung.

What happened was that after the trace was recorded, on the way back to
user space, interrupts were disabled again and the request for a callback
was made again. That triggered another task_work, which did the callback
again; on the way back to user space interrupts were disabled yet again,
another request for a user space stack trace was made, the task_work was
triggered again, and ... wash, rinse, repeat!
To fix this, I decided to move the "pending" bit into the task->unwind_mask
(the most significant bit). I also moved the clearing of the work bits into
unwind_exit_to_user_mode():
static __always_inline void unwind_exit_to_user_mode(void)
{
	unsigned long bits;

	/* Was there any unwinding? */
	if (likely(!current->unwind_mask))
		return;

	bits = current->unwind_mask;
	do {
		/* Is a task_work going to run again before going back */
		if (bits & UNWIND_PENDING)
			return;
	} while (!try_cmpxchg(&current->unwind_mask, &bits, 0UL));

	if (likely(current->unwind_info.cache))
		current->unwind_info.cache->nr_entries = 0;
	current->unwind_info.timestamp = 0;
}
The idea is that current->unwind_mask is only set if an unwind was
requested and delivered. If it's not set, no unwind was done and there's
nothing left to do, so just exit the routine.

Then, if unwind_mask has PENDING set, it means a task_work is going to be
executed again before going back to user space, so exit; otherwise, try to
clear all the bits to zero (using try_cmpxchg() in case an NMI comes in).
Finally, clear all the data normally.
Now if a tracer requests a callback, it will get only one callback; if it
requests another one on the way out, it will be told that it has already
requested one (and that it was already delivered). If a tracer really wants
to request another one, and it knows doing so will not cause infinite
recursion, then I could add an API that lets the unwinder do that, possibly
by adding a "force" argument to the request function.

Otherwise, the bit for the tracer stays set until the task goes back to
user space, or until another tracer requests its first stacktrace, which
sets the pending bit and clears all other bits that were set previously.
Having the pending bit be part of the unwind_mask allows for updating the
pending bit atomically with the other bits.
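
(Illustration only, not kernel code.) A small sketch of the resulting mask
layout and return-value convention, assuming the UNWIND_PENDING_BIT layout
from the patch below; the test_and_set() helper here mimics
test_and_set_bit() by returning the old bit value, so 0 means "newly
requested" and 1 means "already requested for this trip back to user space":

/*
 * Illustration only: a user-space model of task->unwind_mask after
 * this change.  UNWIND_PENDING lives in the most significant bit;
 * tracer bits occupy the low bits.
 */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG		(sizeof(long) * CHAR_BIT)
#define UNWIND_PENDING_BIT	(BITS_PER_LONG - 1)
#define UNWIND_PENDING		(1UL << UNWIND_PENDING_BIT)

/* Mimics test_and_set_bit(): returns the old value of the bit */
static int test_and_set(int bit, unsigned long *mask)
{
	int old = !!(*mask & (1UL << bit));

	*mask |= 1UL << bit;
	return old;
}

int main(void)
{
	unsigned long unwind_mask = 0;	/* models task->unwind_mask */
	int bit = 0;			/* the tracer's assigned bit */

	/* First request: claim PENDING, then report "newly requested" (0) */
	unwind_mask = UNWIND_PENDING;
	printf("first request:  %d\n", test_and_set(bit, &unwind_mask));

	/* Second request before returning to user space: already queued (1) */
	printf("second request: %d\n", test_and_set(bit, &unwind_mask));

	return 0;
}
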
Here's the patch that implements this:
diff --git a/include/linux/unwind_deferred.h b/include/linux/unwind_deferred.h
index 1789c3624723..db7a8d5d6040 100644
--- a/include/linux/unwind_deferred.h
+++ b/include/linux/unwind_deferred.h
@@ -18,6 +18,9 @@ struct unwind_work {
#ifdef CONFIG_UNWIND_USER
+#define UNWIND_PENDING_BIT (BITS_PER_LONG - 1)
+#define UNWIND_PENDING (1UL << UNWIND_PENDING_BIT)
+
void unwind_task_init(struct task_struct *task);
void unwind_task_free(struct task_struct *task);
@@ -29,7 +32,20 @@ void unwind_deferred_cancel(struct unwind_work *work);
static __always_inline void unwind_exit_to_user_mode(void)
{
- if (unlikely(current->unwind_info.cache))
+ unsigned long bits;
+
+ /* Was there any unwinding? */
+ if (likely(!current->unwind_mask))
+ return;
+
+ bits = current->unwind_mask;
+ do {
+ /* Is a task_work going to run again before going back */
+ if (bits & UNWIND_PENDING)
+ return;
+ } while (!try_cmpxchg(&current->unwind_mask, &bits, 0UL));
+
+ if (likely(current->unwind_info.cache))
current->unwind_info.cache->nr_entries = 0;
current->unwind_info.timestamp = 0;
}
diff --git a/include/linux/unwind_deferred_types.h b/include/linux/unwind_deferred_types.h
index ae27a02234b8..28811a9d4262 100644
--- a/include/linux/unwind_deferred_types.h
+++ b/include/linux/unwind_deferred_types.h
@@ -12,7 +12,6 @@ struct unwind_task_info {
struct callback_head work;
u64 timestamp;
u64 nmi_timestamp;
- int pending;
};
#endif /* _LINUX_UNWIND_USER_DEFERRED_TYPES_H */
diff --git a/kernel/unwind/deferred.c b/kernel/unwind/deferred.c
index 6ffed486bd7b..637ab6491bc5 100644
--- a/kernel/unwind/deferred.c
+++ b/kernel/unwind/deferred.c
@@ -19,6 +19,11 @@ static LIST_HEAD(callbacks);
static unsigned long unwind_mask;
DEFINE_STATIC_SRCU(unwind_srcu);
+static inline bool unwind_pending(struct task_struct *task)
+{
+ return test_bit(UNWIND_PENDING_BIT, &task->unwind_mask);
+}
+
/*
* Read the task context timestamp, if this is the first caller then
* it will set the timestamp.
@@ -99,11 +104,11 @@ static void unwind_deferred_task_work(struct callback_head *head)
struct task_struct *task = current;
int idx;
- if (WARN_ON_ONCE(!info->pending))
+ if (WARN_ON_ONCE(!unwind_pending(task)))
return;
/* Allow work to come in again */
- WRITE_ONCE(info->pending, 0);
+ clear_bit(UNWIND_PENDING_BIT, &task->unwind_mask);
/*
* From here on out, the callback must always be called, even if it's
@@ -126,10 +131,8 @@ static void unwind_deferred_task_work(struct callback_head *head)
idx = srcu_read_lock(&unwind_srcu);
list_for_each_entry_srcu(work, &callbacks, list,
srcu_read_lock_held(&unwind_srcu)) {
- if (task->unwind_mask & (1UL << work->bit)) {
+ if (task->unwind_mask & (1UL << work->bit))
work->func(work, &trace, timestamp);
- clear_bit(work->bit, &current->unwind_mask);
- }
}
srcu_read_unlock(&unwind_srcu, idx);
}
@@ -156,10 +159,11 @@ static int unwind_deferred_request_nmi(struct unwind_work *work, u64 *timestamp)
inited_timestamp = true;
}
+ /* Is this already queued */
if (current->unwind_mask & (1UL << work->bit))
return 1;
- if (info->pending)
+ if (unwind_pending(current))
goto out;
ret = task_work_add(current, &info->work, TWA_NMI_CURRENT);
@@ -174,7 +178,13 @@ static int unwind_deferred_request_nmi(struct unwind_work *work, u64 *timestamp)
return ret;
}
- info->pending = 1;
+ /*
+ * This is the first to set the PENDING_BIT; clear all others,
+ * as any other set bit has already had its callback called, and
+ * those callbacks should not be called again because of this
+ * new callback.
+ */
+ current->unwind_mask = UNWIND_PENDING;
out:
return test_and_set_bit(work->bit, &current->unwind_mask);
}
@@ -202,7 +212,7 @@ static int unwind_deferred_request_nmi(struct unwind_work *work, u64 *timestamp)
int unwind_deferred_request(struct unwind_work *work, u64 *timestamp)
{
struct unwind_task_info *info = &current->unwind_info;
- int pending;
+ unsigned long old, bits;
int bit;
int ret;
@@ -224,25 +234,30 @@ int unwind_deferred_request(struct unwind_work *work, u64 *timestamp)
*timestamp = get_timestamp(info);
- /* This is already queued */
+ /* Is this already queued */
if (current->unwind_mask & (1UL << bit))
return 1;
- /* callback already pending? */
- pending = READ_ONCE(info->pending);
- if (pending)
+ old = current->unwind_mask;
+ barrier();
+
+ if (unwind_pending(current))
goto out;
- /* Claim the work unless an NMI just now swooped in to do so. */
- if (!try_cmpxchg(&info->pending, &pending, 1))
+ /* This is the first to set the pending bit since the task entered the kernel */
+ bits = UNWIND_PENDING | (1UL << bit);
+
+ /* callback already pending? */
+ if (!try_cmpxchg(&current->unwind_mask, &old, bits))
goto out;
/* The work has been claimed, now schedule it. */
ret = task_work_add(current, &info->work, TWA_RESUME);
- if (WARN_ON_ONCE(ret)) {
- WRITE_ONCE(info->pending, 0);
- return ret;
- }
+
+ if (WARN_ON_ONCE(ret))
+ WRITE_ONCE(current->unwind_mask, 0);
+
+ return ret;
out:
return test_and_set_bit(work->bit, &current->unwind_mask);
@@ -281,7 +296,7 @@ int unwind_deferred_init(struct unwind_work *work, unwind_callback_t func)
guard(mutex)(&callback_mutex);
/* See if there's a bit in the mask available */
- if (unwind_mask == ~0UL)
+ if (unwind_mask == ~(UNWIND_PENDING))
return -EBUSY;
work->bit = ffz(unwind_mask);
-- Steve