From: Steven Rostedt <rostedt@goodmis.org>
In order to know which registered callback requested a stacktrace for when
the task goes back to user space, add a bitmask to keep track of all
registered tracers. The bitmask is the size of a long, which means that on a
32-bit machine it can have at most 32 registered tracers, and on a 64-bit
machine at most 64. This should not be an issue, as there should not be
more than 10 (unless BPF can abuse this?).
When a tracer registers with unwind_deferred_init() it will get a bit
number assigned to it. When a tracer requests a stacktrace, it will have
its bit set in the task's unwind_mask. When the task returns to user
space, it will call the callbacks of all the registered tracers whose
bits are set in the task's mask.
When a tracer is removed by unwind_deferred_cancel(), the associated bit is
cleared in all current tasks, just in case another tracer gets registered
immediately afterward and would otherwise have its callback called
unexpectedly.
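
As an illustrative sketch only (not part of this patch), a tracer would use
this interface roughly as follows; the my_tracer_*() names are hypothetical:

#include <linux/unwind_deferred.h>

static struct unwind_work my_tracer_work;

static void my_tracer_unwind_cb(struct unwind_work *work,
				struct unwind_stacktrace *trace, u64 timestamp)
{
	/*
	 * Runs when the task returns to user space, and only if this
	 * tracer's bit is set in the task's unwind_mask. Consuming the
	 * trace is left out of this sketch.
	 */
}

static int my_tracer_start(void)
{
	/* Gets a free bit assigned to this tracer, or fails with -EBUSY */
	return unwind_deferred_init(&my_tracer_work, my_tracer_unwind_cb);
}

static void my_tracer_event(void)
{
	u64 timestamp;

	/* Sets this tracer's bit in the current task and queues the work */
	unwind_deferred_request(&my_tracer_work, &timestamp);
}

static void my_tracer_stop(void)
{
	/* Clears this tracer's bit globally and from every task */
	unwind_deferred_cancel(&my_tracer_work);
}
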
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
Changes since v9: https://lore.kernel.org/linux-trace-kernel/20250513223552.804390728@goodmis.org/
- Use BIT() macro for bit setting and testing.
- Moved the "unwind_mask" from the task_struct into the task->unwind_info
structure.
include/linux/unwind_deferred.h | 1 +
include/linux/unwind_deferred_types.h | 1 +
kernel/unwind/deferred.c | 45 ++++++++++++++++++++++-----
3 files changed, 40 insertions(+), 7 deletions(-)
diff --git a/include/linux/unwind_deferred.h b/include/linux/unwind_deferred.h
index a384eef719a3..1789c3624723 100644
--- a/include/linux/unwind_deferred.h
+++ b/include/linux/unwind_deferred.h
@@ -13,6 +13,7 @@ typedef void (*unwind_callback_t)(struct unwind_work *work, struct unwind_stackt
struct unwind_work {
struct list_head list;
unwind_callback_t func;
+ int bit;
};
#ifdef CONFIG_UNWIND_USER
diff --git a/include/linux/unwind_deferred_types.h b/include/linux/unwind_deferred_types.h
index ae27a02234b8..780b00c07208 100644
--- a/include/linux/unwind_deferred_types.h
+++ b/include/linux/unwind_deferred_types.h
@@ -10,6 +10,7 @@ struct unwind_cache {
struct unwind_task_info {
struct unwind_cache *cache;
struct callback_head work;
+ unsigned long unwind_mask;
u64 timestamp;
u64 nmi_timestamp;
int pending;
diff --git a/kernel/unwind/deferred.c b/kernel/unwind/deferred.c
index 88c867c32c01..268afae31ba4 100644
--- a/kernel/unwind/deferred.c
+++ b/kernel/unwind/deferred.c
@@ -16,6 +16,7 @@
/* Guards adding to and reading the list of callbacks */
static DEFINE_MUTEX(callback_mutex);
static LIST_HEAD(callbacks);
+static unsigned long unwind_mask;
/*
* Read the task context timestamp, if this is the first caller then
@@ -133,7 +134,10 @@ static void unwind_deferred_task_work(struct callback_head *head)
guard(mutex)(&callback_mutex);
list_for_each_entry(work, &callbacks, list) {
- work->func(work, &trace, timestamp);
+ if (info->unwind_mask & BIT(work->bit)) {
+ work->func(work, &trace, timestamp);
+ clear_bit(work->bit, &info->unwind_mask);
+ }
}
}
@@ -159,9 +163,12 @@ static int unwind_deferred_request_nmi(struct unwind_work *work, u64 *timestamp)
inited_timestamp = true;
}
- if (info->pending)
+ if (info->unwind_mask & BIT(work->bit))
return 1;
+ if (info->pending)
+ goto out;
+
ret = task_work_add(current, &info->work, TWA_NMI_CURRENT);
if (ret < 0) {
/*
@@ -175,8 +182,8 @@ static int unwind_deferred_request_nmi(struct unwind_work *work, u64 *timestamp)
}
info->pending = 1;
-
- return 0;
+out:
+ return test_and_set_bit(work->bit, &info->unwind_mask);
}
/**
@@ -223,14 +230,18 @@ int unwind_deferred_request(struct unwind_work *work, u64 *timestamp)
*timestamp = get_timestamp(info);
+ /* This is already queued */
+ if (info->unwind_mask & BIT(work->bit))
+ return 1;
+
/* callback already pending? */
pending = READ_ONCE(info->pending);
if (pending)
- return 1;
+ goto out;
/* Claim the work unless an NMI just now swooped in to do so. */
if (!try_cmpxchg(&info->pending, &pending, 1))
- return 1;
+ goto out;
/* The work has been claimed, now schedule it. */
ret = task_work_add(current, &info->work, TWA_RESUME);
@@ -239,16 +250,27 @@ int unwind_deferred_request(struct unwind_work *work, u64 *timestamp)
return ret;
}
- return 0;
+ out:
+ return test_and_set_bit(work->bit, &info->unwind_mask);
}
void unwind_deferred_cancel(struct unwind_work *work)
{
+ struct task_struct *g, *t;
+
if (!work)
return;
guard(mutex)(&callback_mutex);
list_del(&work->list);
+
+ clear_bit(work->bit, &unwind_mask);
+
+ guard(rcu)();
+ /* Clear this bit from all threads */
+ for_each_process_thread(g, t) {
+ clear_bit(work->bit, &t->unwind_info.unwind_mask);
+ }
}
int unwind_deferred_init(struct unwind_work *work, unwind_callback_t func)
@@ -256,6 +278,14 @@ int unwind_deferred_init(struct unwind_work *work, unwind_callback_t func)
memset(work, 0, sizeof(*work));
guard(mutex)(&callback_mutex);
+
+ /* See if there's a bit in the mask available */
+ if (unwind_mask == ~0UL)
+ return -EBUSY;
+
+ work->bit = ffz(unwind_mask);
+ unwind_mask |= BIT(work->bit);
+
list_add(&work->list, &callbacks);
work->func = func;
return 0;
@@ -267,6 +297,7 @@ void unwind_task_init(struct task_struct *task)
memset(info, 0, sizeof(*info));
init_task_work(&info->work, unwind_deferred_task_work);
+ info->unwind_mask = 0;
}
void unwind_task_free(struct task_struct *task)
--
2.47.2
On Tue, Jun 10, 2025 at 08:54:29PM -0400, Steven Rostedt wrote:

> void unwind_deferred_cancel(struct unwind_work *work)
> {
> + struct task_struct *g, *t;
> +
> if (!work)
> return;
>
> guard(mutex)(&callback_mutex);
> list_del(&work->list);
> +
> + clear_bit(work->bit, &unwind_mask);

atomic bitop

> +
> + guard(rcu)();
> + /* Clear this bit from all threads */
> + for_each_process_thread(g, t) {
> + clear_bit(work->bit, &t->unwind_info.unwind_mask);
> + }
> }
>
> int unwind_deferred_init(struct unwind_work *work, unwind_callback_t func)
> @@ -256,6 +278,14 @@ int unwind_deferred_init(struct unwind_work *work, unwind_callback_t func)
> memset(work, 0, sizeof(*work));
>
> guard(mutex)(&callback_mutex);
> +
> + /* See if there's a bit in the mask available */
> + if (unwind_mask == ~0UL)
> + return -EBUSY;
> +
> + work->bit = ffz(unwind_mask);
> + unwind_mask |= BIT(work->bit);

regular or

> +
> list_add(&work->list, &callbacks);
> work->func = func;
> return 0;
> @@ -267,6 +297,7 @@ void unwind_task_init(struct task_struct *task)
>
> memset(info, 0, sizeof(*info));
> init_task_work(&info->work, unwind_deferred_task_work);
> + info->unwind_mask = 0;
> }

Which is somewhat inconsistent;

__clear_bit()/__set_bit()

or:

unwind_mask &= ~BIT() / unwind_mask |= BIT()
On Fri, 20 Jun 2025 10:15:42 +0200
Peter Zijlstra <peterz@infradead.org> wrote:

> On Tue, Jun 10, 2025 at 08:54:29PM -0400, Steven Rostedt wrote:
>
> > void unwind_deferred_cancel(struct unwind_work *work)
> > {
> > + struct task_struct *g, *t;
> > +
> > if (!work)
> > return;
> >
> > guard(mutex)(&callback_mutex);
> > list_del(&work->list);
> > +
> > + clear_bit(work->bit, &unwind_mask);
>
> atomic bitop

Yeah, it just seemed cleaner than:

	unwind_mask &= ~(work->bit);

It's not needed as the update of unwind_mask is done within the
callback_mutex.

> > +
> > + guard(rcu)();
> > + /* Clear this bit from all threads */
> > + for_each_process_thread(g, t) {
> > + clear_bit(work->bit, &t->unwind_info.unwind_mask);
> > + }
> > }
> >
> > int unwind_deferred_init(struct unwind_work *work, unwind_callback_t func)
> > @@ -256,6 +278,14 @@ int unwind_deferred_init(struct unwind_work *work, unwind_callback_t func)
> > memset(work, 0, sizeof(*work));
> >
> > guard(mutex)(&callback_mutex);
> > +
> > + /* See if there's a bit in the mask available */
> > + if (unwind_mask == ~0UL)
> > + return -EBUSY;
> > +
> > + work->bit = ffz(unwind_mask);
> > + unwind_mask |= BIT(work->bit);
>
> regular or
>
> > +
> > list_add(&work->list, &callbacks);
> > work->func = func;
> > return 0;
> > @@ -267,6 +297,7 @@ void unwind_task_init(struct task_struct *task)
> >
> > memset(info, 0, sizeof(*info));
> > init_task_work(&info->work, unwind_deferred_task_work);
> > + info->unwind_mask = 0;
> > }
>
> Which is somewhat inconsistent;
>
> __clear_bit()/__set_bit()

Hmm, are the above non-atomic?

>
> or:
>
> unwind_mask &= ~BIT() / unwind_mask |= BIT()

although, because the update is always guarded, this may be the better
approach, as it shows there's no atomic needed.

-- Steve
On Tue, Jun 24, 2025 at 10:55:38AM -0400, Steven Rostedt wrote:

> > Which is somewhat inconsistent;
> >
> > __clear_bit()/__set_bit()
>
> Hmm, are the above non-atomic?

Yes, ctags or any other code browser of your choice should get you to
their definition, which has a comment explaining the non-atomicity of
them.
On Tue, 24 Jun 2025 17:00:21 +0200
Peter Zijlstra <peterz@infradead.org> wrote:

> On Tue, Jun 24, 2025 at 10:55:38AM -0400, Steven Rostedt wrote:
> > > Which is somewhat inconsistent;
> > >
> > > __clear_bit()/__set_bit()
> >
> > Hmm, are the above non-atomic?
>
> Yes, ctags or any other code browser of your choice should get you to
> their definition, which has a comment explaining the non-atomicity of
> them.

Bah, I did do a TAGS function (emacs) to find them, but totally missed
the comment above. I just saw the macro magic of them, but totally
missed the comment above them saying:

/*
 * The following macros are non-atomic versions of their non-underscored
 * counterparts.
 */

:-p

-- Steve
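
For reference, a minimal sketch of the bit-update variants discussed in this
thread (the mask and bit number here are made up for illustration):

#include <linux/bitops.h>

static unsigned long example_mask;

static void example_updates(int bit)
{
	set_bit(bit, &example_mask);		/* atomic RMW */
	clear_bit(bit, &example_mask);		/* atomic RMW */

	__set_bit(bit, &example_mask);		/* non-atomic version */
	__clear_bit(bit, &example_mask);	/* non-atomic version */

	/*
	 * Plain ops: make it obvious that no atomicity is needed when
	 * the update is always serialized, e.g. under callback_mutex.
	 */
	example_mask |= BIT(bit);
	example_mask &= ~BIT(bit);
}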