From: Peter Zijlstra <peterz@infradead.org>
kasan_record_aux_stack_noalloc() was introduced to record a stack trace
without allocating memory in the process. It has been added to callers
which were invoked while a raw_spinlock_t was held.
More and more callers were identified and changed over time. Is it a
good thing to have this while functions try their best to do a
lockless setup? The only downside of having kasan_record_aux_stack()
not allocate any memory is that we end up without a stacktrace if
stackdepot runs out of memory and the same stacktrace was not
recorded before. To quote Marco Elver from
https://lore.kernel.org/all/CANpmjNPmQYJ7pv1N3cuU8cP18u7PP_uoZD8YxwZd4jtbof9nVQ@mail.gmail.com/
https://lore.kernel.org/all/CANpmjNPmQYJ7pv1N3cuU8cP18u7PP_uoZD8YxwZd4jtbof9nVQ@mail.gmail.com/
| I'd be in favor, it simplifies things. And stack depot should be
| able to replenish its pool sufficiently in the "non-aux" cases
| i.e. regular allocations. Worst case we fail to record some
| aux stacks, but I think that's only really bad if there's a bug
| around one of these allocations. In general the probabilities
| of this being a regression are extremely small [...]
Make the kasan_record_aux_stack_noalloc() behaviour the default for
kasan_record_aux_stack().
[bigeasy: Dressed the diff as patch. ]
Reported-by: syzbot+39f85d612b7c20d8db48@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/all/67275485.050a0220.3c8d68.0a37.GAE@google.com
Acked-by: Waiman Long <longman@redhat.com>
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Reviewed-by: Marco Elver <elver@google.com>
Fixes: 7cb3007ce2da2 ("kasan: generic: introduce kasan_record_aux_stack_noalloc()")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
v1…v2:
- Renamed the patch as per Marco.
- Added comment to kasan_record_aux_stack() as per Andrey.
- Added Fixes tag since Waiman noted that it is the only user.
- Added Marco's quote from the mail to the commit description.
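
For illustration, a minimal hypothetical sketch of what the
consolidation means for a caller in atomic context; enqueue_object()
and queue_lock are made up, only kasan_record_aux_stack() comes from
this series:

#include <linux/kasan.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(queue_lock);

/* Hypothetical caller, for illustration only. */
static void enqueue_object(void *obj)
{
	raw_spin_lock(&queue_lock);
	/*
	 * Previously, atomic-context callers had to pick
	 * kasan_record_aux_stack_noalloc(), because plain
	 * kasan_record_aux_stack() could allocate stack depot pool
	 * pages. With this patch there is a single function that never
	 * allocates; the aux stack is simply not recorded if stack
	 * depot has no space left in its pools.
	 */
	kasan_record_aux_stack(obj);
	/* ... link obj into the queue ... */
	raw_spin_unlock(&queue_lock);
}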
include/linux/kasan.h | 2 --
include/linux/task_work.h | 3 ---
kernel/irq_work.c | 2 +-
kernel/rcu/tiny.c | 2 +-
kernel/rcu/tree.c | 4 ++--
kernel/sched/core.c | 2 +-
kernel/task_work.c | 14 +-------------
kernel/workqueue.c | 2 +-
mm/kasan/generic.c | 18 ++++++------------
mm/slub.c | 2 +-
10 files changed, 14 insertions(+), 37 deletions(-)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 00a3bf7c0d8f0..1a623818e8b39 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -488,7 +488,6 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
-void kasan_record_aux_stack_noalloc(void *ptr);
#else /* CONFIG_KASAN_GENERIC */
@@ -506,7 +505,6 @@ static inline void kasan_cache_create(struct kmem_cache *cache,
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
-static inline void kasan_record_aux_stack_noalloc(void *ptr) {}
#endif /* CONFIG_KASAN_GENERIC */
diff --git a/include/linux/task_work.h b/include/linux/task_work.h
index 2964171856e00..0646804860ff1 100644
--- a/include/linux/task_work.h
+++ b/include/linux/task_work.h
@@ -19,9 +19,6 @@ enum task_work_notify_mode {
TWA_SIGNAL,
TWA_SIGNAL_NO_IPI,
TWA_NMI_CURRENT,
-
- TWA_FLAGS = 0xff00,
- TWAF_NO_ALLOC = 0x0100,
};
static inline bool task_work_pending(struct task_struct *task)
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 2f4fb336dda17..73f7e1fd4ab4d 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -147,7 +147,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
if (!irq_work_claim(work))
return false;
- kasan_record_aux_stack_noalloc(work);
+ kasan_record_aux_stack(work);
preempt_disable();
if (cpu != smp_processor_id()) {
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index b3b3ce34df631..4b3f319114650 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -250,7 +250,7 @@ EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
void kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
if (head)
- kasan_record_aux_stack_noalloc(ptr);
+ kasan_record_aux_stack(ptr);
__kvfree_call_rcu(head, ptr);
}
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index b1f883fcd9185..7eae9bd818a90 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3083,7 +3083,7 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
}
head->func = func;
head->next = NULL;
- kasan_record_aux_stack_noalloc(head);
+ kasan_record_aux_stack(head);
local_irq_save(flags);
rdp = this_cpu_ptr(&rcu_data);
lazy = lazy_in && !rcu_async_should_hurry();
@@ -3807,7 +3807,7 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
return;
}
- kasan_record_aux_stack_noalloc(ptr);
+ kasan_record_aux_stack(ptr);
success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
if (!success) {
run_page_cache_worker(krcp);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a1c353a62c568..3717360a940d2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10485,7 +10485,7 @@ void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
return;
/* No page allocation under rq lock */
- task_work_add(curr, work, TWA_RESUME | TWAF_NO_ALLOC);
+ task_work_add(curr, work, TWA_RESUME);
}
void sched_mm_cid_exit_signals(struct task_struct *t)
diff --git a/kernel/task_work.c b/kernel/task_work.c
index c969f1f26be58..d1efec571a4a4 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -55,26 +55,14 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
enum task_work_notify_mode notify)
{
struct callback_head *head;
- int flags = notify & TWA_FLAGS;
- notify &= ~TWA_FLAGS;
if (notify == TWA_NMI_CURRENT) {
if (WARN_ON_ONCE(task != current))
return -EINVAL;
if (!IS_ENABLED(CONFIG_IRQ_WORK))
return -EINVAL;
} else {
- /*
- * Record the work call stack in order to print it in KASAN
- * reports.
- *
- * Note that stack allocation can fail if TWAF_NO_ALLOC flag
- * is set and new page is needed to expand the stack buffer.
- */
- if (flags & TWAF_NO_ALLOC)
- kasan_record_aux_stack_noalloc(work);
- else
- kasan_record_aux_stack(work);
+ kasan_record_aux_stack(work);
}
head = READ_ONCE(task->task_works);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9949ffad8df09..65b8314b2d538 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2180,7 +2180,7 @@ static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
debug_work_activate(work);
/* record the work call stack in order to print it in KASAN reports */
- kasan_record_aux_stack_noalloc(work);
+ kasan_record_aux_stack(work);
/* we own @work, set data and link */
set_work_pwq(work, pwq, extra_flags);
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 6310a180278b6..2242249c2d50d 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -521,7 +521,11 @@ size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
sizeof(struct kasan_free_meta) : 0);
}
-static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
+/*
+ * This function avoids dynamic memory allocations and thus can be called from
+ * contexts that do not allow allocating memory.
+ */
+void kasan_record_aux_stack(void *addr)
{
struct slab *slab = kasan_addr_to_slab(addr);
struct kmem_cache *cache;
@@ -538,17 +542,7 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
return;
alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
- alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
-}
-
-void kasan_record_aux_stack(void *addr)
-{
- return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC);
-}
-
-void kasan_record_aux_stack_noalloc(void *addr)
-{
- return __kasan_record_aux_stack(addr, 0);
+ alloc_meta->aux_stack[0] = kasan_save_stack(0, 0);
}
void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
diff --git a/mm/slub.c b/mm/slub.c
index 5b832512044e3..b8c4bf3fe0d07 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2300,7 +2300,7 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
* We have to do this manually because the rcu_head is
* not located inside the object.
*/
- kasan_record_aux_stack_noalloc(x);
+ kasan_record_aux_stack(x);
delayed_free->object = x;
call_rcu(&delayed_free->head, slab_free_after_rcu_debug);
--
2.45.2
On 11/22/24 10:54 AM, Sebastian Andrzej Siewior wrote:
> [...]
LGTM
Reviewed-by: Waiman Long <longman@redhat.com>