Syzkaller reported a KASAN slab-out-of-bounds write in __bpf_get_stack()
during stack trace copying.
The issue occurs when the callchain entry (stored in a per-cpu buffer)
grows between collection and the buffer copy, causing it to exceed the
buffer size initially calculated from max_depth.
The callchain collection intentionally avoids locking for performance
reasons, but this creates a window where concurrent modifications can
occur during the copy operation.
To prevent this, clamp the trace length to max_depth, which was
initially derived from the buffer size and the size of a trace entry.
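
For illustration, a simplified sketch of the copy path with the clamp in
place (variable names follow the diff below; buf stands for the destination
buffer, and the real code picks between a kernel and a user copy helper
depending on the flags):

	/* buf was sized for at most max_depth entries of elem_size bytes */
	trace_nr = min(trace->nr, max_depth);	/* clamp before sizing the copy */
	trace_nr = trace_nr - skip;
	copy_len = trace_nr * elem_size;	/* can no longer exceed the buffer */
	memcpy(buf, trace->ip + skip, copy_len);

Without the clamp, a concurrently grown trace->nr inflates copy_len past
the buffer that was sized from max_depth, producing the out-of-bounds write.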
Reported-by: syzbot+d1b7fa1092def3628bd7@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/all/691231dc.a70a0220.22f260.0101.GAE@google.com/T/
Fixes: e17d62fedd10 ("bpf: Refactor stack map trace depth calculation into helper function")
Tested-by: syzbot+d1b7fa1092def3628bd7@syzkaller.appspotmail.com
Cc: Brahmajit Das <listout@listout.xyz>
Signed-off-by: Arnaud Lecomte <contact@arnaud-lcm.com>
---
Changes in v2:
- Moved the clamping of trace_nr to max_depth above the trace->nr < skip
  check.
Link to v1: https://lore.kernel.org/all/20260104205220.980752-1-contact@arnaud-lcm.com/
Thanks to Brahmajit Das for the initial fix he proposed, which I tweaked
with what I believe is the correct justification and a better
implementation.
---
kernel/bpf/stackmap.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index da3d328f5c15..c0a430f9eafb 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -465,7 +465,6 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
if (trace_in) {
trace = trace_in;
- trace->nr = min_t(u32, trace->nr, max_depth);
} else if (kernel && task) {
trace = get_callchain_entry_for_task(task, max_depth);
} else {
@@ -473,13 +472,15 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
crosstask, false, 0);
}
- if (unlikely(!trace) || trace->nr < skip) {
+ trace_nr = min(trace->nr, max_depth);
+
+ if (unlikely(!trace) || trace_nr < skip) {
if (may_fault)
rcu_read_unlock();
goto err_fault;
}
- trace_nr = trace->nr - skip;
+ trace_nr = trace_nr - skip;
copy_len = trace_nr * elem_size;
ips = trace->ip + skip;
--
2.43.0
Aborting in favor of my comment on the first rev.
On 07/01/2026 19:12, Arnaud Lecomte wrote:
> Syzkaller reported a KASAN slab-out-of-bounds write in __bpf_get_stack()
> during stack trace copying.
>
> The issue occurs when the callchain entry (stored in a per-cpu buffer)
> grows between collection and the buffer copy, causing it to exceed the
> buffer size initially calculated from max_depth.
>
> The callchain collection intentionally avoids locking for performance
> reasons, but this creates a window where concurrent modifications can
> occur during the copy operation.
>
> To prevent this, clamp the trace length to max_depth, which was
> initially derived from the buffer size and the size of a trace entry.
>
> Reported-by: syzbot+d1b7fa1092def3628bd7@syzkaller.appspotmail.com
> Closes: https://lore.kernel.org/all/691231dc.a70a0220.22f260.0101.GAE@google.com/T/
> Fixes: e17d62fedd10 ("bpf: Refactor stack map trace depth calculation into helper function")
> Tested-by: syzbot+d1b7fa1092def3628bd7@syzkaller.appspotmail.com
> Cc: Brahmajit Das <listout@listout.xyz>
> Signed-off-by: Arnaud Lecomte <contact@arnaud-lcm.com>
> ---
> Changes in v2:
> - Moved the clamping of trace_nr to max_depth above the trace->nr < skip
>   check.
> Link to v1: https://lore.kernel.org/all/20260104205220.980752-1-contact@arnaud-lcm.com/
>
> Thanks to Brahmajit Das for the initial fix he proposed, which I tweaked
> with what I believe is the correct justification and a better
> implementation.
> ---
> kernel/bpf/stackmap.c | 7 ++++---
> 1 file changed, 4 insertions(+), 3 deletions(-)
>
> diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
> index da3d328f5c15..c0a430f9eafb 100644
> --- a/kernel/bpf/stackmap.c
> +++ b/kernel/bpf/stackmap.c
> @@ -465,7 +465,6 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
>
> if (trace_in) {
> trace = trace_in;
> - trace->nr = min_t(u32, trace->nr, max_depth);
> } else if (kernel && task) {
> trace = get_callchain_entry_for_task(task, max_depth);
> } else {
> @@ -473,13 +472,15 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
> crosstask, false, 0);
> }
>
> - if (unlikely(!trace) || trace->nr < skip) {
> + trace_nr = min(trace->nr, max_depth);
> +
> + if (unlikely(!trace) || trace_nr < skip) {
> if (may_fault)
> rcu_read_unlock();
> goto err_fault;
> }
>
> - trace_nr = trace->nr - skip;
> + trace_nr = trace_nr - skip;
> copy_len = trace_nr * elem_size;
>
> ips = trace->ip + skip;
> diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
> index da3d328f5c15..c0a430f9eafb 100644
> --- a/kernel/bpf/stackmap.c
> +++ b/kernel/bpf/stackmap.c
> @@ -465,7 +465,6 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
>
> if (trace_in) {
> trace = trace_in;
> - trace->nr = min_t(u32, trace->nr, max_depth);
> } else if (kernel && task) {
> trace = get_callchain_entry_for_task(task, max_depth);
> } else {
> @@ -473,13 +472,15 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
> crosstask, false, 0);
> }
>
> + trace_nr = min(trace->nr, max_depth);
^^^^^^^^
Can this dereference trace when it is NULL? Both get_callchain_entry_for_task()
and get_perf_callchain() can return NULL, and several callers pass trace_in as
NULL (bpf_get_stack, bpf_get_stack_sleepable, __bpf_get_task_stack, and one
path in bpf_get_stack_pe).
The NULL check below comes after this dereference:
> +
> + if (unlikely(!trace) || trace_nr < skip) {
^^^^^^
This check is now ineffective because trace->nr was already dereferenced above.
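
A sketch of one possible ordering that avoids the dereference (illustration
only, reusing the names from the hunk above and abbreviating the error path):

	if (unlikely(!trace)) {
		if (may_fault)
			rcu_read_unlock();
		goto err_fault;
	}

	/* trace is known to be non-NULL here, so the clamp is safe */
	trace_nr = min_t(u32, trace->nr, max_depth);
	if (trace_nr < skip) {
		if (may_fault)
			rcu_read_unlock();
		goto err_fault;
	}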
[ ... ]
---
AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md
CI run summary: https://github.com/kernel-patches/bpf/actions/runs/20791981532