In the BPF stack map we want to use our own buffer to avoid an unnecessary
copy, so allow passing one in directly. BPF will use this in the next patch.
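
For illustration, a minimal sketch of a caller-owned buffer (not part of this
patch; the allocation site and sizing below are hypothetical, the follow-up
BPF patch may handle this differently):

	struct perf_callchain_entry *entry, *trace;

	/* Size the buffer for the deepest stack we may record. */
	entry = kmalloc(struct_size(entry, ip, max_depth), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	/* Pass the caller-owned entry instead of a pooled callchain buffer. */
	trace = get_perf_callchain(regs, entry, kernel, user, max_depth,
				   false, false);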
Signed-off-by: Tao Chen <chen.dylane@linux.dev>
---
include/linux/perf_event.h | 4 ++--
kernel/bpf/stackmap.c | 4 ++--
kernel/events/callchain.c | 13 +++++++++----
kernel/events/core.c | 2 +-
4 files changed, 14 insertions(+), 9 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index fd1d91017b9..b144da7d803 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1719,8 +1719,8 @@ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern struct perf_callchain_entry *
-get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
- u32 max_stack, bool crosstask, bool add_mark);
+get_perf_callchain(struct pt_regs *regs, struct perf_callchain_entry *external_entry,
+ bool kernel, bool user, u32 max_stack, bool crosstask, bool add_mark);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);
extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 4d53cdd1374..94e46b7f340 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -314,7 +314,7 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
if (max_depth > sysctl_perf_event_max_stack)
max_depth = sysctl_perf_event_max_stack;
- trace = get_perf_callchain(regs, kernel, user, max_depth,
+ trace = get_perf_callchain(regs, NULL, kernel, user, max_depth,
false, false);
if (unlikely(!trace))
@@ -451,7 +451,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
else if (kernel && task)
trace = get_callchain_entry_for_task(task, max_depth);
else
- trace = get_perf_callchain(regs, kernel, user, max_depth,
+ trace = get_perf_callchain(regs, NULL, kernel, user, max_depth,
crosstask, false);
if (unlikely(!trace) || trace->nr < skip) {
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 808c0d7a31f..851e8f9d026 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -217,8 +217,8 @@ static void fixup_uretprobe_trampoline_entries(struct perf_callchain_entry *entr
}
struct perf_callchain_entry *
-get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
- u32 max_stack, bool crosstask, bool add_mark)
+get_perf_callchain(struct pt_regs *regs, struct perf_callchain_entry *external_entry,
+ bool kernel, bool user, u32 max_stack, bool crosstask, bool add_mark)
{
struct perf_callchain_entry *entry;
struct perf_callchain_entry_ctx ctx;
@@ -228,7 +228,11 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
if (crosstask && user && !kernel)
return NULL;
- entry = get_callchain_entry(&rctx);
+ if (external_entry)
+ entry = external_entry;
+ else
+ entry = get_callchain_entry(&rctx);
+
if (!entry)
return NULL;
@@ -260,7 +264,8 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
}
exit_put:
- put_callchain_entry(rctx);
+ if (!external_entry)
+ put_callchain_entry(rctx);
return entry;
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7541f6f85fc..5d8e146003a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -8217,7 +8217,7 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
if (!kernel && !user)
return &__empty_callchain;
- callchain = get_perf_callchain(regs, kernel, user,
+ callchain = get_perf_callchain(regs, NULL, kernel, user,
max_stack, crosstask, true);
return callchain ?: &__empty_callchain;
}
--
2.48.1
On Mon, Oct 20, 2025 at 01:01:17AM +0800, Tao Chen wrote:
> From bpf stack map, we want to use our own buffers to avoid unnecessary
> copy, so let us pass it directly. BPF will use this in the next patch.
>
> Signed-off-by: Tao Chen <chen.dylane@linux.dev>
> ---
> diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
> index 808c0d7a31f..851e8f9d026 100644
> --- a/kernel/events/callchain.c
> +++ b/kernel/events/callchain.c
> @@ -217,8 +217,8 @@ static void fixup_uretprobe_trampoline_entries(struct perf_callchain_entry *entr
> }
>
> struct perf_callchain_entry *
> -get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
> - u32 max_stack, bool crosstask, bool add_mark)
> +get_perf_callchain(struct pt_regs *regs, struct perf_callchain_entry *external_entry,
> + bool kernel, bool user, u32 max_stack, bool crosstask, bool add_mark)
> {
> struct perf_callchain_entry *entry;
> struct perf_callchain_entry_ctx ctx;
> @@ -228,7 +228,11 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
> if (crosstask && user && !kernel)
> return NULL;
>
> - entry = get_callchain_entry(&rctx);
> + if (external_entry)
> + entry = external_entry;
> + else
> + entry = get_callchain_entry(&rctx);
> +
> if (!entry)
> return NULL;
>
> @@ -260,7 +264,8 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
> }
>
> exit_put:
> - put_callchain_entry(rctx);
> + if (!external_entry)
> + put_callchain_entry(rctx);
>
> return entry;
> }
Urgh.. How about something like the below, and then you fix up
__bpf_get_stack() a little like this:
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 4d53cdd1374c..8b85b49cecf7 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -303,8 +303,8 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
u32 max_depth = map->value_size / stack_map_data_size(map);
u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
bool user = flags & BPF_F_USER_STACK;
+ struct perf_callchain_entry_ctx ctx;
struct perf_callchain_entry *trace;
- bool kernel = !user;
if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
@@ -314,8 +314,13 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
if (max_depth > sysctl_perf_event_max_stack)
max_depth = sysctl_perf_event_max_stack;
- trace = get_perf_callchain(regs, kernel, user, max_depth,
- false, false);
+ trace = your-stuff;
+
+ __init_perf_callchain_ctx(&ctx, trace, max_depth, false);
+ if (!user)
+ __get_perf_callchain_kernel(&ctx, regs);
+ else
+ __get_perf_callchain_user(&ctx, regs);
if (unlikely(!trace))
/* couldn't fetch the stack trace */
---
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index fd1d91017b99..14a382cad1dd 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -67,6 +67,7 @@ struct perf_callchain_entry_ctx {
u32 nr;
short contexts;
bool contexts_maxed;
+ bool add_mark;
};
typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
@@ -1718,9 +1719,17 @@ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
+
+extern void __init_perf_callchain_ctx(struct perf_callchain_entry_ctx *ctx,
+ struct perf_callchain_entry *entry,
+ u32 max_stack, bool add_mark);
+
+extern void __get_perf_callchain_kernel(struct perf_callchain_entry_ctx *ctx, struct pt_regs *regs);
+extern void __get_perf_callchain_user(struct perf_callchain_entry_ctx *ctx, struct pt_regs *regs);
+
extern struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
- u32 max_stack, bool crosstask, bool add_mark);
+ u32 max_stack, bool crosstask);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);
extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 808c0d7a31fa..edd76e3bb139 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -216,50 +216,70 @@ static void fixup_uretprobe_trampoline_entries(struct perf_callchain_entry *entr
#endif
}
+void __init_perf_callchain_ctx(struct perf_callchain_entry_ctx *ctx,
+ struct perf_callchain_entry *entry,
+ u32 max_stack, bool add_mark)
+
+{
+ ctx->entry = entry;
+ ctx->max_stack = max_stack;
+ ctx->nr = entry->nr = 0;
+ ctx->contexts = 0;
+ ctx->contexts_maxed = false;
+ ctx->add_mark = add_mark;
+}
+
+void __get_perf_callchain_kernel(struct perf_callchain_entry_ctx *ctx, struct pt_regs *regs)
+{
+ if (user_mode(regs))
+ return;
+
+ if (ctx->add_mark)
+ perf_callchain_store_context(ctx, PERF_CONTEXT_KERNEL);
+ perf_callchain_kernel(ctx, regs);
+}
+
+void __get_perf_callchain_user(struct perf_callchain_entry_ctx *ctx, struct pt_regs *regs)
+{
+ int start_entry_idx;
+
+ if (!user_mode(regs)) {
+ if (current->flags & (PF_KTHREAD | PF_USER_WORKER))
+ return;
+ regs = task_pt_regs(current);
+ }
+
+ if (ctx->add_mark)
+ perf_callchain_store_context(ctx, PERF_CONTEXT_USER);
+
+ start_entry_idx = ctx->entry->nr;
+ perf_callchain_user(ctx, regs);
+ fixup_uretprobe_trampoline_entries(ctx->entry, start_entry_idx);
+}
+
struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
- u32 max_stack, bool crosstask, bool add_mark)
+ u32 max_stack, bool crosstask)
{
- struct perf_callchain_entry *entry;
struct perf_callchain_entry_ctx ctx;
- int rctx, start_entry_idx;
+ struct perf_callchain_entry *entry;
+ int rctx;
/* crosstask is not supported for user stacks */
if (crosstask && user && !kernel)
return NULL;
- entry = get_callchain_entry(&rctx);
+ entry = get_callchain_entry(&rctx);
if (!entry)
return NULL;
- ctx.entry = entry;
- ctx.max_stack = max_stack;
- ctx.nr = entry->nr = 0;
- ctx.contexts = 0;
- ctx.contexts_maxed = false;
+ __init_perf_callchain_ctx(&ctx, entry, max_stack, true);
- if (kernel && !user_mode(regs)) {
- if (add_mark)
- perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
- perf_callchain_kernel(&ctx, regs);
- }
+ if (kernel)
+ __get_perf_callchain_kernel(&ctx, regs);
+ if (user && !crosstask)
+ __get_perf_callchain_user(&ctx, regs);
- if (user && !crosstask) {
- if (!user_mode(regs)) {
- if (current->flags & (PF_KTHREAD | PF_USER_WORKER))
- goto exit_put;
- regs = task_pt_regs(current);
- }
-
- if (add_mark)
- perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);
-
- start_entry_idx = entry->nr;
- perf_callchain_user(&ctx, regs);
- fixup_uretprobe_trampoline_entries(entry, start_entry_idx);
- }
-
-exit_put:
put_callchain_entry(rctx);
return entry;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 177e57c1a362..cbe073d761a8 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -8218,7 +8218,7 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
return &__empty_callchain;
callchain = get_perf_callchain(regs, kernel, user,
- max_stack, crosstask, true);
+ max_stack, crosstask);
return callchain ?: &__empty_callchain;
}
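
For illustration only, a rough sketch of how a caller that supplies its own
buffer could drive the helpers above (my_buffer() is hypothetical and not part
of this proposal):

	struct perf_callchain_entry_ctx ctx;
	/* Caller-owned, suitably sized buffer (hypothetical source). */
	struct perf_callchain_entry *entry = my_buffer();

	__init_perf_callchain_ctx(&ctx, entry, max_stack, false);
	if (kernel)
		__get_perf_callchain_kernel(&ctx, regs);
	if (user && !crosstask)
		__get_perf_callchain_user(&ctx, regs);

	/* entry->nr now holds the number of values stored in entry->ip[]. */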
On 2025/10/20 19:40, Peter Zijlstra wrote:
Hi, Peter
> On Mon, Oct 20, 2025 at 01:01:17AM +0800, Tao Chen wrote:
>> From bpf stack map, we want to use our own buffers to avoid unnecessary
>> copy, so let us pass it directly. BPF will use this in the next patch.
>>
>> Signed-off-by: Tao Chen <chen.dylane@linux.dev>
>> ---
>
>> diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
>> index 808c0d7a31f..851e8f9d026 100644
>> --- a/kernel/events/callchain.c
>> +++ b/kernel/events/callchain.c
>> @@ -217,8 +217,8 @@ static void fixup_uretprobe_trampoline_entries(struct perf_callchain_entry *entr
>> }
>>
>> struct perf_callchain_entry *
>> -get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
>> - u32 max_stack, bool crosstask, bool add_mark)
>> +get_perf_callchain(struct pt_regs *regs, struct perf_callchain_entry *external_entry,
>> + bool kernel, bool user, u32 max_stack, bool crosstask, bool add_mark)
>> {
>> struct perf_callchain_entry *entry;
>> struct perf_callchain_entry_ctx ctx;
>> @@ -228,7 +228,11 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
>> if (crosstask && user && !kernel)
>> return NULL;
>>
>> - entry = get_callchain_entry(&rctx);
>> + if (external_entry)
>> + entry = external_entry;
>> + else
>> + entry = get_callchain_entry(&rctx);
>> +
>> if (!entry)
>> return NULL;
>>
>> @@ -260,7 +264,8 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
>> }
>>
>> exit_put:
>> - put_callchain_entry(rctx);
>> + if (!external_entry)
>> + put_callchain_entry(rctx);
>>
>> return entry;
>> }
>
> Urgh.. How about something like the below, and then you fix up
> __bpf_get_stack() a little like this:
>
Your solution seems to be more scalable; I will develop based on yours,
thanks a lot.
>
> diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
> index 4d53cdd1374c..8b85b49cecf7 100644
> --- a/kernel/bpf/stackmap.c
> +++ b/kernel/bpf/stackmap.c
> @@ -303,8 +303,8 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
> u32 max_depth = map->value_size / stack_map_data_size(map);
> u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
> bool user = flags & BPF_F_USER_STACK;
> + struct perf_callchain_entry_ctx ctx;
> struct perf_callchain_entry *trace;
> - bool kernel = !user;
>
> if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
> BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
> @@ -314,8 +314,13 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
> if (max_depth > sysctl_perf_event_max_stack)
> max_depth = sysctl_perf_event_max_stack;
>
> - trace = get_perf_callchain(regs, kernel, user, max_depth,
> - false, false);
> + trace = your-stuff;
> +
> + __init_perf_callchain_ctx(&ctx, trace, max_depth, false);
> + if (!user)
> + __get_perf_callchain_kernel(&ctx, regs);
> + else
> + __get_perf_callchain_user(&ctx, regs);
>
> if (unlikely(!trace))
> /* couldn't fetch the stack trace */
>
>
>
> ---
> diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
> index fd1d91017b99..14a382cad1dd 100644
> --- a/include/linux/perf_event.h
> +++ b/include/linux/perf_event.h
> @@ -67,6 +67,7 @@ struct perf_callchain_entry_ctx {
> u32 nr;
> short contexts;
> bool contexts_maxed;
> + bool add_mark;
> };
>
> typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
> @@ -1718,9 +1719,17 @@ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
>
> extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
> extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
> +
> +extern void __init_perf_callchain_ctx(struct perf_callchain_entry_ctx *ctx,
> + struct perf_callchain_entry *entry,
> + u32 max_stack, bool add_mark);
> +
> +extern void __get_perf_callchain_kernel(struct perf_callchain_entry_ctx *ctx, struct pt_regs *regs);
> +extern void __get_perf_callchain_user(struct perf_callchain_entry_ctx *ctx, struct pt_regs *regs);
> +
> extern struct perf_callchain_entry *
> get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
> - u32 max_stack, bool crosstask, bool add_mark);
> + u32 max_stack, bool crosstask);
> extern int get_callchain_buffers(int max_stack);
> extern void put_callchain_buffers(void);
> extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
> diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
> index 808c0d7a31fa..edd76e3bb139 100644
> --- a/kernel/events/callchain.c
> +++ b/kernel/events/callchain.c
> @@ -216,50 +216,70 @@ static void fixup_uretprobe_trampoline_entries(struct perf_callchain_entry *entr
> #endif
> }
>
> +void __init_perf_callchain_ctx(struct perf_callchain_entry_ctx *ctx,
> + struct perf_callchain_entry *entry,
> + u32 max_stack, bool add_mark)
> +
> +{
> + ctx->entry = entry;
> + ctx->max_stack = max_stack;
> + ctx->nr = entry->nr = 0;
> + ctx->contexts = 0;
> + ctx->contexts_maxed = false;
> + ctx->add_mark = add_mark;
> +}
> +
> +void __get_perf_callchain_kernel(struct perf_callchain_entry_ctx *ctx, struct pt_regs *regs)
> +{
> + if (user_mode(regs))
> + return;
> +
> + if (ctx->add_mark)
> + perf_callchain_store_context(ctx, PERF_CONTEXT_KERNEL);
> + perf_callchain_kernel(ctx, regs);
> +}
> +
> +void __get_perf_callchain_user(struct perf_callchain_entry_ctx *ctx, struct pt_regs *regs)
> +{
> + int start_entry_idx;
> +
> + if (!user_mode(regs)) {
> + if (current->flags & (PF_KTHREAD | PF_USER_WORKER))
> + return;
> + regs = task_pt_regs(current);
> + }
> +
> + if (ctx->add_mark)
> + perf_callchain_store_context(ctx, PERF_CONTEXT_USER);
> +
> + start_entry_idx = ctx->entry->nr;
> + perf_callchain_user(ctx, regs);
> + fixup_uretprobe_trampoline_entries(ctx->entry, start_entry_idx);
> +}
> +
> struct perf_callchain_entry *
> get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
> - u32 max_stack, bool crosstask, bool add_mark)
> + u32 max_stack, bool crosstask)
> {
> - struct perf_callchain_entry *entry;
> struct perf_callchain_entry_ctx ctx;
> - int rctx, start_entry_idx;
> + struct perf_callchain_entry *entry;
> + int rctx;
>
> /* crosstask is not supported for user stacks */
> if (crosstask && user && !kernel)
> return NULL;
>
> - entry = get_callchain_entry(&rctx);
> + entry = get_callchain_entry(&rctx);
> if (!entry)
> return NULL;
>
> - ctx.entry = entry;
> - ctx.max_stack = max_stack;
> - ctx.nr = entry->nr = 0;
> - ctx.contexts = 0;
> - ctx.contexts_maxed = false;
> + __init_perf_callchain_ctx(&ctx, entry, max_stack, true);
>
> - if (kernel && !user_mode(regs)) {
> - if (add_mark)
> - perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
> - perf_callchain_kernel(&ctx, regs);
> - }
> + if (kernel)
> + __get_perf_callchain_kernel(&ctx, regs);
> + if (user && !crosstask)
> + __get_perf_callchain_user(&ctx, regs);
>
> - if (user && !crosstask) {
> - if (!user_mode(regs)) {
> - if (current->flags & (PF_KTHREAD | PF_USER_WORKER))
> - goto exit_put;
> - regs = task_pt_regs(current);
> - }
> -
> - if (add_mark)
> - perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);
> -
> - start_entry_idx = entry->nr;
> - perf_callchain_user(&ctx, regs);
> - fixup_uretprobe_trampoline_entries(entry, start_entry_idx);
> - }
> -
> -exit_put:
> put_callchain_entry(rctx);
>
> return entry;
> diff --git a/kernel/events/core.c b/kernel/events/core.c
> index 177e57c1a362..cbe073d761a8 100644
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -8218,7 +8218,7 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
> return &__empty_callchain;
>
> callchain = get_perf_callchain(regs, kernel, user,
> - max_stack, crosstask, true);
> + max_stack, crosstask);
> return callchain ?: &__empty_callchain;
> }
>
--
Best Regards
Tao Chen