As Alexei noted, get_perf_callchain() return values may be reused
if a task is preempted after the BPF program enters migrate disable
mode. The perf_callchain_entries has a small stack of entries, and
we can use it as follows:
1. get the perf callchain entry
2. BPF use...
3. put the perf callchain entry
Signed-off-by: Tao Chen <chen.dylane@linux.dev>
---
kernel/bpf/stackmap.c | 61 ++++++++++++++++++++++++++++++++++---------
1 file changed, 48 insertions(+), 13 deletions(-)
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index e28b35c7e0b..70d38249083 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -188,13 +188,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
}
static struct perf_callchain_entry *
-get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
+get_callchain_entry_for_task(int *rctx, struct task_struct *task, u32 max_depth)
{
#ifdef CONFIG_STACKTRACE
struct perf_callchain_entry *entry;
- int rctx;
- entry = get_callchain_entry(&rctx);
+ entry = get_callchain_entry(rctx);
if (!entry)
return NULL;
@@ -216,8 +215,6 @@ get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
to[i] = (u64)(from[i]);
}
- put_callchain_entry(rctx);
-
return entry;
#else /* CONFIG_STACKTRACE */
return NULL;
@@ -297,6 +294,31 @@ static long __bpf_get_stackid(struct bpf_map *map,
return id;
}
+static struct perf_callchain_entry *
+bpf_get_perf_callchain(int *rctx, struct pt_regs *regs, bool kernel, bool user,
+ int max_stack, bool crosstask)
+{
+ struct perf_callchain_entry_ctx ctx;
+ struct perf_callchain_entry *entry;
+
+ entry = get_callchain_entry(rctx);
+ if (unlikely(!entry))
+ return NULL;
+
+ __init_perf_callchain_ctx(&ctx, entry, max_stack, false);
+ if (kernel)
+ __get_perf_callchain_kernel(&ctx, regs);
+ if (user && !crosstask)
+ __get_perf_callchain_user(&ctx, regs);
+
+ return entry;
+}
+
+static void bpf_put_callchain_entry(int rctx)
+{
+ put_callchain_entry(rctx);
+}
+
BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
u64, flags)
{
@@ -305,6 +327,7 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
bool user = flags & BPF_F_USER_STACK;
struct perf_callchain_entry *trace;
bool kernel = !user;
+ int rctx, ret;
if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
@@ -314,14 +337,15 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
if (max_depth > sysctl_perf_event_max_stack)
max_depth = sysctl_perf_event_max_stack;
- trace = get_perf_callchain(regs, kernel, user, max_depth,
- false);
-
+ trace = bpf_get_perf_callchain(&rctx, regs, kernel, user, max_depth, false);
if (unlikely(!trace))
/* couldn't fetch the stack trace */
return -EFAULT;
- return __bpf_get_stackid(map, trace, flags);
+ ret = __bpf_get_stackid(map, trace, flags);
+ bpf_put_callchain_entry(rctx);
+
+ return ret;
}
const struct bpf_func_proto bpf_get_stackid_proto = {
@@ -415,6 +439,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
bool kernel = !user;
int err = -EINVAL;
u64 *ips;
+ int rctx;
if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
BPF_F_USER_BUILD_ID)))
@@ -449,17 +474,24 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
if (trace_in)
trace = trace_in;
else if (kernel && task)
- trace = get_callchain_entry_for_task(task, max_depth);
+ trace = get_callchain_entry_for_task(&rctx, task, max_depth);
else
- trace = get_perf_callchain(regs, kernel, user, max_depth,
- crosstask);
+ trace = bpf_get_perf_callchain(&rctx, regs, kernel, user, max_depth, crosstask);
- if (unlikely(!trace) || trace->nr < skip) {
+ if (unlikely(!trace)) {
if (may_fault)
rcu_read_unlock();
goto err_fault;
}
+ if (trace->nr < skip) {
+ if (may_fault)
+ rcu_read_unlock();
+ if (!trace_in)
+ bpf_put_callchain_entry(rctx);
+ goto err_fault;
+ }
+
trace_nr = trace->nr - skip;
trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
copy_len = trace_nr * elem_size;
@@ -479,6 +511,9 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
if (may_fault)
rcu_read_unlock();
+ if (!trace_in)
+ bpf_put_callchain_entry(rctx);
+
if (user_build_id)
stack_map_get_build_id_offset(buf, trace_nr, user, may_fault);
--
2.48.1
On 10/28/25 9:25 AM, Tao Chen wrote:
> As Alexei noted, get_perf_callchain() return values may be reused
> if a task is preempted after the BPF program enters migrate disable
> mode. The perf_callchain_entries has a small stack of entries, and
> we can reuse it as follows:
>
> 1. get the perf callchain entry
> 2. BPF use...
> 3. put the perf callchain entry
>
> Signed-off-by: Tao Chen <chen.dylane@linux.dev>
> ---
> kernel/bpf/stackmap.c | 61 ++++++++++++++++++++++++++++++++++---------
> 1 file changed, 48 insertions(+), 13 deletions(-)
>
> diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
> index e28b35c7e0b..70d38249083 100644
> --- a/kernel/bpf/stackmap.c
> +++ b/kernel/bpf/stackmap.c
> @@ -188,13 +188,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
> }
>
> static struct perf_callchain_entry *
> -get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
> +get_callchain_entry_for_task(int *rctx, struct task_struct *task, u32 max_depth)
> {
> #ifdef CONFIG_STACKTRACE
> struct perf_callchain_entry *entry;
> - int rctx;
>
> - entry = get_callchain_entry(&rctx);
> + entry = get_callchain_entry(rctx);
>
> if (!entry)
> return NULL;
> @@ -216,8 +215,6 @@ get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
> to[i] = (u64)(from[i]);
> }
>
> - put_callchain_entry(rctx);
> -
> return entry;
> #else /* CONFIG_STACKTRACE */
> return NULL;
> @@ -297,6 +294,31 @@ static long __bpf_get_stackid(struct bpf_map *map,
> return id;
> }
>
> +static struct perf_callchain_entry *
> +bpf_get_perf_callchain(int *rctx, struct pt_regs *regs, bool kernel, bool user,
> + int max_stack, bool crosstask)
> +{
> + struct perf_callchain_entry_ctx ctx;
> + struct perf_callchain_entry *entry;
> +
> + entry = get_callchain_entry(rctx);
I think this may not work. Let us say we have two bpf programs
both pinned to a particular cpu (migrate disabled but preempt enabled).
get_callchain_entry() calls get_recursion_context() to get the
buffer for a particular level.
static inline int get_recursion_context(u8 *recursion)
{
unsigned char rctx = interrupt_context_level();
if (recursion[rctx])
return -1;
recursion[rctx]++;
barrier();
return rctx;
}
It is possible that both tasks (running at process level) may
reach the point right before "recursion[rctx]++;".
In that case, both tasks will be able to get the
buffer, and this is not right.
To fix this, we either need a preempt disable
on the bpf side, or some kind of atomic
operation (cmpxchg or similar), or maybe
a preempt disable between the if statement and recursion[rctx]++,
so that only one task can get the buffer?
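For illustration, a minimal sketch of that last option (purely a sketch
of the idea, not a posted patch): close the window between the check and
the increment with a preempt-disabled section, so two tasks on the same
CPU cannot both observe recursion[rctx] == 0.

static inline int get_recursion_context(u8 *recursion)
{
	unsigned char rctx = interrupt_context_level();
	int ret;

	/* Check and claim under preempt_disable() so another task on
	 * this CPU cannot interleave between the two steps.
	 */
	preempt_disable();
	if (recursion[rctx]) {
		ret = -1;
	} else {
		recursion[rctx]++;
		ret = rctx;
	}
	preempt_enable();

	return ret;
}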
> + if (unlikely(!entry))
> + return NULL;
> +
> + __init_perf_callchain_ctx(&ctx, entry, max_stack, false);
> + if (kernel)
> + __get_perf_callchain_kernel(&ctx, regs);
> + if (user && !crosstask)
> + __get_perf_callchain_user(&ctx, regs);
> +
> + return entry;
> +}
> +
> +static void bpf_put_callchain_entry(int rctx)
We have bpf_get_perf_callchain(); maybe rename the above
to bpf_put_perf_callchain()?
> +{
> + put_callchain_entry(rctx);
> +}
> +
[...]
On 2025/11/6 06:16, Yonghong Song wrote:
>
>
> On 10/28/25 9:25 AM, Tao Chen wrote:
>> As Alexei noted, get_perf_callchain() return values may be reused
>> if a task is preempted after the BPF program enters migrate disable
>> mode. The perf_callchain_entries has a small stack of entries, and
>> we can reuse it as follows:
>>
>> 1. get the perf callchain entry
>> 2. BPF use...
>> 3. put the perf callchain entry
>>
>> Signed-off-by: Tao Chen <chen.dylane@linux.dev>
>> ---
>> kernel/bpf/stackmap.c | 61 ++++++++++++++++++++++++++++++++++---------
>> 1 file changed, 48 insertions(+), 13 deletions(-)
>>
>> diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
>> index e28b35c7e0b..70d38249083 100644
>> --- a/kernel/bpf/stackmap.c
>> +++ b/kernel/bpf/stackmap.c
>> @@ -188,13 +188,12 @@ static void stack_map_get_build_id_offset(struct
>> bpf_stack_build_id *id_offs,
>> }
>> static struct perf_callchain_entry *
>> -get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
>> +get_callchain_entry_for_task(int *rctx, struct task_struct *task, u32
>> max_depth)
>> {
>> #ifdef CONFIG_STACKTRACE
>> struct perf_callchain_entry *entry;
>> - int rctx;
>> - entry = get_callchain_entry(&rctx);
>> + entry = get_callchain_entry(rctx);
>> if (!entry)
>> return NULL;
>> @@ -216,8 +215,6 @@ get_callchain_entry_for_task(struct task_struct
>> *task, u32 max_depth)
>> to[i] = (u64)(from[i]);
>> }
>> - put_callchain_entry(rctx);
>> -
>> return entry;
>> #else /* CONFIG_STACKTRACE */
>> return NULL;
>> @@ -297,6 +294,31 @@ static long __bpf_get_stackid(struct bpf_map *map,
>> return id;
>> }
>> +static struct perf_callchain_entry *
>> +bpf_get_perf_callchain(int *rctx, struct pt_regs *regs, bool kernel,
>> bool user,
>> + int max_stack, bool crosstask)
>> +{
>> + struct perf_callchain_entry_ctx ctx;
>> + struct perf_callchain_entry *entry;
>> +
>> + entry = get_callchain_entry(rctx);
>
> I think this may not work. Let us say we have two bpf programs
> both pinned to a particular cpu (migrate disabled but preempt enabled).
> get_callchain_entry() calls get_recursion_context() to get the
> buffer for a particular level.
>
> static inline int get_recursion_context(u8 *recursion)
> {
> unsigned char rctx = interrupt_context_level();
> if (recursion[rctx])
> return -1;
> recursion[rctx]++;
> barrier();
> return rctx;
> }
>
> It is possible that both tasks (at process level) may
> reach right before "recursion[rctx]++;".
> In such cases, both tasks will be able to get
> buffer and this is not right.
>
> To fix this, we either need to have preempt disable
> in bpf side, or maybe we have some kind of atomic
> operation (cmpxchg or similar things), or maybe
> has a preempt disable between if statement and recursion[rctx]++,
> so only one task can get buffer?
>
Thanks for the reminder. Could we add preempt_disable()/preempt_enable()
around get_callchain_entry() on the BPF side, to avoid affecting the
original perf users?
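For illustration, a minimal sketch of that idea on the BPF side
(bpf_get_callchain_entry() is a hypothetical helper name, not part of
the posted patch):

static struct perf_callchain_entry *bpf_get_callchain_entry(int *rctx)
{
	struct perf_callchain_entry *entry;

	/* Disable preemption so the check-and-increment inside
	 * get_recursion_context() cannot be interleaved by another
	 * task on this CPU.
	 */
	preempt_disable();
	entry = get_callchain_entry(rctx);
	preempt_enable();

	return entry;
}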
Regarding multiple tasks preempting each other: if the entry has not
been released via put_callchain_entry(), perf's buffer will not hand out
a second entry for that context level, so the second task just returns
here:
if (recursion[rctx])
return -1;
>
>> + if (unlikely(!entry))
>> + return NULL;
>> +
>> + __init_perf_callchain_ctx(&ctx, entry, max_stack, false);
>> + if (kernel)
>> + __get_perf_callchain_kernel(&ctx, regs);
>> + if (user && !crosstask)
>> + __get_perf_callchain_user(&ctx, regs);
>> +
>> + return entry;
>> +}
>> +
>> +static void bpf_put_callchain_entry(int rctx)
>
> we have bpf_get_perf_callchain(), maybe rename the above
> to bpf_put_perf_callchain()?
>
Ack, thanks.
>> +{
>> + put_callchain_entry(rctx);
>> +}
>> +
>
> [...]
>
--
Best Regards
Tao Chen
On 11/5/25 9:12 PM, Tao Chen wrote:
> On 2025/11/6 06:16, Yonghong Song wrote:
>>
>>
>> On 10/28/25 9:25 AM, Tao Chen wrote:
>>> As Alexei noted, get_perf_callchain() return values may be reused
>>> if a task is preempted after the BPF program enters migrate disable
>>> mode. The perf_callchain_entries has a small stack of entries, and
>>> we can reuse it as follows:
>>>
>>> 1. get the perf callchain entry
>>> 2. BPF use...
>>> 3. put the perf callchain entry
>>>
>>> Signed-off-by: Tao Chen <chen.dylane@linux.dev>
>>> ---
>>> kernel/bpf/stackmap.c | 61
>>> ++++++++++++++++++++++++++++++++++---------
>>> 1 file changed, 48 insertions(+), 13 deletions(-)
>>>
>>> diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
>>> index e28b35c7e0b..70d38249083 100644
>>> --- a/kernel/bpf/stackmap.c
>>> +++ b/kernel/bpf/stackmap.c
>>> @@ -188,13 +188,12 @@ static void
>>> stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
>>> }
>>> static struct perf_callchain_entry *
>>> -get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
>>> +get_callchain_entry_for_task(int *rctx, struct task_struct *task,
>>> u32 max_depth)
>>> {
>>> #ifdef CONFIG_STACKTRACE
>>> struct perf_callchain_entry *entry;
>>> - int rctx;
>>> - entry = get_callchain_entry(&rctx);
>>> + entry = get_callchain_entry(rctx);
>>> if (!entry)
>>> return NULL;
>>> @@ -216,8 +215,6 @@ get_callchain_entry_for_task(struct task_struct
>>> *task, u32 max_depth)
>>> to[i] = (u64)(from[i]);
>>> }
>>> - put_callchain_entry(rctx);
>>> -
>>> return entry;
>>> #else /* CONFIG_STACKTRACE */
>>> return NULL;
>>> @@ -297,6 +294,31 @@ static long __bpf_get_stackid(struct bpf_map *map,
>>> return id;
>>> }
>>> +static struct perf_callchain_entry *
>>> +bpf_get_perf_callchain(int *rctx, struct pt_regs *regs, bool
>>> kernel, bool user,
>>> + int max_stack, bool crosstask)
>>> +{
>>> + struct perf_callchain_entry_ctx ctx;
>>> + struct perf_callchain_entry *entry;
>>> +
>>> + entry = get_callchain_entry(rctx);
>>
>> I think this may not work. Let us say we have two bpf programs
>> both pinned to a particular cpu (migrate disabled but preempt enabled).
>> get_callchain_entry() calls get_recursion_context() to get the
>> buffer for a particular level.
>>
>> static inline int get_recursion_context(u8 *recursion)
>> {
>> unsigned char rctx = interrupt_context_level();
>> if (recursion[rctx])
>> return -1;
>> recursion[rctx]++;
>> barrier();
>> return rctx;
>> }
>>
>> It is possible that both tasks (at process level) may
>> reach right before "recursion[rctx]++;".
>> In such cases, both tasks will be able to get
>> buffer and this is not right.
>>
>> To fix this, we either need to have preempt disable
>> in bpf side, or maybe we have some kind of atomic
>> operation (cmpxchg or similar things), or maybe
>> has a preempt disable between if statement and recursion[rctx]++,
>> so only one task can get buffer?
>>
>
> Thanks to your reminder, can we add preempt disable before and after
> get_callchain_entry, avoid affecting the original functions of perf.
Yes, we have two get_callchain_entry() call sites:
  bpf/stackmap.c:      entry = get_callchain_entry(&rctx);
  events/callchain.c:  entry = get_callchain_entry(&rctx);
We would need preempt_disable()/preempt_enable() around them.
Another choice is to add preempt_disable()/preempt_enable() around
get_callchain_entry() and get_perf_callchain() only in stackmap.c,
assuming the other users of these two functions run in interrupt
context (softirq, hardirq and nmi) and are therefore okay.
But maybe the following is better?
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index d9cc57083091..0ccf94315954 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -214,12 +214,9 @@ static inline int get_recursion_context(u8 *recursion)
{
unsigned char rctx = interrupt_context_level();
- if (recursion[rctx])
+ if (cmpxchg(&recursion[rctx], 0, 1) != 0)
return -1;
- recursion[rctx]++;
- barrier();
-
return rctx;
}
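For reference, with that change applied get_recursion_context() would
read as follows (assembled here only for clarity; the diff above is
authoritative):

static inline int get_recursion_context(u8 *recursion)
{
	unsigned char rctx = interrupt_context_level();

	/* Atomically claim the per-context slot; a preempting task on
	 * the same CPU sees a non-zero old value and backs off with -1.
	 */
	if (cmpxchg(&recursion[rctx], 0, 1) != 0)
		return -1;

	return rctx;
}

The cmpxchg() turns the check and the claim into one atomic step, so a
task preempted between them can no longer end up sharing the buffer with
another task on the same CPU.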
>
> Regarding multiple task preemption: if the entry is not released via
> put_callchain_entry, it appears that perf's buffer does not support
> recording the second task, so it returns directly here.
>
> if (recursion[rctx])
> return -1;
>
>>
>>> + if (unlikely(!entry))
>>> + return NULL;
>>> +
>>> + __init_perf_callchain_ctx(&ctx, entry, max_stack, false);
>>> + if (kernel)
>>> + __get_perf_callchain_kernel(&ctx, regs);
>>> + if (user && !crosstask)
>>> + __get_perf_callchain_user(&ctx, regs);
>>> +
>>> + return entry;
>>> +}
>>> +
>>> +static void bpf_put_callchain_entry(int rctx)
>>
>> we have bpf_get_perf_callchain(), maybe rename the above
>> to bpf_put_perf_callchain()?
>>
>
> Ack, thanks.
>
>>> +{
>>> + put_callchain_entry(rctx);
>>> +}
>>> +
>>
>> [...]
>>
>
>
On 2025/11/6 14:20, Yonghong Song wrote:
>
>
> On 11/5/25 9:12 PM, Tao Chen wrote:
>> On 2025/11/6 06:16, Yonghong Song wrote:
>>>
>>>
>>> On 10/28/25 9:25 AM, Tao Chen wrote:
>>>> As Alexei noted, get_perf_callchain() return values may be reused
>>>> if a task is preempted after the BPF program enters migrate disable
>>>> mode. The perf_callchain_entries has a small stack of entries, and
>>>> we can reuse it as follows:
>>>>
>>>> 1. get the perf callchain entry
>>>> 2. BPF use...
>>>> 3. put the perf callchain entry
>>>>
>>>> Signed-off-by: Tao Chen <chen.dylane@linux.dev>
>>>> ---
>>>> kernel/bpf/stackmap.c | 61 +++++++++++++++++++++++++++++++++
>>>> +---------
>>>> 1 file changed, 48 insertions(+), 13 deletions(-)
>>>>
>>>> diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
>>>> index e28b35c7e0b..70d38249083 100644
>>>> --- a/kernel/bpf/stackmap.c
>>>> +++ b/kernel/bpf/stackmap.c
>>>> @@ -188,13 +188,12 @@ static void
>>>> stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
>>>> }
>>>> static struct perf_callchain_entry *
>>>> -get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
>>>> +get_callchain_entry_for_task(int *rctx, struct task_struct *task,
>>>> u32 max_depth)
>>>> {
>>>> #ifdef CONFIG_STACKTRACE
>>>> struct perf_callchain_entry *entry;
>>>> - int rctx;
>>>> - entry = get_callchain_entry(&rctx);
>>>> + entry = get_callchain_entry(rctx);
>>>> if (!entry)
>>>> return NULL;
>>>> @@ -216,8 +215,6 @@ get_callchain_entry_for_task(struct task_struct
>>>> *task, u32 max_depth)
>>>> to[i] = (u64)(from[i]);
>>>> }
>>>> - put_callchain_entry(rctx);
>>>> -
>>>> return entry;
>>>> #else /* CONFIG_STACKTRACE */
>>>> return NULL;
>>>> @@ -297,6 +294,31 @@ static long __bpf_get_stackid(struct bpf_map *map,
>>>> return id;
>>>> }
>>>> +static struct perf_callchain_entry *
>>>> +bpf_get_perf_callchain(int *rctx, struct pt_regs *regs, bool
>>>> kernel, bool user,
>>>> + int max_stack, bool crosstask)
>>>> +{
>>>> + struct perf_callchain_entry_ctx ctx;
>>>> + struct perf_callchain_entry *entry;
>>>> +
>>>> + entry = get_callchain_entry(rctx);
>>>
>>> I think this may not work. Let us say we have two bpf programs
>>> both pinned to a particular cpu (migrate disabled but preempt enabled).
>>> get_callchain_entry() calls get_recursion_context() to get the
>>> buffer for a particular level.
>>>
>>> static inline int get_recursion_context(u8 *recursion)
>>> {
>>> unsigned char rctx = interrupt_context_level();
>>> if (recursion[rctx])
>>> return -1;
>>> recursion[rctx]++;
>>> barrier();
>>> return rctx;
>>> }
>>>
>>> It is possible that both tasks (at process level) may
>>> reach right before "recursion[rctx]++;".
>>> In such cases, both tasks will be able to get
>>> buffer and this is not right.
>>>
>>> To fix this, we either need to have preempt disable
>>> in bpf side, or maybe we have some kind of atomic
>>> operation (cmpxchg or similar things), or maybe
>>> has a preempt disable between if statement and recursion[rctx]++,
>>> so only one task can get buffer?
>>>
>>
>> Thanks to your reminder, can we add preempt disable before and after
>> get_callchain_entry, avoid affecting the original functions of perf.
>
> Yes, we get two get_callchain_entry() call site:
> bpf/stackmap.c: entry = get_callchain_entry(&rctx);
> events/callchain.c: entry = get_callchain_entry(&rctx);
> We need to have preempt_disable()/preempt_enable() around them.
>
> Another choice maybe adds preempt_disable/enable() for
> get_callchain_entry() and get_perf_callchain() in stackmap.c,
> assuming these two function usage in other places are for
> interrupts (softirq, hardirq and nmi) so they are okay.
>
> But maybe the following is better?
>
> diff --git a/kernel/events/internal.h b/kernel/events/internal.h
> index d9cc57083091..0ccf94315954 100644
> --- a/kernel/events/internal.h
> +++ b/kernel/events/internal.h
> @@ -214,12 +214,9 @@ static inline int get_recursion_context(u8 *recursion)
> {
> unsigned char rctx = interrupt_context_level();
>
> - if (recursion[rctx])
> + if (cmpxchg(&recursion[rctx], 0, 1) != 0)
> return -1;
>
> - recursion[rctx]++;
> - barrier();
> -
> return rctx;
> }
>
Agreed, this seems to have fewer side effects, thanks.
>>
>> Regarding multiple task preemption: if the entry is not released via
>> put_callchain_entry, it appears that perf's buffer does not support
>> recording the second task, so it returns directly here.
>>
>> if (recursion[rctx])
>> return -1;
>>
>>>
>>>> + if (unlikely(!entry))
>>>> + return NULL;
>>>> +
>>>> + __init_perf_callchain_ctx(&ctx, entry, max_stack, false);
>>>> + if (kernel)
>>>> + __get_perf_callchain_kernel(&ctx, regs);
>>>> + if (user && !crosstask)
>>>> + __get_perf_callchain_user(&ctx, regs);
>>>> +
>>>> + return entry;
>>>> +}
>>>> +
>>>> +static void bpf_put_callchain_entry(int rctx)
>>>
>>> we have bpf_get_perf_callchain(), maybe rename the above
>>> to bpf_put_perf_callchain()?
>>>
>>
>> Ack, thanks.
>>
>>>> +{
>>>> + put_callchain_entry(rctx);
>>>> +}
>>>> +
>>>
>>> [...]
>>>
>>
>>
>
--
Best Regards
Tao Chen