If TRACE_SESSION exists, we will use an extra 8 bytes on the stack of the
trampoline to store the flags that we need, and these 8 bytes lie before
the function argument count, which means ctx[-2]. And we will store the
flag "is_exit" in the first bit of it.
Introduce the kfunc bpf_tracing_is_exit(), which is used to tell if it
is fexit currently.
Signed-off-by: Menglong Dong <dongml2@chinatelecom.cn>
Co-developed-by: Leon Hwang <leon.hwang@linux.dev>
Signed-off-by: Leon Hwang <leon.hwang@linux.dev>
---
kernel/bpf/verifier.c | 5 ++++-
kernel/trace/bpf_trace.c | 43 +++++++++++++++++++++++++++++++++++++---
2 files changed, 44 insertions(+), 4 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 40e3274e8bc2..a1db11818d01 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12284,6 +12284,7 @@ enum special_kfunc_type {
KF___bpf_trap,
KF_bpf_task_work_schedule_signal,
KF_bpf_task_work_schedule_resume,
+ KF_bpf_tracing_is_exit,
};
BTF_ID_LIST(special_kfunc_list)
@@ -12356,6 +12357,7 @@ BTF_ID(func, bpf_res_spin_unlock_irqrestore)
BTF_ID(func, __bpf_trap)
BTF_ID(func, bpf_task_work_schedule_signal)
BTF_ID(func, bpf_task_work_schedule_resume)
+BTF_ID(func, bpf_tracing_is_exit)
static bool is_task_work_add_kfunc(u32 func_id)
{
@@ -12410,7 +12412,8 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
struct bpf_reg_state *reg = &regs[regno];
bool arg_mem_size = false;
- if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx])
+ if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
+ meta->func_id == special_kfunc_list[KF_bpf_tracing_is_exit])
return KF_ARG_PTR_TO_CTX;
/* In this function, we verify the kfunc's BTF as per the argument type,
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 4f87c16d915a..6dde48b9d27f 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -3356,12 +3356,49 @@ static const struct btf_kfunc_id_set bpf_kprobe_multi_kfunc_set = {
.filter = bpf_kprobe_multi_filter,
};
-static int __init bpf_kprobe_multi_kfuncs_init(void)
+__bpf_kfunc_start_defs();
+
+__bpf_kfunc bool bpf_tracing_is_exit(void *ctx)
+{
+ /* ctx[-2] is the session flags, and the last bit is is_exit */
+ return ((u64 *)ctx)[-2] & 1;
+}
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(tracing_kfunc_set_ids)
+BTF_ID_FLAGS(func, bpf_tracing_is_exit)
+BTF_KFUNCS_END(tracing_kfunc_set_ids)
+
+static int bpf_tracing_filter(const struct bpf_prog *prog, u32 kfunc_id)
+{
+ if (!btf_id_set8_contains(&tracing_kfunc_set_ids, kfunc_id))
+ return 0;
+
+ if (prog->type != BPF_PROG_TYPE_TRACING ||
+ prog->expected_attach_type != BPF_TRACE_SESSION)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct btf_kfunc_id_set bpf_tracing_kfunc_set = {
+ .owner = THIS_MODULE,
+ .set = &tracing_kfunc_set_ids,
+ .filter = bpf_tracing_filter,
+};
+
+static int __init bpf_trace_kfuncs_init(void)
{
- return register_btf_kfunc_id_set(BPF_PROG_TYPE_KPROBE, &bpf_kprobe_multi_kfunc_set);
+ int err = 0;
+
+ err = err ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_KPROBE, &bpf_kprobe_multi_kfunc_set);
+ err = err ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_tracing_kfunc_set);
+
+ return err;
}
-late_initcall(bpf_kprobe_multi_kfuncs_init);
+late_initcall(bpf_trace_kfuncs_init);
typedef int (*copy_fn_t)(void *dst, const void *src, u32 size, struct task_struct *tsk);
--
2.51.0
On Sat, Oct 18, 2025 at 10:21:21PM +0800, Menglong Dong wrote:
> If TRACE_SESSION exists, we will use extra 8-bytes in the stack of the
> trampoline to store the flags that we needed, and the 8-bytes lie before
> the function argument count, which means ctx[-2]. And we will store the
> flag "is_exit" to the first bit of it.
>
> Introduce the kfunc bpf_tracing_is_exit(), which is used to tell if it
> is fexit currently.
>
> Signed-off-by: Menglong Dong <dongml2@chinatelecom.cn>
> Co-developed-by: Leon Hwang <leon.hwang@linux.dev>
> Signed-off-by: Leon Hwang <leon.hwang@linux.dev>
> ---
> kernel/bpf/verifier.c | 5 ++++-
> kernel/trace/bpf_trace.c | 43 +++++++++++++++++++++++++++++++++++++---
> 2 files changed, 44 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index 40e3274e8bc2..a1db11818d01 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -12284,6 +12284,7 @@ enum special_kfunc_type {
> KF___bpf_trap,
> KF_bpf_task_work_schedule_signal,
> KF_bpf_task_work_schedule_resume,
> + KF_bpf_tracing_is_exit,
> };
>
> BTF_ID_LIST(special_kfunc_list)
> @@ -12356,6 +12357,7 @@ BTF_ID(func, bpf_res_spin_unlock_irqrestore)
> BTF_ID(func, __bpf_trap)
> BTF_ID(func, bpf_task_work_schedule_signal)
> BTF_ID(func, bpf_task_work_schedule_resume)
> +BTF_ID(func, bpf_tracing_is_exit)
>
> static bool is_task_work_add_kfunc(u32 func_id)
> {
> @@ -12410,7 +12412,8 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
> struct bpf_reg_state *reg = &regs[regno];
> bool arg_mem_size = false;
>
> - if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx])
> + if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
> + meta->func_id == special_kfunc_list[KF_bpf_tracing_is_exit])
> return KF_ARG_PTR_TO_CTX;
>
> /* In this function, we verify the kfunc's BTF as per the argument type,
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index 4f87c16d915a..6dde48b9d27f 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -3356,12 +3356,49 @@ static const struct btf_kfunc_id_set bpf_kprobe_multi_kfunc_set = {
> .filter = bpf_kprobe_multi_filter,
> };
>
> -static int __init bpf_kprobe_multi_kfuncs_init(void)
> +__bpf_kfunc_start_defs();
> +
> +__bpf_kfunc bool bpf_tracing_is_exit(void *ctx)
> +{
> + /* ctx[-2] is the session flags, and the last bit is is_exit */
> + return ((u64 *)ctx)[-2] & 1;
> +}
I think this could be inlined by verifier
jirka
> +
> +__bpf_kfunc_end_defs();
SNIP
On 2025/10/20 16:19, Jiri Olsa wrote:
> On Sat, Oct 18, 2025 at 10:21:21PM +0800, Menglong Dong wrote:
> > If TRACE_SESSION exists, we will use extra 8-bytes in the stack of the
> > trampoline to store the flags that we needed, and the 8-bytes lie before
> > the function argument count, which means ctx[-2]. And we will store the
> > flag "is_exit" to the first bit of it.
> >
> > Introduce the kfunc bpf_tracing_is_exit(), which is used to tell if it
> > is fexit currently.
> >
> > Signed-off-by: Menglong Dong <dongml2@chinatelecom.cn>
> > Co-developed-by: Leon Hwang <leon.hwang@linux.dev>
> > Signed-off-by: Leon Hwang <leon.hwang@linux.dev>
> > ---
> > kernel/bpf/verifier.c | 5 ++++-
> > kernel/trace/bpf_trace.c | 43 +++++++++++++++++++++++++++++++++++++---
> > 2 files changed, 44 insertions(+), 4 deletions(-)
> >
> > diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> > index 40e3274e8bc2..a1db11818d01 100644
> > --- a/kernel/bpf/verifier.c
> > +++ b/kernel/bpf/verifier.c
> > @@ -12284,6 +12284,7 @@ enum special_kfunc_type {
> > KF___bpf_trap,
> > KF_bpf_task_work_schedule_signal,
> > KF_bpf_task_work_schedule_resume,
> > + KF_bpf_tracing_is_exit,
> > };
> >
> > BTF_ID_LIST(special_kfunc_list)
> > @@ -12356,6 +12357,7 @@ BTF_ID(func, bpf_res_spin_unlock_irqrestore)
> > BTF_ID(func, __bpf_trap)
> > BTF_ID(func, bpf_task_work_schedule_signal)
> > BTF_ID(func, bpf_task_work_schedule_resume)
> > +BTF_ID(func, bpf_tracing_is_exit)
> >
> > static bool is_task_work_add_kfunc(u32 func_id)
> > {
> > @@ -12410,7 +12412,8 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
> > struct bpf_reg_state *reg = &regs[regno];
> > bool arg_mem_size = false;
> >
> > - if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx])
> > + if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
> > + meta->func_id == special_kfunc_list[KF_bpf_tracing_is_exit])
> > return KF_ARG_PTR_TO_CTX;
> >
> > /* In this function, we verify the kfunc's BTF as per the argument type,
> > diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> > index 4f87c16d915a..6dde48b9d27f 100644
> > --- a/kernel/trace/bpf_trace.c
> > +++ b/kernel/trace/bpf_trace.c
> > @@ -3356,12 +3356,49 @@ static const struct btf_kfunc_id_set bpf_kprobe_multi_kfunc_set = {
> > .filter = bpf_kprobe_multi_filter,
> > };
> >
> > -static int __init bpf_kprobe_multi_kfuncs_init(void)
> > +__bpf_kfunc_start_defs();
> > +
> > +__bpf_kfunc bool bpf_tracing_is_exit(void *ctx)
> > +{
> > + /* ctx[-2] is the session flags, and the last bit is is_exit */
> > + return ((u64 *)ctx)[-2] & 1;
> > +}
>
> I think this could be inlined by verifier
Yeah, that make sense. I'll inline it in the next version.
Thanks!
Menglong Dong
>
> jirka
>
>
> > +
> > +__bpf_kfunc_end_defs();
>
> SNIP
>
>
© 2016 - 2026 Red Hat, Inc.