Adding support for using the get_func_[arg|arg_cnt] helpers in raw_tp/tp_btf
programs.
We can currently use the get_func_[arg|ret|arg_cnt] helpers in
fentry/fexit/fmod_ret programs. If we try to use the get_func_[arg|arg_cnt]
helpers in raw_tp/tp_btf programs, the verifier will fail to load the
program with:
; __u64 cnt = bpf_get_func_arg_cnt(ctx);
3: (85) call bpf_get_func_arg_cnt#185
unknown func bpf_get_func_arg_cnt#185
Adding the get_func_[arg|arg_cnt] helpers to raw_tp_prog_func_proto and
tracing_prog_func_proto for raw tracepoints.
Adding one extra slot in front of the ctx of raw tracepoint programs and
storing the number of arguments at ctx-8, so it's easy to validate the
argument index and locate an argument's position.
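For example, a tp_btf program can then iterate over all arguments with the
two helpers. A minimal sketch (the tracepoint name is illustrative; 12 is
the maximum number of raw tracepoint arguments):

SEC("tp_btf/netif_receive_skb")
int BPF_PROG(capture_args)
{
	__u64 cnt = bpf_get_func_arg_cnt(ctx);
	__u64 arg;
	int i;

	for (i = 0; i < 12 && i < cnt; i++) {
		/* fetch the i-th tracepoint argument */
		bpf_get_func_arg(ctx, i, &arg);
		/* ... filter or record arg ... */
	}
	return 0;
}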
Signed-off-by: KaFai Wan <mannkafai@gmail.com>
---
kernel/trace/bpf_trace.c | 17 ++++++++++++++---
net/bpf/test_run.c | 13 +++++--------
2 files changed, 19 insertions(+), 11 deletions(-)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 52c432a44aeb..eb4c56013493 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1892,6 +1892,10 @@ raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_stackid_proto_raw_tp;
case BPF_FUNC_get_stack:
return &bpf_get_stack_proto_raw_tp;
+ case BPF_FUNC_get_func_arg:
+ return &bpf_get_func_arg_proto;
+ case BPF_FUNC_get_func_arg_cnt:
+ return &bpf_get_func_arg_cnt_proto;
case BPF_FUNC_get_attach_cookie:
return &bpf_get_attach_cookie_proto_tracing;
default:
@@ -1950,10 +1954,16 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_d_path:
return &bpf_d_path_proto;
case BPF_FUNC_get_func_arg:
+ if (prog->type == BPF_PROG_TYPE_TRACING &&
+ prog->expected_attach_type == BPF_TRACE_RAW_TP)
+ return &bpf_get_func_arg_proto;
return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
case BPF_FUNC_get_func_ret:
return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
case BPF_FUNC_get_func_arg_cnt:
+ if (prog->type == BPF_PROG_TYPE_TRACING &&
+ prog->expected_attach_type == BPF_TRACE_RAW_TP)
+ return &bpf_get_func_arg_cnt_proto;
return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
case BPF_FUNC_get_attach_cookie:
if (prog->type == BPF_PROG_TYPE_TRACING &&
@@ -2312,7 +2322,7 @@ void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
#define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__)
#define SARG(X) u64 arg##X
-#define COPY(X) args[X] = arg##X
+#define COPY(X) args[X + 1] = arg##X
#define __DL_COM (,)
#define __DL_SEM (;)
@@ -2323,9 +2333,10 @@ void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
void bpf_trace_run##x(struct bpf_raw_tp_link *link, \
REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \
{ \
- u64 args[x]; \
+ u64 args[x + 1]; \
+ args[0] = x; \
REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \
- __bpf_trace_run(link, args); \
+ __bpf_trace_run(link, args + 1); \
} \
EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index aaf13a7d58ed..8cb285187270 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -760,6 +760,7 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
__u32 ctx_size_in = kattr->test.ctx_size_in;
struct bpf_raw_tp_test_run_info info;
+ u64 args[MAX_BPF_FUNC_ARGS + 1] = {};
int cpu = kattr->test.cpu, err = 0;
int current_cpu;
@@ -776,14 +777,11 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
return -EINVAL;
- if (ctx_size_in) {
- info.ctx = memdup_user(ctx_in, ctx_size_in);
- if (IS_ERR(info.ctx))
- return PTR_ERR(info.ctx);
- } else {
- info.ctx = NULL;
- }
+ if (ctx_size_in && copy_from_user(args + 1, ctx_in, ctx_size_in))
+ return -EFAULT;
+ args[0] = ctx_size_in / sizeof(u64);
+ info.ctx = args + 1;
info.prog = prog;
current_cpu = get_cpu();
@@ -807,7 +805,6 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
err = -EFAULT;
- kfree(info.ctx);
return err;
}
--
2.43.0
On Sat, Apr 26, 2025 at 9:00 AM KaFai Wan <mannkafai@gmail.com> wrote:
>
> [...]
>
> @@ -2312,7 +2322,7 @@ void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
> #define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__)
>
> #define SARG(X) u64 arg##X
> -#define COPY(X) args[X] = arg##X
> +#define COPY(X) args[X + 1] = arg##X
>
> #define __DL_COM (,)
> #define __DL_SEM (;)
> @@ -2323,9 +2333,10 @@ void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
> void bpf_trace_run##x(struct bpf_raw_tp_link *link, \
> REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \
> { \
> - u64 args[x]; \
> + u64 args[x + 1]; \
> + args[0] = x; \
> REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \
> - __bpf_trace_run(link, args); \
> + __bpf_trace_run(link, args + 1); \
This is neat, but what is this for?
The program that attaches to a particular raw_tp knows what it is
attaching to and how many arguments are there,
so bpf_get_func_arg_cnt() is a 5th wheel.
If the reason is "for completeness" then it's not a good reason
to penalize performance. Though it's just an extra 8 bytes of stack
and a single store of a constant.
pw-bot: cr
On Wed, Apr 30, 2025 at 10:46 AM Alexei Starovoitov
<alexei.starovoitov@gmail.com> wrote:
>
> On Sat, Apr 26, 2025 at 9:00 AM KaFai Wan <mannkafai@gmail.com> wrote:
> >
> > [...]
> >
>
> This is neat, but what is this for?
> The program that attaches to a particular raw_tp knows what it is
> attaching to and how many arguments are there,
> so bpf_get_func_arg_cnt() is a 5th wheel.
>
> If the reason is "for completeness" then it's not a good reason
> to penalize performance. Though it's just an extra 8 bytes of stack
> and a single store of a constant.
>
If we try to capture all arguments of a specific raw_tp in tracing programs,
we first obtain the argument count from the format file in debugfs or from
BTF and pass this count to the BPF program via the .bss section or a cookie
(if available).

If we store the count in ctx and get it via the get_func_arg_cnt helper in
the BPF program:
a) It's easier and more efficient to get the argument count in the BPF program.
b) A single BPF program can capture arguments for multiple raw_tps, reducing
the number of BPF programs needed for mass tracing.
Thanks,
KaFai
> pw-bot: cr
On Wed, Apr 30, 2025 at 5:44 AM Kafai Wan <mannkafai@gmail.com> wrote:
>
> On Wed, Apr 30, 2025 at 10:46 AM Alexei Starovoitov
> <alexei.starovoitov@gmail.com> wrote:
> >
> > [...]
> >
> If we try to capture all arguments of a specific raw_tp in tracing programs,
> we first obtain the argument count from the format file in debugfs or from
> BTF and pass this count to the BPF program via the .bss section or a cookie
> (if available).
To do anything useful with those arguments besides printing their
values in hex you'd need to look up BTF anyway, no? So at that point,
what's the problem with just passing the number of arguments as a BPF
cookie?
And then bpf_probe_read_kernel(..., cnt * 8, ctx)?
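Roughly like this sketch, i.e. (MAX_ARGS is an illustrative bound, and the
values read this way stay untrusted):

#define MAX_ARGS 12

SEC("tp_btf/netif_receive_skb")
int BPF_PROG(probe)
{
	__u64 args[MAX_ARGS] = {};
	__u64 cnt = bpf_get_attach_cookie(ctx); /* arg count set by userspace */

	if (cnt > MAX_ARGS)
		cnt = MAX_ARGS;
	/* raw copy of the ctx array; any pointers in args[] are untrusted */
	bpf_probe_read_kernel(args, cnt * 8, ctx);
	return 0;
}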
>
> If we store the count in ctx and get it via the get_func_arg_cnt helper in
> the BPF program:
> a) It's easier and more efficient to get the argument count in the BPF program.
> b) A single BPF program can capture arguments for multiple raw_tps, reducing
> the number of BPF programs needed for mass tracing.
>
> Thanks,
> KaFai
>
> > pw-bot: cr
On 2025/4/30 20:43, Kafai Wan wrote:
> On Wed, Apr 30, 2025 at 10:46 AM Alexei Starovoitov
> <alexei.starovoitov@gmail.com> wrote:
>>
>> On Sat, Apr 26, 2025 at 9:00 AM KaFai Wan <mannkafai@gmail.com> wrote:
>>>
[...]
>>
>> This is neat, but what is this for?
>> The program that attaches to a particular raw_tp knows what it is
>> attaching to and how many arguments are there,
>> so bpf_get_func_arg_cnt() is a 5th wheel.
>>
>> If the reason is "for completeness" then it's not a good reason
>> to penalize performance. Though it's just an extra 8 bytes of stack
>> and a single store of a constant.
>>
> If we try to capture all arguments of a specific raw_tp in tracing programs,
> we first obtain the argument count from the format file in debugfs or from
> BTF and pass this count to the BPF program via the .bss section or a cookie
> (if available).
>
> If we store the count in ctx and get it via the get_func_arg_cnt helper in
> the BPF program:
> a) It's easier and more efficient to get the argument count in the BPF program.
> b) A single BPF program can capture arguments for multiple raw_tps, reducing
> the number of BPF programs needed for mass tracing.
>
bpf_get_func_arg() will be very helpful for bpfsnoop[1] when tracing tp_btf.

bpfsnoop can generate a small snippet of bpf instructions that use
bpf_get_func_arg() to retrieve and filter arguments. For example,
with the netif_receive_skb tracepoint, bpfsnoop can use
bpf_get_func_arg() to filter the skb argument using pcap-filter(7)[2] or
a custom attribute-based filter. This will allow bpfsnoop to trace
multiple tracepoints with a single piece of bpf program code.
[1] https://github.com/bpfsnoop/bpfsnoop
[2] https://www.tcpdump.org/manpages/pcap-filter.7.html
Thanks,
Leon
On Wed, Apr 30, 2025 at 8:55 AM Leon Hwang <leon.hwang@linux.dev> wrote:
>
>
>
> On 2025/4/30 20:43, Kafai Wan wrote:
> > On Wed, Apr 30, 2025 at 10:46 AM Alexei Starovoitov
> > <alexei.starovoitov@gmail.com> wrote:
> >>
> >> On Sat, Apr 26, 2025 at 9:00 AM KaFai Wan <mannkafai@gmail.com> wrote:
> >>>
>
> [...]
>
> bpf_get_func_arg() will be very helpful for bpfsnoop[1] when tracing tp_btf.
>
> bpfsnoop can generate a small snippet of bpf instructions that use
> bpf_get_func_arg() to retrieve and filter arguments. For example,
> with the netif_receive_skb tracepoint, bpfsnoop can use
> bpf_get_func_arg() to filter the skb argument using pcap-filter(7)[2] or
> a custom attribute-based filter. This will allow bpfsnoop to trace
> multiple tracepoints with a single piece of bpf program code.
I doubt you thought it through end to end.
When tracepoint prog attaches we have this check:
/*
* check that program doesn't access arguments beyond what's
* available in this tracepoint
*/
if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
return -EINVAL;
So you cannot have a single bpf prog attached to many tracepoints
to read many arguments as-is.
You can hack around that limit with probe_read,
but the values won't be trusted and you won't be able to pass
such untrusted pointers into skb and other helpers/kfuncs.
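For example, this sketch fails to attach (netif_receive_skb has a single
skb argument, so btp->num_args * sizeof(u64) is 8):

SEC("tp_btf/netif_receive_skb")
int BPF_PROG(generic_probe, u64 arg0, u64 arg1, u64 arg2)
{
	/* BPF_PROG loads ctx[2] for arg2, so max_ctx_offset becomes 24,
	 * which is beyond the 8 bytes this tracepoint provides: the
	 * attach fails with -EINVAL.
	 */
	return (int)arg2;
}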
On 2025/5/1 00:53, Alexei Starovoitov wrote:
> On Wed, Apr 30, 2025 at 8:55 AM Leon Hwang <leon.hwang@linux.dev> wrote:
>>
>>
>>
>> On 2025/4/30 20:43, Kafai Wan wrote:
>>> On Wed, Apr 30, 2025 at 10:46 AM Alexei Starovoitov
>>> <alexei.starovoitov@gmail.com> wrote:
>>>>
>>>> On Sat, Apr 26, 2025 at 9:00 AM KaFai Wan <mannkafai@gmail.com> wrote:
>>>>>
>>
[...]
>
> I doubt you thought it through end to end.
> When tracepoint prog attaches we have this check:
> /*
> * check that program doesn't access arguments beyond what's
> * available in this tracepoint
> */
> if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
> return -EINVAL;
>
> So you cannot have a single bpf prog attached to many tracepoints
> to read many arguments as-is.
> You can hack around that limit with probe_read,
> but the values won't be trusted and you won't be able to pass
> such untrusted pointers into skb and other helpers/kfuncs.
I understand that a single bpf program cannot be attached to multiple
tracepoints using tp_btf. However, the same bpf code can be reused to
create multiple bpf programs, each attached to a different tracepoint.
For example:
SEC("fentry")
int BPF_PROG(fentry_fn)
{
/* ... */
return BPF_OK;
}
The above fentry code can be compiled into multiple bpf programs to
trace different kernel functions. Each program can then use the
bpf_get_func_arg() helper to access the arguments of the traced function.
With this patch, tp_btf will gain similar flexibility. For example:
SEC("tp_btf")
int BPF_PROG(tp_btf_fn)
{
/* ... */
return BPF_OK;
}
Here, bpf_get_func_arg() can be used to access tracepoint arguments.
Currently, due to the lack of bpf_get_func_arg() support in tp_btf,
bpfsnoop[1] uses bpf_probe_read_kernel() to read tracepoint arguments.
This is also used when filtering specific argument attributes.
For instance, to filter the skb argument of the netif_receive_skb
tracepoint by 'skb->dev->ifindex == 2', the translated bpf instructions
with bpf_probe_read_kernel() would look like this:
bool filter_arg(__u64 * args):
; filter_arg(__u64 *args)
209: (79) r1 = *(u64 *)(r1 +0) /* all the tracepoint's arguments have
been read into args using bpf_probe_read_kernel() */
210: (bf) r3 = r1
211: (07) r3 += 16
212: (b7) r2 = 8
213: (bf) r1 = r10
214: (07) r1 += -8
215: (85) call bpf_probe_read_kernel#-125280
216: (79) r3 = *(u64 *)(r10 -8)
217: (15) if r3 == 0x0 goto pc+10
218: (07) r3 += 224
219: (b7) r2 = 8
220: (bf) r1 = r10
221: (07) r1 += -8
222: (85) call bpf_probe_read_kernel#-125280
223: (79) r3 = *(u64 *)(r10 -8)
224: (67) r3 <<= 32
225: (77) r3 >>= 32
226: (b7) r0 = 1
227: (15) if r3 == 0x2 goto pc+1
228: (af) r0 ^= r0
229: (95) exit
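At the source level, that generated snippet corresponds to roughly the
following sketch (the +16 and +224 offsets are skb->dev and dev->ifindex,
as in the dump above):

static __noinline bool
filter_skb_untrusted(__u64 *args)
{
	struct net_device *dev = NULL;
	__u64 ifindex = 0;

	/* chase skb->dev->ifindex by hand; args[0] is an untrusted pointer */
	bpf_probe_read_kernel(&dev, sizeof(dev), (void *)(args[0] + 16));
	if (!dev)
		return false;
	bpf_probe_read_kernel(&ifindex, sizeof(ifindex), (void *)dev + 224);
	return (__u32)ifindex == 2;
}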
If bpf_get_func_arg() is supported in tp_btf, the bpf program will
instead look like:
static __noinline bool
filter_skb(void *ctx)
{
struct sk_buff *skb;
(void) bpf_get_func_arg(ctx, 0, (__u64 *) &skb);
return skb->dev->ifindex == 2;
}
This will simplify the generated code and eliminate the need for
bpf_probe_read_kernel() calls. However, in my tests (on kernel
6.8.0-35-generic, Ubuntu 24.04 LTS), the pointer returned by
bpf_get_func_arg() is marked as a scalar rather than a trusted pointer:
0: R1=ctx() R10=fp0
; if (!filter_skb(ctx))
0: (85) call pc+3
caller:
R10=fp0
callee:
frame1: R1=ctx() R10=fp0
4: frame1: R1=ctx() R10=fp0
; filter_skb(void *ctx)
4: (bf) r3 = r10 ; frame1: R3_w=fp0 R10=fp0
;
5: (07) r3 += -8 ; frame1: R3_w=fp-8
; (void) bpf_get_func_arg(ctx, 0, (__u64 *) &skb);
6: (b7) r2 = 0 ; frame1: R2_w=0
7: (85) call bpf_get_func_arg#183 ; frame1: R0_w=scalar()
; return skb->dev->ifindex == 2;
8: (79) r1 = *(u64 *)(r10 -8) ; frame1: R1_w=scalar() R10=fp0
fp-8=mmmmmmmm
; return skb->dev->ifindex == 2;
9: (79) r1 = *(u64 *)(r1 +16)
R1 invalid mem access 'scalar'
processed 7 insns (limit 1000000) max_states_per_insn 0 total_states 0
peak_states 0 mark_read 0
If the returned skb is a trusted pointer, the verifier will accept
something like:
static __noinline bool
filter_skb(struct sk_buff *skb)
{
return skb->dev->ifindex == 2;
}
Which will compile into much simpler and more efficient instructions:
bool filter_skb(struct sk_buff * skb):
; return skb->dev->ifindex == 2;
92: (79) r1 = *(u64 *)(r1 +16)
; return skb->dev->ifindex == 2;
93: (61) r1 = *(u32 *)(r1 +224)
94: (b7) r0 = 1
; return skb->dev->ifindex == 2;
95: (15) if r1 == 0x2 goto pc+1
96: (b7) r0 = 0
; return skb->dev->ifindex == 2;
97: (95) exit
In conclusion:
1. It would be better if the pointer returned by bpf_get_func_arg() were
trusted, but only when the argument index is a known constant.
2. Adding bpf_get_func_arg() support to tp_btf will significantly
simplify and improve tools like bpfsnoop.
[1] https://github.com/bpfsnoop/bpfsnoop
Thanks,
Leon
On Fri, May 2, 2025 at 7:26 AM Leon Hwang <leon.hwang@linux.dev> wrote:
>
>
>
> On 2025/5/1 00:53, Alexei Starovoitov wrote:
> > On Wed, Apr 30, 2025 at 8:55 AM Leon Hwang <leon.hwang@linux.dev> wrote:
> >>
> >>
> >>
> >> On 2025/4/30 20:43, Kafai Wan wrote:
> >>> On Wed, Apr 30, 2025 at 10:46 AM Alexei Starovoitov
> >>> <alexei.starovoitov@gmail.com> wrote:
> >>>>
> >>>> On Sat, Apr 26, 2025 at 9:00 AM KaFai Wan <mannkafai@gmail.com> wrote:
> >>>>>
> >>
>
> [...]
>
> In conclusion:
>
> 1. It would be better if the pointer returned by bpf_get_func_arg() were
> trusted, but only when the argument index is a known constant.
bpf_get_func_arg() was never meant to return trusted arguments, so
this, IMO, is pushing it too far.
> 2. Adding bpf_get_func_arg() support to tp_btf will significantly
> simplify and improve tools like bpfsnoop.
"Significantly simplify and improve" is a bit of an exaggeration,
given BPF cookies can be used for getting number of arguments of
tp_btf, as for the getting rid of bpf_probe_read_kernel(), tbh, more
generally useful addition would be an untyped counterpart to
bpf_core_cast(), which wouldn't need BTF type information, but will
treat all accessed memory as raw bytes (but will still install
exception handler just like with bpf_core_cast()).
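For reference, the existing typed form can already express that filter,
roughly as in this sketch (assuming a libbpf recent enough to provide
bpf_core_cast()):

static __noinline bool
filter_skb(__u64 *args)
{
	/* read-only BTF cast: accesses are exception-guarded, no probe_read */
	struct sk_buff *skb = bpf_core_cast((void *)args[0], struct sk_buff);

	return skb->dev->ifindex == 2;
}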
>
> [1] https://github.com/bpfsnoop/bpfsnoop
>
> Thanks,
> Leon
>
>
On 2025/5/7 05:01, Andrii Nakryiko wrote:
> On Fri, May 2, 2025 at 7:26 AM Leon Hwang <leon.hwang@linux.dev> wrote:
>>
>>
>>
>> On 2025/5/1 00:53, Alexei Starovoitov wrote:
>>> On Wed, Apr 30, 2025 at 8:55 AM Leon Hwang <leon.hwang@linux.dev> wrote:
>>>>
>>>>
>>>>
>>>> On 2025/4/30 20:43, Kafai Wan wrote:
>>>>> On Wed, Apr 30, 2025 at 10:46 AM Alexei Starovoitov
>>>>> <alexei.starovoitov@gmail.com> wrote:
>>>>>>
>>>>>> On Sat, Apr 26, 2025 at 9:00 AM KaFai Wan <mannkafai@gmail.com> wrote:
>>>>>>>
>>>>
>>
>> [...]
>>
>> In conclusion:
>>
>> 1. It would be better if the pointer returned by bpf_get_func_arg() were
>> trusted, but only when the argument index is a known constant.
>
> bpf_get_func_arg() was never meant to return trusted arguments, so
> this, IMO, is pushing it too far.
>
>> 2. Adding bpf_get_func_arg() support to tp_btf will significantly
>> simplify and improve tools like bpfsnoop.
>
> "Significantly simplify and improve" is a bit of an exaggeration,
> given BPF cookies can be used for getting number of arguments of
> tp_btf, as for the getting rid of bpf_probe_read_kernel(), tbh, more
> generally useful addition would be an untyped counterpart to
> bpf_core_cast(), which wouldn't need BTF type information, but will
> treat all accessed memory as raw bytes (but will still install
> exception handler just like with bpf_core_cast()).
>
Cool! The bpf_rdonly_cast() kfunc used by the bpf_core_cast() macro
works well in bpfsnoop.
The expression 'skb->dev->ifindex == 2' is translated into:
bool filter_arg(__u64 * args):
; filter_arg(__u64 *args)
209: (bf) r9 = r1
210: (79) r8 = *(u64 *)(r9 +0)
211: (bf) r1 = r8
212: (b7) r2 = 6973
213: (bf) r0 = r1
214: (79) r1 = *(u64 *)(r0 +16)
215: (15) if r1 == 0x0 goto pc+12
216: (07) r1 += 224
217: (bf) r3 = r1
218: (b7) r2 = 8
219: (bf) r1 = r10
220: (07) r1 += -8
221: (85) call bpf_probe_read_kernel#-125280
222: (79) r8 = *(u64 *)(r10 -8)
223: (67) r8 <<= 32
224: (77) r8 >>= 32
225: (55) if r8 != 0x2 goto pc+2
226: (b7) r8 = 1
227: (05) goto pc+1
228: (af) r8 ^= r8
229: (bf) r0 = r8
230: (95) exit
However, since bpf_rdonly_cast() is a kfunc, it causes registers r1–r5
to be considered volatile.
If the verifier could trust the pointer fetched by bpf_get_func_arg(),
this extra cost from bpf_rdonly_cast() could be avoided.
Thanks,
Leon
On Mon, May 12, 2025 at 4:12 AM Leon Hwang <leon.hwang@linux.dev> wrote:
>
>
>
> On 2025/5/7 05:01, Andrii Nakryiko wrote:
> > On Fri, May 2, 2025 at 7:26 AM Leon Hwang <leon.hwang@linux.dev> wrote:
> >>
> >>
> >>
> >> On 2025/5/1 00:53, Alexei Starovoitov wrote:
> >>> On Wed, Apr 30, 2025 at 8:55 AM Leon Hwang <leon.hwang@linux.dev> wrote:
> >>>>
> >>>>
> >>>>
> >>>> On 2025/4/30 20:43, Kafai Wan wrote:
> >>>>> On Wed, Apr 30, 2025 at 10:46 AM Alexei Starovoitov
> >>>>> <alexei.starovoitov@gmail.com> wrote:
> >>>>>>
> >>>>>> On Sat, Apr 26, 2025 at 9:00 AM KaFai Wan <mannkafai@gmail.com> wrote:
> >>>>>>>
> >>>>
> >>
> >> [...]
> >>
>
> However, since bpf_rdonly_cast() is a kfunc, it causes registers r1–r5
> to be considered volatile.
It is not.
See:
BTF_ID_FLAGS(func, bpf_rdonly_cast, KF_FASTCALL)
and relevant commits.
On 2025/5/12 23:25, Alexei Starovoitov wrote:
> On Mon, May 12, 2025 at 4:12 AM Leon Hwang <leon.hwang@linux.dev> wrote:
>> [...]
>>
>> However, since bpf_rdonly_cast() is a kfunc, it causes registers r1–r5
>> to be considered volatile.
>
> It is not.
> See:
> BTF_ID_FLAGS(func, bpf_rdonly_cast, KF_FASTCALL)
> and relevant commits.

Thanks for the reminder — you're right, bpf_rdonly_cast() is marked with
KF_FASTCALL, so it doesn't make r1–r5 volatile.

Thanks,
Leon