[PATCH bpf-next v2 2/2] selftests/bpf: add test for address-based single kprobe attach

Hoyeon Lee posted 2 patches 3 days, 22 hours ago
[PATCH bpf-next v2 2/2] selftests/bpf: add test for address-based single kprobe attach
Posted by Hoyeon Lee 3 days, 22 hours ago
Currently, attach_probe covers manual single-kprobe attaches by
func_name, but not by raw address. This commit adds address-based
single-kprobe attach subtests for the two underlying attach paths:
the legacy tracefs/debugfs path and the PMU-based (non-legacy) path.
The new subtests
resolve SYS_NANOSLEEP_KPROBE_NAME through kallsyms, pass the result
through bpf_kprobe_opts.addr, and verify that kprobe and kretprobe are
still triggered.

Signed-off-by: Hoyeon Lee <hoyeon.lee@suse.com>
---
 .../selftests/bpf/prog_tests/attach_probe.c   | 49 +++++++++++++++++++
 1 file changed, 49 insertions(+)

diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
index 9e77e5da7097..64f2ed75779d 100644
--- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
+++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
@@ -123,6 +123,51 @@ static void test_attach_probe_manual(enum probe_attach_mode attach_mode)
 	test_attach_probe_manual__destroy(skel);
 }
 
+/* manual attach address-based kprobe/kretprobe testings */
+static void test_attach_kprobe_by_addr(enum probe_attach_mode attach_mode)
+{
+	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
+	struct bpf_link *kprobe_link, *kretprobe_link;
+	struct test_attach_probe_manual *skel;
+	unsigned long func_addr;
+
+	if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
+		return;
+
+	func_addr = ksym_get_addr(SYS_NANOSLEEP_KPROBE_NAME);
+	if (!ASSERT_NEQ(func_addr, 0UL, "func_addr"))
+		return;
+
+	skel = test_attach_probe_manual__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
+		return;
+
+	kprobe_opts.attach_mode = attach_mode;
+	kprobe_opts.retprobe = false;
+	kprobe_opts.addr = func_addr;
+	kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
+						      NULL, &kprobe_opts);
+	if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe_by_addr"))
+		goto cleanup;
+	skel->links.handle_kprobe = kprobe_link;
+
+	kprobe_opts.retprobe = true;
+	kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
+							 NULL, &kprobe_opts);
+	if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe_by_addr"))
+		goto cleanup;
+	skel->links.handle_kretprobe = kretprobe_link;
+
+	/* trigger & validate kprobe && kretprobe */
+	usleep(1);
+
+	ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
+	ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
+
+cleanup:
+	test_attach_probe_manual__destroy(skel);
+}
+
 /* attach uprobe/uretprobe long event name testings */
 static void test_attach_uprobe_long_event_name(void)
 {
@@ -416,6 +461,10 @@ void test_attach_probe(void)
 		test_attach_probe_manual(PROBE_ATTACH_MODE_PERF);
 	if (test__start_subtest("manual-link"))
 		test_attach_probe_manual(PROBE_ATTACH_MODE_LINK);
+	if (test__start_subtest("kprobe-legacy-by-addr"))
+		test_attach_kprobe_by_addr(PROBE_ATTACH_MODE_LEGACY);
+	if (test__start_subtest("kprobe-perf-by-addr"))
+		test_attach_kprobe_by_addr(PROBE_ATTACH_MODE_PERF);
 
 	if (test__start_subtest("auto"))
 		test_attach_probe_auto(skel);
-- 
2.52.0
Re: [PATCH bpf-next v2 2/2] selftests/bpf: add test for address-based single kprobe attach
Posted by Jiri Olsa 3 days ago
On Sun, Mar 29, 2026 at 09:43:39PM +0900, Hoyeon Lee wrote:
> Currently, attach_probe covers manual single-kprobe attaches by
> func_name, but not by raw address. This commit adds address-based
> single-kprobe attach subtests for the two underlying attach paths,
> legacy tracefs/debugfs and PMU-based non-legacy. The new subtests
> resolve SYS_NANOSLEEP_KPROBE_NAME through kallsyms, pass the result
> through bpf_kprobe_opts.addr, and verify that kprobe and kretprobe are
> still triggered.
> 
> Signed-off-by: Hoyeon Lee <hoyeon.lee@suse.com>
> ---
>  .../selftests/bpf/prog_tests/attach_probe.c   | 49 +++++++++++++++++++
>  1 file changed, 49 insertions(+)
> 
> diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
> index 9e77e5da7097..64f2ed75779d 100644
> --- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
> +++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
> @@ -123,6 +123,51 @@ static void test_attach_probe_manual(enum probe_attach_mode attach_mode)
>  	test_attach_probe_manual__destroy(skel);
>  }
>  
> +/* manual attach address-based kprobe/kretprobe testings */
> +static void test_attach_kprobe_by_addr(enum probe_attach_mode attach_mode)
> +{
> +	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
> +	struct bpf_link *kprobe_link, *kretprobe_link;
> +	struct test_attach_probe_manual *skel;
> +	unsigned long func_addr;
> +
> +	if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
> +		return;
> +
> +	func_addr = ksym_get_addr(SYS_NANOSLEEP_KPROBE_NAME);
> +	if (!ASSERT_NEQ(func_addr, 0UL, "func_addr"))
> +		return;
> +
> +	skel = test_attach_probe_manual__open_and_load();
> +	if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
> +		return;
> +
> +	kprobe_opts.attach_mode = attach_mode;
> +	kprobe_opts.retprobe = false;
> +	kprobe_opts.addr = func_addr;
> +	kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
> +						      NULL, &kprobe_opts);
> +	if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe_by_addr"))
> +		goto cleanup;
> +	skel->links.handle_kprobe = kprobe_link;

we usually use skel->links.handle_kprobe directly, no need to use
kprobe_link or kretprobe_link

> +
> +	kprobe_opts.retprobe = true;
> +	kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
> +							 NULL, &kprobe_opts);
> +	if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe_by_addr"))
> +		goto cleanup;
> +	skel->links.handle_kretprobe = kretprobe_link;
> +
> +	/* trigger & validate kprobe && kretprobe */
> +	usleep(1);
> +
> +	ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
> +	ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
> +
> +cleanup:
> +	test_attach_probe_manual__destroy(skel);
> +}
> +
>  /* attach uprobe/uretprobe long event name testings */
>  static void test_attach_uprobe_long_event_name(void)
>  {
> @@ -416,6 +461,10 @@ void test_attach_probe(void)
>  		test_attach_probe_manual(PROBE_ATTACH_MODE_PERF);
>  	if (test__start_subtest("manual-link"))
>  		test_attach_probe_manual(PROBE_ATTACH_MODE_LINK);
> +	if (test__start_subtest("kprobe-legacy-by-addr"))
> +		test_attach_kprobe_by_addr(PROBE_ATTACH_MODE_LEGACY);
> +	if (test__start_subtest("kprobe-perf-by-addr"))
> +		test_attach_kprobe_by_addr(PROBE_ATTACH_MODE_PERF);

should we test PROBE_ATTACH_MODE_LINK mode as well?

jirka

>  
>  	if (test__start_subtest("auto"))
>  		test_attach_probe_auto(skel);
> -- 
> 2.52.0
>
Re: [PATCH bpf-next v2 2/2] selftests/bpf: add test for address-based single kprobe attach
Posted by Hoyeon Lee 2 days, 9 hours ago
On Mon, Mar 30, 2026 at 7:08 PM Jiri Olsa <olsajiri@gmail.com> wrote:
>
> On Sun, Mar 29, 2026 at 09:43:39PM +0900, Hoyeon Lee wrote:
> > Currently, attach_probe covers manual single-kprobe attaches by
> > func_name, but not by raw address. This commit adds address-based
> > single-kprobe attach subtests for the two underlying attach paths,
> > legacy tracefs/debugfs and PMU-based non-legacy. The new subtests
> > resolve SYS_NANOSLEEP_KPROBE_NAME through kallsyms, pass the result
> > through bpf_kprobe_opts.addr, and verify that kprobe and kretprobe are
> > still triggered.
> >
> > Signed-off-by: Hoyeon Lee <hoyeon.lee@suse.com>
> > ---
> >  .../selftests/bpf/prog_tests/attach_probe.c   | 49 +++++++++++++++++++
> >  1 file changed, 49 insertions(+)
> >
> > diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
> > index 9e77e5da7097..64f2ed75779d 100644
> > --- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
> > +++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
> > @@ -123,6 +123,51 @@ static void test_attach_probe_manual(enum probe_attach_mode attach_mode)
> >       test_attach_probe_manual__destroy(skel);
> >  }
> >
> > +/* manual attach address-based kprobe/kretprobe testings */
> > +static void test_attach_kprobe_by_addr(enum probe_attach_mode attach_mode)
> > +{
> > +     DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
> > +     struct bpf_link *kprobe_link, *kretprobe_link;
> > +     struct test_attach_probe_manual *skel;
> > +     unsigned long func_addr;
> > +
> > +     if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
> > +             return;
> > +
> > +     func_addr = ksym_get_addr(SYS_NANOSLEEP_KPROBE_NAME);
> > +     if (!ASSERT_NEQ(func_addr, 0UL, "func_addr"))
> > +             return;
> > +
> > +     skel = test_attach_probe_manual__open_and_load();
> > +     if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
> > +             return;
> > +
> > +     kprobe_opts.attach_mode = attach_mode;
> > +     kprobe_opts.retprobe = false;
> > +     kprobe_opts.addr = func_addr;
> > +     kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
> > +                                                   NULL, &kprobe_opts);
> > +     if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe_by_addr"))
> > +             goto cleanup;
> > +     skel->links.handle_kprobe = kprobe_link;
>
> we usually use skel->links.handle_kprobe directly, no need to use
> kprobe_link or kretprobe_link
>

Thanks for the review! I'll change that in v3 and assign directly to
skel->links.handle_kprobe and skel->links.handle_kretprobe.

> > +
> > +     kprobe_opts.retprobe = true;
> > +     kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
> > +                                                      NULL, &kprobe_opts);
> > +     if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe_by_addr"))
> > +             goto cleanup;
> > +     skel->links.handle_kretprobe = kretprobe_link;
> > +
> > +     /* trigger & validate kprobe && kretprobe */
> > +     usleep(1);
> > +
> > +     ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
> > +     ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
> > +
> > +cleanup:
> > +     test_attach_probe_manual__destroy(skel);
> > +}
> > +
> >  /* attach uprobe/uretprobe long event name testings */
> >  static void test_attach_uprobe_long_event_name(void)
> >  {
> > @@ -416,6 +461,10 @@ void test_attach_probe(void)
> >               test_attach_probe_manual(PROBE_ATTACH_MODE_PERF);
> >       if (test__start_subtest("manual-link"))
> >               test_attach_probe_manual(PROBE_ATTACH_MODE_LINK);
> > +     if (test__start_subtest("kprobe-legacy-by-addr"))
> > +             test_attach_kprobe_by_addr(PROBE_ATTACH_MODE_LEGACY);
> > +     if (test__start_subtest("kprobe-perf-by-addr"))
> > +             test_attach_kprobe_by_addr(PROBE_ATTACH_MODE_PERF);
>
> should we test PROBE_ATTACH_MODE_LINK mode as well?
>

I covered only LEGACY and PERF here, because they test the two
underlying single-kprobe attach paths, legacy tracefs/debugfs and
PMU-based non-legacy.

I treated LINK as duplicate coverage here. If you think explicit LINK
coverage is still worth having, I can add it in v3.

> jirka
>