Currently, attach_probe covers manual single-kprobe attaches by
func_name, but not the raw-address form that the PMU-based
single-kprobe path can accept.
This commit adds PERF and LINK raw-address subtests by resolving
SYS_NANOSLEEP_KPROBE_NAME through kallsyms, passing the absolute address
in bpf_kprobe_opts.offset with func_name = NULL, and verifying that
kprobe and kretprobe are still triggered. It also verifies that LEGACY
rejects the same form.
Signed-off-by: Hoyeon Lee <hoyeon.lee@suse.com>
---
.../selftests/bpf/prog_tests/attach_probe.c | 82 +++++++++++++++++++
1 file changed, 82 insertions(+)
diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
index 9e77e5da7097..817c4794d54e 100644
--- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
+++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
@@ -123,6 +123,82 @@ static void test_attach_probe_manual(enum probe_attach_mode attach_mode)
test_attach_probe_manual__destroy(skel);
}
+/* manual attach address-based kprobe/kretprobe testings */
+static void test_attach_kprobe_by_addr(enum probe_attach_mode attach_mode)
+{
+ DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
+ struct test_attach_probe_manual *skel;
+ unsigned long func_addr;
+
+ if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
+ return;
+
+ func_addr = ksym_get_addr(SYS_NANOSLEEP_KPROBE_NAME);
+ if (!ASSERT_NEQ(func_addr, 0UL, "func_addr"))
+ return;
+
+ skel = test_attach_probe_manual__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
+ return;
+
+ kprobe_opts.attach_mode = attach_mode;
+ kprobe_opts.retprobe = false;
+ kprobe_opts.offset = func_addr;
+ skel->links.handle_kprobe =
+ bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
+ NULL, &kprobe_opts);
+ if (!ASSERT_OK_PTR(skel->links.handle_kprobe, "attach_kprobe_by_addr"))
+ goto cleanup;
+
+ kprobe_opts.retprobe = true;
+ skel->links.handle_kretprobe =
+ bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
+ NULL, &kprobe_opts);
+ if (!ASSERT_OK_PTR(skel->links.handle_kretprobe,
+ "attach_kretprobe_by_addr"))
+ goto cleanup;
+
+ /* trigger & validate kprobe && kretprobe */
+ usleep(1);
+
+ ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
+ ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
+
+cleanup:
+ test_attach_probe_manual__destroy(skel);
+}
+
+/* reject legacy address-based kprobe attach */
+static void test_attach_kprobe_legacy_by_addr_reject(void)
+{
+ DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
+ struct test_attach_probe_manual *skel;
+ unsigned long func_addr;
+
+ if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
+ return;
+
+ func_addr = ksym_get_addr(SYS_NANOSLEEP_KPROBE_NAME);
+ if (!ASSERT_NEQ(func_addr, 0UL, "func_addr"))
+ return;
+
+ skel = test_attach_probe_manual__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
+ return;
+
+ kprobe_opts.attach_mode = PROBE_ATTACH_MODE_LEGACY;
+ kprobe_opts.offset = func_addr;
+ skel->links.handle_kprobe =
+ bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
+ NULL, &kprobe_opts);
+ if (ASSERT_ERR_PTR(skel->links.handle_kprobe,
+ "attach_kprobe_legacy_by_addr"))
+ ASSERT_EQ(libbpf_get_error(skel->links.handle_kprobe), -ENOTSUP,
+ "attach_kprobe_legacy_by_addr_err");
+
+ test_attach_probe_manual__destroy(skel);
+}
+
/* attach uprobe/uretprobe long event name testings */
static void test_attach_uprobe_long_event_name(void)
{
@@ -416,6 +492,12 @@ void test_attach_probe(void)
test_attach_probe_manual(PROBE_ATTACH_MODE_PERF);
if (test__start_subtest("manual-link"))
test_attach_probe_manual(PROBE_ATTACH_MODE_LINK);
+ if (test__start_subtest("kprobe-perf-by-addr"))
+ test_attach_kprobe_by_addr(PROBE_ATTACH_MODE_PERF);
+ if (test__start_subtest("kprobe-link-by-addr"))
+ test_attach_kprobe_by_addr(PROBE_ATTACH_MODE_LINK);
+ if (test__start_subtest("kprobe-legacy-by-addr-reject"))
+ test_attach_kprobe_legacy_by_addr_reject();
if (test__start_subtest("auto"))
test_attach_probe_auto(skel);
--
2.52.0
On Tue, Mar 31, 2026 at 8:25 AM Hoyeon Lee <hoyeon.lee@suse.com> wrote:
>
> Currently, attach_probe covers manual single-kprobe attaches by
> func_name, but not the raw-address form that the PMU-based
> single-kprobe path can accept.
>
> This commit adds PERF and LINK raw-address subtests by resolving
> SYS_NANOSLEEP_KPROBE_NAME through kallsyms, passing the absolute address
> in bpf_kprobe_opts.offset with func_name = NULL, and verifying that
> kprobe and kretprobe are still triggered. It also verifies that LEGACY
> rejects the same form.
>
> Signed-off-by: Hoyeon Lee <hoyeon.lee@suse.com>
> ---
> .../selftests/bpf/prog_tests/attach_probe.c | 82 +++++++++++++++++++
> 1 file changed, 82 insertions(+)
>
> diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
> index 9e77e5da7097..817c4794d54e 100644
> --- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
> +++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
> @@ -123,6 +123,82 @@ static void test_attach_probe_manual(enum probe_attach_mode attach_mode)
> test_attach_probe_manual__destroy(skel);
> }
>
> +/* manual attach address-based kprobe/kretprobe testings */
> +static void test_attach_kprobe_by_addr(enum probe_attach_mode attach_mode)
> +{
> + DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
DECLARE_LIBBPF_OPTS is a legacy backwards-compatibility macro; prefer
using the shorter LIBBPF_OPTS
> + struct test_attach_probe_manual *skel;
> + unsigned long func_addr;
> +
> + if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
> + return;
> +
> + func_addr = ksym_get_addr(SYS_NANOSLEEP_KPROBE_NAME);
> + if (!ASSERT_NEQ(func_addr, 0UL, "func_addr"))
> + return;
> +
> + skel = test_attach_probe_manual__open_and_load();
> + if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
> + return;
> +
> + kprobe_opts.attach_mode = attach_mode;
> + kprobe_opts.retprobe = false;
> + kprobe_opts.offset = func_addr;
> + skel->links.handle_kprobe =
> + bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
> + NULL, &kprobe_opts);
> + if (!ASSERT_OK_PTR(skel->links.handle_kprobe, "attach_kprobe_by_addr"))
> + goto cleanup;
> +
> + kprobe_opts.retprobe = true;
> + skel->links.handle_kretprobe =
> + bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
> + NULL, &kprobe_opts);
> + if (!ASSERT_OK_PTR(skel->links.handle_kretprobe,
> + "attach_kretprobe_by_addr"))
> + goto cleanup;
> +
> + /* trigger & validate kprobe && kretprobe */
> + usleep(1);
> +
> + ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
> + ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
> +
> +cleanup:
> + test_attach_probe_manual__destroy(skel);
> +}
> +
> +/* reject legacy address-based kprobe attach */
> +static void test_attach_kprobe_legacy_by_addr_reject(void)
> +{
> + DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
> + struct test_attach_probe_manual *skel;
> + unsigned long func_addr;
> +
> + if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
> + return;
> +
> + func_addr = ksym_get_addr(SYS_NANOSLEEP_KPROBE_NAME);
> + if (!ASSERT_NEQ(func_addr, 0UL, "func_addr"))
> + return;
> +
> + skel = test_attach_probe_manual__open_and_load();
> + if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
> + return;
> +
> + kprobe_opts.attach_mode = PROBE_ATTACH_MODE_LEGACY;
> + kprobe_opts.offset = func_addr;
> + skel->links.handle_kprobe =
> + bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
> + NULL, &kprobe_opts);
> + if (ASSERT_ERR_PTR(skel->links.handle_kprobe,
> + "attach_kprobe_legacy_by_addr"))
> + ASSERT_EQ(libbpf_get_error(skel->links.handle_kprobe), -ENOTSUP,
> + "attach_kprobe_legacy_by_addr_err");
> +
> + test_attach_probe_manual__destroy(skel);
> +}
> +
> /* attach uprobe/uretprobe long event name testings */
> static void test_attach_uprobe_long_event_name(void)
> {
> @@ -416,6 +492,12 @@ void test_attach_probe(void)
> test_attach_probe_manual(PROBE_ATTACH_MODE_PERF);
> if (test__start_subtest("manual-link"))
> test_attach_probe_manual(PROBE_ATTACH_MODE_LINK);
> + if (test__start_subtest("kprobe-perf-by-addr"))
> + test_attach_kprobe_by_addr(PROBE_ATTACH_MODE_PERF);
> + if (test__start_subtest("kprobe-link-by-addr"))
> + test_attach_kprobe_by_addr(PROBE_ATTACH_MODE_LINK);
> + if (test__start_subtest("kprobe-legacy-by-addr-reject"))
> + test_attach_kprobe_legacy_by_addr_reject();
>
> if (test__start_subtest("auto"))
> test_attach_probe_auto(skel);
> --
> 2.52.0
>
On Wed, Apr 1, 2026 at 8:06 AM Andrii Nakryiko
<andrii.nakryiko@gmail.com> wrote:
>
> On Tue, Mar 31, 2026 at 8:25 AM Hoyeon Lee <hoyeon.lee@suse.com> wrote:
> >
> > Currently, attach_probe covers manual single-kprobe attaches by
> > func_name, but not the raw-address form that the PMU-based
> > single-kprobe path can accept.
> >
> > This commit adds PERF and LINK raw-address subtests by resolving
> > SYS_NANOSLEEP_KPROBE_NAME through kallsyms, passing the absolute address
> > in bpf_kprobe_opts.offset with func_name = NULL, and verifying that
> > kprobe and kretprobe are still triggered. It also verifies that LEGACY
> > rejects the same form.
> >
> > Signed-off-by: Hoyeon Lee <hoyeon.lee@suse.com>
> > ---
> > .../selftests/bpf/prog_tests/attach_probe.c | 82 +++++++++++++++++++
> > 1 file changed, 82 insertions(+)
> >
> > diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
> > index 9e77e5da7097..817c4794d54e 100644
> > --- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
> > +++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
> > @@ -123,6 +123,82 @@ static void test_attach_probe_manual(enum probe_attach_mode attach_mode)
> > test_attach_probe_manual__destroy(skel);
> > }
> >
> > +/* manual attach address-based kprobe/kretprobe testings */
> > +static void test_attach_kprobe_by_addr(enum probe_attach_mode attach_mode)
> > +{
> > + DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
>
> DECLARE_LIBBPF_OPTS is a legacy backwards-compatibility macro, prefer
> using shorter LIBBPF_OPTS
>
Got it, I'll switch to LIBBPF_OPTS and update the reject
expectation to -EOPNOTSUPP in v5.
> > + struct test_attach_probe_manual *skel;
> > + unsigned long func_addr;
> > +
> > + if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
> > + return;
> > +
> > + func_addr = ksym_get_addr(SYS_NANOSLEEP_KPROBE_NAME);
> > + if (!ASSERT_NEQ(func_addr, 0UL, "func_addr"))
> > + return;
> > +
> > + skel = test_attach_probe_manual__open_and_load();
> > + if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
> > + return;
> > +
> > + kprobe_opts.attach_mode = attach_mode;
> > + kprobe_opts.retprobe = false;
> > + kprobe_opts.offset = func_addr;
> > + skel->links.handle_kprobe =
> > + bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
> > + NULL, &kprobe_opts);
> > + if (!ASSERT_OK_PTR(skel->links.handle_kprobe, "attach_kprobe_by_addr"))
> > + goto cleanup;
> > +
> > + kprobe_opts.retprobe = true;
> > + skel->links.handle_kretprobe =
> > + bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
> > + NULL, &kprobe_opts);
> > + if (!ASSERT_OK_PTR(skel->links.handle_kretprobe,
> > + "attach_kretprobe_by_addr"))
> > + goto cleanup;
> > +
> > + /* trigger & validate kprobe && kretprobe */
> > + usleep(1);
> > +
> > + ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
> > + ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
> > +
> > +cleanup:
> > + test_attach_probe_manual__destroy(skel);
> > +}
> > +
> > +/* reject legacy address-based kprobe attach */
> > +static void test_attach_kprobe_legacy_by_addr_reject(void)
> > +{
> > + DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
> > + struct test_attach_probe_manual *skel;
> > + unsigned long func_addr;
> > +
> > + if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
> > + return;
> > +
> > + func_addr = ksym_get_addr(SYS_NANOSLEEP_KPROBE_NAME);
> > + if (!ASSERT_NEQ(func_addr, 0UL, "func_addr"))
> > + return;
> > +
> > + skel = test_attach_probe_manual__open_and_load();
> > + if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
> > + return;
> > +
> > + kprobe_opts.attach_mode = PROBE_ATTACH_MODE_LEGACY;
> > + kprobe_opts.offset = func_addr;
> > + skel->links.handle_kprobe =
> > + bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
> > + NULL, &kprobe_opts);
> > + if (ASSERT_ERR_PTR(skel->links.handle_kprobe,
> > + "attach_kprobe_legacy_by_addr"))
> > + ASSERT_EQ(libbpf_get_error(skel->links.handle_kprobe), -ENOTSUP,
> > + "attach_kprobe_legacy_by_addr_err");
> > +
> > + test_attach_probe_manual__destroy(skel);
> > +}
> > +
> > /* attach uprobe/uretprobe long event name testings */
> > static void test_attach_uprobe_long_event_name(void)
> > {
> > @@ -416,6 +492,12 @@ void test_attach_probe(void)
> > test_attach_probe_manual(PROBE_ATTACH_MODE_PERF);
> > if (test__start_subtest("manual-link"))
> > test_attach_probe_manual(PROBE_ATTACH_MODE_LINK);
> > + if (test__start_subtest("kprobe-perf-by-addr"))
> > + test_attach_kprobe_by_addr(PROBE_ATTACH_MODE_PERF);
> > + if (test__start_subtest("kprobe-link-by-addr"))
> > + test_attach_kprobe_by_addr(PROBE_ATTACH_MODE_LINK);
> > + if (test__start_subtest("kprobe-legacy-by-addr-reject"))
> > + test_attach_kprobe_legacy_by_addr_reject();
> >
> > if (test__start_subtest("auto"))
> > test_attach_probe_auto(skel);
> > --
> > 2.52.0
> >
© 2016 - 2026 Red Hat, Inc.