implement libbpf sockmap CPU affinity
Signed-off-by: Jiayuan Chen <mrpre@163.com>
---
tools/include/uapi/linux/bpf.h | 4 ++++
tools/lib/bpf/bpf.c | 22 +++++++++++++++++++
tools/lib/bpf/bpf.h | 9 ++++++++
tools/lib/bpf/libbpf.map | 1 +
.../selftests/bpf/prog_tests/sockmap_basic.c | 19 ++++++++++++----
5 files changed, 51 insertions(+), 4 deletions(-)
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index f28b6527e815..ba6c39f40f10 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1509,6 +1509,10 @@ union bpf_attr {
__aligned_u64 next_key;
};
__u64 flags;
+ union {
+ /* specify the CPU where the sockmap job runs */
+ __aligned_u64 target_cpu;
+ };
};
struct { /* struct used by BPF_MAP_*_BATCH commands */
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 2a4c71501a17..13c3f3cfe889 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -401,6 +401,28 @@ int bpf_map_update_elem(int fd, const void *key, const void *value,
return libbpf_err_errno(ret);
}
+int bpf_map_update_elem_opts(int fd, const void *key, const void *value,
+ __u64 flags, const struct bpf_map_update_opts *opts)
+{
+ union bpf_attr attr;
+ int ret;
+ __u64 *target_cpu;
+
+ if (!OPTS_VALID(opts, bpf_map_update_opts))
+ return libbpf_err(-EINVAL);
+
+ target_cpu = OPTS_GET(opts, target_cpu, NULL);
+ memset(&attr, 0, sizeof(attr));
+ attr.map_fd = fd;
+ attr.key = ptr_to_u64(key);
+ attr.value = ptr_to_u64(value);
+ attr.flags = flags;
+ attr.target_cpu = ptr_to_u64(target_cpu);
+
+ ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
+ return libbpf_err_errno(ret);
+}
+
int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
const size_t attr_sz = offsetofend(union bpf_attr, flags);
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index a4a7b1ad1b63..aec6dfddf697 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -147,6 +147,15 @@ LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size,
LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value,
__u64 flags);
+struct bpf_map_update_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+ /* specify the CPU where the sockmap job runs */
+ __u64 *target_cpu;
+ size_t :0;
+};
+#define bpf_map_update_opts__last_field target_cpu
+LIBBPF_API int bpf_map_update_elem_opts(int fd, const void *key, const void *value,
+ __u64 flags, const struct bpf_map_update_opts *opts);
LIBBPF_API int bpf_map_lookup_elem(int fd, const void *key, void *value);
LIBBPF_API int bpf_map_lookup_elem_flags(int fd, const void *key, void *value,
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 54b6f312cfa8..5a26a1d8624f 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -17,6 +17,7 @@ LIBBPF_0.0.1 {
bpf_map_lookup_and_delete_elem;
bpf_map_lookup_elem;
bpf_map_update_elem;
+ bpf_map_update_elem_opts;
bpf_obj_get;
bpf_obj_get_info_by_fd;
bpf_obj_pin;
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
index 82bfb266741c..84a35cb4b9fe 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
@@ -190,13 +190,18 @@ static void test_skmsg_helpers_with_link(enum bpf_map_type map_type)
test_skmsg_load_helpers__destroy(skel);
}
-static void test_sockmap_update(enum bpf_map_type map_type)
+static void test_sockmap_update(enum bpf_map_type map_type, bool cpu_affinity)
{
int err, prog, src;
struct test_sockmap_update *skel;
struct bpf_map *dst_map;
const __u32 zero = 0;
char dummy[14] = {0};
+ __u64 target_cpu = 0;
+
+ LIBBPF_OPTS(bpf_map_update_opts, update_opts,
+ .target_cpu = &target_cpu,
+ );
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = dummy,
.data_size_in = sizeof(dummy),
@@ -219,7 +224,11 @@ static void test_sockmap_update(enum bpf_map_type map_type)
else
dst_map = skel->maps.dst_sock_hash;
- err = bpf_map_update_elem(src, &zero, &sk, BPF_NOEXIST);
+ if (cpu_affinity)
+ err = bpf_map_update_elem_opts(src, &zero, &sk, BPF_NOEXIST, &update_opts);
+ else
+ err = bpf_map_update_elem(src, &zero, &sk, BPF_NOEXIST);
+
if (!ASSERT_OK(err, "update_elem(src)"))
goto out;
@@ -896,9 +905,11 @@ void test_sockmap_basic(void)
if (test__start_subtest("sockhash sk_msg load helpers"))
test_skmsg_helpers(BPF_MAP_TYPE_SOCKHASH);
if (test__start_subtest("sockmap update"))
- test_sockmap_update(BPF_MAP_TYPE_SOCKMAP);
+ test_sockmap_update(BPF_MAP_TYPE_SOCKMAP, false);
+ if (test__start_subtest("sockmap update cpu affinity"))
+ test_sockmap_update(BPF_MAP_TYPE_SOCKMAP, true);
if (test__start_subtest("sockhash update"))
- test_sockmap_update(BPF_MAP_TYPE_SOCKHASH);
+ test_sockmap_update(BPF_MAP_TYPE_SOCKHASH, false);
if (test__start_subtest("sockmap update in unsafe context"))
test_sockmap_invalid_update();
if (test__start_subtest("sockmap copy"))
--
2.43.5
On Fri, Nov 1, 2024 at 9:17 AM mrpre <mrpre@163.com> wrote: > > implement libbpf sockmap cpu affinity > > Signed-off-by: Jiayuan Chen <mrpre@163.com> > --- > tools/include/uapi/linux/bpf.h | 4 ++++ > tools/lib/bpf/bpf.c | 22 +++++++++++++++++++ > tools/lib/bpf/bpf.h | 9 ++++++++ > tools/lib/bpf/libbpf.map | 1 + > .../selftests/bpf/prog_tests/sockmap_basic.c | 19 ++++++++++++---- please split out selftests into a separate patch from libbpf changes (but I hope we won't need libbpf changes at all) > 5 files changed, 51 insertions(+), 4 deletions(-) > > diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h > index f28b6527e815..ba6c39f40f10 100644 > --- a/tools/include/uapi/linux/bpf.h > +++ b/tools/include/uapi/linux/bpf.h > @@ -1509,6 +1509,10 @@ union bpf_attr { > __aligned_u64 next_key; > }; > __u64 flags; > + union { > + /* specify the CPU where sockmap job run on */ > + __aligned_u64 target_cpu; > + }; > }; > > struct { /* struct used by BPF_MAP_*_BATCH commands */ > diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c > index 2a4c71501a17..13c3f3cfe889 100644 > --- a/tools/lib/bpf/bpf.c > +++ b/tools/lib/bpf/bpf.c > @@ -401,6 +401,28 @@ int bpf_map_update_elem(int fd, const void *key, const void *value, > return libbpf_err_errno(ret); > } > > +int bpf_map_update_elem_opts(int fd, const void *key, const void *value, > + __u64 flags, const struct bpf_map_update_opts *opts) > +{ > + union bpf_attr attr; > + int ret; > + __u64 *target_cpu; > + > + if (!OPTS_VALID(opts, bpf_map_update_opts)) > + return libbpf_err(-EINVAL); > + > + target_cpu = OPTS_GET(opts, target_cpu, NULL); > + memset(&attr, 0, sizeof(attr)); > + attr.map_fd = fd; > + attr.key = ptr_to_u64(key); > + attr.value = ptr_to_u64(value); > + attr.flags = flags; > + attr.target_cpu = ptr_to_u64(target_cpu); > + > + ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)); > + return libbpf_err_errno(ret); > +} > + > int bpf_map_lookup_elem(int fd, const void *key, void *value) > { 
> const size_t attr_sz = offsetofend(union bpf_attr, flags); > diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h > index a4a7b1ad1b63..aec6dfddf697 100644 > --- a/tools/lib/bpf/bpf.h > +++ b/tools/lib/bpf/bpf.h > @@ -147,6 +147,15 @@ LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size, > > LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value, > __u64 flags); > +struct bpf_map_update_opts { > + size_t sz; /* size of this struct for forward/backward compatibility */ > + /* specify the CPU where the sockmap job run on */ > + __u64 *target_cpu; > + size_t :0; > +}; > +#define bpf_map_update_opts__last_field target_cpu > +LIBBPF_API int bpf_map_update_elem_opts(int fd, const void *key, const void *value, > + __u64 flags, const struct bpf_map_update_opts *opts); > > LIBBPF_API int bpf_map_lookup_elem(int fd, const void *key, void *value); > LIBBPF_API int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, > diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map > index 54b6f312cfa8..5a26a1d8624f 100644 > --- a/tools/lib/bpf/libbpf.map > +++ b/tools/lib/bpf/libbpf.map > @@ -17,6 +17,7 @@ LIBBPF_0.0.1 { > bpf_map_lookup_and_delete_elem; > bpf_map_lookup_elem; > bpf_map_update_elem; > + bpf_map_update_elem_opts; when you are touching unfamiliar code, look around and see what others did. Did you notice versioned sections in this file? Do you think adding a new API to 0.0.1 section makes sense when we are already at 1.6.0? 
> bpf_obj_get; > bpf_obj_get_info_by_fd; > bpf_obj_pin; > diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c > index 82bfb266741c..84a35cb4b9fe 100644 > --- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c > +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c > @@ -190,13 +190,18 @@ static void test_skmsg_helpers_with_link(enum bpf_map_type map_type) > test_skmsg_load_helpers__destroy(skel); > } > > -static void test_sockmap_update(enum bpf_map_type map_type) > +static void test_sockmap_update(enum bpf_map_type map_type, bool cpu_affinity) > { > int err, prog, src; > struct test_sockmap_update *skel; > struct bpf_map *dst_map; > const __u32 zero = 0; > char dummy[14] = {0}; > + __u64 target_cpu = 0; > + > + LIBBPF_OPTS(bpf_map_update_opts, update_opts, > + .target_cpu = &target_cpu, > + ); > LIBBPF_OPTS(bpf_test_run_opts, topts, > .data_in = dummy, > .data_size_in = sizeof(dummy), > @@ -219,7 +224,11 @@ static void test_sockmap_update(enum bpf_map_type map_type) > else > dst_map = skel->maps.dst_sock_hash; > > - err = bpf_map_update_elem(src, &zero, &sk, BPF_NOEXIST); > + if (cpu_affinity) > + err = bpf_map_update_elem_opts(src, &zero, &sk, BPF_NOEXIST, &update_opts); > + else > + err = bpf_map_update_elem(src, &zero, &sk, BPF_NOEXIST); > + > if (!ASSERT_OK(err, "update_elem(src)")) > goto out; > > @@ -896,9 +905,11 @@ void test_sockmap_basic(void) > if (test__start_subtest("sockhash sk_msg load helpers")) > test_skmsg_helpers(BPF_MAP_TYPE_SOCKHASH); > if (test__start_subtest("sockmap update")) > - test_sockmap_update(BPF_MAP_TYPE_SOCKMAP); > + test_sockmap_update(BPF_MAP_TYPE_SOCKMAP, false); > + if (test__start_subtest("sockmap update cpu affinity")) > + test_sockmap_update(BPF_MAP_TYPE_SOCKMAP, true); > if (test__start_subtest("sockhash update")) > - test_sockmap_update(BPF_MAP_TYPE_SOCKHASH); > + test_sockmap_update(BPF_MAP_TYPE_SOCKHASH, false); > if 
(test__start_subtest("sockmap update in unsafe context")) > test_sockmap_invalid_update(); > if (test__start_subtest("sockmap copy")) > -- > 2.43.5 > >
© 2016 - 2024 Red Hat, Inc.