From: Hui Zhu <zhuhui@kylinos.cn>
Add a new selftest, `test_memcg_ops_hierarchies`, to validate the
behavior of attaching `memcg_bpf_ops` in a nested cgroup hierarchy,
specifically testing the `BPF_F_ALLOW_OVERRIDE` flag.
The test case performs the following steps:
1. Creates a three-level cgroup hierarchy: `/cg`, `/cg/cg`, and
`/cg/cg/cg`.
2. Attaches a BPF struct_ops to the top-level cgroup (`/cg`) with the
`BPF_F_ALLOW_OVERRIDE` flag.
3. Attaches the same struct_ops map to the middle cgroup (`/cg/cg`)
without the flag, which succeeds and overrides the attachment inherited
from `/cg`.
4. Asserts that attaching to the deepest cgroup (`/cg/cg/cg`) fails with
-EBUSY, because its parent (`/cg/cg`) was attached without
`BPF_F_ALLOW_OVERRIDE`.
This test ensures that the attachment logic correctly enforces the
override rules across a cgroup subtree.
Signed-off-by: Geliang Tang <geliang@kernel.org>
Signed-off-by: Hui Zhu <zhuhui@kylinos.cn>
---
.../selftests/bpf/prog_tests/memcg_ops.c | 75 ++++++++++++++++++++
1 file changed, 75 insertions(+)
diff --git a/tools/testing/selftests/bpf/prog_tests/memcg_ops.c b/tools/testing/selftests/bpf/prog_tests/memcg_ops.c
index 8c787439f83c..378ee3b3bc01 100644
--- a/tools/testing/selftests/bpf/prog_tests/memcg_ops.c
+++ b/tools/testing/selftests/bpf/prog_tests/memcg_ops.c
@@ -553,3 +553,78 @@ void test_memcg_ops_below_min_over_high(void)
close(low_cgroup_fd);
cleanup_cgroup_environment();
}
+
+void test_memcg_ops_hierarchies(void)
+{
+ int ret, first = -1, second = -1, third = -1;
+ struct memcg_ops *skel = NULL;
+ struct bpf_map *map;
+ struct bpf_link *link1 = NULL, *link2 = NULL, *link3 = NULL;
+ DECLARE_LIBBPF_OPTS(bpf_struct_ops_opts, opts);
+
+ ret = setup_cgroup_environment();
+ if (!ASSERT_OK(ret, "setup_cgroup_environment"))
+ goto cleanup;
+
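+ /* Build a three-level cgroup hierarchy: /cg, /cg/cg and /cg/cg/cg. */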
+ first = create_and_get_cgroup("/cg");
+ if (!ASSERT_GE(first, 0, "create_and_get_cgroup /cg"))
+ goto cleanup;
+ ret = enable_controllers("/cg", "memory");
+ if (!ASSERT_OK(ret, "enable_controllers /cg"))
+ goto cleanup;
+
+ second = create_and_get_cgroup("/cg/cg");
+ if (!ASSERT_GE(second, 0, "create_and_get_cgroup /cg/cg"))
+ goto cleanup;
+ ret = enable_controllers("/cg/cg", "memory");
+ if (!ASSERT_OK(ret, "enable_controllers /cg/cg"))
+ goto cleanup;
+
+ third = create_and_get_cgroup("/cg/cg/cg");
+ if (!ASSERT_GE(third, 0, "create_and_get_cgroup /cg/cg/cg"))
+ goto cleanup;
+ ret = enable_controllers("/cg/cg/cg", "memory");
+ if (!ASSERT_OK(ret, "enable_controllers /cg/cg/cg"))
+ goto cleanup;
+
+ skel = memcg_ops__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "memcg_ops__open_and_load"))
+ goto cleanup;
+
+ map = bpf_object__find_map_by_name(skel->obj, "low_mcg_ops");
+ if (!ASSERT_OK_PTR(map, "bpf_object__find_map_by_name low_mcg_ops"))
+ goto cleanup;
+
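+ /* Attach to /cg with BPF_F_ALLOW_OVERRIDE so descendants can override it. */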
+ opts.relative_fd = first;
+ opts.flags = BPF_F_ALLOW_OVERRIDE;
+ link1 = bpf_map__attach_struct_ops_opts(map, &opts);
+ if (!ASSERT_OK_PTR(link1, "bpf_map__attach_struct_ops_opts /cg"))
+ goto cleanup;
+
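+ /* Attaching to /cg/cg without the flag overrides the inherited attachment. */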
+ opts.relative_fd = second;
+ opts.flags = 0;
+ link2 = bpf_map__attach_struct_ops_opts(map, &opts);
+ if (!ASSERT_OK_PTR(link2, "bpf_map__attach_struct_ops_opts /cg/cg"))
+ goto cleanup;
+
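+ /* /cg/cg did not allow override, so attaching to /cg/cg/cg must fail. */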
+ opts.relative_fd = third;
+ opts.flags = 0;
+ link3 = bpf_map__attach_struct_ops_opts(map, &opts);
+ if (!ASSERT_ERR_PTR(link3, "bpf_map__attach_struct_ops_opts /cg/cg/cg"))
+ goto cleanup;
+
+cleanup:
+ bpf_link__destroy(link1);
+ bpf_link__destroy(link2);
+ bpf_link__destroy(link3);
+ memcg_ops__detach(skel);
+ memcg_ops__destroy(skel);
+ close(first);
+ close(second);
+ close(third);
+ cleanup_cgroup_environment();
+}
--
2.43.0