From: Geliang Tang <tanggeliang@kylinos.cn>
This patch adds a subtest named test_subflow in test_mptcp to load and
verify the newly added MPTCP subflow BPF program. The goal is to make
sure it is possible to set different socket options per subflow, while
the userspace socket interface only lets the application set the same
socket options for the whole MPTCP connection and all of its subflows.
To check that, a client and a server are started in a dedicated netns,
with veth interfaces to simulate multiple paths. They will exchange data
to allow the creation of an additional subflow.
When the different subflows are being created, the new MPTCP subflow BPF
program sets some socket options: the mark (SO_MARK) and the TCP
congestion control algorithm (TCP_CONGESTION). The validation is done by
the same BPF program when userspace reads back the modified socket
options: userspace still sees the default values on the MPTCP
connection, while the BPF program sees different options set on each
subflow of the same MPTCP connection.
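
For reference, the per-subflow idea on the BPF side looks roughly like the
sketch below. This is only a simplified illustration, not the exact program
added in the previous patch: the socket option constants are redefined
locally, the mark and CC values are placeholders, and the pid check driven
by skel->bss->pid in the real program is omitted.

/* Simplified sketch of a per-subflow sockops program (illustration only). */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

/* Values from the socket UAPI headers, redefined here to keep the
 * sketch self-contained.
 */
#define SOL_SOCKET	1
#define SO_MARK		36
#define SOL_TCP		6
#define TCP_CONGESTION	13

SEC("sockops")
int sketch_mptcp_subflow(struct bpf_sock_ops *skops)
{
	char cc[] = "reno";	/* placeholder CC name */
	int mark = 1;		/* placeholder mark */

	/* Each MPTCP subflow is a TCP connection with its own sockops
	 * callbacks, so options set here apply to that subflow only,
	 * not to the whole MPTCP connection seen by userspace.
	 */
	if (skops->op != BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB &&
	    skops->op != BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
		return 1;

	bpf_setsockopt(skops, SOL_SOCKET, SO_MARK, &mark, sizeof(mark));
	bpf_setsockopt(skops, SOL_TCP, TCP_CONGESTION, cc, sizeof(cc));

	return 1;
}
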
Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/76
Signed-off-by: Geliang Tang <tanggeliang@kylinos.cn>
Reviewed-by: Mat Martineau <martineau@kernel.org>
Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
---
Notes:
  - v2 -> v3:
    - Use './mptcp_pm_nl_ctl' instead of 'ip mptcp', which is not supported
      by the BPF CI running IPRoute 5.5.0.
    - Use SYS_NOFAIL() in _ss_search() instead of calling system().
  - v3 -> v4:
    - Drop './mptcp_pm_nl_ctl', but skip this new test if 'ip mptcp' is
      not supported.
  - v4 -> v5:
    - Note that this new test is no longer skipped on the BPF CI, because
      'ip mptcp' is now supported after the switch from Ubuntu 20.04 to
      22.04.
    - Update the commit message to reflect the latest version.
    - The validations are no longer done using 'ss', but using the new
      BPF program added in the previous patch, to reduce the use of
      external dependencies. (Martin)
  - v5 -> v6:
    - Use usleep() instead of sleep().
---
tools/testing/selftests/bpf/prog_tests/mptcp.c | 127 +++++++++++++++++++++++++
1 file changed, 127 insertions(+)
diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c
index d2ca32fa3b21..c76a0d8c8f93 100644
--- a/tools/testing/selftests/bpf/prog_tests/mptcp.c
+++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c
@@ -5,12 +5,17 @@
 #include <linux/const.h>
 #include <netinet/in.h>
 #include <test_progs.h>
+#include <unistd.h>
 #include "cgroup_helpers.h"
 #include "network_helpers.h"
 #include "mptcp_sock.skel.h"
 #include "mptcpify.skel.h"
+#include "mptcp_subflow.skel.h"

 #define NS_TEST "mptcp_ns"
+#define ADDR_1 "10.0.1.1"
+#define ADDR_2 "10.0.1.2"
+#define PORT_1 10001

 #ifndef IPPROTO_MPTCP
 #define IPPROTO_MPTCP 262
@@ -335,10 +340,132 @@ static void test_mptcpify(void)
 	close(cgroup_fd);
 }
+static int endpoint_init(char *flags)
+{
+	SYS(fail, "ip -net %s link add veth1 type veth peer name veth2", NS_TEST);
+	SYS(fail, "ip -net %s addr add %s/24 dev veth1", NS_TEST, ADDR_1);
+	SYS(fail, "ip -net %s link set dev veth1 up", NS_TEST);
+	SYS(fail, "ip -net %s addr add %s/24 dev veth2", NS_TEST, ADDR_2);
+	SYS(fail, "ip -net %s link set dev veth2 up", NS_TEST);
+	if (SYS_NOFAIL("ip -net %s mptcp endpoint add %s %s", NS_TEST, ADDR_2, flags)) {
+		printf("'ip mptcp' not supported, skip this test.\n");
+		test__skip();
+		goto fail;
+	}
+
+	return 0;
+fail:
+	return -1;
+}
+
+static void wait_for_new_subflows(int fd)
+{
+	socklen_t len;
+	u8 subflows;
+	int err, i;
+
+	len = sizeof(subflows);
+	/* Wait max 1 sec for new subflows to be created */
+	for (i = 0; i < 10; i++) {
+		err = getsockopt(fd, SOL_MPTCP, MPTCP_INFO, &subflows, &len);
+		if (!err && subflows > 0)
+			break;
+
+		usleep(100000); /* 0.1s */
+	}
+}
+
+static void run_subflow(void)
+{
+	int server_fd, client_fd, err;
+	char new[TCP_CA_NAME_MAX];
+	char cc[TCP_CA_NAME_MAX];
+	unsigned int mark;
+	socklen_t len;
+
+	server_fd = start_mptcp_server(AF_INET, ADDR_1, PORT_1, 0);
+	if (!ASSERT_OK_FD(server_fd, "start_mptcp_server"))
+		return;
+
+	client_fd = connect_to_fd(server_fd, 0);
+	if (!ASSERT_OK_FD(client_fd, "connect_to_fd"))
+		goto close_server;
+
+	send_byte(client_fd);
+	wait_for_new_subflows(client_fd);
+
+	len = sizeof(mark);
+	err = getsockopt(client_fd, SOL_SOCKET, SO_MARK, &mark, &len);
+	if (ASSERT_OK(err, "getsockopt(client_fd, SO_MARK)"))
+		ASSERT_EQ(mark, 0, "mark");
+
+	len = sizeof(new);
+	err = getsockopt(client_fd, SOL_TCP, TCP_CONGESTION, new, &len);
+	if (ASSERT_OK(err, "getsockopt(client_fd, TCP_CONGESTION)")) {
+		get_msk_ca_name(cc);
+		ASSERT_STREQ(new, cc, "cc");
+	}
+
+	close(client_fd);
+close_server:
+	close(server_fd);
+}
+
+static void test_subflow(void)
+{
+	int cgroup_fd, prog_fd, err;
+	struct mptcp_subflow *skel;
+	struct nstoken *nstoken;
+	struct bpf_link *link;
+
+	cgroup_fd = test__join_cgroup("/mptcp_subflow");
+	if (!ASSERT_OK_FD(cgroup_fd, "join_cgroup: mptcp_subflow"))
+		return;
+
+	skel = mptcp_subflow__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_open_load: mptcp_subflow"))
+		goto close_cgroup;
+
+	skel->bss->pid = getpid();
+
+	err = mptcp_subflow__attach(skel);
+	if (!ASSERT_OK(err, "skel_attach: mptcp_subflow"))
+		goto skel_destroy;
+
+	prog_fd = bpf_program__fd(skel->progs.mptcp_subflow);
+	err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_SOCK_OPS, 0);
+	if (!ASSERT_OK(err, "prog_attach"))
+		goto skel_destroy;
+
+	nstoken = create_netns();
+	if (!ASSERT_OK_PTR(nstoken, "create_netns: mptcp_subflow"))
+		goto skel_destroy;
+
+	if (endpoint_init("subflow") < 0)
+		goto close_netns;
+
+	link = bpf_program__attach_cgroup(skel->progs._getsockopt_subflow,
+					  cgroup_fd);
+	if (!ASSERT_OK_PTR(link, "getsockopt prog"))
+		goto close_netns;
+
+	run_subflow();
+
+	bpf_link__destroy(link);
+close_netns:
+	cleanup_netns(nstoken);
+skel_destroy:
+	mptcp_subflow__destroy(skel);
+close_cgroup:
+	close(cgroup_fd);
+}
+
 void test_mptcp(void)
 {
 	if (test__start_subtest("base"))
 		test_base();
 	if (test__start_subtest("mptcpify"))
 		test_mptcpify();
+	if (test__start_subtest("subflow"))
+		test_subflow();
 }
--
2.45.2
On 9/11/24 8:16 AM, Matthieu Baerts (NGI0) wrote:
> +static void test_subflow(void)
> +{
> +	int cgroup_fd, prog_fd, err;
> +	struct mptcp_subflow *skel;
> +	struct nstoken *nstoken;
> +	struct bpf_link *link;
> +
> +	cgroup_fd = test__join_cgroup("/mptcp_subflow");
> +	if (!ASSERT_OK_FD(cgroup_fd, "join_cgroup: mptcp_subflow"))
> +		return;
> +
> +	skel = mptcp_subflow__open_and_load();
> +	if (!ASSERT_OK_PTR(skel, "skel_open_load: mptcp_subflow"))
> +		goto close_cgroup;
> +
> +	skel->bss->pid = getpid();
> +
> +	err = mptcp_subflow__attach(skel);

This is not needed.

> +	if (!ASSERT_OK(err, "skel_attach: mptcp_subflow"))
> +		goto skel_destroy;
> +
> +	prog_fd = bpf_program__fd(skel->progs.mptcp_subflow);
> +	err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_SOCK_OPS, 0);

Use bpf_program__attach_cgroup here instead since ...

> +	if (!ASSERT_OK(err, "prog_attach"))
> +		goto skel_destroy;
> +
> +	nstoken = create_netns();
> +	if (!ASSERT_OK_PTR(nstoken, "create_netns: mptcp_subflow"))
> +		goto skel_destroy;
> +
> +	if (endpoint_init("subflow") < 0)
> +		goto close_netns;
> +
> +	link = bpf_program__attach_cgroup(skel->progs._getsockopt_subflow,
> +					  cgroup_fd);

... bpf_program__attach_cgroup is used here also.

Instead of declaring a local "link", use the skel->links.{mptcp_subflow, _getsockopt_subflow}.
Then mptcp_subflow__destroy(skel) will take care of them also.

e.g. "skel->links._getsockopt_subflow = bpf_program__attach_cgroup(...)"

pw-bot: cr
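
For reference, the suggested change would look roughly like the sketch below
(assuming both programs are attached with bpf_program__attach_cgroup() and
stored in the skeleton's links, so mptcp_subflow__destroy() detaches them;
the ASSERT messages are only illustrative):

	/* Sketch: no mptcp_subflow__attach() and no local "link"; both
	 * programs are attached via bpf_program__attach_cgroup() and kept
	 * in skel->links so mptcp_subflow__destroy() cleans them up.
	 */
	skel->links.mptcp_subflow =
		bpf_program__attach_cgroup(skel->progs.mptcp_subflow, cgroup_fd);
	if (!ASSERT_OK_PTR(skel->links.mptcp_subflow, "attach mptcp_subflow"))
		goto skel_destroy;

	/* ... netns and endpoint setup as before ... */

	skel->links._getsockopt_subflow =
		bpf_program__attach_cgroup(skel->progs._getsockopt_subflow,
					   cgroup_fd);
	if (!ASSERT_OK_PTR(skel->links._getsockopt_subflow,
			   "attach _getsockopt_subflow"))
		goto close_netns;

	run_subflow();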