This patch implements the redundant BPF MPTCP scheduler, named bpf_red,
which marks every available subflow as scheduled so that each packet is
transmitted on all of them.

Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
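Notes:

A rough sketch of how this scheduler could be exercised from user space
once the selftest prog is built (not part of this patch): the skeleton
and map names below follow the usual bpftool skeleton conventions for
this file, and the net.mptcp.scheduler sysctl is assumed to be the one
introduced by the in-kernel scheduler API series.

/* Hypothetical loader sketch: register the "red" struct_ops map and
 * select the "bpf_red" scheduler for new MPTCP sockets.
 */
#include <stdlib.h>
#include <bpf/libbpf.h>
#include "mptcp_bpf_red.skel.h"

int main(void)
{
	struct mptcp_bpf_red *skel;
	struct bpf_link *link;

	skel = mptcp_bpf_red__open_and_load();
	if (!skel)
		return 1;

	/* Registering the struct_ops map makes "bpf_red" known to the
	 * kernel as an MPTCP packet scheduler.
	 */
	link = bpf_map__attach_struct_ops(skel->maps.red);
	if (!link) {
		mptcp_bpf_red__destroy(skel);
		return 1;
	}

	/* Assumed sysctl from the scheduler API; after this, new MPTCP
	 * sockets use bpf_red and traffic can be checked to confirm
	 * every subflow carries a copy of the data.
	 */
	system("sysctl -w net.mptcp.scheduler=bpf_red");

	/* ... run MPTCP traffic here ... */

	bpf_link__destroy(link);
	mptcp_bpf_red__destroy(skel);
	return 0;
}
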
.../selftests/bpf/progs/mptcp_bpf_red.c | 45 +++++++++++++++++++
1 file changed, 45 insertions(+)
create mode 100644 tools/testing/selftests/bpf/progs/mptcp_bpf_red.c

diff --git a/tools/testing/selftests/bpf/progs/mptcp_bpf_red.c b/tools/testing/selftests/bpf/progs/mptcp_bpf_red.c
new file mode 100644
index 000000000000..30dd6f521b7f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/mptcp_bpf_red.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, SUSE. */
+
+#include <linux/bpf.h>
+#include "bpf_tcp_helpers.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops/mptcp_sched_red_init")
+void BPF_PROG(mptcp_sched_red_init, const struct mptcp_sock *msk)
+{
+}
+
+SEC("struct_ops/mptcp_sched_red_release")
+void BPF_PROG(mptcp_sched_red_release, const struct mptcp_sock *msk)
+{
+}
+
+void BPF_STRUCT_OPS(bpf_red_data_init, const struct mptcp_sock *msk,
+		    struct mptcp_sched_data *data)
+{
+	mptcp_sched_data_set_contexts(msk, data);
+}
+
+int BPF_STRUCT_OPS(bpf_red_get_subflow, const struct mptcp_sock *msk,
+		   struct mptcp_sched_data *data)
+{
+	for (int i = 0; i < MPTCP_SUBFLOWS_MAX; i++) {
+		if (!data->contexts[i])
+			break;
+
+		mptcp_subflow_set_scheduled(data->contexts[i], true);
+	}
+
+	return 0;
+}
+
+SEC(".struct_ops")
+struct mptcp_sched_ops red = {
+	.init = (void *)mptcp_sched_red_init,
+	.release = (void *)mptcp_sched_red_release,
+	.data_init = (void *)bpf_red_data_init,
+	.get_subflow = (void *)bpf_red_get_subflow,
+	.name = "bpf_red",
+};
--
2.35.3