This uses the helper and APIs to schedule one MSS at a time,
alternating between subflows.

The scheduler now really acts as a round-robin scheduler: packets are
equally balanced across all paths.
Signed-off-by: Gregory Detal <gregory.detal@gmail.com>
---
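Note: the sketch below shows one way a userspace loader for this
scheduler could look. It is only a hypothetical example: the object
file name "mptcp_bpf_rr.bpf.o" and the use of the net.mptcp.scheduler
sysctl to select "bpf_rr" are assumptions about how the struct_ops
gets registered, not something this patch adds.

    /* rr_loader.c -- register the "rr" struct_ops map with libbpf.
     * Once registered, the scheduler could be selected with:
     *   sysctl -w net.mptcp.scheduler=bpf_rr    (assumed sysctl)
     */
    #include <unistd.h>
    #include <bpf/libbpf.h>

    int main(void)
    {
            struct bpf_object *obj;
            struct bpf_link *link;
            struct bpf_map *map;

            obj = bpf_object__open_file("mptcp_bpf_rr.bpf.o", NULL);
            if (!obj || bpf_object__load(obj))
                    return 1;

            /* "rr" is the struct mptcp_sched_ops map in the BPF prog */
            map = bpf_object__find_map_by_name(obj, "rr");
            if (!map)
                    return 1;

            /* Registering the struct_ops makes "bpf_rr" selectable */
            link = bpf_map__attach_struct_ops(map);
            if (!link)
                    return 1;

            pause(); /* keep the link, and thus the scheduler, alive */
            bpf_link__destroy(link);
            bpf_object__close(obj);
            return 0;
    }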
 tools/testing/selftests/bpf/progs/mptcp_bpf.h    |  1 +
 tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c | 18 ++++++++++++++++++
 2 files changed, 19 insertions(+)
diff --git a/tools/testing/selftests/bpf/progs/mptcp_bpf.h b/tools/testing/selftests/bpf/progs/mptcp_bpf.h
index 782f36ed027e..a289746666dd 100644
--- a/tools/testing/selftests/bpf/progs/mptcp_bpf.h
+++ b/tools/testing/selftests/bpf/progs/mptcp_bpf.h
@@ -6,6 +6,7 @@
 #include <bpf/bpf_core_read.h>
 
 #define MPTCP_SUBFLOWS_MAX 8
+#define MPTCP_SCHED_FLAG_RESCHEDULE (1 << 0)
 
 extern void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
 					bool scheduled) __ksym;
diff --git a/tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c b/tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c
index 638ea6aa63b7..42c11fa483b1 100644
--- a/tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c
+++ b/tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c
@@ -69,10 +69,28 @@ int BPF_PROG(bpf_rr_get_subflow, struct mptcp_sock *msk,
 	return 0;
 }
 
+SEC("struct_ops")
+void BPF_PROG(bpf_rr_push, struct mptcp_sock *msk,
+	      struct mptcp_subflow_context *subflow,
+	      struct mptcp_sched_chunk *chunk)
+{
+	struct tcp_sock *tp = bpf_skc_to_tcp_sock(mptcp_subflow_tcp_sock(subflow));
+
+	if (!tp) {
+		/* Should not happen; in that case, keep the default behaviour. */
+		return;
+	}
+
+	/* Limit each push to one MSS, so the scheduler runs again for the next one. */
+	chunk->limit = tp->mss_cache;
+	chunk->flags |= MPTCP_SCHED_FLAG_RESCHEDULE;
+}
+
SEC(".struct_ops")
struct mptcp_sched_ops rr = {
.init = (void *)mptcp_sched_rr_init,
.release = (void *)mptcp_sched_rr_release,
.get_subflow = (void *)bpf_rr_get_subflow,
+ .push = (void *)bpf_rr_push,
.name = "bpf_rr",
};
--
2.43.0