From: Geliang Tang <tanggeliang@kylinos.cn>
Keep the MPTCP scheduler API unchanged. Inside the scheduler, iterate over
subflows with the new mptcp_subflow_sched iterator instead of the plain
mptcp_subflow iterator, passing the scheduler data to the iterator.
Signed-off-by: Geliang Tang <tanggeliang@kylinos.cn>
---
tools/testing/selftests/bpf/bpf_experimental.h | 9 +++++++++
tools/testing/selftests/bpf/progs/mptcp_bpf_bkup.c | 5 +++--
2 files changed, 12 insertions(+), 2 deletions(-)
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index 6a96c56f0725..b8edad5c8bf0 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -583,6 +583,15 @@ bpf_iter_mptcp_subflow_next(struct bpf_iter_mptcp_subflow *it) __weak __ksym;
extern void
bpf_iter_mptcp_subflow_destroy(struct bpf_iter_mptcp_subflow *it) __weak __ksym;
+struct bpf_iter_mptcp_subflow_sched;
+extern int bpf_iter_mptcp_subflow_sched_new(struct bpf_iter_mptcp_subflow_sched *it,
+ struct sock *sk,
+ struct mptcp_sched_data *data) __weak __ksym;
+extern struct mptcp_subflow_context *
+bpf_iter_mptcp_subflow_sched_next(struct bpf_iter_mptcp_subflow_sched *it) __weak __ksym;
+extern void
+bpf_iter_mptcp_subflow_sched_destroy(struct bpf_iter_mptcp_subflow_sched *it) __weak __ksym;
+
extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym;
extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym;
extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
diff --git a/tools/testing/selftests/bpf/progs/mptcp_bpf_bkup.c b/tools/testing/selftests/bpf/progs/mptcp_bpf_bkup.c
index 284cca708de0..1719b0b2a182 100644
--- a/tools/testing/selftests/bpf/progs/mptcp_bpf_bkup.c
+++ b/tools/testing/selftests/bpf/progs/mptcp_bpf_bkup.c
@@ -17,11 +17,12 @@ void BPF_PROG(mptcp_sched_bkup_release, struct mptcp_sock *msk)
}
SEC("struct_ops")
-int BPF_PROG(bpf_bkup_get_send, struct mptcp_sock *msk)
+int BPF_PROG(bpf_bkup_get_send, struct mptcp_sock *msk,
+ struct mptcp_sched_data *data)
{
struct mptcp_subflow_context *subflow;
- bpf_for_each(mptcp_subflow, subflow, (struct sock *)msk) {
+ bpf_for_each(mptcp_subflow_sched, subflow, (struct sock *)msk, data) {
if (!BPF_CORE_READ_BITFIELD_PROBED(subflow, backup) ||
!BPF_CORE_READ_BITFIELD_PROBED(subflow, request_bkup)) {
mptcp_subflow_set_scheduled(subflow, true);
--
2.43.0