Add a new helper bpf_mptcp_get_next_subflow() to get the next available
subflow of the given msk.
Register this helper in bpf_mptcp_sched_kfunc_init() so that it can be
invoked from BPF context.
Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
net/mptcp/bpf.c | 34 ++++++++++++++++++++++++++++++++++
1 file changed, 34 insertions(+)
diff --git a/net/mptcp/bpf.c b/net/mptcp/bpf.c
index a3c41c079fe4..94671d5e80d1 100644
--- a/net/mptcp/bpf.c
+++ b/net/mptcp/bpf.c
@@ -114,6 +114,24 @@ struct bpf_struct_ops bpf_mptcp_sched_ops = {
 	.name		= "mptcp_sched_ops",
 };
 
+/* kfuncs callable from MPTCP scheduler (struct_ops) BPF programs. */
+BTF_SET_START(bpf_mptcp_sched_kfunc_ids)
+BTF_ID(func, bpf_mptcp_get_next_subflow)
+BTF_SET_END(bpf_mptcp_sched_kfunc_ids)
+
+static const struct btf_kfunc_id_set bpf_mptcp_sched_kfunc_set = {
+	.owner		= THIS_MODULE,
+	.check_set	= &bpf_mptcp_sched_kfunc_ids,
+};
+
+static int __init bpf_mptcp_sched_kfunc_init(void)
+{
+	return register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
+					 &bpf_mptcp_sched_kfunc_set);
+}
+/* late_initcall: BTF vmlinux data must be ready before registering the set. */
+late_initcall(bpf_mptcp_sched_kfunc_init);
+
 struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk)
struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk)
{
if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP && sk_is_mptcp(sk))
@@ -122,3 +138,27 @@ struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk)
 	return NULL;
 }
 EXPORT_SYMBOL(bpf_mptcp_sock_from_subflow);
+
+/* bpf_mptcp_get_next_subflow - round-robin pick of the next subflow of @msk.
+ *
+ * Returns the subflow following msk->last_snd in conn_list, falling back
+ * to the first subflow when last_snd is unset, not found, or is the tail
+ * of the list (wrap-around).  Returns NULL only when conn_list is empty.
+ */
+struct mptcp_subflow_context *bpf_mptcp_get_next_subflow(struct mptcp_sock *msk)
+{
+	struct mptcp_subflow_context *subflow, *next;
+
+	next = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
+	mptcp_for_each_subflow(msk, subflow) {
+		if (msk->last_snd && subflow->tcp_sock == msk->last_snd) {
+			/* Found last_snd: advance unless it is the tail, then
+			 * stop — no later entry can match, so don't keep walking.
+			 */
+			if (!list_is_last(&subflow->node, &msk->conn_list))
+				next = list_next_entry(subflow, node);
+			break;
+		}
+	}
+
+	return next;
+}
+EXPORT_SYMBOL(bpf_mptcp_get_next_subflow);
--
2.34.1