Add a new struct member sched in struct mptcp_sock, together with two
helpers, mptcp_init_sched() and mptcp_release_sched(), to initialize and
release it.

Initialize it with the sysctl scheduler in mptcp_init_sock(), copy the
scheduler from the parent socket in mptcp_sk_clone(), and release it in
__mptcp_destroy_sock().

Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
 net/mptcp/protocol.c |  4 ++++
 net/mptcp/protocol.h |  4 ++++
 net/mptcp/sched.c    | 21 +++++++++++++++++++++
 3 files changed, 29 insertions(+)
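
Not part of this patch, for review context only: a minimal sketch of how a
scheduler is expected to interact with the new helpers. The "example" ops
and its callback names are hypothetical; only the .init, .release and .name
members are implied by the hunks below, and both callbacks are optional.

static void mptcp_sched_example_init(struct mptcp_sock *msk)
{
	/* optional hook, invoked by mptcp_init_sched() once the
	 * scheduler has been attached to the msk
	 */
	pr_debug("example init: msk=%p", msk);
}

static void mptcp_sched_example_release(struct mptcp_sock *msk)
{
	/* optional hook, invoked by mptcp_release_sched() right
	 * before msk->sched is cleared
	 */
	pr_debug("example release: msk=%p", msk);
}

static struct mptcp_sched_ops mptcp_sched_example = {
	.init		= mptcp_sched_example_init,
	.release	= mptcp_sched_example_release,
	.name		= "example",
};

Passing NULL to mptcp_init_sched() is also valid: the msk then falls back
to mptcp_sched_default, so mptcp_init_sock() does not need to special-case
a missing sysctl entry.
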
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 81b77ade9511..c7969b9b567e 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2661,6 +2661,8 @@ static int mptcp_init_sock(struct sock *sk)
* propagate the correct value
*/
mptcp_ca_reset(sk);
+ mptcp_init_sched(mptcp_sk(sk),
+ mptcp_sched_find(net, mptcp_get_scheduler(net)));

sk_sockets_allocated_inc(sk);
sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
@@ -2816,6 +2818,7 @@ static void __mptcp_destroy_sock(struct sock *sk)
sk_stop_timer(sk, &sk->sk_timer);
mptcp_data_unlock(sk);
msk->pm.status = 0;
+ mptcp_release_sched(msk);

/* clears msk->subflow, allowing the following loop to close
* even the initial subflow
@@ -2993,6 +2996,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
msk->snd_una = msk->write_seq;
msk->wnd_end = msk->snd_nxt + req->rsk_rcv_wnd;
msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq;
+ mptcp_init_sched(msk, mptcp_sk(sk)->sched);

if (mp_opt->suboptions & OPTIONS_MPTCP_MPC) {
msk->can_ack = true;
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 3e69c1e5600a..ad45281e8562 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -288,6 +288,7 @@ struct mptcp_sock {
struct socket *subflow; /* outgoing connect/listener/!mp_capable */
struct sock *first;
struct mptcp_pm_data pm;
+ struct mptcp_sched_ops *sched;
struct {
u32 space; /* bytes copied in last measurement window */
u32 copied; /* bytes copied in this measurement window */
@@ -617,6 +618,9 @@ void mptcp_unregister_scheduler(const struct net *net,
struct mptcp_sched_ops *sched);
struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk);
void mptcp_sched_init(void);
+void mptcp_init_sched(struct mptcp_sock *msk,
+ struct mptcp_sched_ops *sched);
+void mptcp_release_sched(struct mptcp_sock *msk);

static inline bool __mptcp_subflow_active(struct mptcp_subflow_context *subflow)
{
diff --git a/net/mptcp/sched.c b/net/mptcp/sched.c
index f6ace8cfe865..e431b1eec5e1 100644
--- a/net/mptcp/sched.c
+++ b/net/mptcp/sched.c
@@ -122,3 +122,24 @@ void mptcp_sched_init(void)
panic("Failed to register MPTCP sched pernet subsystem.\n");
mptcp_register_scheduler(&init_net, &mptcp_sched_default);
}
+
+void mptcp_init_sched(struct mptcp_sock *msk,
+ struct mptcp_sched_ops *sched)
+{
+ if (!sched)
+ msk->sched = &mptcp_sched_default;
+ else
+ msk->sched = sched;
+
+ if (msk->sched->init)
+ msk->sched->init(msk);
+
+ pr_debug("sched=%s", msk->sched->name);
+}
+
+void mptcp_release_sched(struct mptcp_sock *msk)
+{
+ if (msk->sched && msk->sched->release)
+ msk->sched->release(msk);
+ msk->sched = NULL;
+}
--
2.34.1