From: Menglong Dong <imagedong@tencent.com>
Account for MPTCP sockets in use with sock_prot_inuse_add(). This way,
the count of MPTCP sockets in use can be obtained from
/proc/net/protocols:
$ cat /proc/net/protocols
protocol size sockets memory press maxhdr slab module cl co di ac io in de sh ss gs se re sp bi br ha uh gp em
MPTCPv6 2048 0 0 no 0 yes kernel y n y y y y y y y y y y n n n y y y n
MPTCP 1896 1 0 no 0 yes kernel y n y y y y y y y y y y n n n y y y n
Signed-off-by: Menglong Dong <imagedong@tencent.com>
---
v6:
- introduce the 'MPTCP_INUSE' flag and check if msk is in use by it
v5:
- rebase to solve merge conflict
v4:
- rename MPTCP_DESTROIED to MPTCP_DESTROYED
v2:
- decrease the statistics for listening mptcp socket inuse with
mptcp_listen_inuse_dec()
- add MPTCP_DESTROIED flags to store if mptcp_destroy_common() was
called on the msk. For fallback case, we need to decrease the
statistics only once, and mptcp_destroy_common() can be called
more than once.
---
net/mptcp/protocol.c | 8 +++++++-
net/mptcp/protocol.h | 13 +++++++++++++
net/mptcp/subflow.c | 1 +
3 files changed, 21 insertions(+), 1 deletion(-)
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index f9bcc724d9e2..9c1152e16005 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -3073,6 +3073,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
msk->snd_una = msk->write_seq;
msk->wnd_end = msk->snd_nxt + req->rsk_rcv_wnd;
msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq;
+ clear_bit(MPTCP_INUSE, &msk->flags);
if (mp_opt->suboptions & OPTIONS_MPTCP_MPC) {
msk->can_ack = true;
@@ -3179,6 +3180,8 @@ void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
skb_rbtree_purge(&msk->out_of_order_queue);
mptcp_data_unlock(sk);
+ mptcp_inuse_dec(sk);
+
/* move all the rx fwd alloc into the sk_mem_reclaim_final in
* inet_sock_destruct() will dispose it
*/
@@ -3542,6 +3545,7 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
mptcp_token_destroy(msk);
inet_sk_state_store(sk, TCP_SYN_SENT);
+ mptcp_inuse_inc(sk);
subflow = mptcp_subflow_ctx(ssock->sk);
#ifdef CONFIG_TCP_MD5SIG
/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
@@ -3674,8 +3678,10 @@ static int mptcp_listen(struct socket *sock, int backlog)
err = ssock->ops->listen(ssock, backlog);
inet_sk_state_store(sk, inet_sk_state_load(ssock->sk));
- if (!err)
+ if (!err) {
+ mptcp_inuse_inc(sk);
mptcp_copy_inaddrs(sk, ssock->sk);
+ }
unlock:
release_sock(sk);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 6a09ab99a12d..441bbd77ae8e 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -116,6 +116,7 @@
#define MPTCP_WORK_EOF 3
#define MPTCP_FALLBACK_DONE 4
#define MPTCP_WORK_CLOSE_SUBFLOW 5
+#define MPTCP_INUSE 6
/* MPTCP socket release cb flags */
#define MPTCP_PUSH_PENDING 1
@@ -382,6 +383,18 @@ static inline struct mptcp_data_frag *mptcp_rtx_head(const struct sock *sk)
return list_first_entry_or_null(&msk->rtx_queue, struct mptcp_data_frag, list);
}
+static inline void mptcp_inuse_inc(const struct sock *sk)
+{
+ if (!test_and_set_bit(MPTCP_INUSE, &mptcp_sk(sk)->flags))
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+}
+
+static inline void mptcp_inuse_dec(const struct sock *sk)
+{
+ if (test_and_clear_bit(MPTCP_INUSE, &mptcp_sk(sk)->flags))
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+}
+
struct csum_pseudo_header {
__be64 data_seq;
__be32 subflow_seq;
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 437a283ba6ea..2e3bf29b4006 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -749,6 +749,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq;
mptcp_pm_new_connection(mptcp_sk(new_msk), child, 1);
mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
+ mptcp_inuse_inc(new_msk);
ctx->conn = new_msk;
new_msk = NULL;
--
2.37.2
Hello,
I'm sorry for the long delay with my reply.
On Tue, 2022-11-22 at 11:49 +0800, menglong8.dong@gmail.com wrote:
> @@ -382,6 +383,18 @@ static inline struct mptcp_data_frag *mptcp_rtx_head(const struct sock *sk)
> return list_first_entry_or_null(&msk->rtx_queue, struct mptcp_data_frag, list);
> }
>
> +static inline void mptcp_inuse_inc(const struct sock *sk)
> +{
> + if (!test_and_set_bit(MPTCP_INUSE, &mptcp_sk(sk)->flags))
> + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
> +}
> +
> +static inline void mptcp_inuse_dec(const struct sock *sk)
> +{
> + if (test_and_clear_bit(MPTCP_INUSE, &mptcp_sk(sk)->flags))
> + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
> +}
What concerns me most with this patch is the need for an additional
msk flag and the related atomic operations.
If tracking non fallback sockets is good enough for you - that would
probably make sense - you could place the sock_prot_inuse_add() in the
successful path of mptcp_token_new_connect(), mptcp_token_accept() and
mptcp_token_destroy() - the in use decrement.
As the token is unique, its creation/destruction corresponds to the
creation/destruction of a non-fallback mptcp socket. Additionally, that
somewhat corresponds to the TCP hash/unhash operations, so it will match
more closely the existing accounting in the TCP code.
WDYT?
Thanks!
Paolo
Hello,
On Fri, Dec 2, 2022 at 8:37 PM Paolo Abeni <pabeni@redhat.com> wrote:
>
> Hello,
>
> I'm sorry for the long delay with my reply.
>
> On Tue, 2022-11-22 at 11:49 +0800, menglong8.dong@gmail.com wrote:
> > @@ -382,6 +383,18 @@ static inline struct mptcp_data_frag *mptcp_rtx_head(const struct sock *sk)
> > return list_first_entry_or_null(&msk->rtx_queue, struct mptcp_data_frag, list);
> > }
> >
> > +static inline void mptcp_inuse_inc(const struct sock *sk)
> > +{
> > + if (!test_and_set_bit(MPTCP_INUSE, &mptcp_sk(sk)->flags))
> > + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
> > +}
> > +
> > +static inline void mptcp_inuse_dec(const struct sock *sk)
> > +{
> > + if (test_and_clear_bit(MPTCP_INUSE, &mptcp_sk(sk)->flags))
> > + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
> > +}
>
> What concern me most with this patch, is the need for an additional
> msk flag and the related atomic operations.
>
> If tracking non fallback sockets is good enough for you - that would
> probably make sense - you could place the sock_prot_inuse_add() in the
> successful path of mptcp_token_new_connect(), mptcp_token_accept() and
> mptcp_token_destroy() - the in use decrement.
>
> As the token is unique, it's creation/destruction correspond to the
> creation/destruction of a non fallback mptcp socket. Additionally that
> somewhat correspond to TCP hash/unhash operation, so it will match more
> closely the existing accounting in TCP code.
>
I think it's a perfect idea. I considered using mptcp_token_new_connect,
mptcp_token_accept and mptcp_token_destroy before, but I found it hard to
trace the fallback case, as I thought the fallback msk in the active
connection case is not hashed. However, the token of the 'msk' in that
case is not destroyed.
Therefore, we can now do the statistics according to the token create
and destroy for connection case, as all 'msk' has the token except the
listening msk.
For listening msk, we use the method we used before.
BTW, is it ok to keep the fallback msk in the hash table? I think that
may have a small impact on performance.
Thanks!
Menglong Dong
> WDYT?
>
> Thanks!
>
> Paolo
>
© 2016 - 2025 Red Hat, Inc.