When cleaning up unaccepted MPTCP sockets still lying inside
the listener queue at listener close time, such sockets will
go through a regular close, waiting for a timeout before
shutting down the subflows.
There is no need to keep the kernel resources in use for
such a possibly long time: short-circuit to fast-close.
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
net/mptcp/protocol.c | 7 +++++--
net/mptcp/subflow.c | 2 +-
2 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index b867e3eec5b9..f742d558a1b8 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2977,10 +2977,13 @@ bool __mptcp_close(struct sock *sk, long timeout)
goto cleanup;
}
- if (mptcp_check_readable(msk)) {
- /* the msk has read data, do the MPTCP equivalent of TCP reset */
+ if (mptcp_check_readable(msk) || timeout < 0) {
+ /* If the msk has read data, or the caller explicitly asks for it,
+ * do the MPTCP equivalent of TCP reset, aka MPTCP fastclose
+ */
inet_sk_state_store(sk, TCP_CLOSE);
mptcp_do_fastclose(sk);
+ timeout = 0;
} else if (mptcp_close_state(sk)) {
__mptcp_wr_shutdown(sk);
}
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 6f198d6e1b22..a1f8bb745c1b 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1856,7 +1856,7 @@ void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_s
if (msk->first)
sock_hold(msk->first);
- do_cancel_work = __mptcp_close(sk, 0);
+ do_cancel_work = __mptcp_close(sk, -1);
release_sock(sk);
if (do_cancel_work) {
/* lockdep will report a false positive ABBA deadlock
--
2.39.0