mptcp_mp_fail_no_response shouldn't be invoked on each worker run; it
should be invoked only when the MP_FAIL response timeout occurs.

This patch refactors the MP_FAIL response timeout logic. Add fail_tout
in mptcp_sock to record the MP_FAIL response deadline. Check it in
mptcp_worker() before invoking mptcp_mp_fail_no_response(). Drop the
code that reused sk_timer for the MP_FAIL response.
Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/281
Fixes: d9fb797046c5 ("mptcp: Do not traverse the subflow connection list without lock")
Acked-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
net/mptcp/pm.c | 5 +----
net/mptcp/protocol.c | 4 +++-
net/mptcp/protocol.h | 1 +
net/mptcp/subflow.c | 10 ++--------
4 files changed, 7 insertions(+), 13 deletions(-)
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 59a85220edc9..45a9e02abf24 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -299,7 +299,6 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
struct mptcp_sock *msk = mptcp_sk(subflow->conn);
- struct sock *s = (struct sock *)msk;
pr_debug("fail_seq=%llu", fail_seq);
@@ -312,10 +311,8 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
subflow->send_mp_fail = 1;
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILTX);
subflow->send_infinite_map = 1;
- } else if (!sock_flag(sk, SOCK_DEAD)) {
+ } else {
pr_debug("MP_FAIL response received");
-
- sk_stop_timer(s, &s->sk_timer);
}
}
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index d6aef4b13b8a..917df5fb9708 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2562,7 +2562,8 @@ static void mptcp_worker(struct work_struct *work)
if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
__mptcp_retrans(sk);
- mptcp_mp_fail_no_response(msk);
+ if (time_after(jiffies, msk->fail_tout))
+ mptcp_mp_fail_no_response(msk);
unlock:
release_sock(sk);
@@ -2589,6 +2590,7 @@ static int __mptcp_init_sock(struct sock *sk)
WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
WRITE_ONCE(msk->allow_infinite_fallback, true);
msk->recovery = false;
+ msk->fail_tout = 0;
mptcp_pm_data_init(msk);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index d406b5afbee4..f7b01275af94 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -306,6 +306,7 @@ struct mptcp_sock {
u32 setsockopt_seq;
char ca_name[TCP_CA_NAME_MAX];
+ unsigned long fail_tout;
};
#define mptcp_data_lock(sk) spin_lock_bh(&(sk)->sk_lock.slock)
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 8841e8cd9ad8..866d54a0e83c 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -974,7 +974,6 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
bool csum_reqd = READ_ONCE(msk->csum_enabled);
- struct sock *sk = (struct sock *)msk;
struct mptcp_ext *mpext;
struct sk_buff *skb;
u16 data_len;
@@ -1016,9 +1015,6 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
pr_debug("infinite mapping received");
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
subflow->map_data_len = 0;
- if (!sock_flag(ssk, SOCK_DEAD))
- sk_stop_timer(sk, &sk->sk_timer);
-
return MAPPING_INVALID;
}
@@ -1238,11 +1234,9 @@ static bool subflow_check_data_avail(struct sock *ssk)
tcp_send_active_reset(ssk, GFP_ATOMIC);
while ((skb = skb_peek(&ssk->sk_receive_queue)))
sk_eat_skb(ssk, skb);
- } else if (!sock_flag(ssk, SOCK_DEAD)) {
+ } else {
WRITE_ONCE(subflow->mp_fail_response_expect, true);
- sk_reset_timer((struct sock *)msk,
- &((struct sock *)msk)->sk_timer,
- jiffies + TCP_RTO_MAX);
+ msk->fail_tout = jiffies + TCP_RTO_MAX;
}
WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
return true;
--
2.35.3
On Thu, 2022-06-09 at 08:00 +0800, Geliang Tang wrote:
> mptcp_mp_fail_no_response shouldn't be invoked on each worker run; it
> should be invoked only when the MP_FAIL response timeout occurs.
>
> This patch refactors the MP_FAIL response timeout logic. Add fail_tout
> in mptcp_sock to record the MP_FAIL response deadline. Check it in
> mptcp_worker() before invoking mptcp_mp_fail_no_response(). Drop the
> code that reused sk_timer for the MP_FAIL response.

I'm sorry for not noticing it on the previous version - likely for the
lack of clarity on my side: we still need to start a timer for the
MP_FAIL response; otherwise we are not assured that mptcp_worker()
will be triggered.

Additionally, we need some more care to manipulate the timer reset. I
think it's easier if I send a squash-to patch - assuming I'll have some
time for that early next week.

/P
On Thu, 9 Jun 2022, Paolo Abeni wrote:
> On Thu, 2022-06-09 at 08:00 +0800, Geliang Tang wrote:
>> mptcp_mp_fail_no_response shouldn't be invoked on each worker run; it
>> should be invoked only when the MP_FAIL response timeout occurs.
>>
>> This patch refactors the MP_FAIL response timeout logic. Add fail_tout
>> in mptcp_sock to record the MP_FAIL response deadline. Check it in
>> mptcp_worker() before invoking mptcp_mp_fail_no_response(). Drop the
>> code that reused sk_timer for the MP_FAIL response.
>
> I'm sorry for not noticing it on the previous version - likely for the
> lack of clarity on my side: we still need to start a timer for the
> MP_FAIL response; otherwise we are not assured that mptcp_worker()
> will be triggered.
>

(capturing this on the mailing list in addition to the recent meeting)

Yes, I agree. I saw that the timer stop calls were removed but missed
the timer reset removal. It is important for the timer to still be
started.

> Additionally, we need some more care to manipulate the timer reset. I
> think it's easier if I send a squash-to patch - assuming I'll have some
> time for that early next week.
>

Thanks Paolo!

--
Mat Martineau
Intel
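For illustration only, a minimal sketch of the direction discussed above:
keep the fail_tout deadline in the msk, but still arm a timer so that
mptcp_worker() is guaranteed to run once the timeout expires. The helper
names and placement below are assumptions for the sketch, not the actual
squash-to patch:

/* Sketch only (hypothetical helper): record the MP_FAIL deadline and
 * keep the msk-level timer armed so the worker is eventually scheduled
 * even if no other event wakes it up.
 */
static void mptcp_subflow_arm_fail_timeout(struct mptcp_sock *msk,
					   struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = (struct sock *)msk;
	unsigned long fail_tout = jiffies + TCP_RTO_MAX;

	WRITE_ONCE(subflow->mp_fail_response_expect, true);
	WRITE_ONCE(msk->fail_tout, fail_tout);
	sk_reset_timer(sk, &sk->sk_timer, fail_tout);
}

/* Sketch of the corresponding check in mptcp_worker(): only act once
 * the recorded deadline has expired, treating 0 as "nothing pending".
 */
static void mptcp_check_fail_timeout(struct mptcp_sock *msk)
{
	unsigned long fail_tout = READ_ONCE(msk->fail_tout);

	if (fail_tout && time_after(jiffies, fail_tout))
		mptcp_mp_fail_no_response(msk);
}

Whether sk_timer can simply be reused for this, or an existing MPTCP
timer should be rearmed instead, is exactly the part that needs the
extra care around the timer reset mentioned above.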