From: Paolo Abeni <pabeni@redhat.com>
To: mptcp@lists.linux.dev
Cc: Christoph Paasch
Subject: [PATCH mptcp-next v4 4/4] mptcp: refactor sndbuf auto-tuning.
Date: Wed, 20 Sep 2023 11:09:12 +0200
Message-ID: <5c1ff76635a9a7453d7a315a739c83d71a44536b.1695200723.git.pabeni@redhat.com>

The MPTCP protocol accounts for the data enqueued on all the subflows
against the main socket send buffer, while the send buffer auto-tuning
algorithm sets the main socket send buffer size to the maximum size
among the subflows. That causes poor performance when at least one
subflow is sndbuf-limited, e.g. due to very high latency, as the MPTCP
scheduler can't even fill such a buffer.

Change the send-buffer auto-tuning algorithm to compute the main socket
send buffer size as the sum of all the subflows' buffer sizes.
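To illustrate the difference (a standalone userspace sketch, not kernel
code; the per-subflow sndbuf values are made up for the example):

#include <stdio.h>

/* Hypothetical sndbuf sizes for two subflows: a fast path with a small
 * buffer and a high-latency path with a large one.
 */
static const int subflow_sndbuf[] = { 262144, 4194304 };
static const int tcp_wmem_min = 4096;	/* net.ipv4.tcp_wmem[0] */

int main(void)
{
	int i, old_sndbuf = 0, new_sndbuf = tcp_wmem_min;

	for (i = 0; i < 2; i++) {
		/* old algorithm: the msk sndbuf tracks the largest
		 * subflow sndbuf
		 */
		if (subflow_sndbuf[i] > old_sndbuf)
			old_sndbuf = subflow_sndbuf[i];
		/* new algorithm: the msk sndbuf tracks the sum */
		new_sndbuf += subflow_sndbuf[i];
	}
	printf("old msk sndbuf: %d\n", old_sndbuf);	/* 4194304 */
	printf("new msk sndbuf: %d\n", new_sndbuf);	/* 4460544 */
	return 0;
}

With the old rule the msk send buffer can't cover the data in flight on
both subflows at once; with the sum, the scheduler can keep every
subflow busy.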
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
v2 -> v3:
 - avoid incremental updates, always recompute sum(ssk->sndbuf) to
   avoid drift on memory pressure/decrease
---
 net/mptcp/protocol.c | 18 +++++++++++++--
 net/mptcp/protocol.h | 54 ++++++++++++++++++++++++++++++++++++++++----
 net/mptcp/subflow.c  |  3 +--
 3 files changed, 66 insertions(+), 9 deletions(-)

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index f727a7ee662d..0a9d00e794d4 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -891,6 +891,7 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
 	mptcp_sockopt_sync_locked(msk, ssk);
 	mptcp_subflow_joined(msk, ssk);
 	mptcp_stop_tout_timer(sk);
+	__mptcp_propagate_sndbuf(sk, ssk);
 	return true;
 }
 
@@ -1077,15 +1078,16 @@ static void mptcp_enter_memory_pressure(struct sock *sk)
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	bool first = true;
 
-	sk_stream_moderate_sndbuf(sk);
 	mptcp_for_each_subflow(msk, subflow) {
 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
 
 		if (first)
 			tcp_enter_memory_pressure(ssk);
 		sk_stream_moderate_sndbuf(ssk);
+
 		first = false;
 	}
+	__mptcp_sync_sndbuf(sk);
 }
 
 /* ensure we get enough memory for the frag hdr, beyond some minimal amount of
@@ -2436,6 +2438,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
 		WRITE_ONCE(msk->first, NULL);
 
 out:
+	__mptcp_sync_sndbuf(sk);
 	if (need_push)
 		__mptcp_push_pending(sk, 0);
 
@@ -3214,7 +3217,7 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
 	 * uses the correct data
 	 */
 	mptcp_copy_inaddrs(nsk, ssk);
-	mptcp_propagate_sndbuf(nsk, ssk);
+	__mptcp_propagate_sndbuf(nsk, ssk);
 
 	mptcp_rcv_space_init(msk, ssk);
 	bh_unlock_sock(nsk);
@@ -3392,6 +3395,8 @@ static void mptcp_release_cb(struct sock *sk)
 			__mptcp_set_connected(sk);
 		if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags))
 			__mptcp_error_report(sk);
+		if (__test_and_clear_bit(MPTCP_SYNC_SNDBUF, &msk->cb_flags))
+			__mptcp_sync_sndbuf(sk);
 	}
 
 	__mptcp_update_rmem(sk);
@@ -3436,6 +3441,14 @@ void mptcp_subflow_process_delegated(struct sock *ssk, long status)
 			__set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
 		mptcp_data_unlock(sk);
 	}
+	if (status & BIT(MPTCP_DELEGATE_SNDBUF)) {
+		mptcp_data_lock(sk);
+		if (!sock_owned_by_user(sk))
+			__mptcp_sync_sndbuf(sk);
+		else
+			__set_bit(MPTCP_SYNC_SNDBUF, &mptcp_sk(sk)->cb_flags);
+		mptcp_data_unlock(sk);
+	}
 	if (status & BIT(MPTCP_DELEGATE_ACK))
 		schedule_3rdack_retransmission(ssk);
 }
@@ -3520,6 +3533,7 @@ bool mptcp_finish_join(struct sock *ssk)
 	/* active subflow, already present inside the conn_list */
 	if (!list_empty(&subflow->node)) {
 		mptcp_subflow_joined(msk, ssk);
+		mptcp_propagate_sndbuf(parent, ssk);
 		return true;
 	}
 
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 7c7ad087d8ac..ab775e48c11d 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -123,6 +123,7 @@
 #define MPTCP_RETRANSMIT	4
 #define MPTCP_FLUSH_JOIN_LIST	5
 #define MPTCP_CONNECTED		6
+#define MPTCP_SYNC_SNDBUF	7
 
 struct mptcp_skb_cb {
 	u64 map_seq;
@@ -447,6 +448,7 @@ DECLARE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
 #define MPTCP_DELEGATE_SCHEDULED	0
 #define MPTCP_DELEGATE_SEND		1
 #define MPTCP_DELEGATE_ACK		2
+#define MPTCP_DELEGATE_SNDBUF		3
 
 #define MPTCP_DELEGATE_ACTIONS_MASK	(~BIT(MPTCP_DELEGATE_SCHEDULED))
 /* MPTCP subflow context */
@@ -520,6 +522,9 @@ struct mptcp_subflow_context {
 
 	u32	setsockopt_seq;
 	u32	stale_rcv_tstamp;
+	int	cached_sndbuf;	    /* sndbuf size when last synced with the msk sndbuf,
+				     * protected by the msk socket lock
+				     */
 
 	struct	sock *tcp_sock;	    /* tcp sk backpointer */
 	struct	sock *conn;	    /* parent mptcp_sock */
@@ -768,13 +773,52 @@ static inline bool mptcp_data_fin_enabled(const struct mptcp_sock *msk)
 	       READ_ONCE(msk->write_seq) == READ_ONCE(msk->snd_nxt);
 }
 
-static inline bool mptcp_propagate_sndbuf(struct sock *sk, struct sock *ssk)
+static inline void __mptcp_sync_sndbuf(struct sock *sk)
 {
-	if ((sk->sk_userlocks & SOCK_SNDBUF_LOCK) || ssk->sk_sndbuf <= READ_ONCE(sk->sk_sndbuf))
-		return false;
+	struct mptcp_subflow_context *subflow;
+	int ssk_sndbuf, new_sndbuf;
+
+	if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
+		return;
+
+	new_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[0];
+	mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
+		ssk_sndbuf = READ_ONCE(mptcp_subflow_tcp_sock(subflow)->sk_sndbuf);
+
+		subflow->cached_sndbuf = ssk_sndbuf;
+		new_sndbuf += ssk_sndbuf;
+	}
+
+	/* the msk max wmem limit is <nr_subflows> * tcp wmem[2] */
+	WRITE_ONCE(sk->sk_sndbuf, new_sndbuf);
+}
+
+/* The caller holds both the msk socket and the subflow socket locks,
+ * possibly under BH
+ */
+static inline void __mptcp_propagate_sndbuf(struct sock *sk, struct sock *ssk)
+{
+	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+
+	if (READ_ONCE(ssk->sk_sndbuf) != subflow->cached_sndbuf)
+		__mptcp_sync_sndbuf(sk);
+}
+
+/* The caller holds only the subflow socket lock, either in process or
+ * BH context. Additionally this can be called under the msk data lock,
+ * so we can't acquire such a lock here: let the delegated action
+ * acquire the needed locks in a suitable order.
+ */
+static inline void mptcp_propagate_sndbuf(struct sock *sk, struct sock *ssk)
+{
+	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+
+	if (likely(READ_ONCE(ssk->sk_sndbuf) == subflow->cached_sndbuf))
+		return;
 
-	WRITE_ONCE(sk->sk_sndbuf, ssk->sk_sndbuf);
-	return true;
+	local_bh_disable();
+	mptcp_subflow_delegate(subflow, MPTCP_DELEGATE_SNDBUF);
+	local_bh_enable();
 }
 
 static inline void mptcp_write_space(struct sock *sk)
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 6102e74121c3..0eae952064b1 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -421,6 +421,7 @@ static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct soc
 
 void __mptcp_set_connected(struct sock *sk)
 {
+	__mptcp_propagate_sndbuf(sk, mptcp_sk(sk)->first);
 	if (sk->sk_state == TCP_SYN_SENT) {
 		inet_sk_state_store(sk, TCP_ESTABLISHED);
 		sk->sk_state_change(sk);
@@ -472,7 +473,6 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
 		return;
 
 	msk = mptcp_sk(parent);
-	mptcp_propagate_sndbuf(parent, sk);
 	subflow->rel_write_seq = 1;
 	subflow->conn_finished = 1;
 	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
@@ -1728,7 +1728,6 @@ static void subflow_state_change(struct sock *sk)
 
 	msk = mptcp_sk(parent);
 	if (subflow_simultaneous_connect(sk)) {
-		mptcp_propagate_sndbuf(parent, sk);
 		mptcp_do_fallback(sk);
 		mptcp_rcv_space_init(msk, sk);
 		pr_fallback(msk);
-- 
2.41.0
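P.S.: for review purposes only, a loose userspace analogue of the
"run now or flag for later" scheme used by the delegated action above
(all names here are hypothetical; the real code relies on the msk data
lock and BH context):

#include <stdbool.h>
#include <stdio.h>

struct fake_sock {
	bool owned_by_user;	/* stands in for sock_owned_by_user() */
	bool sync_pending;	/* stands in for the MPTCP_SYNC_SNDBUF bit */
};

static void sync_sndbuf(struct fake_sock *sk)
{
	printf("syncing sndbuf for %p\n", (void *)sk);
}

/* mirrors mptcp_subflow_process_delegated(): sync right away when user
 * context does not own the socket, otherwise defer to release time
 */
static void process_delegated(struct fake_sock *sk)
{
	if (!sk->owned_by_user)
		sync_sndbuf(sk);
	else
		sk->sync_pending = true;
}

/* mirrors mptcp_release_cb(): run the deferred action when the user
 * context releases the socket
 */
static void release_sock(struct fake_sock *sk)
{
	if (sk->sync_pending) {
		sk->sync_pending = false;
		sync_sndbuf(sk);
	}
	sk->owned_by_user = false;
}

int main(void)
{
	struct fake_sock sk = { .owned_by_user = true };

	process_delegated(&sk);	/* deferred: user owns the socket */
	release_sock(&sk);	/* the deferred sync runs here */
	return 0;
}

In the kernel the flag test and the deferred run are serialized by the
msk data lock, so a pending sync can't be lost between the two paths.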