After commit c2e6048fa1cf ("mptcp: fix race in release_cb") it's
pretty straightforward to move the whole MPTCP rx path under the socket
lock, leveraging the release_cb.

We can drop a bunch of spin_lock pairs in the receive functions, use
a single receive queue and invoke __mptcp_move_skbs only when subflows
ask for it.

This will allow more cleanup in the next patch.
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
net/mptcp/fastopen.c | 2 ++
net/mptcp/protocol.c | 76 +++++++++++++++++++-------------------------
net/mptcp/protocol.h | 2 +-
3 files changed, 36 insertions(+), 44 deletions(-)
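
Note for reviewers (not part of the patch): below is a small, standalone userspace
sketch of the deferral idea the diff implements. The names (pseudo_sock,
DEQUEUE_BIT, the helpers) are made up for illustration and are not the kernel
API; the real code uses sk_lock, msk->cb_flags, MPTCP_DEQUEUE and
mptcp_release_cb(). The point it shows: a producer that finds the socket owned
by user context only sets a flag, and the deferred dequeue runs from the
release callback when the owner drops the lock.

/*
 * Standalone userspace sketch of the deferral pattern this patch relies on.
 * All names here are illustrative only.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define DEQUEUE_BIT	(1u << 0)

struct pseudo_sock {
	pthread_mutex_t	slock;		/* plays the role of sk_lock.slock */
	bool		owned_by_user;	/* set while a process owns the socket */
	unsigned int	cb_flags;	/* deferred work bits, like msk->cb_flags */
	int		queued;		/* "skbs" still sitting on subflows */
	int		received;	/* "skbs" moved to the msk receive queue */
};

/* Work that must run with the socket owned: move skbs to the msk queue. */
static void move_skbs(struct pseudo_sock *ps)
{
	ps->received += ps->queued;
	ps->queued = 0;
}

/* Producer path, i.e. the subflow reporting new in-sequence data. */
static void data_ready(struct pseudo_sock *ps)
{
	pthread_mutex_lock(&ps->slock);
	if (!ps->owned_by_user)
		move_skbs(ps);			/* fast path: dequeue now */
	else
		ps->cb_flags |= DEQUEUE_BIT;	/* defer to the release callback */
	pthread_mutex_unlock(&ps->slock);
}

/* Consumer path: rough analogues of lock_sock() and release_sock(). */
static void own_sock(struct pseudo_sock *ps)
{
	pthread_mutex_lock(&ps->slock);
	ps->owned_by_user = true;
	pthread_mutex_unlock(&ps->slock);
}

static void release_sock_cb(struct pseudo_sock *ps)
{
	pthread_mutex_lock(&ps->slock);
	if (ps->cb_flags & DEQUEUE_BIT) {	/* the release_cb step */
		ps->cb_flags &= ~DEQUEUE_BIT;
		move_skbs(ps);
	}
	ps->owned_by_user = false;
	pthread_mutex_unlock(&ps->slock);
}

int main(void)
{
	struct pseudo_sock ps = { .queued = 0, .received = 0 };

	pthread_mutex_init(&ps.slock, NULL);
	own_sock(&ps);
	ps.queued = 3;		/* data lands while the socket is owned */
	data_ready(&ps);	/* only sets DEQUEUE_BIT */
	printf("while owned: received=%d cb_flags=%#x\n", ps.received, ps.cb_flags);
	release_sock_cb(&ps);	/* deferred dequeue happens here */
	printf("after release: received=%d cb_flags=%#x\n", ps.received, ps.cb_flags);
	pthread_mutex_destroy(&ps.slock);
	return 0;
}

Building it with "gcc -pthread" and running it prints received=0 while the
socket is owned and received=3 only after the release callback runs.
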
diff --git a/net/mptcp/fastopen.c b/net/mptcp/fastopen.c
index a29ff901df75..fb945c0d50bf 100644
--- a/net/mptcp/fastopen.c
+++ b/net/mptcp/fastopen.c
@@ -49,6 +49,7 @@ void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subf
MPTCP_SKB_CB(skb)->has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
mptcp_data_lock(sk);
+ DEBUG_NET_WARN_ON_ONCE(sock_owned_by_user_nocheck(sk));
mptcp_set_owner_r(skb, sk);
__skb_queue_tail(&sk->sk_receive_queue, skb);
@@ -65,6 +66,7 @@ void __mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflo
struct sock *sk = (struct sock *)msk;
struct sk_buff *skb;
+ DEBUG_NET_WARN_ON_ONCE(sock_owned_by_user_nocheck(sk));
skb = skb_peek_tail(&sk->sk_receive_queue);
if (skb) {
WARN_ON_ONCE(MPTCP_SKB_CB(skb)->end_seq);
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index f768aa4473fb..159add48f6d9 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -845,7 +845,7 @@ static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
return moved > 0;
}
-void mptcp_data_ready(struct sock *sk, struct sock *ssk)
+static void __mptcp_data_ready(struct sock *sk, struct sock *ssk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -868,9 +868,17 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
return;
/* Wake-up the reader only for in-sequence data */
- mptcp_data_lock(sk);
if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk))
sk->sk_data_ready(sk);
+}
+
+void mptcp_data_ready(struct sock *sk, struct sock *ssk)
+{
+ mptcp_data_lock(sk);
+ if (!sock_owned_by_user(sk))
+ __mptcp_data_ready(sk, ssk);
+ else
+ __set_bit(MPTCP_DEQUEUE, &mptcp_sk(sk)->cb_flags);
mptcp_data_unlock(sk);
}
@@ -1077,9 +1085,7 @@ static void __mptcp_clean_una_wakeup(struct sock *sk)
static void mptcp_clean_una_wakeup(struct sock *sk)
{
- mptcp_data_lock(sk);
__mptcp_clean_una_wakeup(sk);
- mptcp_data_unlock(sk);
}
static void mptcp_enter_memory_pressure(struct sock *sk)
@@ -1939,16 +1945,22 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
goto out;
}
-static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
+static bool __mptcp_move_skbs(struct sock *sk);
+
+static int __mptcp_recvmsg_mskq(struct sock *sk,
struct msghdr *msg,
size_t len, int flags,
struct scm_timestamping_internal *tss,
int *cmsg_flags)
{
+ struct mptcp_sock *msk = mptcp_sk(sk);
struct sk_buff *skb, *tmp;
int copied = 0;
- skb_queue_walk_safe(&msk->receive_queue, skb, tmp) {
+ if (skb_queue_empty(&sk->sk_receive_queue) && !__mptcp_move_skbs(sk))
+ return 0;
+
+ skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) {
u32 offset = MPTCP_SKB_CB(skb)->offset;
u32 data_len = skb->len - offset;
u32 count = min_t(size_t, len - copied, data_len);
@@ -1983,7 +1995,7 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
/* we will bulk release the skb memory later */
skb->destructor = NULL;
WRITE_ONCE(msk->rmem_released, msk->rmem_released + skb->truesize);
- __skb_unlink(skb, &msk->receive_queue);
+ __skb_unlink(skb, &sk->sk_receive_queue);
__kfree_skb(skb);
msk->bytes_consumed += count;
}
@@ -2107,16 +2119,9 @@ static void __mptcp_update_rmem(struct sock *sk)
WRITE_ONCE(msk->rmem_released, 0);
}
-static void __mptcp_splice_receive_queue(struct sock *sk)
+static bool __mptcp_move_skbs(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
-
- skb_queue_splice_tail_init(&sk->sk_receive_queue, &msk->receive_queue);
-}
-
-static bool __mptcp_move_skbs(struct mptcp_sock *msk)
-{
- struct sock *sk = (struct sock *)msk;
unsigned int moved = 0;
bool ret, done;
@@ -2124,37 +2129,27 @@ static bool __mptcp_move_skbs(struct mptcp_sock *msk)
struct sock *ssk = mptcp_subflow_recv_lookup(msk);
bool slowpath;
- /* we can have data pending in the subflows only if the msk
- * receive buffer was full at subflow_data_ready() time,
- * that is an unlikely slow path.
- */
- if (likely(!ssk))
+ if (unlikely(!ssk))
break;
slowpath = lock_sock_fast(ssk);
- mptcp_data_lock(sk);
__mptcp_update_rmem(sk);
done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
- mptcp_data_unlock(sk);
if (unlikely(ssk->sk_err))
__mptcp_error_report(sk);
unlock_sock_fast(ssk, slowpath);
} while (!done);
- /* acquire the data lock only if some input data is pending */
ret = moved > 0;
if (!RB_EMPTY_ROOT(&msk->out_of_order_queue) ||
- !skb_queue_empty_lockless(&sk->sk_receive_queue)) {
- mptcp_data_lock(sk);
+ !skb_queue_empty(&sk->sk_receive_queue)) {
__mptcp_update_rmem(sk);
ret |= __mptcp_ofo_queue(msk);
- __mptcp_splice_receive_queue(sk);
- mptcp_data_unlock(sk);
}
if (ret)
mptcp_check_data_fin((struct sock *)msk);
- return !skb_queue_empty(&msk->receive_queue);
+ return ret;
}
static unsigned int mptcp_inq_hint(const struct sock *sk)
@@ -2162,7 +2157,7 @@ static unsigned int mptcp_inq_hint(const struct sock *sk)
const struct mptcp_sock *msk = mptcp_sk(sk);
const struct sk_buff *skb;
- skb = skb_peek(&msk->receive_queue);
+ skb = skb_peek(&sk->sk_receive_queue);
if (skb) {
u64 hint_val = READ_ONCE(msk->ack_seq) - MPTCP_SKB_CB(skb)->map_seq;
@@ -2208,7 +2203,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
while (copied < len) {
int err, bytes_read;
- bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied, flags, &tss, &cmsg_flags);
+ bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags, &tss, &cmsg_flags);
if (unlikely(bytes_read < 0)) {
if (!copied)
copied = bytes_read;
@@ -2220,8 +2215,6 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
/* be sure to advertise window change */
mptcp_cleanup_rbuf(msk);
- if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk))
- continue;
/* only the MPTCP socket status is relevant here. The exit
* conditions mirror closely tcp_recvmsg()
@@ -2246,7 +2239,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
/* race breaker: the shutdown could be after the
* previous receive queue check
*/
- if (__mptcp_move_skbs(msk))
+ if (__mptcp_move_skbs(sk))
continue;
break;
}
@@ -2290,9 +2283,8 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
}
}
- pr_debug("msk=%p rx queue empty=%d:%d copied=%d\n",
- msk, skb_queue_empty_lockless(&sk->sk_receive_queue),
- skb_queue_empty(&msk->receive_queue), copied);
+ pr_debug("msk=%p rx queue empty=%d copied=%d",
+ msk, skb_queue_empty(&sk->sk_receive_queue), copied);
release_sock(sk);
return copied;
@@ -2819,7 +2811,6 @@ static void __mptcp_init_sock(struct sock *sk)
INIT_LIST_HEAD(&msk->join_list);
INIT_LIST_HEAD(&msk->rtx_queue);
INIT_WORK(&msk->work, mptcp_worker);
- __skb_queue_head_init(&msk->receive_queue);
msk->out_of_order_queue = RB_ROOT;
msk->first_pending = NULL;
WRITE_ONCE(msk->rmem_fwd_alloc, 0);
@@ -3402,12 +3393,8 @@ void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
mptcp_for_each_subflow_safe(msk, subflow, tmp)
__mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags);
- /* move to sk_receive_queue, sk_stream_kill_queues will purge it */
- mptcp_data_lock(sk);
- skb_queue_splice_tail_init(&msk->receive_queue, &sk->sk_receive_queue);
__skb_queue_purge(&sk->sk_receive_queue);
skb_rbtree_purge(&msk->out_of_order_queue);
- mptcp_data_unlock(sk);
/* move all the rx fwd alloc into the sk_mem_reclaim_final in
* inet_sock_destruct() will dispose it
@@ -3450,7 +3437,8 @@ void __mptcp_check_push(struct sock *sk, struct sock *ssk)
#define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \
BIT(MPTCP_RETRANSMIT) | \
- BIT(MPTCP_FLUSH_JOIN_LIST))
+ BIT(MPTCP_FLUSH_JOIN_LIST) | \
+ BIT(MPTCP_DEQUEUE))
/* processes deferred events and flush wmem */
static void mptcp_release_cb(struct sock *sk)
@@ -3484,6 +3472,8 @@ static void mptcp_release_cb(struct sock *sk)
__mptcp_push_pending(sk, 0);
if (flags & BIT(MPTCP_RETRANSMIT))
__mptcp_retrans(sk);
+ if ((flags & BIT(MPTCP_DEQUEUE)) && __mptcp_move_skbs(sk))
+ sk->sk_data_ready(sk);
cond_resched();
spin_lock_bh(&sk->sk_lock.slock);
@@ -3721,7 +3711,7 @@ static int mptcp_ioctl(struct sock *sk, int cmd, int *karg)
return -EINVAL;
lock_sock(sk);
- __mptcp_move_skbs(msk);
+ __mptcp_move_skbs(sk);
*karg = mptcp_inq_hint(sk);
release_sock(sk);
break;
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index b4c72a73594f..ad940cc1f26f 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -124,6 +124,7 @@
#define MPTCP_FLUSH_JOIN_LIST 5
#define MPTCP_SYNC_STATE 6
#define MPTCP_SYNC_SNDBUF 7
+#define MPTCP_DEQUEUE 8
struct mptcp_skb_cb {
u64 map_seq;
@@ -322,7 +323,6 @@ struct mptcp_sock {
struct work_struct work;
struct sk_buff *ooo_last_skb;
struct rb_root out_of_order_queue;
- struct sk_buff_head receive_queue;
struct list_head conn_list;
struct list_head rtx_queue;
struct mptcp_data_frag *first_pending;
--
2.45.2
Hi Paolo,

On 29/11/2024 18:45, Paolo Abeni wrote:
> After commit c2e6048fa1cf ("mptcp: fix race in release_cb") it's
> pretty straightforward to move the whole MPTCP rx path under the socket
> lock, leveraging the release_cb.
>
> We can drop a bunch of spin_lock pairs in the receive functions, use
> a single receive queue and invoke __mptcp_move_skbs only when subflows
> ask for it.
>
> This will allow more cleanup in the next patch.

(...)

> diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
> index f768aa4473fb..159add48f6d9 100644
> --- a/net/mptcp/protocol.c
> +++ b/net/mptcp/protocol.c

(...)

> @@ -2290,9 +2283,8 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
>  		}
>  	}
>
> -	pr_debug("msk=%p rx queue empty=%d:%d copied=%d\n",
> -		 msk, skb_queue_empty_lockless(&sk->sk_receive_queue),
> -		 skb_queue_empty(&msk->receive_queue), copied);
> +	pr_debug("msk=%p rx queue empty=%d copied=%d",

A small detail: the '\n' at the end is missing; there is no need to
delay the output if it is not supposed to be continued with a pr_cont().

(Something we can fix when applying the patches.)

Cheers,
Matt

--
Sponsored by the NGI0 Core fund.
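
As an aside, for anyone not familiar with the printk convention Matthieu
refers to: a complete message should end with '\n', and only a message
deliberately assembled in pieces omits the newline and finishes via
pr_cont(). A minimal kernel-context illustration (the function name and
arguments below are made up for the example, not taken from the patch):

#include <linux/printk.h>

/* Illustrative only: not part of the patch under review. */
static void rx_debug_example(const void *msk, int empty, int copied, int id)
{
	/* A complete message: terminate it with '\n' so it is emitted at once. */
	pr_debug("msk=%p rx queue empty=%d copied=%d\n", msk, empty, copied);

	/*
	 * Only a message deliberately built up in pieces should leave the
	 * trailing newline off; the continuation then comes from pr_cont().
	 */
	pr_info("active subflow ids:");
	pr_cont(" %d", id);
	pr_cont("\n");
}
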
On 12/2/24 17:56, Matthieu Baerts wrote:
> Hi Paolo,
>
> On 29/11/2024 18:45, Paolo Abeni wrote:
>> After commit c2e6048fa1cf ("mptcp: fix race in release_cb") it's
>> pretty straightforward to move the whole MPTCP rx path under the socket
>> lock, leveraging the release_cb.
>>
>> We can drop a bunch of spin_lock pairs in the receive functions, use
>> a single receive queue and invoke __mptcp_move_skbs only when subflows
>> ask for it.
>>
>> This will allow more cleanup in the next patch.
>
> (...)
>
>> diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
>> index f768aa4473fb..159add48f6d9 100644
>> --- a/net/mptcp/protocol.c
>> +++ b/net/mptcp/protocol.c
>
> (...)
>
>> @@ -2290,9 +2283,8 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
>>  		}
>>  	}
>>
>> -	pr_debug("msk=%p rx queue empty=%d:%d copied=%d\n",
>> -		 msk, skb_queue_empty_lockless(&sk->sk_receive_queue),
>> -		 skb_queue_empty(&msk->receive_queue), copied);
>> +	pr_debug("msk=%p rx queue empty=%d copied=%d",
>
> A small detail: the '\n' at the end is missing; there is no need to
> delay the output if it is not supposed to be continued with a pr_cont().

Thanks! I'll fix it in the next revision.

I think there is at least one fixable race in the current code (the one
causing the ST failure - a different root cause from
https://github.com/multipath-tcp/mptcp_net-next/issues/483, this one is
specific to the refactor). I hope to be able to share the next version
much later today.

Cheers,

Paolo

> (Something we can fix when applying the patches.)
>
> Cheers,
> Matt