[RFC mptcp-next v10 05/14] mptcp: avoid sleeping in read_sock path under softirq

Geliang Tang posted 14 patches 4 days, 23 hours ago
There is a newer version of this series
[RFC mptcp-next v10 05/14] mptcp: avoid sleeping in read_sock path under softirq
Posted by Geliang Tang 4 days, 23 hours ago
From: Geliang Tang <tanggeliang@kylinos.cn>

When mptcp_read_sock() is called from softirq context via the TLS
read_sock path, lock_sock_fast() in mptcp_rcv_space_adjust() and
mptcp_subflow_cleanup_rbuf() may trigger might_sleep() warnings or
illegal sleeps, as softirq context cannot block.

Replace lock_sock_fast() with spin_trylock_bh() to make locking
non-blocking and context-safe, and skip the operation if the lock
cannot be acquired. Since the spinlock alone does not provide
exclusion when the socket is owned by process context, additionally
check sock_owned_by_user() before acting on the subflow.

Also introduce an mptcp_data_trylock() helper and use it in
mptcp_move_skbs() to make msk data locking non-blocking in the
read_sock path.

Co-developed-by: Gang Yan <yangang@kylinos.cn>
Signed-off-by: Gang Yan <yangang@kylinos.cn>
Signed-off-by: Geliang Tang <tanggeliang@kylinos.cn>
---
 net/mptcp/protocol.c | 21 +++++++++++----------
 net/mptcp/protocol.h |  1 +
 2 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 5585f43cf879..1903f5b1fc44 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -561,12 +561,11 @@ static void mptcp_send_ack(struct mptcp_sock *msk)
 
 static void mptcp_subflow_cleanup_rbuf(struct sock *ssk, int copied)
 {
-	bool slow;
-
-	slow = lock_sock_fast(ssk);
-	if (tcp_can_send_ack(ssk))
+	if (!spin_trylock_bh(&ssk->sk_lock.slock))
+		return;
+	if (!sock_owned_by_user(ssk) && tcp_can_send_ack(ssk))
 		tcp_cleanup_rbuf(ssk, copied);
-	unlock_sock_fast(ssk, slow);
+	spin_unlock_bh(&ssk->sk_lock.slock);
 }
 
 static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
@@ -2194,14 +2193,15 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
 		 */
 		mptcp_for_each_subflow(msk, subflow) {
 			struct sock *ssk;
-			bool slow;
 
 			ssk = mptcp_subflow_tcp_sock(subflow);
-			slow = lock_sock_fast(ssk);
+			if (!spin_trylock_bh(&ssk->sk_lock.slock))
+				continue;
 			/* subflows can be added before tcp_init_transfer() */
-			if (tcp_sk(ssk)->rcvq_space.space)
+			if (!sock_owned_by_user(ssk) &&
+			    tcp_sk(ssk)->rcvq_space.space)
 				tcp_rcvbuf_grow(ssk, copied);
-			unlock_sock_fast(ssk, slow);
+			spin_unlock_bh(&ssk->sk_lock.slock);
 		}
 	}
 
@@ -2299,7 +2299,8 @@ static bool mptcp_move_skbs(struct sock *sk)
 	bool enqueued = false;
 	u32 moved;
 
-	mptcp_data_lock(sk);
+	if (!mptcp_data_trylock(sk))
+		return false;
 	while (mptcp_can_spool_backlog(sk, &skbs)) {
 		mptcp_data_unlock(sk);
 		enqueued |= __mptcp_move_skbs(sk, &skbs, &moved);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index f0eaba2c61fa..7d8531837736 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -378,6 +378,7 @@ struct mptcp_sock {
 };
 
 #define mptcp_data_lock(sk) spin_lock_bh(&(sk)->sk_lock.slock)
+#define mptcp_data_trylock(sk) spin_trylock_bh(&(sk)->sk_lock.slock)
 #define mptcp_data_unlock(sk) spin_unlock_bh(&(sk)->sk_lock.slock)
 
 #define mptcp_for_each_subflow(__msk, __subflow)			\
-- 
2.53.0