This patch adds redundant subflow support, sending all packets
redundantly on all available subflows.
Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
include/net/mptcp.h | 1 +
net/mptcp/protocol.c | 13 ++++++++-----
net/mptcp/sched.c | 1 +
3 files changed, 10 insertions(+), 5 deletions(-)
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index 345f27a68eaa..d48c66de8466 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -101,6 +101,7 @@ struct mptcp_out_options {
struct mptcp_sched_data {
struct sock *sock;
bool call_again;
+ u8 subflows;
struct mptcp_subflow_context *contexts[MPTCP_SUBFLOWS_MAX];
};
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 4a55a6e89ed5..e3e1026aad97 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1591,13 +1591,16 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
goto out;
}
- info.sent += ret;
- copied += ret;
- len -= ret;
+ if (!call_again) {
+ info.sent += ret;
+ copied += ret;
+ len -= ret;
- mptcp_update_post_push(msk, dfrag, ret);
+ mptcp_update_post_push(msk, dfrag, ret);
+ }
}
- WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
+ if (!call_again)
+ WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
}
/* at this point we held the socket lock for the last subflow we used */
diff --git a/net/mptcp/sched.c b/net/mptcp/sched.c
index 83377cd1a4de..0d5fc96a2ce0 100644
--- a/net/mptcp/sched.c
+++ b/net/mptcp/sched.c
@@ -104,6 +104,7 @@ static int mptcp_sched_data_init(struct mptcp_sock *msk,
}
data->contexts[i++] = subflow;
}
+ data->subflows = i;
for (; i < MPTCP_SUBFLOWS_MAX; i++)
data->contexts[i++] = NULL;
--
2.34.1
On Mon, 9 May 2022, Geliang Tang wrote:
> This patch adds redundant subflow support, sending all packets
> redundantly on all available subflows.
>
> Signed-off-by: Geliang Tang <geliang.tang@suse.com>
> ---
> include/net/mptcp.h | 1 +
> net/mptcp/protocol.c | 13 ++++++++-----
> net/mptcp/sched.c | 1 +
> 3 files changed, 10 insertions(+), 5 deletions(-)
>
> diff --git a/include/net/mptcp.h b/include/net/mptcp.h
> index 345f27a68eaa..d48c66de8466 100644
> --- a/include/net/mptcp.h
> +++ b/include/net/mptcp.h
> @@ -101,6 +101,7 @@ struct mptcp_out_options {
> struct mptcp_sched_data {
> struct sock *sock;
> bool call_again;
> + u8 subflows;
> struct mptcp_subflow_context *contexts[MPTCP_SUBFLOWS_MAX];
> };
>
> diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
> index 4a55a6e89ed5..e3e1026aad97 100644
> --- a/net/mptcp/protocol.c
> +++ b/net/mptcp/protocol.c
> @@ -1591,13 +1591,16 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
> goto out;
If the updates below were skipped on a previous iteration of the loop,
they would need to be handled before the 'goto' here.
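For example, if earlier iterations already sent data but deferred these
updates, the error path would need something along these lines (untested
sketch; 'deferred' is a hypothetical local holding the amount already
pushed redundantly on the other subflows):

	if (ret <= 0) {
		/* account for data already pushed on earlier subflows
		 * before bailing out of the loop
		 */
		if (deferred) {
			info.sent += deferred;
			copied += deferred;
			len -= deferred;
			mptcp_update_post_push(msk, dfrag, deferred);
		}
		goto out;
	}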
> }
>
> - info.sent += ret;
> - copied += ret;
> - len -= ret;
> + if (!call_again) {
> + info.sent += ret;
> + copied += ret;
> + len -= ret;
>
> - mptcp_update_post_push(msk, dfrag, ret);
> + mptcp_update_post_push(msk, dfrag, ret);
I'm not sure this works: 'ret' is the number of bytes sent on the final
scheduled subflow, but a different amount of data could have been sent on
the other subflows.
The simple fix is to keep track of the largest value of 'ret' when
repeating this loop and use that to track the amount of data sent. But I'm
not sure how that will behave when the subflows have different latencies,
speeds, and window sizes.
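To illustrate that simple approach, a rough and untested sketch (the loop
shape is simplified, and 'max_sent' is just an illustrative local):

	int max_sent = 0;

	/* push the same data on every scheduled subflow */
	do {
		ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
		if (ret <= 0)
			break;	/* error path needs the same accounting, see above */
		/* remember the furthest point reached on any subflow */
		if (ret > max_sent)
			max_sent = ret;
	} while (call_again);

	/* account for the dfrag once, using the largest amount pushed */
	if (max_sent) {
		info.sent += max_sent;
		copied += max_sent;
		len -= max_sent;
		mptcp_update_post_push(msk, dfrag, max_sent);
	}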
Do we want to send the latest unsent data on every scheduled subflow? Or
is it better to keep the data on the slower subflows contiguous until the
received DATA_ACKs move msk->snd_una forward? The former case means that
the slow subflows skip unacked data and increase the possible need for
reinjecting data. The latter would only skip acked data, leading to fewer
MPTCP reinjections.
The best thing for now may be to try the simple approach (track the
largest copied amount) and see how it behaves; then we can discuss based
on that data.
> + }
> }
> - WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
> + if (!call_again)
> + WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
> }
>
> /* at this point we held the socket lock for the last subflow we used */
> diff --git a/net/mptcp/sched.c b/net/mptcp/sched.c
> index 83377cd1a4de..0d5fc96a2ce0 100644
> --- a/net/mptcp/sched.c
> +++ b/net/mptcp/sched.c
> @@ -104,6 +104,7 @@ static int mptcp_sched_data_init(struct mptcp_sock *msk,
> }
> data->contexts[i++] = subflow;
> }
> + data->subflows = i;
>
> for (; i < MPTCP_SUBFLOWS_MAX; i++)
> data->contexts[i++] = NULL;
There's also the more complex __mptcp_subflow_push_pending() path, the
chaining of further sends through mptcp_subflow_delegate(), and the
retransmit loop to consider.
--
Mat Martineau
Intel