On Mon, 2022-06-06 at 17:47 +0800, Geliang Tang wrote:
> This patch defines the wrapper mptcp_sched_get_send(), which invokes
> __mptcp_sched_get_send(). Use this wrapper instead of calling
> mptcp_subflow_get_send() directly in __mptcp_subflow_push_pending()
> and __mptcp_check_push().
>
> Signed-off-by: Geliang Tang <geliang.tang@suse.com>
> ---
> net/mptcp/protocol.c |  4 ++--
> net/mptcp/protocol.h |  1 +
> net/mptcp/sched.c    | 26 ++++++++++++++++++++++++++
> 3 files changed, 29 insertions(+), 2 deletions(-)
>
> diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
> index 999e83eae821..16612bed5890 100644
> --- a/net/mptcp/protocol.c
> +++ b/net/mptcp/protocol.c
> @@ -1635,7 +1635,7 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
>  			 * check for a different subflow usage only after
>  			 * spooling the first chunk of data
>  			 */
> -			xmit_ssk = first ? ssk : mptcp_subflow_get_send(mptcp_sk(sk));
> +			xmit_ssk = first ? ssk : mptcp_sched_get_send(mptcp_sk(sk));
>  			if (!xmit_ssk)
>  				goto out;
>  			if (xmit_ssk != ssk) {
> @@ -3125,7 +3125,7 @@ void __mptcp_check_push(struct sock *sk, struct sock *ssk)
>  		return;
>
>  	if (!sock_owned_by_user(sk)) {
> -		struct sock *xmit_ssk = mptcp_subflow_get_send(mptcp_sk(sk));
> +		struct sock *xmit_ssk = mptcp_sched_get_send(mptcp_sk(sk));
>
>  		if (xmit_ssk == ssk)
>  			__mptcp_subflow_push_pending(sk, ssk);
> diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
> index 4669d34f4d7c..6d9e8ec5c96f 100644
> --- a/net/mptcp/protocol.h
> +++ b/net/mptcp/protocol.h
> @@ -634,6 +634,7 @@ void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
>  struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk);
>  struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk);
>  int __mptcp_sched_get_send(struct mptcp_sock *msk);
> +struct sock *mptcp_sched_get_send(struct mptcp_sock *msk);
>  int __mptcp_sched_get_retrans(struct mptcp_sock *msk);
>
>  static inline bool __mptcp_subflow_active(struct mptcp_subflow_context *subflow)
> diff --git a/net/mptcp/sched.c b/net/mptcp/sched.c
> index 70ab43c59989..70c6b7b86f23 100644
> --- a/net/mptcp/sched.c
> +++ b/net/mptcp/sched.c
> @@ -147,6 +147,32 @@ int __mptcp_sched_get_send(struct mptcp_sock *msk)
>  	return 0;
>  }
>
> +struct sock *mptcp_sched_get_send(struct mptcp_sock *msk)
> +{
> +	struct mptcp_subflow_context *subflow;
> +	struct sock *ssk = NULL;
> +	int err;
> +
> +	err = __mptcp_sched_get_send(msk);
> +	if (err)
> +		return NULL;
> +
> +	mptcp_for_each_subflow(msk, subflow) {
> +		if (READ_ONCE(subflow->scheduled)) {
> +			/*
> +			 * TODO: Redundant subflows are not supported in
> +			 * __mptcp_subflow_push_pending() yet. Here's a
> +			 * placeholder to pick the first subflow for the
> +			 * redundant subflows case.
> +			 */
> +			ssk = subflow->tcp_sock;
> +			break;
> +		}
> +	}
> +
> +	return ssk;
> +}
> +
>  int __mptcp_sched_get_retrans(struct mptcp_sock *msk)
>  {
>  	struct mptcp_sched_data data;
I think we should try to preserve the optimized
mptcp_subflow_get_send() path for the non-eBPF case, to avoid an
unnecessary subflow traversal.
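
Something along these lines (just a rough, untested sketch on top of
this patch, assuming the eBPF scheduler is reachable via msk->sched as
elsewhere in this series) should keep the fast path:

struct sock *mptcp_sched_get_send(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *ssk = NULL;
	int err;

	/* no eBPF scheduler attached: use the existing optimized
	 * helper and skip the subflow traversal below
	 */
	if (!msk->sched)
		return mptcp_subflow_get_send(msk);

	err = __mptcp_sched_get_send(msk);
	if (err)
		return NULL;

	mptcp_for_each_subflow(msk, subflow) {
		/* pick the first scheduled subflow; redundant
		 * schedulers are not supported here yet
		 */
		if (READ_ONCE(subflow->scheduled)) {
			ssk = subflow->tcp_sock;
			break;
		}
	}

	return ssk;
}

That way the default in-kernel scheduler does not pay for the extra
loop on every transmit.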
Thanks!
Paolo