Note that we can no longer guess the listener family based on the
client target address; always use IPv6.
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
.../testing/selftests/net/mptcp/mptcp_join.sh | 51 +++++++++++++++----
1 file changed, 42 insertions(+), 9 deletions(-)
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index d11d3d566608..6ef4787ad244 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -774,24 +774,17 @@ do_transfer()
addr_nr_ns2=${addr_nr_ns2:9}
fi
- local local_addr
- if is_v6 "${connect_addr}"; then
- local_addr="::"
- else
- local_addr="0.0.0.0"
- fi
-
extra_srv_args="$extra_args $extra_srv_args"
if [ "$test_link_fail" -gt 1 ];then
timeout ${timeout_test} \
ip netns exec ${listener_ns} \
./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
- $extra_srv_args ${local_addr} < "$sinfail" > "$sout" &
+ $extra_srv_args "::" < "$sinfail" > "$sout" &
else
timeout ${timeout_test} \
ip netns exec ${listener_ns} \
./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
- $extra_srv_args ${local_addr} < "$sin" > "$sout" &
+ $extra_srv_args "::" < "$sin" > "$sout" &
fi
local spid=$!
@@ -2448,6 +2441,45 @@ v4mapped_tests()
fi
}
+mixed_tests()
+{
+ # subflow IPv4-mapped to IPv4-mapped
+ if reset "simult IPv4 and IPv6 subflows, first is IPv4"; then
+ pm_nl_set_limits $ns1 0 1
+ pm_nl_set_limits $ns2 1 1
+ pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
+ run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+ chk_join_nr 1 1 1
+ fi
+
+ # subflow IPv4-mapped to IPv4-mapped
+ if reset "simult IPv4 and IPv6 subflows, first is IPv6"; then
+ pm_nl_set_limits $ns1 0 1
+ pm_nl_set_limits $ns2 1 1
+ pm_nl_add_endpoint $ns1 10.0.1.1 flags signal
+ run_tests $ns1 $ns2 dead:beef:2::1 0 0 0 slow
+ chk_join_nr 1 1 1
+ fi
+
+ if reset "simult IPv4 and IPv6 subflows, fullmesh 1x1"; then
+ pm_nl_set_limits $ns1 0 4
+ pm_nl_set_limits $ns2 1 4
+ pm_nl_add_endpoint $ns2 dead:beef:2::2 flags subflow,fullmesh
+ pm_nl_add_endpoint $ns1 10.0.1.1 flags signal
+ run_tests $ns1 $ns2 dead:beef:2::1 0 0 0 slow
+ chk_join_nr 1 1 1
+ fi
+
+ if reset "simult IPv4 and IPv6 subflows, fullmesh 2x2"; then
+ pm_nl_set_limits $ns1 0 4
+ pm_nl_set_limits $ns2 2 4
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+ pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
+ run_tests $ns1 $ns2 dead:beef:1::1 0 0 fullmesh_1 slow
+ chk_join_nr 4 4 4
+ fi
+}
+
backup_tests()
{
# single subflow, backup
@@ -3120,6 +3152,7 @@ all_tests_sorted=(
a@add_tests
6@ipv6_tests
4@v4mapped_tests
+ M@mixed_tests
b@backup_tests
p@add_addr_ports_tests
k@syncookies_tests
--
2.38.1
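For context on the commit message above: an AF_INET6 listener bound to
"::" with IPV6_V6ONLY cleared also accepts IPv4 peers as v4-mapped
addresses, which is why a single IPv6 listener can serve both
connect-address families. A minimal sketch of such a listener follows
(an illustration only, not code taken from mptcp_connect.c;
listen_any() is a hypothetical helper):

--------------------- 8< ---------------------
/* Sketch: a dual-stack MPTCP listener. With IPV6_V6ONLY cleared, the
 * AF_INET6 socket bound to the wildcard address accepts IPv4 clients
 * as v4-mapped addresses as well, so one "::" listener covers both
 * families.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262
#endif

static int listen_any(int port)
{
	struct sockaddr_in6 a6;
	int off = 0;
	int fd;

	fd = socket(AF_INET6, SOCK_STREAM, IPPROTO_MPTCP);
	if (fd < 0)
		return -1;

	/* explicitly allow v4-mapped connections */
	setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &off, sizeof(off));

	memset(&a6, 0, sizeof(a6));
	a6.sin6_family = AF_INET6;
	a6.sin6_addr = in6addr_any;	/* "::" */
	a6.sin6_port = htons(port);

	if (bind(fd, (struct sockaddr *)&a6, sizeof(a6)) || listen(fd, 1)) {
		close(fd);
		return -1;
	}
	return fd;
}
--------------------- 8< ---------------------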
Hi Paolo,
On 23/12/2022 13:51, Paolo Abeni wrote:
> Note that we can no longer guess the listener family based on the
> client target address; always use IPv6.
>
> Signed-off-by: Paolo Abeni <pabeni@redhat.com>
> ---
> .../testing/selftests/net/mptcp/mptcp_join.sh | 51 +++++++++++++++----
> 1 file changed, 42 insertions(+), 9 deletions(-)
>
> diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
> index d11d3d566608..6ef4787ad244 100755
> --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
> +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
> @@ -774,24 +774,17 @@ do_transfer()
> addr_nr_ns2=${addr_nr_ns2:9}
> fi
>
> - local local_addr
> - if is_v6 "${connect_addr}"; then
> - local_addr="::"
> - else
> - local_addr="0.0.0.0"
> - fi
> -
> extra_srv_args="$extra_args $extra_srv_args"
> if [ "$test_link_fail" -gt 1 ];then
> timeout ${timeout_test} \
> ip netns exec ${listener_ns} \
> ./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
> - $extra_srv_args ${local_addr} < "$sinfail" > "$sout" &
> + $extra_srv_args "::" < "$sinfail" > "$sout" &
> else
> timeout ${timeout_test} \
> ip netns exec ${listener_ns} \
> ./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
> - $extra_srv_args ${local_addr} < "$sin" > "$sout" &
> + $extra_srv_args "::" < "$sin" > "$sout" &
> fi
> local spid=$!
>
> @@ -2448,6 +2441,45 @@ v4mapped_tests()
> fi
> }
>
> +mixed_tests()
> +{
> + # subflow IPv4-mapped to IPv4-mapped
(small detail: the comment was not adapted to this test. I can remove it
when applying the patch if no other modifications are needed. Same for
the next test just below.)
> + if reset "simult IPv4 and IPv6 subflows, first is IPv4"; then
> + pm_nl_set_limits $ns1 0 1
> + pm_nl_set_limits $ns2 1 1
> + pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
> + run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
(linked to my question from patch 1/3)
If I'm not mistaken, by default, mptcp_connect will create an MPTCP
socket with AF_INET family if the initial destination is an IPv4
address, no?
Should we not try to connect to ::ffff:10.0.1.1 here instead (or pass
the -6 option? Maybe that is not enough)?
Also, I guess we should have an extra test to make sure a v4-only
socket does not allow extra subflows in v6, no?
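To illustrate the family selection I have in mind (assuming
mptcp_connect resolves the target with getaddrinfo(), which I did not
verify), a literal IPv4 address yields AF_INET while its v4-mapped form
yields AF_INET6:

--------------------- 8< ---------------------
/* Rough sketch only: how getaddrinfo() maps a literal address to a
 * socket family. Whether mptcp_connect does exactly this is an
 * assumption, not something checked against its sources.
 */
#include <netdb.h>
#include <stdio.h>
#include <sys/socket.h>

static const char *family_of(const char *addr)
{
	struct addrinfo hints = { .ai_family = AF_UNSPEC,
				  .ai_socktype = SOCK_STREAM,
				  .ai_flags = AI_NUMERICHOST };
	struct addrinfo *res;
	const char *name;

	if (getaddrinfo(addr, NULL, &hints, &res))
		return "error";
	name = res->ai_family == AF_INET ? "AF_INET" : "AF_INET6";
	freeaddrinfo(res);
	return name;
}

int main(void)
{
	printf("10.0.1.1        -> %s\n", family_of("10.0.1.1"));
	printf("::ffff:10.0.1.1 -> %s\n", family_of("::ffff:10.0.1.1"));
	return 0;
}
--------------------- 8< ---------------------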
> + chk_join_nr 1 1 1
> + fi
> +
> + # subflow IPv4-mapped to IPv4-mapped
> + if reset "simult IPv4 and IPv6 subflows, first is IPv6"; then
> + pm_nl_set_limits $ns1 0 1
> + pm_nl_set_limits $ns2 1 1
> + pm_nl_add_endpoint $ns1 10.0.1.1 flags signal
> + run_tests $ns1 $ns2 dead:beef:2::1 0 0 0 slow
Linked to the previous question: that makes me think we don't properly
propagate the IPV6_V6ONLY socket option (sk_ipv6only) to new subflows
(just before the connect). Now that we can connect to v4 addresses, it
might be an issue. Easy to fix with this I guess:
--------------------- 8< ---------------------
> diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
> index 582ed93bcc8a..9986681aaf40 100644
> --- a/net/mptcp/sockopt.c
> +++ b/net/mptcp/sockopt.c
> @@ -1255,6 +1255,7 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
> ssk->sk_priority = sk->sk_priority;
> ssk->sk_bound_dev_if = sk->sk_bound_dev_if;
> ssk->sk_incoming_cpu = sk->sk_incoming_cpu;
> + ssk->sk_ipv6only = sk->sk_ipv6only;
> __ip_sock_set_tos(ssk, inet_sk(sk)->tos);
>
> if (sk->sk_userlocks & tx_rx_locks) {
--------------------- 8< ---------------------
(+ a test checking that for v6 only socket as well I suppose)
> + chk_join_nr 1 1 1
> + fi
> +
> + if reset "simult IPv4 and IPv6 subflows, fullmesh 1x1"; then
> + pm_nl_set_limits $ns1 0 4
> + pm_nl_set_limits $ns2 1 4
Does the limit here mean 1 subflow or 1 extra subflow? (it might answer
my question below)
> + pm_nl_add_endpoint $ns2 dead:beef:2::2 flags subflow,fullmesh
> + pm_nl_add_endpoint $ns1 10.0.1.1 flags signal
> + run_tests $ns1 $ns2 dead:beef:2::1 0 0 0 slow
> + chk_join_nr 1 1 1
Why do you not have an extra subflow in IPv6 between the second IPv6 of
ns2 (subflow,fullmesh) and the main IPv6 of the server on ns1?
> + fi
> +
> + if reset "simult IPv4 and IPv6 subflows, fullmesh 2x2"; then
> + pm_nl_set_limits $ns1 0 4
> + pm_nl_set_limits $ns2 2 4
> + pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
> + pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
> + run_tests $ns1 $ns2 dead:beef:1::1 0 0 fullmesh_1 slow
> + chk_join_nr 4 4 4
Why do you have so many subflows here? The client has one IPv6 and the
server 2, no?
Cheers,
Matt
--
Tessares | Belgium | Hybrid Access Solutions
www.tessares.net
On Fri, 2022-12-23 at 15:35 +0100, Matthieu Baerts wrote:
> Hi Paolo,
>
> On 23/12/2022 13:51, Paolo Abeni wrote:
> > Note that we can no longer guess the listener family based on the
> > client target address; always use IPv6.
> >
> > Signed-off-by: Paolo Abeni <pabeni@redhat.com>
> > ---
> > .../testing/selftests/net/mptcp/mptcp_join.sh | 51 +++++++++++++++----
> > 1 file changed, 42 insertions(+), 9 deletions(-)
> >
> > diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
> > index d11d3d566608..6ef4787ad244 100755
> > --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
> > +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
> > @@ -774,24 +774,17 @@ do_transfer()
> > addr_nr_ns2=${addr_nr_ns2:9}
> > fi
> >
> > - local local_addr
> > - if is_v6 "${connect_addr}"; then
> > - local_addr="::"
> > - else
> > - local_addr="0.0.0.0"
> > - fi
> > -
> > extra_srv_args="$extra_args $extra_srv_args"
> > if [ "$test_link_fail" -gt 1 ];then
> > timeout ${timeout_test} \
> > ip netns exec ${listener_ns} \
> > ./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
> > - $extra_srv_args ${local_addr} < "$sinfail" > "$sout" &
> > + $extra_srv_args "::" < "$sinfail" > "$sout" &
> > else
> > timeout ${timeout_test} \
> > ip netns exec ${listener_ns} \
> > ./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
> > - $extra_srv_args ${local_addr} < "$sin" > "$sout" &
> > + $extra_srv_args "::" < "$sin" > "$sout" &
> > fi
> > local spid=$!
> >
> > @@ -2448,6 +2441,45 @@ v4mapped_tests()
> > fi
> > }
> >
> > +mixed_tests()
> > +{
> > + # subflow IPv4-mapped to IPv4-mapped
>
> (small detail: the comment was not adapted to this test. I can remove it
> when applying the patch if no other modifications are needed. Same for
> the next test just below.)
>
> > + if reset "simult IPv4 and IPv6 subflows, first is IPv4"; then
> > + pm_nl_set_limits $ns1 0 1
> > + pm_nl_set_limits $ns2 1 1
> > + pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
> > + run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
>
> (linked to my question from patch 1/3)
>
> If I'm not mistaken, by default, mptcp_connect will create an MPTCP
> socket with AF_INET family if the initial destination is an IPv4
> address, no?
> Should we not try to connect to ::ffff:10.0.1.1 here instead (or pass
> the -6 option? Maybe that is not enough)?
>
> Also, I guess we should have an extra test to make sure a v4-only
> socket does not allow extra subflows in v6, no?
>
> > + chk_join_nr 1 1 1
> > + fi
> > +
> > + # subflow IPv4-mapped to IPv4-mapped
> > + if reset "simult IPv4 and IPv6 subflows, first is IPv6"; then
> > + pm_nl_set_limits $ns1 0 1
> > + pm_nl_set_limits $ns2 1 1
> > + pm_nl_add_endpoint $ns1 10.0.1.1 flags signal
> > + run_tests $ns1 $ns2 dead:beef:2::1 0 0 0 slow
>
> Linked to the previous question: that makes me think we don't properly
> propagate the IPV6_V6ONLY socket option (sk_ipv6only) to new subflows
> (just before the connect). Now that we can connect to v4 addresses, it
> might be an issue. Easy to fix with this I guess:
>
> --------------------- 8< ---------------------
> > diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
> > index 582ed93bcc8a..9986681aaf40 100644
> > --- a/net/mptcp/sockopt.c
> > +++ b/net/mptcp/sockopt.c
> > @@ -1255,6 +1255,7 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
> > ssk->sk_priority = sk->sk_priority;
> > ssk->sk_bound_dev_if = sk->sk_bound_dev_if;
> > ssk->sk_incoming_cpu = sk->sk_incoming_cpu;
> > + ssk->sk_ipv6only = sk->sk_ipv6only;
> > __ip_sock_set_tos(ssk, inet_sk(sk)->tos);
> >
> > if (sk->sk_userlocks & tx_rx_locks) {
> --------------------- 8< ---------------------
>
> (+ a test checking that for v6 only socket as well I suppose)
This is an unrelated patch. Additionally, I think sk_ipv6only is not
needed on subflows other than the first one, as we already carefully
match the sk family and the address family (we create an IPv6 subflow
for an IPv6 address).
> > + chk_join_nr 1 1 1
> > + fi
> > +
> > + if reset "simult IPv4 and IPv6 subflows, fullmesh 1x1"; then
> > + pm_nl_set_limits $ns1 0 4
> > + pm_nl_set_limits $ns2 1 4
>
> Does the limit here mean 1 subflow or 1 extra subflow? (it might answer
> my question below)
>
> > + pm_nl_add_endpoint $ns2 dead:beef:2::2 flags subflow,fullmesh
> > + pm_nl_add_endpoint $ns1 10.0.1.1 flags signal
> > + run_tests $ns1 $ns2 dead:beef:2::1 0 0 0 slow
> > + chk_join_nr 1 1 1
>
> Why do you not have an extra subflow in IPv6 between the second IPv6 of
> ns2 (subflow,fullmesh) and the main IPv6 of the server on ns1?
Because there is no second IPv6 address in ns2: dead:beef:2::2 is used
as the source by the first subflow (to connect towards dead:beef:2::1).
> > + fi
> > +
> > + if reset "simult IPv4 and IPv6 subflows, fullmesh 2x2"; then
> > + pm_nl_set_limits $ns1 0 4
> > + pm_nl_set_limits $ns2 2 4
> > + pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
> > + pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
> > + run_tests $ns1 $ns2 dead:beef:1::1 0 0 fullmesh_1 slow
> > + chk_join_nr 4 4 4
>
> Why do you have so many subflows here? The client has one IPv6 and the
> server 2, no?
Here the client has 2 IPv6 addresses: dead:beef:1::2 and the one added
by the 'fullmesh_1' argument (dead:beef:3::2, IIRC).
/P
Hi Paolo,
Thank you for the replies!
On 23/12/2022 19:29, Paolo Abeni wrote:
> On Fri, 2022-12-23 at 15:35 +0100, Matthieu Baerts wrote:
>> Hi Paolo,
>>
>> On 23/12/2022 13:51, Paolo Abeni wrote:
>>> Note that we can no longer guess the listener family based on the
>>> client target address; always use IPv6.
>>>
>>> Signed-off-by: Paolo Abeni <pabeni@redhat.com>
>>> ---
>>> .../testing/selftests/net/mptcp/mptcp_join.sh | 51 +++++++++++++++----
>>> 1 file changed, 42 insertions(+), 9 deletions(-)
>>>
>>> diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
>>> index d11d3d566608..6ef4787ad244 100755
>>> --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
>>> +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
(...)
>>> + chk_join_nr 1 1 1
>>> + fi
>>> +
>>> + # subflow IPv4-mapped to IPv4-mapped
>>> + if reset "simult IPv4 and IPv6 subflows, first is IPv6"; then
>>> + pm_nl_set_limits $ns1 0 1
>>> + pm_nl_set_limits $ns2 1 1
>>> + pm_nl_add_endpoint $ns1 10.0.1.1 flags signal
>>> + run_tests $ns1 $ns2 dead:beef:2::1 0 0 0 slow
>>
>> Linked to the previous question: that makes me think we don't properly
>> propagate the IPV6_V6ONLY socket option (sk_ipv6only) to new subflows
>> (just before the connect). Now that we can connect to v4 addresses, it
>> might be an issue. Easy to fix with this I guess:
>>
>> --------------------- 8< ---------------------
>>> diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
>>> index 582ed93bcc8a..9986681aaf40 100644
>>> --- a/net/mptcp/sockopt.c
>>> +++ b/net/mptcp/sockopt.c
>>> @@ -1255,6 +1255,7 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
>>> ssk->sk_priority = sk->sk_priority;
>>> ssk->sk_bound_dev_if = sk->sk_bound_dev_if;
>>> ssk->sk_incoming_cpu = sk->sk_incoming_cpu;
>>> + ssk->sk_ipv6only = sk->sk_ipv6only;
>>> __ip_sock_set_tos(ssk, inet_sk(sk)->tos);
>>>
>>> if (sk->sk_userlocks & tx_rx_locks) {
>> --------------------- 8< ---------------------
>>
>> (+ a test checking that for v6 only socket as well I suppose)
>
> This is an unrelated patch. Additionally, I think sk_ipv6only on
> subflow other then the first one is not needed, as we already match
> carefully the sk family and address family (we create ipv6 subflow for
> ipv6 addr)
I'm sorry, I'm not sure I understand. If someone wants to have only v6
connections, they have to create an AF_INET6 socket and set the
IPV6_V6ONLY option. Why not reflect that on the different subflows, so
that the subflows are v6 only as well?
But I agree it is not directly related.
>>> + chk_join_nr 1 1 1
>>> + fi
>>> +
>>> + if reset "simult IPv4 and IPv6 subflows, fullmesh 1x1"; then
>>> + pm_nl_set_limits $ns1 0 4
>>> + pm_nl_set_limits $ns2 1 4
>>
>> Does the limit here mean 1 subflow or 1 extra subflow? (it might answer
>> my question below)
>>
>>> + pm_nl_add_endpoint $ns2 dead:beef:2::2 flags subflow,fullmesh
>>> + pm_nl_add_endpoint $ns1 10.0.1.1 flags signal
>>> + run_tests $ns1 $ns2 dead:beef:2::1 0 0 0 slow
>>> + chk_join_nr 1 1 1
>>
>> Why do you not have an extra subflow in IPv6 between the second IPv6 of
>> ns2 (subflow,fullmesh) and the main IPv6 of the server on ns1?
>
> Because there is no second IPv6 address in ns2: dead:beef:2::2 is used
> as the source by the first subflow (to connect towards dead:beef:2::1).
Ah yes indeed, I don't know why I had another topology in mind...
>>> + fi
>>> +
>>> + if reset "simult IPv4 and IPv6 subflows, fullmesh 2x2"; then
>>> + pm_nl_set_limits $ns1 0 4
>>> + pm_nl_set_limits $ns2 2 4
>>> + pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
>>> + pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
>>> + run_tests $ns1 $ns2 dead:beef:1::1 0 0 fullmesh_1 slow
>>> + chk_join_nr 4 4 4
>>
>> Why do you have so many subflows here? The client has one IPv6 and the
>> server 2, no?
>
> Here the client has 2 IPv6 addresses: dead:beef:1::2 and the one added
> by the 'fullmesh_1' argument (dead:beef:3::2, IIRC).
Indeed, thank you. There will then be 3 subflows in v6 and 1 in v4.
Cheers,
Matt
--
Tessares | Belgium | Hybrid Access Solutions
www.tessares.net
On 26/12/2022 14:09, Matthieu Baerts wrote:
> Hi Paolo,
>
> Thank you for the replies!
>
> On 23/12/2022 19:29, Paolo Abeni wrote:
>> On Fri, 2022-12-23 at 15:35 +0100, Matthieu Baerts wrote:
>>> Hi Paolo,
>>>
>>> On 23/12/2022 13:51, Paolo Abeni wrote:
>>>> Note that we can no longer guess the listener family based on the
>>>> client target address; always use IPv6.
>>>>
>>>> Signed-off-by: Paolo Abeni <pabeni@redhat.com>
>>>> ---
>>>> .../testing/selftests/net/mptcp/mptcp_join.sh | 51 +++++++++++++++----
>>>> 1 file changed, 42 insertions(+), 9 deletions(-)
>>>>
>>>> diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
>>>> index d11d3d566608..6ef4787ad244 100755
>>>> --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
>>>> +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
>
> (...)
>
>>>> + chk_join_nr 1 1 1
>>>> + fi
>>>> +
>>>> + # subflow IPv4-mapped to IPv4-mapped
>>>> + if reset "simult IPv4 and IPv6 subflows, first is IPv6"; then
>>>> + pm_nl_set_limits $ns1 0 1
>>>> + pm_nl_set_limits $ns2 1 1
>>>> + pm_nl_add_endpoint $ns1 10.0.1.1 flags signal
>>>> + run_tests $ns1 $ns2 dead:beef:2::1 0 0 0 slow
>>>
>>> Linked to the previous question: that makes me think we don't properly
>>> propagate the IPV6_V6ONLY socket option (sk_ipv6only) to new subflows
>>> (just before the connect). Now that we can connect to v4 addresses, it
>>> might be an issue. Easy to fix with this I guess:
>>>
>>> --------------------- 8< ---------------------
>>>> diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
>>>> index 582ed93bcc8a..9986681aaf40 100644
>>>> --- a/net/mptcp/sockopt.c
>>>> +++ b/net/mptcp/sockopt.c
>>>> @@ -1255,6 +1255,7 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
>>>> ssk->sk_priority = sk->sk_priority;
>>>> ssk->sk_bound_dev_if = sk->sk_bound_dev_if;
>>>> ssk->sk_incoming_cpu = sk->sk_incoming_cpu;
>>>> + ssk->sk_ipv6only = sk->sk_ipv6only;
>>>> __ip_sock_set_tos(ssk, inet_sk(sk)->tos);
>>>>
>>>> if (sk->sk_userlocks & tx_rx_locks) {
>>> --------------------- 8< ---------------------
>>>
>>> (+ a test checking that for v6 only socket as well I suppose)
>>
>> This is an unrelated patch. Additionally, I think sk_ipv6only is not
>> needed on subflows other than the first one, as we already carefully
>> match the sk family and the address family (we create an IPv6 subflow
>> for an IPv6 address).
>
> I'm sorry, I'm not sure I understand. If someone wants to have only v6
> connections, they have to create an AF_INET6 socket and set the
> IPV6_V6ONLY option. Why not reflect that on the different subflows, so
> that the subflows are v6 only as well?
>
> But I agree it is not directly related.
I just noticed that in your v2, you added a check in pm_netlink.h. But I
guess it is fine to also apply this patch, just to be sure the check in
pm_netlink.h is not bypassed somehow (by the userspace PM, a future one,
with BPF, etc.), no?
Cheers,
Matt
--
Tessares | Belgium | Hybrid Access Solutions
www.tessares.net