[PATCH net-next v2 3/3] net: ethtool: prevent user from breaking devmem single-binding rule

Bobby Eshleman posted 3 patches 2 weeks, 6 days ago
There is a newer version of this series
[PATCH net-next v2 3/3] net: ethtool: prevent user from breaking devmem single-binding rule
Posted by Bobby Eshleman 2 weeks, 6 days ago
From: Bobby Eshleman <bobbyeshleman@meta.com>

Prevent the user from breaking devmem's single-binding rule by rejecting
ethtool TCP/IP requests to modify or delete rules that will redirect a
devmem socket to a queue with a different dmabuf binding. This is done
in a "best-effort" manner because not all steering rule types are
validated.

If an ethtool_rxnfc flow steering rule evaluates true for:

1) matching a devmem socket's ip addr
2) selecting a queue with a different dmabuf binding
3) is TCP/IP (v4 or v6)

... then reject the ethtool_rxnfc request with -EBUSY to indicate a
devmem socket is using the current rules that steer it to its dmabuf
binding.

Non-TCP/IP rules are completely ignored, and if they do match a devmem
flow then they can still break devmem sockets. For example, rules
matching bytes 0 and 1 of the L4 header, etc. It is still unknown to me
whether these can be evaluated at the time of the ethtool call, so they
are left to future work (or never, if not possible).

FLOW_RSS rules which guide flows to an RSS context are also not
evaluated yet. This seems feasible, but the correct path towards
retrieving the RSS context and scanning the queues for dmabuf bindings
seems unclear and maybe overkill (re-use parts of ethtool_get_rxnfc?).

Signed-off-by: Bobby Eshleman <bobbyeshleman@meta.com>
---
 include/net/sock.h  |   1 +
 net/ethtool/ioctl.c | 144 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 net/ipv4/tcp.c      |   9 ++++
 net/ipv4/tcp_ipv4.c |   6 +++
 4 files changed, 160 insertions(+)

diff --git a/include/net/sock.h b/include/net/sock.h
index 304aad494764..73a1ff59dcde 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -579,6 +579,7 @@ struct sock {
 		struct net_devmem_dmabuf_binding	*binding;
 		atomic_t				*urefs;
 	} sk_user_frags;
+	struct list_head	sk_devmem_list;
 
 #if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES)
 	struct module		*sk_owner;
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 0b2a4d0573b3..99676ac9bbaa 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -29,11 +29,16 @@
 #include <linux/utsname.h>
 #include <net/devlink.h>
 #include <net/ipv6.h>
+#include <net/netdev_rx_queue.h>
 #include <net/xdp_sock_drv.h>
 #include <net/flow_offload.h>
 #include <net/netdev_lock.h>
 #include <linux/ethtool_netlink.h>
 #include "common.h"
+#include "../core/devmem.h"
+
+extern struct list_head devmem_sockets_list;
+extern spinlock_t devmem_sockets_lock;
 
 /* State held across locks and calls for commands which have devlink fallback */
 struct ethtool_devlink_compat {
@@ -1169,6 +1174,142 @@ ethtool_get_rxfh_fields(struct net_device *dev, u32 cmd, void __user *useraddr)
 	return ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, NULL);
 }
 
+static bool
+__ethtool_rx_flow_spec_breaks_devmem_sk(struct ethtool_rx_flow_spec *fs,
+					struct net_device *dev,
+					struct sock *sk)
+{
+	struct in6_addr saddr6, smask6, daddr6, dmask6;
+	struct sockaddr_storage saddr, daddr;
+	struct sockaddr_in6 *src6, *dst6;
+	struct sockaddr_in *src4, *dst4;
+	struct netdev_rx_queue *rxq;
+	__u32 flow_type;
+
+	if (dev != __sk_dst_get(sk)->dev)
+		return false;
+
+	src6 = (struct sockaddr_in6 *)&saddr;
+	dst6 = (struct sockaddr_in6 *)&daddr;
+	src4 = (struct sockaddr_in *)&saddr;
+	dst4 = (struct sockaddr_in *)&daddr;
+
+	if (sk->sk_family == AF_INET6) {
+		src6->sin6_port = inet_sk(sk)->inet_sport;
+		src6->sin6_addr = inet6_sk(sk)->saddr;
+		dst6->sin6_port = inet_sk(sk)->inet_dport;
+		dst6->sin6_addr = sk->sk_v6_daddr;
+	} else {
+		src4->sin_port = inet_sk(sk)->inet_sport;
+		src4->sin_addr.s_addr = inet_sk(sk)->inet_saddr;
+		dst4->sin_port = inet_sk(sk)->inet_dport;
+		dst4->sin_addr.s_addr = inet_sk(sk)->inet_daddr;
+	}
+
+	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+
+	rxq = __netif_get_rx_queue(dev, fs->ring_cookie);
+	if (!rxq)
+		return false;
+
+	/* If the requested binding and the sk binding is equal then we know
+	 * this rule can't redirect to a different binding.
+	 */
+	if (rxq->mp_params.mp_priv == sk->sk_user_frags.binding)
+		return false;
+
+	/* Reject rules that redirect RX devmem sockets to a queue with a
+	 * different dmabuf binding. Because these sockets are on the RX side
+	 * (registered in the recvmsg() path), we compare the opposite
+	 * endpoints: the socket source with the rule destination, and the
+	 * socket destination with the rule source.
+	 *
+	 * Only perform checks on the simplest rules to check, that is, IP/TCP
+	 * rules. Flow hash options are not verified, so may still break TCP
+	 * devmem flows in theory (VLAN tag, bytes 0 and 1 of L4 header,
+	 * etc...). The author of this function was simply not sure how
+	 * to validate these at the time of the ethtool call.
+	 */
+	switch (flow_type) {
+	case IPV4_USER_FLOW: {
+		const struct ethtool_usrip4_spec *v4_usr_spec, *v4_usr_m_spec;
+
+		v4_usr_spec = &fs->h_u.usr_ip4_spec;
+		v4_usr_m_spec = &fs->m_u.usr_ip4_spec;
+
+		if (((v4_usr_spec->ip4src ^ dst4->sin_addr.s_addr) & v4_usr_m_spec->ip4src) ||
+		    (v4_usr_spec->ip4dst ^ src4->sin_addr.s_addr) & v4_usr_m_spec->ip4dst) {
+			return true;
+		}
+
+		return false;
+	}
+	case TCP_V4_FLOW: {
+		const struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec;
+
+		v4_spec = &fs->h_u.tcp_ip4_spec;
+		v4_m_spec = &fs->m_u.tcp_ip4_spec;
+
+		if (((v4_spec->ip4src ^ dst4->sin_addr.s_addr) & v4_m_spec->ip4src) ||
+		    ((v4_spec->ip4dst ^ src4->sin_addr.s_addr) & v4_m_spec->ip4dst))
+			return true;
+
+		return false;
+	}
+	case IPV6_USER_FLOW: {
+		const struct ethtool_usrip6_spec *v6_usr_spec, *v6_usr_m_spec;
+
+		v6_usr_spec = &fs->h_u.usr_ip6_spec;
+		v6_usr_m_spec = &fs->m_u.usr_ip6_spec;
+
+		memcpy(&daddr6, v6_usr_spec->ip6dst, sizeof(daddr6));
+		memcpy(&dmask6, v6_usr_m_spec->ip6dst, sizeof(dmask6));
+		memcpy(&saddr6, v6_usr_spec->ip6src, sizeof(saddr6));
+		memcpy(&smask6, v6_usr_m_spec->ip6src, sizeof(smask6));
+
+		return !ipv6_masked_addr_cmp(&saddr6, &smask6, &dst6->sin6_addr) &&
+		       !ipv6_masked_addr_cmp(&daddr6, &dmask6, &src6->sin6_addr);
+	}
+	case TCP_V6_FLOW: {
+		const struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec;
+
+		v6_spec = &fs->h_u.tcp_ip6_spec;
+		v6_m_spec = &fs->m_u.tcp_ip6_spec;
+
+		memcpy(&daddr6, v6_spec->ip6dst, sizeof(daddr6));
+		memcpy(&dmask6, v6_m_spec->ip6dst, sizeof(dmask6));
+		memcpy(&saddr6, v6_spec->ip6src, sizeof(saddr6));
+		memcpy(&smask6, v6_m_spec->ip6src, sizeof(smask6));
+
+		return !ipv6_masked_addr_cmp(&daddr6, &dmask6, &src6->sin6_addr) &&
+		       !ipv6_masked_addr_cmp(&saddr6, &smask6, &dst6->sin6_addr);
+	}
+	default:
+		return false;
+	}
+}
+
+static bool
+ethtool_rx_flow_spec_breaks_devmem_sk(struct ethtool_rx_flow_spec *fs,
+				      struct net_device *dev)
+{
+	struct sock *sk;
+	bool ret;
+
+	ret = false;
+
+	spin_lock_bh(&devmem_sockets_lock);
+	list_for_each_entry(sk, &devmem_sockets_list, sk_devmem_list) {
+		if (__ethtool_rx_flow_spec_breaks_devmem_sk(fs, dev, sk)) {
+			ret = true;
+			break;
+		}
+	}
+	spin_unlock_bh(&devmem_sockets_lock);
+
+	return ret;
+}
+
 static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
 						u32 cmd, void __user *useraddr)
 {
@@ -1197,6 +1338,9 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
 			return -EINVAL;
 	}
 
+	if (ethtool_rx_flow_spec_breaks_devmem_sk(&info.fs, dev))
+		return -EBUSY;
+
 	rc = ops->set_rxnfc(dev, &info);
 	if (rc)
 		return rc;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 438b8132ed89..3f57e658ea80 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -311,6 +311,12 @@ DEFINE_STATIC_KEY_FALSE(tcp_have_smc);
 EXPORT_SYMBOL(tcp_have_smc);
 #endif
 
+struct list_head devmem_sockets_list;
+EXPORT_SYMBOL_GPL(devmem_sockets_list);
+
+DEFINE_SPINLOCK(devmem_sockets_lock);
+EXPORT_SYMBOL_GPL(devmem_sockets_lock);
+
 /*
  * Current number of TCP sockets.
  */
@@ -5229,4 +5235,7 @@ void __init tcp_init(void)
 	BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
 	tcp_tsq_work_init();
 	mptcp_init();
+
+	spin_lock_init(&devmem_sockets_lock);
+	INIT_LIST_HEAD(&devmem_sockets_list);
 }
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 68ebf96d06f8..a3213c97aed9 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -92,6 +92,9 @@
 
 #include <trace/events/tcp.h>
 
+extern struct list_head devmem_sockets_list;
+extern spinlock_t devmem_sockets_lock;
+
 #ifdef CONFIG_TCP_MD5SIG
 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
 			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
@@ -2559,6 +2562,9 @@ static void tcp_release_user_frags(struct sock *sk)
 	sk->sk_user_frags.binding = NULL;
 	kvfree(sk->sk_user_frags.urefs);
 	sk->sk_user_frags.urefs = NULL;
+	spin_lock_bh(&devmem_sockets_lock);
+	list_del(&sk->sk_devmem_list);
+	spin_unlock_bh(&devmem_sockets_lock);
 #endif
 }
 

-- 
2.47.3
Re: [PATCH net-next v2 3/3] net: ethtool: prevent user from breaking devmem single-binding rule
Posted by Stanislav Fomichev 2 weeks, 5 days ago
On 09/11, Bobby Eshleman wrote:
> From: Bobby Eshleman <bobbyeshleman@meta.com>
> 
> Prevent the user from breaking devmem's single-binding rule by rejecting
> ethtool TCP/IP requests to modify or delete rules that will redirect a
> devmem socket to a queue with a different dmabuf binding. This is done
> in a "best effort" approach because not all steering rule types are
> validated.
> 
> If an ethtool_rxnfc flow steering rule evaluates true for:
> 
> 1) matching a devmem socket's ip addr
> 2) selecting a queue with a different dmabuf binding
> 3) is TCP/IP (v4 or v6)
> 
> ... then reject the ethtool_rxnfc request with -EBUSY to indicate a
> devmem socket is using the current rules that steer it to its dmabuf
> binding.
> 
> Non-TCP/IP rules are completely ignored, and if they do match a devmem
> flow then they can still break devmem sockets. For example, bytes 0 and
> 1 of L2 headers, etc... it is still unknown to me if these are possible
> to evaluate at the time of the ethtool call, and so are left to future
> work (or never, if not possible).
> 
> FLOW_RSS rules which guide flows to an RSS context are also not
> evaluated yet. This seems feasible, but the correct path towards
> retrieving the RSS context and scanning the queues for dmabuf bindings
> seems unclear and maybe overkill (re-use parts of ethtool_get_rxnfc?).
> 
> Signed-off-by: Bobby Eshleman <bobbyeshleman@meta.com>
> ---
>  include/net/sock.h  |   1 +
>  net/ethtool/ioctl.c | 144 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>  net/ipv4/tcp.c      |   9 ++++
>  net/ipv4/tcp_ipv4.c |   6 +++
>  4 files changed, 160 insertions(+)
> 
> diff --git a/include/net/sock.h b/include/net/sock.h
> index 304aad494764..73a1ff59dcde 100644
> --- a/include/net/sock.h
> +++ b/include/net/sock.h
> @@ -579,6 +579,7 @@ struct sock {
>  		struct net_devmem_dmabuf_binding	*binding;
>  		atomic_t				*urefs;
>  	} sk_user_frags;
> +	struct list_head	sk_devmem_list;
>  
>  #if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES)
>  	struct module		*sk_owner;
> diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
> index 0b2a4d0573b3..99676ac9bbaa 100644
> --- a/net/ethtool/ioctl.c
> +++ b/net/ethtool/ioctl.c
> @@ -29,11 +29,16 @@
>  #include <linux/utsname.h>
>  #include <net/devlink.h>
>  #include <net/ipv6.h>
> +#include <net/netdev_rx_queue.h>
>  #include <net/xdp_sock_drv.h>
>  #include <net/flow_offload.h>
>  #include <net/netdev_lock.h>
>  #include <linux/ethtool_netlink.h>
>  #include "common.h"
> +#include "../core/devmem.h"
> +
> +extern struct list_head devmem_sockets_list;
> +extern spinlock_t devmem_sockets_lock;
>  
>  /* State held across locks and calls for commands which have devlink fallback */
>  struct ethtool_devlink_compat {
> @@ -1169,6 +1174,142 @@ ethtool_get_rxfh_fields(struct net_device *dev, u32 cmd, void __user *useraddr)
>  	return ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, NULL);
>  }
>  
> +static bool
> +__ethtool_rx_flow_spec_breaks_devmem_sk(struct ethtool_rx_flow_spec *fs,
> +					struct net_device *dev,
> +					struct sock *sk)
> +{
> +	struct in6_addr saddr6, smask6, daddr6, dmask6;
> +	struct sockaddr_storage saddr, daddr;
> +	struct sockaddr_in6 *src6, *dst6;
> +	struct sockaddr_in *src4, *dst4;
> +	struct netdev_rx_queue *rxq;
> +	__u32 flow_type;
> +
> +	if (dev != __sk_dst_get(sk)->dev)
> +		return false;
> +
> +	src6 = (struct sockaddr_in6 *)&saddr;
> +	dst6 = (struct sockaddr_in6 *)&daddr;
> +	src4 = (struct sockaddr_in *)&saddr;
> +	dst4 = (struct sockaddr_in *)&daddr;
> +
> +	if (sk->sk_family == AF_INET6) {
> +		src6->sin6_port = inet_sk(sk)->inet_sport;
> +		src6->sin6_addr = inet6_sk(sk)->saddr;
> +		dst6->sin6_port = inet_sk(sk)->inet_dport;
> +		dst6->sin6_addr = sk->sk_v6_daddr;
> +	} else {
> +		src4->sin_port = inet_sk(sk)->inet_sport;
> +		src4->sin_addr.s_addr = inet_sk(sk)->inet_saddr;
> +		dst4->sin_port = inet_sk(sk)->inet_dport;
> +		dst4->sin_addr.s_addr = inet_sk(sk)->inet_daddr;
> +	}
> +
> +	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
> +
> +	rxq = __netif_get_rx_queue(dev, fs->ring_cookie);
> +	if (!rxq)
> +		return false;
> +
> +	/* If the requested binding and the sk binding is equal then we know
> +	 * this rule can't redirect to a different binding.
> +	 */
> +	if (rxq->mp_params.mp_priv == sk->sk_user_frags.binding)
> +		return false;
> +
> +	/* Reject rules that redirect RX devmem sockets to a queue with a
> +	 * different dmabuf binding. Because these sockets are on the RX side
> +	 * (registered in the recvmsg() path), we compare the opposite
> +	 * endpoints: the socket source with the rule destination, and the
> +	 * socket destination with the rule source.
> +	 *
> +	 * Only perform checks on the simplest rules to check, that is, IP/TCP
> +	 * rules. Flow hash options are not verified, so may still break TCP
> +	 * devmem flows in theory (VLAN tag, bytes 0 and 1 of L4 header,
> +	 * etc...). The author of this function was simply not sure how
> +	 * to validate these at the time of the ethtool call.
> +	 */
> +	switch (flow_type) {
> +	case IPV4_USER_FLOW: {
> +		const struct ethtool_usrip4_spec *v4_usr_spec, *v4_usr_m_spec;
> +
> +		v4_usr_spec = &fs->h_u.usr_ip4_spec;
> +		v4_usr_m_spec = &fs->m_u.usr_ip4_spec;
> +
> +		if (((v4_usr_spec->ip4src ^ dst4->sin_addr.s_addr) & v4_usr_m_spec->ip4src) ||
> +		    (v4_usr_spec->ip4dst ^ src4->sin_addr.s_addr) & v4_usr_m_spec->ip4dst) {
> +			return true;
> +		}
> +
> +		return false;
> +	}
> +	case TCP_V4_FLOW: {
> +		const struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec;
> +
> +		v4_spec = &fs->h_u.tcp_ip4_spec;
> +		v4_m_spec = &fs->m_u.tcp_ip4_spec;
> +
> +		if (((v4_spec->ip4src ^ dst4->sin_addr.s_addr) & v4_m_spec->ip4src) ||
> +		    ((v4_spec->ip4dst ^ src4->sin_addr.s_addr) & v4_m_spec->ip4dst))
> +			return true;
> +

The ports need to be checked as well? But my preference overall would
be to go back to checking this condition during recvmsg. We can pick
some new obscure errno number to clearly explain to the user what
happened. EPIPE or something similar, to mean that the socket is cooked.
But let's see if Mina has a different opinion..
Re: [PATCH net-next v2 3/3] net: ethtool: prevent user from breaking devmem single-binding rule
Posted by Mina Almasry 2 weeks ago
On Fri, Sep 12, 2025 at 3:23 PM Stanislav Fomichev <stfomichev@gmail.com> wrote:
>
> On 09/11, Bobby Eshleman wrote:
> > From: Bobby Eshleman <bobbyeshleman@meta.com>
> >
> > Prevent the user from breaking devmem's single-binding rule by rejecting
> > ethtool TCP/IP requests to modify or delete rules that will redirect a
> > devmem socket to a queue with a different dmabuf binding. This is done
> > in a "best effort" approach because not all steering rule types are
> > validated.
> >
> > If an ethtool_rxnfc flow steering rule evaluates true for:
> >
> > 1) matching a devmem socket's ip addr
> > 2) selecting a queue with a different dmabuf binding
> > 3) is TCP/IP (v4 or v6)
> >
> > ... then reject the ethtool_rxnfc request with -EBUSY to indicate a
> > devmem socket is using the current rules that steer it to its dmabuf
> > binding.
> >
> > Non-TCP/IP rules are completely ignored, and if they do match a devmem
> > flow then they can still break devmem sockets. For example, bytes 0 and
> > 1 of L2 headers, etc... it is still unknown to me if these are possible
> > to evaluate at the time of the ethtool call, and so are left to future
> > work (or never, if not possible).
> >
> > FLOW_RSS rules which guide flows to an RSS context are also not
> > evaluated yet. This seems feasible, but the correct path towards
> > retrieving the RSS context and scanning the queues for dmabuf bindings
> > seems unclear and maybe overkill (re-use parts of ethtool_get_rxnfc?).
> >
> > Signed-off-by: Bobby Eshleman <bobbyeshleman@meta.com>
> > ---
> >  include/net/sock.h  |   1 +
> >  net/ethtool/ioctl.c | 144 ++++++++++++++++++++++++++++++++++++++++++++++++++++
> >  net/ipv4/tcp.c      |   9 ++++
> >  net/ipv4/tcp_ipv4.c |   6 +++
> >  4 files changed, 160 insertions(+)
> >
> > diff --git a/include/net/sock.h b/include/net/sock.h
> > index 304aad494764..73a1ff59dcde 100644
> > --- a/include/net/sock.h
> > +++ b/include/net/sock.h
> > @@ -579,6 +579,7 @@ struct sock {
> >               struct net_devmem_dmabuf_binding        *binding;
> >               atomic_t                                *urefs;
> >       } sk_user_frags;
> > +     struct list_head        sk_devmem_list;
> >
> >  #if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES)
> >       struct module           *sk_owner;
> > diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
> > index 0b2a4d0573b3..99676ac9bbaa 100644
> > --- a/net/ethtool/ioctl.c
> > +++ b/net/ethtool/ioctl.c
> > @@ -29,11 +29,16 @@
> >  #include <linux/utsname.h>
> >  #include <net/devlink.h>
> >  #include <net/ipv6.h>
> > +#include <net/netdev_rx_queue.h>
> >  #include <net/xdp_sock_drv.h>
> >  #include <net/flow_offload.h>
> >  #include <net/netdev_lock.h>
> >  #include <linux/ethtool_netlink.h>
> >  #include "common.h"
> > +#include "../core/devmem.h"
> > +
> > +extern struct list_head devmem_sockets_list;
> > +extern spinlock_t devmem_sockets_lock;
> >
> >  /* State held across locks and calls for commands which have devlink fallback */
> >  struct ethtool_devlink_compat {
> > @@ -1169,6 +1174,142 @@ ethtool_get_rxfh_fields(struct net_device *dev, u32 cmd, void __user *useraddr)
> >       return ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, NULL);
> >  }
> >
> > +static bool
> > +__ethtool_rx_flow_spec_breaks_devmem_sk(struct ethtool_rx_flow_spec *fs,
> > +                                     struct net_device *dev,
> > +                                     struct sock *sk)
> > +{
> > +     struct in6_addr saddr6, smask6, daddr6, dmask6;
> > +     struct sockaddr_storage saddr, daddr;
> > +     struct sockaddr_in6 *src6, *dst6;
> > +     struct sockaddr_in *src4, *dst4;
> > +     struct netdev_rx_queue *rxq;
> > +     __u32 flow_type;
> > +
> > +     if (dev != __sk_dst_get(sk)->dev)
> > +             return false;
> > +
> > +     src6 = (struct sockaddr_in6 *)&saddr;
> > +     dst6 = (struct sockaddr_in6 *)&daddr;
> > +     src4 = (struct sockaddr_in *)&saddr;
> > +     dst4 = (struct sockaddr_in *)&daddr;
> > +
> > +     if (sk->sk_family == AF_INET6) {
> > +             src6->sin6_port = inet_sk(sk)->inet_sport;
> > +             src6->sin6_addr = inet6_sk(sk)->saddr;
> > +             dst6->sin6_port = inet_sk(sk)->inet_dport;
> > +             dst6->sin6_addr = sk->sk_v6_daddr;
> > +     } else {
> > +             src4->sin_port = inet_sk(sk)->inet_sport;
> > +             src4->sin_addr.s_addr = inet_sk(sk)->inet_saddr;
> > +             dst4->sin_port = inet_sk(sk)->inet_dport;
> > +             dst4->sin_addr.s_addr = inet_sk(sk)->inet_daddr;
> > +     }
> > +
> > +     flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
> > +
> > +     rxq = __netif_get_rx_queue(dev, fs->ring_cookie);
> > +     if (!rxq)
> > +             return false;
> > +
> > +     /* If the requested binding and the sk binding is equal then we know
> > +      * this rule can't redirect to a different binding.
> > +      */
> > +     if (rxq->mp_params.mp_priv == sk->sk_user_frags.binding)
> > +             return false;
> > +
> > +     /* Reject rules that redirect RX devmem sockets to a queue with a
> > +      * different dmabuf binding. Because these sockets are on the RX side
> > +      * (registered in the recvmsg() path), we compare the opposite
> > +      * endpoints: the socket source with the rule destination, and the
> > +      * socket destination with the rule source.
> > +      *
> > +      * Only perform checks on the simplest rules to check, that is, IP/TCP
> > +      * rules. Flow hash options are not verified, so may still break TCP
> > +      * devmem flows in theory (VLAN tag, bytes 0 and 1 of L4 header,
> > +      * etc...). The author of this function was simply not sure how
> > +      * to validate these at the time of the ethtool call.
> > +      */
> > +     switch (flow_type) {
> > +     case IPV4_USER_FLOW: {
> > +             const struct ethtool_usrip4_spec *v4_usr_spec, *v4_usr_m_spec;
> > +
> > +             v4_usr_spec = &fs->h_u.usr_ip4_spec;
> > +             v4_usr_m_spec = &fs->m_u.usr_ip4_spec;
> > +
> > +             if (((v4_usr_spec->ip4src ^ dst4->sin_addr.s_addr) & v4_usr_m_spec->ip4src) ||
> > +                 (v4_usr_spec->ip4dst ^ src4->sin_addr.s_addr) & v4_usr_m_spec->ip4dst) {
> > +                     return true;
> > +             }
> > +
> > +             return false;
> > +     }
> > +     case TCP_V4_FLOW: {
> > +             const struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec;
> > +
> > +             v4_spec = &fs->h_u.tcp_ip4_spec;
> > +             v4_m_spec = &fs->m_u.tcp_ip4_spec;
> > +
> > +             if (((v4_spec->ip4src ^ dst4->sin_addr.s_addr) & v4_m_spec->ip4src) ||
> > +                 ((v4_spec->ip4dst ^ src4->sin_addr.s_addr) & v4_m_spec->ip4dst))
> > +                     return true;
> > +
>
> The ports need to be checked as well? But my preference overall would
> be to go back to checking this condition during recvmsg. We can pick
> some new obscure errno number to clearly explain to the user what
> happened. EPIPE or something similar, to mean that the socket is cooked.
> But let's see if Mina has a different opinion..

Sorry for the late reply.

IIUC it looks to me like AF_XDP set the precedent that the user can
break the socket if they mess with the flow steering rules, and I'm
guessing io_uring zc does something similar. Only devmem tries to have
the socket work regardless on which rxqueue the incoming packets land
on, but that was predicated on the being able to do the tracking
efficiently which seems to not entirely be the case.

I think I'm OK with dropping this patch. We should probably add to the
docs the new restriction on devmem sockets. In our prod code we don't
reprogram rules while the socket is running. I don't think this will
break us, IDK if it will break anyone else, but it is unlikely.

-- 
Thanks,
Mina