All drivers that use the queue API have already been converted to use
the netdev instance lock. Move netdev instance lock management into the
netlink layer and drop rtnl_lock.
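A minimal sketch of the resulting calling convention (illustrative,
not part of the diff below): netdev_rx_queue_restart() now asserts the
instance lock instead of taking it internally, so callers wrap it as

	netdev_lock(dev);
	err = netdev_rx_queue_restart(dev, rxq_idx);
	netdev_unlock(dev);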
Cc: Mina Almasry <almasrymina@google.com>
Signed-off-by: Stanislav Fomichev <sdf@fomichev.me>
---
drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4 ++--
drivers/net/netdevsim/netdev.c | 4 ++--
net/core/devmem.c | 3 ++-
net/core/netdev-genl.c | 18 +++++-------------
net/core/netdev_rx_queue.c | 15 +++++----------
5 files changed, 16 insertions(+), 28 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 1a1e6da77777..9b936cfc1d29 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -11380,14 +11380,14 @@ static void bnxt_irq_affinity_notify(struct irq_affinity_notify *notify,
if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag))
return;
- rtnl_lock();
+ netdev_lock(irq->bp->dev);
if (netif_running(irq->bp->dev)) {
err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr);
if (err)
netdev_err(irq->bp->dev,
"RX queue restart failed: err=%d\n", err);
}
- rtnl_unlock();
+ netdev_unlock(irq->bp->dev);
}
static void bnxt_irq_affinity_release(struct kref *ref)
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 54d03b0628d2..1ba2ff5e4453 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -786,7 +786,7 @@ nsim_qreset_write(struct file *file, const char __user *data,
if (ret != 2)
return -EINVAL;
- rtnl_lock();
+ netdev_lock(ns->netdev);
if (queue >= ns->netdev->real_num_rx_queues) {
ret = -EINVAL;
goto exit_unlock;
@@ -800,7 +800,7 @@ nsim_qreset_write(struct file *file, const char __user *data,
ret = count;
exit_unlock:
- rtnl_unlock();
+ netdev_unlock(ns->netdev);
return ret;
}
diff --git a/net/core/devmem.c b/net/core/devmem.c
index c16cdac46bed..e48eb42d7377 100644
--- a/net/core/devmem.c
+++ b/net/core/devmem.c
@@ -132,9 +132,10 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
rxq->mp_params.mp_priv = NULL;
rxq->mp_params.mp_ops = NULL;
+ netdev_lock(binding->dev);
rxq_idx = get_netdev_rx_queue_index(rxq);
-
WARN_ON(netdev_rx_queue_restart(binding->dev, rxq_idx));
+ netdev_unlock(binding->dev);
}
net_devmem_dmabuf_binding_put(binding);
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index 8acdeeae24e7..1de28f9e0219 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -481,8 +481,6 @@ int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info)
if (!rsp)
return -ENOMEM;
- rtnl_lock();
-
netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex);
if (netdev) {
err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info);
@@ -491,8 +489,6 @@ int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info)
err = -ENODEV;
}
- rtnl_unlock();
-
if (err)
goto err_free_msg;
@@ -541,7 +537,6 @@ int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
if (info->attrs[NETDEV_A_QUEUE_IFINDEX])
ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);
- rtnl_lock();
if (ifindex) {
netdev = netdev_get_by_index_lock(net, ifindex);
if (netdev) {
@@ -559,7 +554,6 @@ int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
ctx->txq_idx = 0;
}
}
- rtnl_unlock();
return err;
}
@@ -860,12 +854,11 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
}
mutex_lock(&priv->lock);
- rtnl_lock();
- netdev = __dev_get_by_index(genl_info_net(info), ifindex);
+ netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex);
if (!netdev || !netif_device_present(netdev)) {
err = -ENODEV;
- goto err_unlock;
+ goto err_unlock_sock;
}
if (dev_xdp_prog_count(netdev)) {
@@ -918,14 +911,15 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
if (err)
goto err_unbind;
- rtnl_unlock();
+ netdev_unlock(netdev);
return 0;
err_unbind:
net_devmem_unbind_dmabuf(binding);
err_unlock:
- rtnl_unlock();
+ netdev_unlock(netdev);
+err_unlock_sock:
mutex_unlock(&priv->lock);
err_genlmsg_free:
nlmsg_free(rsp);
@@ -945,9 +939,7 @@ void netdev_nl_sock_priv_destroy(struct netdev_nl_sock *priv)
mutex_lock(&priv->lock);
list_for_each_entry_safe(binding, temp, &priv->bindings, list) {
- rtnl_lock();
net_devmem_unbind_dmabuf(binding);
- rtnl_unlock();
}
mutex_unlock(&priv->lock);
}
diff --git a/net/core/netdev_rx_queue.c b/net/core/netdev_rx_queue.c
index 7419c41fd3cb..8fd5d784ac09 100644
--- a/net/core/netdev_rx_queue.c
+++ b/net/core/netdev_rx_queue.c
@@ -18,7 +18,7 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
!qops->ndo_queue_mem_alloc || !qops->ndo_queue_start)
return -EOPNOTSUPP;
- ASSERT_RTNL();
+ netdev_assert_locked(dev);
new_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
if (!new_mem)
@@ -30,8 +30,6 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
goto err_free_new_mem;
}
- netdev_lock(dev);
-
err = qops->ndo_queue_mem_alloc(dev, new_mem, rxq_idx);
if (err)
goto err_free_old_mem;
@@ -54,8 +52,6 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
qops->ndo_queue_mem_free(dev, old_mem);
- netdev_unlock(dev);
-
kvfree(old_mem);
kvfree(new_mem);
@@ -80,7 +76,6 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
qops->ndo_queue_mem_free(dev, new_mem);
err_free_old_mem:
- netdev_unlock(dev);
kvfree(old_mem);
err_free_new_mem:
@@ -118,9 +113,9 @@ int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
{
int ret;
- rtnl_lock();
+ netdev_lock(dev);
ret = __net_mp_open_rxq(dev, ifq_idx, p);
- rtnl_unlock();
+ netdev_unlock(dev);
return ret;
}
@@ -153,7 +148,7 @@ static void __net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
struct pp_memory_provider_params *old_p)
{
- rtnl_lock();
+ netdev_lock(dev);
__net_mp_close_rxq(dev, ifq_idx, old_p);
- rtnl_unlock();
+ netdev_unlock(dev);
}
--
2.48.1
On Fri, 7 Mar 2025 07:57:25 -0800 Stanislav Fomichev wrote:
> All drivers that use the queue API have already been converted to use
> the netdev instance lock. Move netdev instance lock management into the
> netlink layer and drop rtnl_lock.
> @@ -860,12 +854,11 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
> }
>
> mutex_lock(&priv->lock);
> - rtnl_lock();
>
> - netdev = __dev_get_by_index(genl_info_net(info), ifindex);
> + netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex);
> if (!netdev || !netif_device_present(netdev)) {
> err = -ENODEV;
> - goto err_unlock;
> + goto err_unlock_sock;
> }
>
> if (dev_xdp_prog_count(netdev)) {
> @@ -918,14 +911,15 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
> if (err)
> goto err_unbind;
>
> - rtnl_unlock();
> + netdev_unlock(netdev);
Ah, here's the unlock :)
Looks good for the devmem binding, I think; the other functions will
need a bit more careful handling. So perhaps drop the queue get changes?
I'm cooking some patches for the queue get and queue stats.
AFAIU we need helpers which will go over netdevs and take either the
rtnl lock or the instance lock, depending on whether the driver is
"ops locked".
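A rough sketch of one shape such a helper could take (the name and the
netdev_need_ops_lock() predicate are assumptions here, not something
from this patch):

	static void netdev_lock_ops_or_rtnl(struct net_device *dev)
	{
		/* instance lock for "ops locked" drivers, rtnl otherwise */
		if (netdev_need_ops_lock(dev))
			netdev_lock(dev);
		else
			rtnl_lock();
	}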
On 03/07, Jakub Kicinski wrote:
> On Fri, 7 Mar 2025 07:57:25 -0800 Stanislav Fomichev wrote:
> > All drivers that use the queue API have already been converted to use
> > the netdev instance lock. Move netdev instance lock management into the
> > netlink layer and drop rtnl_lock.
>
> > @@ -860,12 +854,11 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
> > }
> >
> > mutex_lock(&priv->lock);
> > - rtnl_lock();
> >
> > - netdev = __dev_get_by_index(genl_info_net(info), ifindex);
> > + netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex);
> > if (!netdev || !netif_device_present(netdev)) {
> > err = -ENODEV;
> > - goto err_unlock;
> > + goto err_unlock_sock;
> > }
> >
> > if (dev_xdp_prog_count(netdev)) {
> > @@ -918,14 +911,15 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
> > if (err)
> > goto err_unbind;
> >
> > - rtnl_unlock();
> > + netdev_unlock(netdev);
>
> Ah, here's the unlock :)
mutex_unlock(&priv->lock) is still missing :(
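That is, the success path presumably needs something like this before
returning (sketch only):

	netdev_unlock(netdev);
	mutex_unlock(&priv->lock);
	return 0;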
> Looks good for the devmem binding, I think; the other functions will
> need a bit more careful handling. So perhaps drop the queue get changes?
> I'm cooking some patches for the queue get and queue stats.
> AFAIU we need helpers which will go over netdevs and take either the
> rtnl lock or the instance lock, depending on whether the driver is
> "ops locked".
Here is what I was tentatively playing with (rtnl_netdev_{,un}lock_ops
abomination):
https://github.com/fomichev/linux/commit/f791a23c358c7db0e798bc4181dc6c243c8ff77d
Which sort of does what you're suggesting in:
https://github.com/fomichev/linux/commit/392ae1f3ca823dc412a2dac2263b6c8355f6925d
Although I'm still unconditionally holding rtnl_lock during
for_each_netdev_dump...
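A hand-written sketch of that shape, assuming the existing
netdev_nl_queue_dump_one() as the per-device worker: rtnl stays held
across the iteration, while the instance lock is taken per device.

	rtnl_lock();
	for_each_netdev_dump(net, netdev, ctx->ifindex) {
		netdev_lock(netdev);
		err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
		netdev_unlock(netdev);
		if (err < 0)
			break;
	}
	rtnl_unlock();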