Prepare for NAPI-to-queue mapping by holding RTNL in code paths where
NAPIs will be mapped to queue IDs and RTNL is not currently held.
Signed-off-by: Joe Damato <jdamato@fastly.com>
---
drivers/net/virtio_net.c | 17 ++++++++++++++---
1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index cff18c66b54a..4e88d352d3eb 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2803,11 +2803,17 @@ static void virtnet_napi_do_enable(struct virtqueue *vq,
local_bh_enable();
}
-static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
+static void virtnet_napi_enable_lock(struct virtqueue *vq,
+ struct napi_struct *napi)
{
virtnet_napi_do_enable(vq, napi);
}
+static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
+{
+ virtnet_napi_enable_lock(vq, napi);
+}
+
static void virtnet_napi_tx_enable(struct virtnet_info *vi,
struct virtqueue *vq,
struct napi_struct *napi)
@@ -2844,7 +2850,7 @@ static void refill_work(struct work_struct *work)
napi_disable(&rq->napi);
still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
- virtnet_napi_enable(rq->vq, &rq->napi);
+ virtnet_napi_enable_lock(rq->vq, &rq->napi);
/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again.
@@ -5621,8 +5627,11 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
netif_tx_lock_bh(vi->dev);
netif_device_detach(vi->dev);
netif_tx_unlock_bh(vi->dev);
- if (netif_running(vi->dev))
+ if (netif_running(vi->dev)) {
+ rtnl_lock();
virtnet_close(vi->dev);
+ rtnl_unlock();
+ }
}
static int init_vqs(struct virtnet_info *vi);
@@ -5642,7 +5651,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
enable_rx_mode_work(vi);
if (netif_running(vi->dev)) {
+ rtnl_lock();
err = virtnet_open(vi->dev);
+ rtnl_unlock();
if (err)
return err;
}
--
2.25.1
On 10.01.25 21:26, Joe Damato wrote:
> Prepare for NAPI-to-queue mapping by holding RTNL in code paths where
> NAPIs will be mapped to queue IDs and RTNL is not currently held.
>
> Signed-off-by: Joe Damato <jdamato@fastly.com>
> ---
> drivers/net/virtio_net.c | 17 ++++++++++++++---
> 1 file changed, 14 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index cff18c66b54a..4e88d352d3eb 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -2803,11 +2803,17 @@ static void virtnet_napi_do_enable(struct virtqueue *vq,
> local_bh_enable();
> }
>
> -static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
> +static void virtnet_napi_enable_lock(struct virtqueue *vq,
> + struct napi_struct *napi)
> {
> virtnet_napi_do_enable(vq, napi);
> }
>
> +static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
> +{
> + virtnet_napi_enable_lock(vq, napi);
> +}
> +
> static void virtnet_napi_tx_enable(struct virtnet_info *vi,
> struct virtqueue *vq,
> struct napi_struct *napi)
> @@ -2844,7 +2850,7 @@ static void refill_work(struct work_struct *work)
>
> napi_disable(&rq->napi);
> still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
> - virtnet_napi_enable(rq->vq, &rq->napi);
> + virtnet_napi_enable_lock(rq->vq, &rq->napi);
>
> /* In theory, this can happen: if we don't get any buffers in
> * we will *never* try to fill again.
> @@ -5621,8 +5627,11 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
> netif_tx_lock_bh(vi->dev);
> netif_device_detach(vi->dev);
> netif_tx_unlock_bh(vi->dev);
> - if (netif_running(vi->dev))
> + if (netif_running(vi->dev)) {
> + rtnl_lock();
> virtnet_close(vi->dev);
> + rtnl_unlock();
> + }
> }
>
> static int init_vqs(struct virtnet_info *vi);
> @@ -5642,7 +5651,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
> enable_rx_mode_work(vi);
>
> if (netif_running(vi->dev)) {
> + rtnl_lock();
> err = virtnet_open(vi->dev);
> + rtnl_unlock();
> if (err)
> return err;
> }
Reviewed-by: Gerhard Engleder <gerhard@engleder-embedded.com>
© 2016 - 2026 Red Hat, Inc.