[PATCH net v2] virtio-net: enable all napis before scheduling refill work

Bui Quang Minh posted 1 patch 1 month, 3 weeks ago
drivers/net/virtio_net.c | 71 +++++++++++++++++++++++++---------------
1 file changed, 45 insertions(+), 26 deletions(-)
[PATCH net v2] virtio-net: enable all napis before scheduling refill work
Posted by Bui Quang Minh 1 month, 3 weeks ago
Calling napi_disable() on an already disabled napi can cause the
deadlock. In commit 4bc12818b363 ("virtio-net: disable delayed refill
when pausing rx"), to avoid the deadlock, when pausing the RX in
virtnet_rx_pause[_all](), we disable and cancel the delayed refill work.
However, in the virtnet_rx_resume_all(), we enable the delayed refill
work too early before enabling all the receive queue napis.

The deadlock can be reproduced by running
selftests/drivers/net/hw/xsk_reconfig.py with multiqueue virtio-net
device and inserting a cond_resched() inside the for loop in
virtnet_rx_resume_all() to increase the success rate. Because the worker
processing the delayed refill work runs on the same CPU as
virtnet_rx_resume_all(), a reschedule is needed to cause the deadlock.
In a real scenario, the contention on netdev_lock can cause the
reschedule.

This fixes the deadlock by ensuring all receive queues' napis are
enabled before we enable the delayed refill work in
virtnet_rx_resume_all() and virtnet_open().

Fixes: 4bc12818b363 ("virtio-net: disable delayed refill when pausing rx")
Reported-by: Paolo Abeni <pabeni@redhat.com>
Closes: https://netdev-ctrl.bots.linux.dev/logs/vmksft/drv-hw-dbg/results/400961/3-xdp-py/stderr
Cc: stable@vger.kernel.org
Signed-off-by: Bui Quang Minh <minhquangbui99@gmail.com>
---
Changes in v2:
- Move try_fill_recv() before rx napi_enable()
- Link to v1: https://lore.kernel.org/netdev/20251208153419.18196-1-minhquangbui99@gmail.com/
---
 drivers/net/virtio_net.c | 71 +++++++++++++++++++++++++---------------
 1 file changed, 45 insertions(+), 26 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 8e04adb57f52..4e08880a9467 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -3214,21 +3214,31 @@ static void virtnet_update_settings(struct virtnet_info *vi)
 static int virtnet_open(struct net_device *dev)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
+	bool schedule_refill = false;
 	int i, err;
 
-	enable_delayed_refill(vi);
-
+	/* - We must call try_fill_recv before enabling napi of the same receive
+	 * queue so that it doesn't race with the call in virtnet_receive.
+	 * - We must enable and schedule delayed refill work only when we have
+	 * enabled all the receive queue's napi. Otherwise, in refill_work, we
+	 * have a deadlock when calling napi_disable on an already disabled
+	 * napi.
+	 */
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		if (i < vi->curr_queue_pairs)
 			/* Make sure we have some buffers: if oom use wq. */
 			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
-				schedule_delayed_work(&vi->refill, 0);
+				schedule_refill = true;
 
 		err = virtnet_enable_queue_pair(vi, i);
 		if (err < 0)
 			goto err_enable_qp;
 	}
 
+	enable_delayed_refill(vi);
+	if (schedule_refill)
+		schedule_delayed_work(&vi->refill, 0);
+
 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
 		if (vi->status & VIRTIO_NET_S_LINK_UP)
 			netif_carrier_on(vi->dev);
@@ -3463,39 +3473,48 @@ static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
 	__virtnet_rx_pause(vi, rq);
 }
 
-static void __virtnet_rx_resume(struct virtnet_info *vi,
-				struct receive_queue *rq,
-				bool refill)
+static void virtnet_rx_resume_all(struct virtnet_info *vi)
 {
-	bool running = netif_running(vi->dev);
 	bool schedule_refill = false;
+	int i;
 
-	if (refill && !try_fill_recv(vi, rq, GFP_KERNEL))
-		schedule_refill = true;
-	if (running)
-		virtnet_napi_enable(rq);
-
-	if (schedule_refill)
-		schedule_delayed_work(&vi->refill, 0);
-}
+	if (netif_running(vi->dev)) {
+		/* See the comment in virtnet_open for the ordering rule
+		 * of try_fill_recv, receive queue napi_enable and delayed
+		 * refill enable/schedule.
+		 */
+		for (i = 0; i < vi->max_queue_pairs; i++) {
+			if (i < vi->curr_queue_pairs)
+				if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
+					schedule_refill = true;
 
-static void virtnet_rx_resume_all(struct virtnet_info *vi)
-{
-	int i;
+			virtnet_napi_enable(&vi->rq[i]);
+		}
 
-	enable_delayed_refill(vi);
-	for (i = 0; i < vi->max_queue_pairs; i++) {
-		if (i < vi->curr_queue_pairs)
-			__virtnet_rx_resume(vi, &vi->rq[i], true);
-		else
-			__virtnet_rx_resume(vi, &vi->rq[i], false);
+		enable_delayed_refill(vi);
+		if (schedule_refill)
+			schedule_delayed_work(&vi->refill, 0);
 	}
 }
 
 static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
 {
-	enable_delayed_refill(vi);
-	__virtnet_rx_resume(vi, rq, true);
+	bool schedule_refill = false;
+
+	if (netif_running(vi->dev)) {
+		/* See the comment in virtnet_open for the ordering rule
+		 * of try_fill_recv, receive queue napi_enable and delayed
+		 * refill enable/schedule.
+		 */
+		if (!try_fill_recv(vi, rq, GFP_KERNEL))
+			schedule_refill = true;
+
+		virtnet_napi_enable(rq);
+
+		enable_delayed_refill(vi);
+		if (schedule_refill)
+			schedule_delayed_work(&vi->refill, 0);
+	}
 }
 
 static int virtnet_rx_resize(struct virtnet_info *vi,
-- 
2.43.0
Re: [PATCH net v2] virtio-net: enable all napis before scheduling refill work
Posted by Jason Wang 1 month, 3 weeks ago
On Fri, Dec 12, 2025 at 11:28 PM Bui Quang Minh
<minhquangbui99@gmail.com> wrote:
>
> Calling napi_disable() on an already disabled napi can cause the
> deadlock. In commit 4bc12818b363 ("virtio-net: disable delayed refill
> when pausing rx"), to avoid the deadlock, when pausing the RX in
> virtnet_rx_pause[_all](), we disable and cancel the delayed refill work.
> However, in the virtnet_rx_resume_all(), we enable the delayed refill
> work too early before enabling all the receive queue napis.
>
> The deadlock can be reproduced by running
> selftests/drivers/net/hw/xsk_reconfig.py with multiqueue virtio-net
> device and inserting a cond_resched() inside the for loop in
> virtnet_rx_resume_all() to increase the success rate. Because the worker
> processing the delayed refilled work runs on the same CPU as
> virtnet_rx_resume_all(), a reschedule is needed to cause the deadlock.
> In real scenario, the contention on netdev_lock can cause the
> reschedule.
>
> This fixes the deadlock by ensuring all receive queue's napis are
> enabled before we enable the delayed refill work in
> virtnet_rx_resume_all() and virtnet_open().
>
> Fixes: 4bc12818b363 ("virtio-net: disable delayed refill when pausing rx")
> Reported-by: Paolo Abeni <pabeni@redhat.com>
> Closes: https://netdev-ctrl.bots.linux.dev/logs/vmksft/drv-hw-dbg/results/400961/3-xdp-py/stderr
> Cc: stable@vger.kernel.org
> Signed-off-by: Bui Quang Minh <minhquangbui99@gmail.com>
> ---
> Changes in v2:
> - Move try_fill_recv() before rx napi_enable()
> - Link to v1: https://lore.kernel.org/netdev/20251208153419.18196-1-minhquangbui99@gmail.com/
> ---
>  drivers/net/virtio_net.c | 71 +++++++++++++++++++++++++---------------
>  1 file changed, 45 insertions(+), 26 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 8e04adb57f52..4e08880a9467 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -3214,21 +3214,31 @@ static void virtnet_update_settings(struct virtnet_info *vi)
>  static int virtnet_open(struct net_device *dev)
>  {
>         struct virtnet_info *vi = netdev_priv(dev);
> +       bool schedule_refill = false;
>         int i, err;
>
> -       enable_delayed_refill(vi);
> -
> +       /* - We must call try_fill_recv before enabling napi of the same receive
> +        * queue so that it doesn't race with the call in virtnet_receive.
> +        * - We must enable and schedule delayed refill work only when we have
> +        * enabled all the receive queue's napi. Otherwise, in refill_work, we
> +        * have a deadlock when calling napi_disable on an already disabled
> +        * napi.
> +        */
>         for (i = 0; i < vi->max_queue_pairs; i++) {
>                 if (i < vi->curr_queue_pairs)
>                         /* Make sure we have some buffers: if oom use wq. */
>                         if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
> -                               schedule_delayed_work(&vi->refill, 0);
> +                               schedule_refill = true;
>
>                 err = virtnet_enable_queue_pair(vi, i);
>                 if (err < 0)
>                         goto err_enable_qp;
>         }

So NAPI could be scheduled and it may want to refill but since refill
is not enabled, there would be no refill work.

Is this a problem?


>
> +       enable_delayed_refill(vi);
> +       if (schedule_refill)
> +               schedule_delayed_work(&vi->refill, 0);
> +
>         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
>                 if (vi->status & VIRTIO_NET_S_LINK_UP)
>                         netif_carrier_on(vi->dev);
> @@ -3463,39 +3473,48 @@ static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
>         __virtnet_rx_pause(vi, rq);
>  }
>

Thanks
Re: [PATCH net v2] virtio-net: enable all napis before scheduling refill work
Posted by Bui Quang Minh 1 month, 3 weeks ago
On 12/16/25 11:16, Jason Wang wrote:
> On Fri, Dec 12, 2025 at 11:28 PM Bui Quang Minh
> <minhquangbui99@gmail.com> wrote:
>> Calling napi_disable() on an already disabled napi can cause the
>> deadlock. In commit 4bc12818b363 ("virtio-net: disable delayed refill
>> when pausing rx"), to avoid the deadlock, when pausing the RX in
>> virtnet_rx_pause[_all](), we disable and cancel the delayed refill work.
>> However, in the virtnet_rx_resume_all(), we enable the delayed refill
>> work too early before enabling all the receive queue napis.
>>
>> The deadlock can be reproduced by running
>> selftests/drivers/net/hw/xsk_reconfig.py with multiqueue virtio-net
>> device and inserting a cond_resched() inside the for loop in
>> virtnet_rx_resume_all() to increase the success rate. Because the worker
>> processing the delayed refilled work runs on the same CPU as
>> virtnet_rx_resume_all(), a reschedule is needed to cause the deadlock.
>> In real scenario, the contention on netdev_lock can cause the
>> reschedule.
>>
>> This fixes the deadlock by ensuring all receive queue's napis are
>> enabled before we enable the delayed refill work in
>> virtnet_rx_resume_all() and virtnet_open().
>>
>> Fixes: 4bc12818b363 ("virtio-net: disable delayed refill when pausing rx")
>> Reported-by: Paolo Abeni <pabeni@redhat.com>
>> Closes: https://netdev-ctrl.bots.linux.dev/logs/vmksft/drv-hw-dbg/results/400961/3-xdp-py/stderr
>> Cc: stable@vger.kernel.org
>> Signed-off-by: Bui Quang Minh <minhquangbui99@gmail.com>
>> ---
>> Changes in v2:
>> - Move try_fill_recv() before rx napi_enable()
>> - Link to v1: https://lore.kernel.org/netdev/20251208153419.18196-1-minhquangbui99@gmail.com/
>> ---
>>   drivers/net/virtio_net.c | 71 +++++++++++++++++++++++++---------------
>>   1 file changed, 45 insertions(+), 26 deletions(-)
>>
>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>> index 8e04adb57f52..4e08880a9467 100644
>> --- a/drivers/net/virtio_net.c
>> +++ b/drivers/net/virtio_net.c
>> @@ -3214,21 +3214,31 @@ static void virtnet_update_settings(struct virtnet_info *vi)
>>   static int virtnet_open(struct net_device *dev)
>>   {
>>          struct virtnet_info *vi = netdev_priv(dev);
>> +       bool schedule_refill = false;
>>          int i, err;
>>
>> -       enable_delayed_refill(vi);
>> -
>> +       /* - We must call try_fill_recv before enabling napi of the same receive
>> +        * queue so that it doesn't race with the call in virtnet_receive.
>> +        * - We must enable and schedule delayed refill work only when we have
>> +        * enabled all the receive queue's napi. Otherwise, in refill_work, we
>> +        * have a deadlock when calling napi_disable on an already disabled
>> +        * napi.
>> +        */
>>          for (i = 0; i < vi->max_queue_pairs; i++) {
>>                  if (i < vi->curr_queue_pairs)
>>                          /* Make sure we have some buffers: if oom use wq. */
>>                          if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
>> -                               schedule_delayed_work(&vi->refill, 0);
>> +                               schedule_refill = true;
>>
>>                  err = virtnet_enable_queue_pair(vi, i);
>>                  if (err < 0)
>>                          goto err_enable_qp;
>>          }
> So NAPI could be scheduled and it may want to refill but since refill
> is not enabled, there would be no refill work.
>
> Is this a problem?

You are right. It is indeed a problem.

I think we can unconditionally schedule the delayed refill after 
enabling all the RX NAPIs (don't check the boolean schedule_refill 
anymore) to ensure that we will have refill work. We can still keep the 
try_fill_recv here to fill the receive buffer earlier in normal case. 
What do you think?

>
>
>> +       enable_delayed_refill(vi);
>> +       if (schedule_refill)
>> +               schedule_delayed_work(&vi->refill, 0);
>> +
>>          if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
>>                  if (vi->status & VIRTIO_NET_S_LINK_UP)
>>                          netif_carrier_on(vi->dev);
>> @@ -3463,39 +3473,48 @@ static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
>>          __virtnet_rx_pause(vi, rq);
>>   }
>>
> Thanks
>

Thanks,
Quang Minh.

Re: [PATCH net v2] virtio-net: enable all napis before scheduling refill work
Posted by Jason Wang 1 month, 3 weeks ago
On Wed, Dec 17, 2025 at 12:23 AM Bui Quang Minh
<minhquangbui99@gmail.com> wrote:
>
> On 12/16/25 11:16, Jason Wang wrote:
> > On Fri, Dec 12, 2025 at 11:28 PM Bui Quang Minh
> > <minhquangbui99@gmail.com> wrote:
> >> Calling napi_disable() on an already disabled napi can cause the
> >> deadlock. In commit 4bc12818b363 ("virtio-net: disable delayed refill
> >> when pausing rx"), to avoid the deadlock, when pausing the RX in
> >> virtnet_rx_pause[_all](), we disable and cancel the delayed refill work.
> >> However, in the virtnet_rx_resume_all(), we enable the delayed refill
> >> work too early before enabling all the receive queue napis.
> >>
> >> The deadlock can be reproduced by running
> >> selftests/drivers/net/hw/xsk_reconfig.py with multiqueue virtio-net
> >> device and inserting a cond_resched() inside the for loop in
> >> virtnet_rx_resume_all() to increase the success rate. Because the worker
> >> processing the delayed refilled work runs on the same CPU as
> >> virtnet_rx_resume_all(), a reschedule is needed to cause the deadlock.
> >> In real scenario, the contention on netdev_lock can cause the
> >> reschedule.
> >>
> >> This fixes the deadlock by ensuring all receive queue's napis are
> >> enabled before we enable the delayed refill work in
> >> virtnet_rx_resume_all() and virtnet_open().
> >>
> >> Fixes: 4bc12818b363 ("virtio-net: disable delayed refill when pausing rx")
> >> Reported-by: Paolo Abeni <pabeni@redhat.com>
> >> Closes: https://netdev-ctrl.bots.linux.dev/logs/vmksft/drv-hw-dbg/results/400961/3-xdp-py/stderr
> >> Cc: stable@vger.kernel.org
> >> Signed-off-by: Bui Quang Minh <minhquangbui99@gmail.com>
> >> ---
> >> Changes in v2:
> >> - Move try_fill_recv() before rx napi_enable()
> >> - Link to v1: https://lore.kernel.org/netdev/20251208153419.18196-1-minhquangbui99@gmail.com/
> >> ---
> >>   drivers/net/virtio_net.c | 71 +++++++++++++++++++++++++---------------
> >>   1 file changed, 45 insertions(+), 26 deletions(-)
> >>
> >> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> >> index 8e04adb57f52..4e08880a9467 100644
> >> --- a/drivers/net/virtio_net.c
> >> +++ b/drivers/net/virtio_net.c
> >> @@ -3214,21 +3214,31 @@ static void virtnet_update_settings(struct virtnet_info *vi)
> >>   static int virtnet_open(struct net_device *dev)
> >>   {
> >>          struct virtnet_info *vi = netdev_priv(dev);
> >> +       bool schedule_refill = false;
> >>          int i, err;
> >>
> >> -       enable_delayed_refill(vi);
> >> -
> >> +       /* - We must call try_fill_recv before enabling napi of the same receive
> >> +        * queue so that it doesn't race with the call in virtnet_receive.
> >> +        * - We must enable and schedule delayed refill work only when we have
> >> +        * enabled all the receive queue's napi. Otherwise, in refill_work, we
> >> +        * have a deadlock when calling napi_disable on an already disabled
> >> +        * napi.
> >> +        */
> >>          for (i = 0; i < vi->max_queue_pairs; i++) {
> >>                  if (i < vi->curr_queue_pairs)
> >>                          /* Make sure we have some buffers: if oom use wq. */
> >>                          if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
> >> -                               schedule_delayed_work(&vi->refill, 0);
> >> +                               schedule_refill = true;
> >>
> >>                  err = virtnet_enable_queue_pair(vi, i);
> >>                  if (err < 0)
> >>                          goto err_enable_qp;
> >>          }
> > So NAPI could be scheduled and it may want to refill but since refill
> > is not enabled, there would be no refill work.
> >
> > Is this a problem?
>
> You are right. It is indeed a problem.
>
> I think we can unconditionally schedule the delayed refill after
> enabling all the RX NAPIs (don't check the boolean schedule_refill
> anymore) to ensure that we will have refill work. We can still keep the
> try_fill_recv here to fill the receive buffer earlier in normal case.
> What do you think?

Or we can have a refill_pending but basically I think we need something
that is much more simple. That is, using a per rq work instead of a
global one?

Thanks

>
> >
> >
> >> +       enable_delayed_refill(vi);
> >> +       if (schedule_refill)
> >> +               schedule_delayed_work(&vi->refill, 0);
> >> +
> >>          if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
> >>                  if (vi->status & VIRTIO_NET_S_LINK_UP)
> >>                          netif_carrier_on(vi->dev);
> >> @@ -3463,39 +3473,48 @@ static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
> >>          __virtnet_rx_pause(vi, rq);
> >>   }
> >>
> > Thanks
> >
>
> Thanks,
> Quang Minh.
>
Re: [PATCH net v2] virtio-net: enable all napis before scheduling refill work
Posted by Bui Quang Minh 1 month, 2 weeks ago
On 12/17/25 09:58, Jason Wang wrote:
> On Wed, Dec 17, 2025 at 12:23 AM Bui Quang Minh
> <minhquangbui99@gmail.com> wrote:
>> On 12/16/25 11:16, Jason Wang wrote:
>>> On Fri, Dec 12, 2025 at 11:28 PM Bui Quang Minh
>>> <minhquangbui99@gmail.com> wrote:
>>>> Calling napi_disable() on an already disabled napi can cause the
>>>> deadlock. In commit 4bc12818b363 ("virtio-net: disable delayed refill
>>>> when pausing rx"), to avoid the deadlock, when pausing the RX in
>>>> virtnet_rx_pause[_all](), we disable and cancel the delayed refill work.
>>>> However, in the virtnet_rx_resume_all(), we enable the delayed refill
>>>> work too early before enabling all the receive queue napis.
>>>>
>>>> The deadlock can be reproduced by running
>>>> selftests/drivers/net/hw/xsk_reconfig.py with multiqueue virtio-net
>>>> device and inserting a cond_resched() inside the for loop in
>>>> virtnet_rx_resume_all() to increase the success rate. Because the worker
>>>> processing the delayed refilled work runs on the same CPU as
>>>> virtnet_rx_resume_all(), a reschedule is needed to cause the deadlock.
>>>> In real scenario, the contention on netdev_lock can cause the
>>>> reschedule.
>>>>
>>>> This fixes the deadlock by ensuring all receive queue's napis are
>>>> enabled before we enable the delayed refill work in
>>>> virtnet_rx_resume_all() and virtnet_open().
>>>>
>>>> Fixes: 4bc12818b363 ("virtio-net: disable delayed refill when pausing rx")
>>>> Reported-by: Paolo Abeni <pabeni@redhat.com>
>>>> Closes: https://netdev-ctrl.bots.linux.dev/logs/vmksft/drv-hw-dbg/results/400961/3-xdp-py/stderr
>>>> Cc: stable@vger.kernel.org
>>>> Signed-off-by: Bui Quang Minh <minhquangbui99@gmail.com>
>>>> ---
>>>> Changes in v2:
>>>> - Move try_fill_recv() before rx napi_enable()
>>>> - Link to v1: https://lore.kernel.org/netdev/20251208153419.18196-1-minhquangbui99@gmail.com/
>>>> ---
>>>>    drivers/net/virtio_net.c | 71 +++++++++++++++++++++++++---------------
>>>>    1 file changed, 45 insertions(+), 26 deletions(-)
>>>>
>>>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>>>> index 8e04adb57f52..4e08880a9467 100644
>>>> --- a/drivers/net/virtio_net.c
>>>> +++ b/drivers/net/virtio_net.c
>>>> @@ -3214,21 +3214,31 @@ static void virtnet_update_settings(struct virtnet_info *vi)
>>>>    static int virtnet_open(struct net_device *dev)
>>>>    {
>>>>           struct virtnet_info *vi = netdev_priv(dev);
>>>> +       bool schedule_refill = false;
>>>>           int i, err;
>>>>
>>>> -       enable_delayed_refill(vi);
>>>> -
>>>> +       /* - We must call try_fill_recv before enabling napi of the same receive
>>>> +        * queue so that it doesn't race with the call in virtnet_receive.
>>>> +        * - We must enable and schedule delayed refill work only when we have
>>>> +        * enabled all the receive queue's napi. Otherwise, in refill_work, we
>>>> +        * have a deadlock when calling napi_disable on an already disabled
>>>> +        * napi.
>>>> +        */
>>>>           for (i = 0; i < vi->max_queue_pairs; i++) {
>>>>                   if (i < vi->curr_queue_pairs)
>>>>                           /* Make sure we have some buffers: if oom use wq. */
>>>>                           if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
>>>> -                               schedule_delayed_work(&vi->refill, 0);
>>>> +                               schedule_refill = true;
>>>>
>>>>                   err = virtnet_enable_queue_pair(vi, i);
>>>>                   if (err < 0)
>>>>                           goto err_enable_qp;
>>>>           }
>>> So NAPI could be scheduled and it may want to refill but since refill
>>> is not enabled, there would be no refill work.
>>>
>>> Is this a problem?
>> You are right. It is indeed a problem.
>>
>> I think we can unconditionally schedule the delayed refill after
>> enabling all the RX NAPIs (don't check the boolean schedule_refill
>> anymore) to ensure that we will have refill work. We can still keep the
>> try_fill_recv here to fill the receive buffer earlier in normal case.
>> What do you think?
> Or we can have a reill_pending

Okay, let me implement this in the next version.

> but basically I think we need something
> that is much more simple. That is, using a per rq work instead of a
> global one?

I think we can leave this in a net-next patch later.

Thanks,
Quang Minh

>
> Thanks
>
>>>
>>>> +       enable_delayed_refill(vi);
>>>> +       if (schedule_refill)
>>>> +               schedule_delayed_work(&vi->refill, 0);
>>>> +
>>>>           if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
>>>>                   if (vi->status & VIRTIO_NET_S_LINK_UP)
>>>>                           netif_carrier_on(vi->dev);
>>>> @@ -3463,39 +3473,48 @@ static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
>>>>           __virtnet_rx_pause(vi, rq);
>>>>    }
>>>>
>>> Thanks
>>>
>> Thanks,
>> Quang Minh.
>>

Re: [PATCH net v2] virtio-net: enable all napis before scheduling refill work
Posted by Michael S. Tsirkin 1 month, 2 weeks ago
On Fri, Dec 19, 2025 at 12:03:29PM +0700, Bui Quang Minh wrote:
> On 12/17/25 09:58, Jason Wang wrote:
> > On Wed, Dec 17, 2025 at 12:23 AM Bui Quang Minh
> > <minhquangbui99@gmail.com> wrote:
> > > On 12/16/25 11:16, Jason Wang wrote:
> > > > On Fri, Dec 12, 2025 at 11:28 PM Bui Quang Minh
> > > > <minhquangbui99@gmail.com> wrote:
> > > > > Calling napi_disable() on an already disabled napi can cause the
> > > > > deadlock. In commit 4bc12818b363 ("virtio-net: disable delayed refill
> > > > > when pausing rx"), to avoid the deadlock, when pausing the RX in
> > > > > virtnet_rx_pause[_all](), we disable and cancel the delayed refill work.
> > > > > However, in the virtnet_rx_resume_all(), we enable the delayed refill
> > > > > work too early before enabling all the receive queue napis.
> > > > > 
> > > > > The deadlock can be reproduced by running
> > > > > selftests/drivers/net/hw/xsk_reconfig.py with multiqueue virtio-net
> > > > > device and inserting a cond_resched() inside the for loop in
> > > > > virtnet_rx_resume_all() to increase the success rate. Because the worker
> > > > > processing the delayed refilled work runs on the same CPU as
> > > > > virtnet_rx_resume_all(), a reschedule is needed to cause the deadlock.
> > > > > In real scenario, the contention on netdev_lock can cause the
> > > > > reschedule.
> > > > > 
> > > > > This fixes the deadlock by ensuring all receive queue's napis are
> > > > > enabled before we enable the delayed refill work in
> > > > > virtnet_rx_resume_all() and virtnet_open().
> > > > > 
> > > > > Fixes: 4bc12818b363 ("virtio-net: disable delayed refill when pausing rx")
> > > > > Reported-by: Paolo Abeni <pabeni@redhat.com>
> > > > > Closes: https://netdev-ctrl.bots.linux.dev/logs/vmksft/drv-hw-dbg/results/400961/3-xdp-py/stderr
> > > > > Cc: stable@vger.kernel.org
> > > > > Signed-off-by: Bui Quang Minh <minhquangbui99@gmail.com>
> > > > > ---
> > > > > Changes in v2:
> > > > > - Move try_fill_recv() before rx napi_enable()
> > > > > - Link to v1: https://lore.kernel.org/netdev/20251208153419.18196-1-minhquangbui99@gmail.com/
> > > > > ---
> > > > >    drivers/net/virtio_net.c | 71 +++++++++++++++++++++++++---------------
> > > > >    1 file changed, 45 insertions(+), 26 deletions(-)
> > > > > 
> > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > > index 8e04adb57f52..4e08880a9467 100644
> > > > > --- a/drivers/net/virtio_net.c
> > > > > +++ b/drivers/net/virtio_net.c
> > > > > @@ -3214,21 +3214,31 @@ static void virtnet_update_settings(struct virtnet_info *vi)
> > > > >    static int virtnet_open(struct net_device *dev)
> > > > >    {
> > > > >           struct virtnet_info *vi = netdev_priv(dev);
> > > > > +       bool schedule_refill = false;
> > > > >           int i, err;
> > > > > 
> > > > > -       enable_delayed_refill(vi);
> > > > > -
> > > > > +       /* - We must call try_fill_recv before enabling napi of the same receive
> > > > > +        * queue so that it doesn't race with the call in virtnet_receive.
> > > > > +        * - We must enable and schedule delayed refill work only when we have
> > > > > +        * enabled all the receive queue's napi. Otherwise, in refill_work, we
> > > > > +        * have a deadlock when calling napi_disable on an already disabled
> > > > > +        * napi.
> > > > > +        */
> > > > >           for (i = 0; i < vi->max_queue_pairs; i++) {
> > > > >                   if (i < vi->curr_queue_pairs)
> > > > >                           /* Make sure we have some buffers: if oom use wq. */
> > > > >                           if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
> > > > > -                               schedule_delayed_work(&vi->refill, 0);
> > > > > +                               schedule_refill = true;
> > > > > 
> > > > >                   err = virtnet_enable_queue_pair(vi, i);
> > > > >                   if (err < 0)
> > > > >                           goto err_enable_qp;
> > > > >           }
> > > > So NAPI could be scheduled and it may want to refill but since refill
> > > > is not enabled, there would be no refill work.
> > > > 
> > > > Is this a problem?
> > > You are right. It is indeed a problem.
> > > 
> > > I think we can unconditionally schedule the delayed refill after
> > > enabling all the RX NAPIs (don't check the boolean schedule_refill
> > > anymore) to ensure that we will have refill work. We can still keep the
> > > try_fill_recv here to fill the receive buffer earlier in normal case.
> > > What do you think?
> > Or we can have a reill_pending
> 
> Okay, let me implement this in the next version.
> 
> > but basically I think we need something
> > that is much more simple. That is, using a per rq work instead of a
> > global one?
> 
> I think we can leave this in a net-next patch later.
> 
> Thanks,
> Quang Minh

I am not sure per rq is not simpler than this pile of tricks.


> > 
> > Thanks
> > 
> > > > 
> > > > > +       enable_delayed_refill(vi);
> > > > > +       if (schedule_refill)
> > > > > +               schedule_delayed_work(&vi->refill, 0);
> > > > > +
> > > > >           if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
> > > > >                   if (vi->status & VIRTIO_NET_S_LINK_UP)
> > > > >                           netif_carrier_on(vi->dev);
> > > > > @@ -3463,39 +3473,48 @@ static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
> > > > >           __virtnet_rx_pause(vi, rq);
> > > > >    }
> > > > > 
> > > > Thanks
> > > > 
> > > Thanks,
> > > Quang Minh.
> > > 

Re: [PATCH net v2] virtio-net: enable all napis before scheduling refill work
Posted by Paolo Abeni 1 month, 2 weeks ago
On 12/21/25 2:42 PM, Michael S. Tsirkin wrote:
> On Fri, Dec 19, 2025 at 12:03:29PM +0700, Bui Quang Minh wrote:
>> On 12/17/25 09:58, Jason Wang wrote:
>>> On Wed, Dec 17, 2025 at 12:23 AM Bui Quang Minh
>>> <minhquangbui99@gmail.com> wrote:
>>>> I think we can unconditionally schedule the delayed refill after
>>>> enabling all the RX NAPIs (don't check the boolean schedule_refill
>>>> anymore) to ensure that we will have refill work. We can still keep the
>>>> try_fill_recv here to fill the receive buffer earlier in normal case.
>>>> What do you think?
>>> Or we can have a reill_pending
>>
>> Okay, let me implement this in the next version.
>>
>>> but basically I think we need something
>>> that is much more simple. That is, using a per rq work instead of a
>>> global one?
>>
>> I think we can leave this in a net-next patch later.
>>
>> Thanks,
>> Quang Minh
> 
> i am not sure per rq is not simpler than this pile of tricks.
FWIW, I agree with Michael: the diffstat of the current patch is quite
scaring, I don't think a per RQ work would be significantly larger, but
should be significantly simpler to review and maintain.

I suggest doing directly the per RQ work implementation.

Thanks!

Paolo

Re: [PATCH net v2] virtio-net: enable all napis before scheduling refill work
Posted by Michael S. Tsirkin 1 month, 2 weeks ago
On Mon, Dec 22, 2025 at 12:58:01PM +0100, Paolo Abeni wrote:
> On 12/21/25 2:42 PM, Michael S. Tsirkin wrote:
> > On Fri, Dec 19, 2025 at 12:03:29PM +0700, Bui Quang Minh wrote:
> >> On 12/17/25 09:58, Jason Wang wrote:
> >>> On Wed, Dec 17, 2025 at 12:23 AM Bui Quang Minh
> >>> <minhquangbui99@gmail.com> wrote:
> >>>> I think we can unconditionally schedule the delayed refill after
> >>>> enabling all the RX NAPIs (don't check the boolean schedule_refill
> >>>> anymore) to ensure that we will have refill work. We can still keep the
> >>>> try_fill_recv here to fill the receive buffer earlier in normal case.
> >>>> What do you think?
> >>> Or we can have a reill_pending
> >>
> >> Okay, let me implement this in the next version.
> >>
> >>> but basically I think we need something
> >>> that is much more simple. That is, using a per rq work instead of a
> >>> global one?
> >>
> >> I think we can leave this in a net-next patch later.
> >>
> >> Thanks,
> >> Quang Minh
> > 
> > i am not sure per rq is not simpler than this pile of tricks.
> FWIW, I agree with Michael: the diffstat of the current patch is quite
> scaring, I don't think a per RQ work would be significantly larger, but
> should be significantly simpler to review and maintain.
> 
> I suggest doing directly the per RQ work implementation.
> 
> Thanks!
> 
> Paolo

I mean a stupidly mechanical move of all refill to per RQ is
rather trivial (CB). Compiled only, feel free to reuse.

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 0369dda5ed60..4eb90b4e7b0f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -379,6 +379,15 @@ struct receive_queue {
 	struct xdp_rxq_info xsk_rxq_info;
 
 	struct xdp_buff **xsk_buffs;
+
+	/* Work struct for delayed refilling if we run low on memory. */
+	struct delayed_work refill;
+
+	/* Is delayed refill enabled? */
+	bool refill_enabled;
+
+	/* The lock to synchronize the access to refill_enabled */
+	spinlock_t refill_lock;
 };
 
 #define VIRTIO_NET_RSS_MAX_KEY_SIZE     40
@@ -441,9 +450,6 @@ struct virtnet_info {
 	/* Packet virtio header size */
 	u8 hdr_len;
 
-	/* Work struct for delayed refilling if we run low on memory. */
-	struct delayed_work refill;
-
 	/* UDP tunnel support */
 	bool tx_tnl;
 
@@ -451,12 +457,6 @@ struct virtnet_info {
 
 	bool rx_tnl_csum;
 
-	/* Is delayed refill enabled? */
-	bool refill_enabled;
-
-	/* The lock to synchronize the access to refill_enabled */
-	spinlock_t refill_lock;
-
 	/* Work struct for config space updates */
 	struct work_struct config_work;
 
@@ -720,18 +720,11 @@ static void virtnet_rq_free_buf(struct virtnet_info *vi,
 		put_page(virt_to_head_page(buf));
 }
 
-static void enable_delayed_refill(struct virtnet_info *vi)
+static void disable_delayed_refill(struct receive_queue *rq)
 {
-	spin_lock_bh(&vi->refill_lock);
-	vi->refill_enabled = true;
-	spin_unlock_bh(&vi->refill_lock);
-}
-
-static void disable_delayed_refill(struct virtnet_info *vi)
-{
-	spin_lock_bh(&vi->refill_lock);
-	vi->refill_enabled = false;
-	spin_unlock_bh(&vi->refill_lock);
+	spin_lock_bh(&rq->refill_lock);
+	rq->refill_enabled = false;
+	spin_unlock_bh(&rq->refill_lock);
 }
 
 static void enable_rx_mode_work(struct virtnet_info *vi)
@@ -2935,38 +2928,20 @@ static void virtnet_napi_disable(struct receive_queue *rq)
 
 static void refill_work(struct work_struct *work)
 {
-	struct virtnet_info *vi =
-		container_of(work, struct virtnet_info, refill.work);
+	struct receive_queue *rq = container_of(work, struct receive_queue,
+						refill.work);
+	struct virtnet_info *vi = rq->vq->vdev->priv;
 	bool still_empty;
-	int i;
 
-	for (i = 0; i < vi->curr_queue_pairs; i++) {
-		struct receive_queue *rq = &vi->rq[i];
+	napi_disable(&rq->napi);
+	still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
+	virtnet_napi_do_enable(rq->vq, &rq->napi);
 
-		/*
-		 * When queue API support is added in the future and the call
-		 * below becomes napi_disable_locked, this driver will need to
-		 * be refactored.
-		 *
-		 * One possible solution would be to:
-		 *   - cancel refill_work with cancel_delayed_work (note:
-		 *     non-sync)
-		 *   - cancel refill_work with cancel_delayed_work_sync in
-		 *     virtnet_remove after the netdev is unregistered
-		 *   - wrap all of the work in a lock (perhaps the netdev
-		 *     instance lock)
-		 *   - check netif_running() and return early to avoid a race
-		 */
-		napi_disable(&rq->napi);
-		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
-		virtnet_napi_do_enable(rq->vq, &rq->napi);
-
-		/* In theory, this can happen: if we don't get any buffers in
-		 * we will *never* try to fill again.
-		 */
-		if (still_empty)
-			schedule_delayed_work(&vi->refill, HZ/2);
-	}
+	/* In theory, this can happen: if we don't get any buffers in
+	 * we will *never* try to fill again.
+	 */
+	if (still_empty)
+		schedule_delayed_work(&rq->refill, HZ / 2);
 }
 
 static int virtnet_receive_xsk_bufs(struct virtnet_info *vi,
@@ -3033,10 +3008,10 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 
 	if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
 		if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
-			spin_lock(&vi->refill_lock);
-			if (vi->refill_enabled)
-				schedule_delayed_work(&vi->refill, 0);
-			spin_unlock(&vi->refill_lock);
+			spin_lock(&rq->refill_lock);
+			if (rq->refill_enabled)
+				schedule_delayed_work(&rq->refill, 0);
+			spin_unlock(&rq->refill_lock);
 		}
 	}
 
@@ -3216,13 +3191,14 @@ static int virtnet_open(struct net_device *dev)
 	struct virtnet_info *vi = netdev_priv(dev);
 	int i, err;
 
-	enable_delayed_refill(vi);
-
 	for (i = 0; i < vi->max_queue_pairs; i++) {
+		/* Enable refill work before enabling NAPI */
+		vi->rq[i].refill_enabled = true;
+
 		if (i < vi->curr_queue_pairs)
 			/* Make sure we have some buffers: if oom use wq. */
 			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
-				schedule_delayed_work(&vi->refill, 0);
+				schedule_delayed_work(&vi->rq[i].refill, 0);
 
 		err = virtnet_enable_queue_pair(vi, i);
 		if (err < 0)
@@ -3241,11 +3217,10 @@ static int virtnet_open(struct net_device *dev)
 	return 0;
 
 err_enable_qp:
-	disable_delayed_refill(vi);
-	cancel_delayed_work_sync(&vi->refill);
-
 	for (i--; i >= 0; i--) {
+		disable_delayed_refill(&vi->rq[i]);
 		virtnet_disable_queue_pair(vi, i);
+		cancel_delayed_work_sync(&vi->rq[i].refill);
 		virtnet_cancel_dim(vi, &vi->rq[i].dim);
 	}
 
@@ -3437,29 +3412,19 @@ static void __virtnet_rx_pause(struct virtnet_info *vi,
 	}
 }
 
+static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
+{
+	disable_delayed_refill(rq);
+	cancel_delayed_work_sync(&rq->refill);
+	__virtnet_rx_pause(vi, rq);
+}
+
 static void virtnet_rx_pause_all(struct virtnet_info *vi)
 {
 	int i;
 
-	/*
-	 * Make sure refill_work does not run concurrently to
-	 * avoid napi_disable race which leads to deadlock.
-	 */
-	disable_delayed_refill(vi);
-	cancel_delayed_work_sync(&vi->refill);
 	for (i = 0; i < vi->max_queue_pairs; i++)
-		__virtnet_rx_pause(vi, &vi->rq[i]);
-}
-
-static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
-{
-	/*
-	 * Make sure refill_work does not run concurrently to
-	 * avoid napi_disable race which leads to deadlock.
-	 */
-	disable_delayed_refill(vi);
-	cancel_delayed_work_sync(&vi->refill);
-	__virtnet_rx_pause(vi, rq);
+		virtnet_rx_pause(vi, &vi->rq[i]);
 }
 
 static void __virtnet_rx_resume(struct virtnet_info *vi,
@@ -3474,15 +3439,17 @@ static void __virtnet_rx_resume(struct virtnet_info *vi,
 	if (running)
 		virtnet_napi_enable(rq);
 
+	spin_lock_bh(&rq->refill_lock);
+	rq->refill_enabled = true;
 	if (schedule_refill)
-		schedule_delayed_work(&vi->refill, 0);
+		schedule_delayed_work(&rq->refill, 0);
+	spin_unlock_bh(&rq->refill_lock);
 }
 
 static void virtnet_rx_resume_all(struct virtnet_info *vi)
 {
 	int i;
 
-	enable_delayed_refill(vi);
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		if (i < vi->curr_queue_pairs)
 			__virtnet_rx_resume(vi, &vi->rq[i], true);
@@ -3493,7 +3460,6 @@ static void virtnet_rx_resume_all(struct virtnet_info *vi)
 
 static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
 {
-	enable_delayed_refill(vi);
 	__virtnet_rx_resume(vi, rq, true);
 }
 
@@ -3768,6 +3734,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 	struct virtio_net_rss_config_trailer old_rss_trailer;
 	struct net_device *dev = vi->dev;
 	struct scatterlist sg;
+	int i;
 
 	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
 		return 0;
@@ -3821,10 +3788,14 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 succ:
 	vi->curr_queue_pairs = queue_pairs;
 	/* virtnet_open() will refill when device is going to up. */
-	spin_lock_bh(&vi->refill_lock);
-	if (dev->flags & IFF_UP && vi->refill_enabled)
-		schedule_delayed_work(&vi->refill, 0);
-	spin_unlock_bh(&vi->refill_lock);
+	if (dev->flags & IFF_UP) {
+		for (i = 0; i < vi->curr_queue_pairs; i++) {
+			spin_lock_bh(&vi->rq[i].refill_lock);
+			if (vi->rq[i].refill_enabled)
+				schedule_delayed_work(&vi->rq[i].refill, 0);
+			spin_unlock_bh(&vi->rq[i].refill_lock);
+		}
+	}
 
 	return 0;
 }
@@ -3834,10 +3805,6 @@ static int virtnet_close(struct net_device *dev)
 	struct virtnet_info *vi = netdev_priv(dev);
 	int i;
 
-	/* Make sure NAPI doesn't schedule refill work */
-	disable_delayed_refill(vi);
-	/* Make sure refill_work doesn't re-enable napi! */
-	cancel_delayed_work_sync(&vi->refill);
 	/* Prevent the config change callback from changing carrier
 	 * after close
 	 */
@@ -3848,7 +3815,9 @@ static int virtnet_close(struct net_device *dev)
 	cancel_work_sync(&vi->config_work);
 
 	for (i = 0; i < vi->max_queue_pairs; i++) {
+		disable_delayed_refill(&vi->rq[i]);
 		virtnet_disable_queue_pair(vi, i);
+		cancel_delayed_work_sync(&vi->rq[i].refill);
 		virtnet_cancel_dim(vi, &vi->rq[i].dim);
 	}
 
@@ -5793,7 +5762,6 @@ static int virtnet_restore_up(struct virtio_device *vdev)
 
 	virtio_device_ready(vdev);
 
-	enable_delayed_refill(vi);
 	enable_rx_mode_work(vi);
 
 	if (netif_running(vi->dev)) {
@@ -6550,7 +6518,6 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
 	if (!vi->rq)
 		goto err_rq;
 
-	INIT_DELAYED_WORK(&vi->refill, refill_work);
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		vi->rq[i].pages = NULL;
 		netif_napi_add_config(vi->dev, &vi->rq[i].napi, virtnet_poll,
@@ -6567,6 +6534,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
 		u64_stats_init(&vi->rq[i].stats.syncp);
 		u64_stats_init(&vi->sq[i].stats.syncp);
 		mutex_init(&vi->rq[i].dim_lock);
+		INIT_DELAYED_WORK(&vi->rq[i].refill, refill_work);
+		spin_lock_init(&vi->rq[i].refill_lock);
 	}
 
 	return 0;
@@ -6892,7 +6861,6 @@ static int virtnet_probe(struct virtio_device *vdev)
 
 	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
 	INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
-	spin_lock_init(&vi->refill_lock);
 
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
 		vi->mergeable_rx_bufs = true;
@@ -7156,7 +7124,8 @@ static int virtnet_probe(struct virtio_device *vdev)
 	net_failover_destroy(vi->failover);
 free_vqs:
 	virtio_reset_device(vdev);
-	cancel_delayed_work_sync(&vi->refill);
+	for (i = 0; i < vi->max_queue_pairs; i++)
+		cancel_delayed_work_sync(&vi->rq[i].refill);
 	free_receive_page_frags(vi);
 	virtnet_del_vqs(vi);
 free: