[PATCH] virtio-blk: simplify virtio_blk_dma_restart_cb()

Stefan Hajnoczi posted 1 patch 1 year, 5 months ago
Patches applied successfully (tree, apply log)
git fetch https://github.com/patchew-project/qemu tags/patchew/20221102182337.252202-1-stefanha@redhat.com
Maintainers: Stefan Hajnoczi <stefanha@redhat.com>, Kevin Wolf <kwolf@redhat.com>, Hanna Reitz <hreitz@redhat.com>, "Michael S. Tsirkin" <mst@redhat.com>
[PATCH] virtio-blk: simplify virtio_blk_dma_restart_cb()
Posted by Stefan Hajnoczi 1 year, 5 months ago
virtio_blk_dma_restart_cb() is tricky because the BH must deal with
virtio_blk_data_plane_start()/virtio_blk_data_plane_stop() being called.

There are two issues with the code:

1. virtio_blk_device_realize() should use
   qdev_add_vm_change_state_handler() instead of
   qemu_add_vm_change_state_handler(). This ensures that the ordering
   relative to virtio_init()'s vm change state handler, which calls
   virtio_blk_data_plane_start()/virtio_blk_data_plane_stop(), is
   well-defined. blk's AioContext is then guaranteed to be up to date
   in virtio_blk_dma_restart_cb() and the special case for
   virtio_blk_data_plane_start() is no longer necessary.

2. Only blk_drain() waits for virtio_blk_dma_restart_cb()'s
   blk_inc_in_flight() to be decremented. The bdrv_drain() family of
   functions does not wait for the BlockBackend's in_flight counter to
   reach zero. virtio_blk_data_plane_stop() relies on
   blk_set_aio_context()'s implicit drain, but that is a bdrv_drain()
   and not a blk_drain(). Note that virtio_blk_reset() already
   correctly relies on blk_drain(). If virtio_blk_data_plane_stop()
   switches to blk_drain() then we can properly wait for pending
   virtio_blk_dma_restart_bh() calls, as sketched below.
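
For reference, a condensed sketch of the resulting stop-path ordering
(it mirrors the hw/block/dataplane/virtio-blk.c hunk below; the
AioContext locking and the virtio_blk_data_plane_stop_bh() step are
omitted, and s is the VirtIOBlockDataPlane):

    /*
     * blk_drain() waits for the BlockBackend's in_flight counter to
     * reach zero, which covers the blk_inc_in_flight() taken by
     * virtio_blk_dma_restart_cb(). The implicit bdrv_drain() in
     * blk_set_aio_context() only waits for requests on the block nodes.
     */
    blk_drain(s->conf->conf.blk);
    blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context(), NULL);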

Once these issues are taken care of, the code becomes simpler. This
change is in preparation for multiple IOThreads in virtio-blk, where
we need to clean up the multi-threading behavior.
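
The resulting restart path boils down to the following (a condensed
view of the hw/block/virtio-blk.c hunks below; the AioContext lock
taken around request resubmission is omitted):

    static void virtio_blk_dma_restart_bh(void *opaque)
    {
        VirtIOBlock *s = opaque;

        /* ... resubmit the requests queued in s->rq ... */

        /* Paired with the inc in virtio_blk_dma_restart_cb() */
        blk_dec_in_flight(s->conf.conf.blk);
    }

    static void virtio_blk_dma_restart_cb(void *opaque, bool running,
                                          RunState state)
    {
        VirtIOBlock *s = opaque;

        if (!running) {
            return;
        }

        /* Keep the BlockBackend busy until the BH has run; blk_drain()
         * waits for this counter to reach zero.
         */
        blk_inc_in_flight(s->conf.conf.blk);
        aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.conf.blk),
                                virtio_blk_dma_restart_bh, s);
    }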

I ran the reproducer from commit 49b44549ace7 ("virtio-blk: On restart,
process queued requests in the proper context") to check that there is
no regression.

Cc: Sergio Lopez <slp@redhat.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/hw/virtio/virtio-blk.h  |  2 --
 hw/block/dataplane/virtio-blk.c | 17 +++++-------
 hw/block/virtio-blk.c           | 46 ++++++++++++++-------------------
 3 files changed, 26 insertions(+), 39 deletions(-)

diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index 7f589b4146..dafec432ce 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -55,7 +55,6 @@ struct VirtIOBlock {
     VirtIODevice parent_obj;
     BlockBackend *blk;
     void *rq;
-    QEMUBH *bh;
     VirtIOBlkConf conf;
     unsigned short sector_mask;
     bool original_wce;
@@ -93,6 +92,5 @@ typedef struct MultiReqBuffer {
 } MultiReqBuffer;
 
 void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq);
-void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh);
 
 #endif
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index 26f965cabc..b28d81737e 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -237,9 +237,6 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
         goto fail_aio_context;
     }
 
-    /* Process queued requests before the ones in vring */
-    virtio_blk_process_queued_requests(vblk, false);
-
     /* Kick right away to begin processing requests already in vring */
     for (i = 0; i < nvqs; i++) {
         VirtQueue *vq = virtio_get_queue(s->vdev, i);
@@ -272,11 +269,6 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
   fail_host_notifiers:
     k->set_guest_notifiers(qbus->parent, nvqs, false);
   fail_guest_notifiers:
-    /*
-     * If we failed to set up the guest notifiers queued requests will be
-     * processed on the main context.
-     */
-    virtio_blk_process_queued_requests(vblk, false);
     vblk->dataplane_disabled = true;
     s->starting = false;
     vblk->dataplane_started = true;
@@ -325,8 +317,13 @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev)
     aio_context_acquire(s->ctx);
     aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s);
 
-    /* Drain and try to switch bs back to the QEMU main loop. If other users
-     * keep the BlockBackend in the iothread, that's ok */
+    /* Wait for virtio_blk_dma_restart_bh() and in flight I/O to complete */
+    blk_drain(s->conf->conf.blk);
+
+    /*
+     * Try to switch bs back to the QEMU main loop. If other users keep the
+     * BlockBackend in the iothread, that's ok
+     */
     blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context(), NULL);
 
     aio_context_release(s->ctx);
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index f717550fdc..1762517878 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -806,8 +806,10 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
     virtio_blk_handle_vq(s, vq);
 }
 
-void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh)
+static void virtio_blk_dma_restart_bh(void *opaque)
 {
+    VirtIOBlock *s = opaque;
+
     VirtIOBlockReq *req = s->rq;
     MultiReqBuffer mrb = {};
 
@@ -834,43 +836,27 @@ void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh)
     if (mrb.num_reqs) {
         virtio_blk_submit_multireq(s, &mrb);
     }
-    if (is_bh) {
-        blk_dec_in_flight(s->conf.conf.blk);
-    }
+
+    /* Paired with inc in virtio_blk_dma_restart_cb() */
+    blk_dec_in_flight(s->conf.conf.blk);
+
     aio_context_release(blk_get_aio_context(s->conf.conf.blk));
 }
 
-static void virtio_blk_dma_restart_bh(void *opaque)
-{
-    VirtIOBlock *s = opaque;
-
-    qemu_bh_delete(s->bh);
-    s->bh = NULL;
-
-    virtio_blk_process_queued_requests(s, true);
-}
-
 static void virtio_blk_dma_restart_cb(void *opaque, bool running,
                                       RunState state)
 {
     VirtIOBlock *s = opaque;
-    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
-    VirtioBusState *bus = VIRTIO_BUS(qbus);
 
     if (!running) {
         return;
     }
 
-    /*
-     * If ioeventfd is enabled, don't schedule the BH here as queued
-     * requests will be processed while starting the data plane.
-     */
-    if (!s->bh && !virtio_bus_ioeventfd_enabled(bus)) {
-        s->bh = aio_bh_new(blk_get_aio_context(s->conf.conf.blk),
-                           virtio_blk_dma_restart_bh, s);
-        blk_inc_in_flight(s->conf.conf.blk);
-        qemu_bh_schedule(s->bh);
-    }
+    /* Paired with dec in virtio_blk_dma_restart_bh() */
+    blk_inc_in_flight(s->conf.conf.blk);
+
+    aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.conf.blk),
+            virtio_blk_dma_restart_bh, s);
 }
 
 static void virtio_blk_reset(VirtIODevice *vdev)
@@ -1213,7 +1199,13 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
         return;
     }
 
-    s->change = qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
+    /*
+     * This must be after virtio_init() so virtio_blk_dma_restart_cb() gets
+     * called after ->start_ioeventfd() has already set blk's AioContext.
+     */
+    s->change =
+        qdev_add_vm_change_state_handler(dev, virtio_blk_dma_restart_cb, s);
+
     blk_ram_registrar_init(&s->blk_ram_registrar, s->blk);
     blk_set_dev_ops(s->blk, &virtio_block_ops, s);
 
-- 
2.38.1
Re: [PATCH] virtio-blk: simplify virtio_blk_dma_restart_cb()
Posted by Michael S. Tsirkin 1 year, 5 months ago
On Wed, Nov 02, 2022 at 02:23:37PM -0400, Stefan Hajnoczi wrote:
> virtio_blk_dma_restart_cb() is tricky because the BH must deal with
> virtio_blk_data_plane_start()/virtio_blk_data_plane_stop() being called.
> 
> There are two issues with the code:
> 
> 1. virtio_blk_realize() should use qdev_add_vm_change_state_handler()
>    instead of qemu_add_vm_change_state_handler(). This ensures the
>    ordering with virtio_init()'s vm change state handler that calls
>    virtio_blk_data_plane_start()/virtio_blk_data_plane_stop() is
>    well-defined. Then blk's AioContext is guaranteed to be up-to-date in
>    virtio_blk_dma_restart_cb() and it's no longer necessary to have a
>    special case for virtio_blk_data_plane_start().
> 
> 2. Only blk_drain() waits for virtio_blk_dma_restart_cb()'s
>    blk_inc_in_flight() to be decremented. The bdrv_drain() family of
>    functions do not wait for BlockBackend's in_flight counter to reach
>    zero. virtio_blk_data_plane_stop() relies on blk_set_aio_context()'s
>    implicit drain, but that's a bdrv_drain() and not a blk_drain().
>    Note that virtio_blk_reset() already correctly relies on blk_drain().
>    If virtio_blk_data_plane_stop() switches to blk_drain() then we can
>    properly wait for pending virtio_blk_dma_restart_bh() calls.
> 
> Once these issues are taken care of the code becomes simpler. This
> change is in preparation for multiple IOThreads in virtio-blk where we
> need to clean up the multi-threading behavior.
> 
> I ran the reproducer from commit 49b44549ace7 ("virtio-blk: On restart,
> process queued requests in the proper context") to check that there is
> no regression.
> 
> Cc: Sergio Lopez <slp@redhat.com>
> Cc: Kevin Wolf <kwolf@redhat.com>
> Cc: Emanuele Giuseppe Esposito <eesposit@redhat.com>
> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>


Acked-by: Michael S. Tsirkin <mst@redhat.com>

block tree I presume?


Re: [PATCH] virtio-blk: simplify virtio_blk_dma_restart_cb()
Posted by Stefan Hajnoczi 1 year, 5 months ago
On Thu, Nov 10, 2022 at 07:27:59AM -0500, Michael S. Tsirkin wrote:
> Acked-by: Michael S. Tsirkin <mst@redhat.com>
> 
> block tree I presume?

Thanks, merged into my block-next tree.

Stefan
Re: [PATCH] virtio-blk: simplify virtio_blk_dma_restart_cb()
Posted by Emanuele Giuseppe Esposito 1 year, 5 months ago

On 02/11/2022 at 19:23, Stefan Hajnoczi wrote:
> virtio_blk_dma_restart_cb() is tricky because the BH must deal with
> virtio_blk_data_plane_start()/virtio_blk_data_plane_stop() being called.
> 
> There are two issues with the code:
> 
> 1. virtio_blk_realize() should use qdev_add_vm_change_state_handler()
>    instead of qemu_add_vm_change_state_handler(). This ensures the
>    ordering with virtio_init()'s vm change state handler that calls
>    virtio_blk_data_plane_start()/virtio_blk_data_plane_stop() is
>    well-defined. Then blk's AioContext is guaranteed to be up-to-date in
>    virtio_blk_dma_restart_cb() and it's no longer necessary to have a
>    special case for virtio_blk_data_plane_start().
> 
> 2. Only blk_drain() waits for virtio_blk_dma_restart_cb()'s
>    blk_inc_in_flight() to be decremented. The bdrv_drain() family of
>    functions do not wait for BlockBackend's in_flight counter to reach
>    zero. virtio_blk_data_plane_stop() relies on blk_set_aio_context()'s
>    implicit drain, but that's a bdrv_drain() and not a blk_drain().
>    Note that virtio_blk_reset() already correctly relies on blk_drain().
>    If virtio_blk_data_plane_stop() switches to blk_drain() then we can
>    properly wait for pending virtio_blk_dma_restart_bh() calls.
> 
> Once these issues are taken care of the code becomes simpler. This
> change is in preparation for multiple IOThreads in virtio-blk where we
> need to clean up the multi-threading behavior.
> 
> I ran the reproducer from commit 49b44549ace7 ("virtio-blk: On restart,
> process queued requests in the proper context") to check that there is
> no regression.
> 
> Cc: Sergio Lopez <slp@redhat.com>
> Cc: Kevin Wolf <kwolf@redhat.com>
> Cc: Emanuele Giuseppe Esposito <eesposit@redhat.com>
> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>

Reviewed-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>

Re: [PATCH] virtio-blk: simplify virtio_blk_dma_restart_cb()
Posted by Michael S. Tsirkin 1 year, 5 months ago
On Wed, Nov 02, 2022 at 02:23:37PM -0400, Stefan Hajnoczi wrote:
> virtio_blk_dma_restart_cb() is tricky because the BH must deal with
> virtio_blk_data_plane_start()/virtio_blk_data_plane_stop() being called.
> 
> There are two issues with the code:
> 
> 1. virtio_blk_realize() should use qdev_add_vm_change_state_handler()
>    instead of qemu_add_vm_change_state_handler(). This ensures the
>    ordering with virtio_init()'s vm change state handler that calls
>    virtio_blk_data_plane_start()/virtio_blk_data_plane_stop() is
>    well-defined. Then blk's AioContext is guaranteed to be up-to-date in
>    virtio_blk_dma_restart_cb() and it's no longer necessary to have a
>    special case for virtio_blk_data_plane_start().
> 
> 2. Only blk_drain() waits for virtio_blk_dma_restart_cb()'s
>    blk_inc_in_flight() to be decremented. The bdrv_drain() family of
>    functions do not wait for BlockBackend's in_flight counter to reach
>    zero. virtio_blk_data_plane_stop() relies on blk_set_aio_context()'s
>    implicit drain, but that's a bdrv_drain() and not a blk_drain().
>    Note that virtio_blk_reset() already correctly relies on blk_drain().
>    If virtio_blk_data_plane_stop() switches to blk_drain() then we can
>    properly wait for pending virtio_blk_dma_restart_bh() calls.
> 
> Once these issues are taken care of the code becomes simpler. This
> change is in preparation for multiple IOThreads in virtio-blk where we
> need to clean up the multi-threading behavior.
> 
> I ran the reproducer from commit 49b44549ace7 ("virtio-blk: On restart,
> process queued requests in the proper context") to check that there is
> no regression.
> 
> Cc: Sergio Lopez <slp@redhat.com>
> Cc: Kevin Wolf <kwolf@redhat.com>
> Cc: Emanuele Giuseppe Esposito <eesposit@redhat.com>
> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>


Acked-by: Michael S. Tsirkin <mst@redhat.com>
