On Thu, Nov 20, 2025 at 6:00 PM Ming Lei <ming.lei@redhat.com> wrote:
>
> In case of BATCH_IO, any requests already filled into the event kfifo
> don't get a chance to be dispatched any more when releasing the ublk
> char device, so we have to abort them too.
>
> Add ublk_abort_batch_queue() for aborting this kind of request.
>
> Signed-off-by: Ming Lei <ming.lei@redhat.com>
> ---
> drivers/block/ublk_drv.c | 26 +++++++++++++++++++++++++-
> 1 file changed, 25 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
> index 2e5e392c939e..849199771f86 100644
> --- a/drivers/block/ublk_drv.c
> +++ b/drivers/block/ublk_drv.c
> @@ -2241,7 +2241,8 @@ static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
> static void __ublk_fail_req(struct ublk_device *ub, struct ublk_io *io,
> struct request *req)
> {
> - WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
> + WARN_ON_ONCE(!ublk_dev_support_batch_io(ub) &&
> + io->flags & UBLK_IO_FLAG_ACTIVE);
>
> if (ublk_nosrv_should_reissue_outstanding(ub))
> blk_mq_requeue_request(req, false);
> @@ -2251,6 +2252,26 @@ static void __ublk_fail_req(struct ublk_device *ub, struct ublk_io *io,
> }
> }
>
> +/*
> + * Request tag may just be filled to event kfifo, not get chance to
> + * dispatch, abort these requests too
> + */
> +static void ublk_abort_batch_queue(struct ublk_device *ub,
> + struct ublk_queue *ubq)
> +{
> + while (true) {
> + struct request *req;
> + short tag;
> +
> + if (!kfifo_out(&ubq->evts_fifo, &tag, 1))
> + break;
This loop could also be written a bit more simply as while
(kfifo_out(&ubq->evts_fifo, &tag, 1)).
Best,
Caleb
> +
> + req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
> + if (req && blk_mq_request_started(req))
> + __ublk_fail_req(ub, &ubq->ios[tag], req);
> + }
> +}
> +
> /*
> * Called from ublk char device release handler, when any uring_cmd is
> * done, meantime request queue is "quiesced" since all inflight requests
> @@ -2269,6 +2290,9 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
> if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
> __ublk_fail_req(ub, io, io->req);
> }
> +
> + if (ublk_support_batch_io(ubq))
> + ublk_abort_batch_queue(ub, ubq);
> }
>
> static void ublk_start_cancel(struct ublk_device *ub)
> --
> 2.47.0
>