nvme_process_completion() must run in the main BDS context, so for
requests submitted from a different context, schedule a BH in the main
BDS context instead of processing completions directly.

The context in which we kick does not matter, but let's just keep kick
and process_completion together for simplicity's sake.
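
A sketch of the resulting dispatch (the same logic as in the diff
below, just annotated with comments):

    static void nvme_deferred_fn(void *opaque)
    {
        NVMeQueuePair *q = opaque;

        if (qemu_get_current_aio_context() == q->s->aio_context) {
            /* Already in the main BDS context: kick and reap inline */
            nvme_kick_and_check_completions(q);
        } else {
            /* Hop to the main BDS context via a one-shot BH; both the
             * kick and completion processing then happen there */
            aio_bh_schedule_oneshot(q->s->aio_context,
                                    nvme_kick_and_check_completions, q);
        }
    }
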
(For what it's worth, a quick fio bandwidth test indicates that on my
test hardware, if anything, this may be a bit better than kicking
immediately and scheduling a BH that runs only
nvme_process_completion().  But I wouldn't read more into those results
than that it doesn't really seem to matter either way.)

Cc: qemu-stable@nongnu.org
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
---
 block/nvme.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/block/nvme.c b/block/nvme.c
index 8df53ee4ca..7ed5f570bc 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -481,7 +481,7 @@ static void nvme_trace_command(const NvmeCmd *cmd)
     }
 }
 
-static void nvme_deferred_fn(void *opaque)
+static void nvme_kick_and_check_completions(void *opaque)
 {
     NVMeQueuePair *q = opaque;
 
@@ -490,6 +490,18 @@ static void nvme_deferred_fn(void *opaque)
     nvme_process_completion(q);
 }
 
+static void nvme_deferred_fn(void *opaque)
+{
+    NVMeQueuePair *q = opaque;
+
+    if (qemu_get_current_aio_context() == q->s->aio_context) {
+        nvme_kick_and_check_completions(q);
+    } else {
+        aio_bh_schedule_oneshot(q->s->aio_context,
+                                nvme_kick_and_check_completions, q);
+    }
+}
+
 static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
                                 NvmeCmd *cmd, BlockCompletionFunc cb,
                                 void *opaque)
--
2.51.1