Sprinkle comments throughout block/nvme.c noting, for functions where
it may not be obvious, which AioContext they must be run in, or in
which AioContext they happen to run (for callbacks, BHs, event
notifiers).
Suggested-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
---
block/nvme.c | 43 ++++++++++++++++++++++++++++++++++---------
1 file changed, 34 insertions(+), 9 deletions(-)
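Not part of the patch itself, just an illustration for review: the
reason several of the new comments can promise "the BDS's main
AioContext" is that the completion BH and the event notifiers are
registered against bdrv_get_aio_context(bs), so their callbacks are
always invoked by whichever thread runs that context, no matter which
thread scheduled or signalled them.  A minimal sketch of the BH variant
of that pattern (aio_bh_new() and qemu_bh_schedule() are the real API;
MyState, my_bh_cb and my_setup are made-up names):

    #include "qemu/osdep.h"
    #include "block/aio.h"
    #include "block/block_int.h"

    typedef struct MyState {
        QEMUBH *bh;
    } MyState;

    /* Invoked only by the thread running the AioContext that was
     * passed to aio_bh_new(), i.e. the BDS's main AioContext below */
    static void my_bh_cb(void *opaque)
    {
        MyState *s = opaque;
        /* Per-context state can be used here without further locking */
        (void)s;
    }

    static void my_setup(BlockDriverState *bs, MyState *s)
    {
        /* Pin the BH to the BDS's main AioContext; qemu_bh_schedule()
         * may then be called from any thread, the callback still runs
         * in that context */
        s->bh = aio_bh_new(bdrv_get_aio_context(bs), my_bh_cb, s);
        qemu_bh_schedule(s->bh);
    }

This is how nvme_create_queue_pair() sets up q->completion_bh, which is
what the comments on completion_bh and nvme_process_completion_bh()
rely on.  The CoQueue comments are the converse case:
qemu_co_enter_next() hands each waiting coroutine back to the
AioContext it was suspended in, so waking waiters is safe from any
context.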
diff --git a/block/nvme.c b/block/nvme.c
index b8262ebfd9..919e14cef9 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -65,6 +65,7 @@ typedef struct {
} NVMeQueue;
typedef struct {
+ /* Called from nvme_process_completion() in the BDS's main AioContext */
BlockCompletionFunc *cb;
void *opaque;
int cid;
@@ -84,6 +85,7 @@ typedef struct {
uint8_t *prp_list_pages;
/* Fields protected by @lock */
+ /* Coroutines in this queue are woken in their own AioContext */
CoQueue free_req_queue;
NVMeQueue sq, cq;
int cq_phase;
@@ -92,7 +94,7 @@ typedef struct {
int need_kick;
int inflight;
- /* Thread-safe, no lock necessary */
+ /* Thread-safe, no lock necessary; runs in the BDS's main AioContext */
QEMUBH *completion_bh;
} NVMeQueuePair;
@@ -206,11 +208,13 @@ static void nvme_free_queue_pair(NVMeQueuePair *q)
g_free(q);
}
+/* Runs in the BDS's main AioContext */
static void nvme_free_req_queue_cb(void *opaque)
{
NVMeQueuePair *q = opaque;
qemu_mutex_lock(&q->lock);
+ /* qemu_co_enter_next() wakes the coroutine in its own AioContext */
while (q->free_req_head != -1 &&
qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
/* Retry waiting requests */
@@ -281,7 +285,7 @@ fail:
return NULL;
}
-/* With q->lock */
+/* With q->lock, must be run in the BDS's main AioContext */
static void nvme_kick(NVMeQueuePair *q)
{
BDRVNVMeState *s = q->s;
@@ -308,7 +312,10 @@ static NVMeRequest *nvme_get_free_req_nofail_locked(NVMeQueuePair *q)
return req;
}
-/* Return a free request element if any, otherwise return NULL. */
+/*
+ * Return a free request element if any, otherwise return NULL.
+ * May be called in any AioContext.
+ */
static NVMeRequest *nvme_get_free_req_nowait(NVMeQueuePair *q)
{
QEMU_LOCK_GUARD(&q->lock);
@@ -321,6 +328,7 @@ static NVMeRequest *nvme_get_free_req_nowait(NVMeQueuePair *q)
/*
* Wait for a free request to become available if necessary, then
* return it.
+ * May be called in any AioContext.
*/
static coroutine_fn NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
{
@@ -328,20 +336,21 @@ static coroutine_fn NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
while (q->free_req_head == -1) {
trace_nvme_free_req_queue_wait(q->s, q->index);
+ /* nvme_free_req_queue_cb() wakes us in our own AioContext */
qemu_co_queue_wait(&q->free_req_queue, &q->lock);
}
return nvme_get_free_req_nofail_locked(q);
}
-/* With q->lock */
+/* With q->lock, may be called in any AioContext */
static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
{
req->free_req_next = q->free_req_head;
q->free_req_head = req - q->reqs;
}
-/* With q->lock */
+/* With q->lock, may be called in any AioContext */
static void nvme_wake_free_req_locked(NVMeQueuePair *q)
{
if (!qemu_co_queue_empty(&q->free_req_queue)) {
@@ -350,7 +359,7 @@ static void nvme_wake_free_req_locked(NVMeQueuePair *q)
}
}
-/* Insert a request in the freelist and wake waiters */
+/* Insert a request in the freelist and wake waiters (from any AioContext) */
static void nvme_put_free_req_and_wake(NVMeQueuePair *q, NVMeRequest *req)
{
qemu_mutex_lock(&q->lock);
@@ -381,7 +390,7 @@ static inline int nvme_translate_error(const NvmeCqe *c)
}
}
-/* With q->lock */
+/* With q->lock, must be run in the BDS's main AioContext */
static bool nvme_process_completion(NVMeQueuePair *q)
{
BDRVNVMeState *s = q->s;
@@ -451,6 +460,7 @@ static bool nvme_process_completion(NVMeQueuePair *q)
return progress;
}
+/* As q->completion_bh, runs in the BDS's main AioContext */
static void nvme_process_completion_bh(void *opaque)
{
NVMeQueuePair *q = opaque;
@@ -481,6 +491,7 @@ static void nvme_trace_command(const NvmeCmd *cmd)
}
}
+/* Must be run in the BDS's main AioContext */
static void nvme_kick_and_check_completions(void *opaque)
{
NVMeQueuePair *q = opaque;
@@ -490,6 +501,7 @@ static void nvme_kick_and_check_completions(void *opaque)
nvme_process_completion(q);
}
+/* Runs in nvme_submit_command()'s AioContext */
static void nvme_deferred_fn(void *opaque)
{
NVMeQueuePair *q = opaque;
@@ -502,6 +514,7 @@ static void nvme_deferred_fn(void *opaque)
}
}
+/* May be run in any AioContext */
static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
NvmeCmd *cmd, BlockCompletionFunc cb,
void *opaque)
@@ -523,6 +536,7 @@ static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
defer_call(nvme_deferred_fn, q);
}
+/* Put into NVMeRequest.cb, so runs in the BDS's main AioContext */
static void nvme_admin_cmd_sync_cb(void *opaque, int ret)
{
int *pret = opaque;
@@ -530,6 +544,7 @@ static void nvme_admin_cmd_sync_cb(void *opaque, int ret)
aio_wait_kick();
}
+/* Must be run in the BDS's or QEMU's main AioContext */
static int nvme_admin_cmd_sync(BlockDriverState *bs, NvmeCmd *cmd)
{
BDRVNVMeState *s = bs->opaque;
@@ -638,6 +653,7 @@ out:
return ret;
}
+/* Must be run in the BDS's main AioContext */
static void nvme_poll_queue(NVMeQueuePair *q)
{
const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
@@ -660,6 +676,7 @@ static void nvme_poll_queue(NVMeQueuePair *q)
qemu_mutex_unlock(&q->lock);
}
+/* Must be run in the BDS's main AioContext */
static void nvme_poll_queues(BDRVNVMeState *s)
{
int i;
@@ -669,6 +686,7 @@ static void nvme_poll_queues(BDRVNVMeState *s)
}
}
+/* Runs as an event notifier handler in the BDS's main AioContext */
static void nvme_handle_event(EventNotifier *n)
{
BDRVNVMeState *s = container_of(n, BDRVNVMeState,
@@ -722,6 +740,7 @@ out_error:
return false;
}
+/* Runs as the event notifier's poll callback in the BDS's main AioContext */
static bool nvme_poll_cb(void *opaque)
{
EventNotifier *e = opaque;
@@ -745,6 +764,7 @@ static bool nvme_poll_cb(void *opaque)
return false;
}
+/* Runs as the event notifier's poll_ready callback in the BDS's main AioContext */
static void nvme_poll_ready(EventNotifier *e)
{
BDRVNVMeState *s = container_of(e, BDRVNVMeState,
@@ -1050,7 +1070,7 @@ static int nvme_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
return 0;
}
-/* Called with s->dma_map_lock */
+/* Called with s->dma_map_lock, may be run in any AioContext */
static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
QEMUIOVector *qiov)
{
@@ -1061,13 +1081,17 @@ static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
r = qemu_vfio_dma_reset_temporary(s->vfio);
if (!r) {
+ /*
+ * Queue access is protected by the dma_map_lock, and all
+ * coroutines are woken in their own AioContext
+ */
qemu_co_queue_restart_all(&s->dma_flush_queue);
}
}
return r;
}
-/* Called with s->dma_map_lock */
+/* Called with s->dma_map_lock, may be run in any AioContext */
static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
NVMeRequest *req, QEMUIOVector *qiov)
{
@@ -1180,6 +1204,7 @@ typedef struct {
int ret;
} NVMeCoData;
+/* Put into NVMeRequest.cb, so runs in the BDS's main AioContext */
static void nvme_rw_cb(void *opaque, int ret)
{
NVMeCoData *data = opaque;
--
2.51.1