From: Michael Tokarev <mjt@tls.msk.ru>
To: qemu-devel@nongnu.org
Cc: qemu-stable@nongnu.org, Hanna Czenczek, Kevin Wolf, Stefan Hajnoczi,
    Michael Tokarev
Subject: [Stable-10.0.8 12/69] nvme: Note in which AioContext some functions run
Date: Mon, 2 Feb 2026 22:45:35 +0300
Message-ID: <20260202194638.939438-12-mjt@tls.msk.ru>
X-Mailer: git-send-email 2.47.3

From: Hanna Czenczek <hreitz@redhat.com>

Sprinkle comments throughout block/nvme.c noting for some functions
(where it may not be obvious) that they require a certain AioContext,
or in which AioContext they do happen to run (for callbacks, BHs,
event notifiers).

Suggested-by: Kevin Wolf
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
Message-ID: <20251110154854.151484-10-hreitz@redhat.com>
Reviewed-by: Stefan Hajnoczi
Reviewed-by: Kevin Wolf
Signed-off-by: Kevin Wolf
(cherry picked from commit ac3520f599fedee05945ce06bb0f71820a7b2ffc)
(Mjt: pick this comments-only, no-code-changes commit to 10.0.x so the
 next change applies cleanly)
Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
---
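As background for reviewing these annotations (a note below the fold,
not part of the commit itself): a QEMUBH runs its callback in the
AioContext it was created in, while scheduling it is thread-safe and
may be done from anywhere. That is what lets q->completion_bh in the
patch assume the BDS's main AioContext. A minimal sketch of the
pattern, assuming the aio_bh_new()/qemu_bh_schedule() API from
include/block/aio.h; the example_* names are hypothetical:

    /* Hypothetical sketch, not taken from block/nvme.c */
    #include "qemu/osdep.h"
    #include "block/aio.h"
    #include "block/block.h"

    static void example_bh_cb(void *opaque)
    {
        /* Entered only in the AioContext passed to aio_bh_new() below */
    }

    static QEMUBH *example_create_bh(BlockDriverState *bs, void *opaque)
    {
        /* Pin the BH to the BDS's main AioContext at creation time */
        return aio_bh_new(bdrv_get_aio_context(bs), example_bh_cb, opaque);
    }

    static void example_kick(QEMUBH *bh)
    {
        /* Scheduling is thread-safe; any context may request the run */
        qemu_bh_schedule(bh);
    }

block/nvme.c uses this same mechanism for completion processing, which
is why the comments below can promise a fixed context for the BH and
event-notifier handlers while submission may happen elsewhere.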
diff --git a/block/nvme.c b/block/nvme.c
index 2e93abdb4b..bef515e903 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -64,6 +64,7 @@ typedef struct {
 } NVMeQueue;
 
 typedef struct {
+    /* Called from nvme_process_completion() in the BDS's main AioContext */
     BlockCompletionFunc *cb;
     void *opaque;
     int cid;
@@ -83,6 +84,7 @@ typedef struct {
     uint8_t *prp_list_pages;
 
     /* Fields protected by @lock */
+    /* Coroutines in this queue are woken in their own context */
     CoQueue free_req_queue;
     NVMeQueue sq, cq;
     int cq_phase;
@@ -91,7 +93,7 @@ typedef struct {
     int need_kick;
     int inflight;
 
-    /* Thread-safe, no lock necessary */
+    /* Thread-safe, no lock necessary; runs in the BDS's main context */
     QEMUBH *completion_bh;
 } NVMeQueuePair;
 
@@ -205,11 +207,13 @@ static void nvme_free_queue_pair(NVMeQueuePair *q)
     g_free(q);
 }
 
+/* Runs in the BDS's main AioContext */
 static void nvme_free_req_queue_cb(void *opaque)
 {
     NVMeQueuePair *q = opaque;
 
     qemu_mutex_lock(&q->lock);
+    /* qemu_co_enter_next() wakes the coroutine in its own AioContext */
     while (q->free_req_head != -1 &&
            qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
         /* Retry waiting requests */
@@ -280,7 +284,7 @@ fail:
     return NULL;
 }
 
-/* With q->lock */
+/* With q->lock, must be run in the BDS's main AioContext */
 static void nvme_kick(NVMeQueuePair *q)
 {
     BDRVNVMeState *s = q->s;
@@ -307,7 +311,10 @@ static NVMeRequest *nvme_get_free_req_nofail_locked(NVMeQueuePair *q)
     return req;
 }
 
-/* Return a free request element if any, otherwise return NULL. */
+/*
+ * Return a free request element if any, otherwise return NULL.
+ * May be run from any AioContext.
+ */
 static NVMeRequest *nvme_get_free_req_nowait(NVMeQueuePair *q)
 {
     QEMU_LOCK_GUARD(&q->lock);
@@ -320,6 +327,7 @@ static NVMeRequest *nvme_get_free_req_nowait(NVMeQueuePair *q)
 /*
  * Wait for a free request to become available if necessary, then
  * return it.
+ * May be called in any AioContext.
  */
 static coroutine_fn NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
 {
@@ -327,20 +335,21 @@ static coroutine_fn NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
 
     while (q->free_req_head == -1) {
         trace_nvme_free_req_queue_wait(q->s, q->index);
+        /* nvme_free_req_queue_cb() wakes us in our own AioContext */
         qemu_co_queue_wait(&q->free_req_queue, &q->lock);
     }
 
     return nvme_get_free_req_nofail_locked(q);
 }
 
-/* With q->lock */
+/* With q->lock, may be called in any AioContext */
 static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
 {
     req->free_req_next = q->free_req_head;
     q->free_req_head = req - q->reqs;
 }
 
-/* With q->lock */
+/* With q->lock, may be called in any AioContext */
 static void nvme_wake_free_req_locked(NVMeQueuePair *q)
 {
     if (!qemu_co_queue_empty(&q->free_req_queue)) {
@@ -349,7 +358,7 @@ static void nvme_wake_free_req_locked(NVMeQueuePair *q)
     }
 }
 
-/* Insert a request in the freelist and wake waiters */
+/* Insert a request in the freelist and wake waiters (from any AioContext) */
 static void nvme_put_free_req_and_wake(NVMeQueuePair *q, NVMeRequest *req)
 {
     qemu_mutex_lock(&q->lock);
@@ -380,7 +389,7 @@ static inline int nvme_translate_error(const NvmeCqe *c)
     }
 }
 
-/* With q->lock */
+/* With q->lock, must be run in the BDS's main AioContext */
 static bool nvme_process_completion(NVMeQueuePair *q)
 {
     BDRVNVMeState *s = q->s;
@@ -450,6 +459,7 @@ static bool nvme_process_completion(NVMeQueuePair *q)
     return progress;
 }
 
+/* As q->completion_bh, runs in the BDS's main AioContext */
 static void nvme_process_completion_bh(void *opaque)
 {
     NVMeQueuePair *q = opaque;
@@ -480,6 +490,7 @@ static void nvme_trace_command(const NvmeCmd *cmd)
     }
 }
 
+/* Must be run in the BDS's main AioContext */
 static void nvme_kick_and_check_completions(void *opaque)
 {
     NVMeQueuePair *q = opaque;
@@ -489,6 +500,7 @@ static void nvme_kick_and_check_completions(void *opaque)
     nvme_process_completion(q);
 }
 
+/* Runs in nvme_submit_command()'s AioContext */
 static void nvme_deferred_fn(void *opaque)
 {
     NVMeQueuePair *q = opaque;
@@ -501,6 +513,7 @@ static void nvme_deferred_fn(void *opaque)
     }
 }
 
+/* May be run in any AioContext */
 static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
                                 NvmeCmd *cmd, BlockCompletionFunc cb,
                                 void *opaque)
@@ -522,6 +535,7 @@ static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
     defer_call(nvme_deferred_fn, q);
 }
 
+/* Put into NVMeRequest.cb, so runs in the BDS's main AioContext */
 static void nvme_admin_cmd_sync_cb(void *opaque, int ret)
 {
     int *pret = opaque;
@@ -529,6 +543,7 @@ static void nvme_admin_cmd_sync_cb(void *opaque, int ret)
     aio_wait_kick();
 }
 
+/* Must be run in the BDS's or qemu's main AioContext */
 static int nvme_admin_cmd_sync(BlockDriverState *bs, NvmeCmd *cmd)
 {
     BDRVNVMeState *s = bs->opaque;
@@ -637,6 +652,7 @@ out:
     return ret;
 }
 
+/* Must be run in the BDS's main AioContext */
 static void nvme_poll_queue(NVMeQueuePair *q)
 {
     const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
@@ -659,6 +675,7 @@ static void nvme_poll_queue(NVMeQueuePair *q)
     qemu_mutex_unlock(&q->lock);
 }
 
+/* Must be run in the BDS's main AioContext */
 static void nvme_poll_queues(BDRVNVMeState *s)
 {
     int i;
@@ -668,6 +685,7 @@ static void nvme_poll_queues(BDRVNVMeState *s)
     }
 }
 
+/* Run as an event notifier in the BDS's main AioContext */
 static void nvme_handle_event(EventNotifier *n)
 {
     BDRVNVMeState *s = container_of(n, BDRVNVMeState,
@@ -721,6 +739,7 @@ out_error:
     return false;
 }
 
+/* Run as an event notifier in the BDS's main AioContext */
 static bool nvme_poll_cb(void *opaque)
 {
     EventNotifier *e = opaque;
@@ -744,6 +763,7 @@ static bool nvme_poll_cb(void *opaque)
     return false;
 }
 
+/* Run as an event notifier in the BDS's main AioContext */
 static void nvme_poll_ready(EventNotifier *e)
 {
     BDRVNVMeState *s = container_of(e, BDRVNVMeState,
@@ -1045,7 +1065,7 @@ static int nvme_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
     return 0;
 }
 
-/* Called with s->dma_map_lock */
+/* Called with s->dma_map_lock, may be run in any AioContext */
 static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
                                             QEMUIOVector *qiov)
 {
@@ -1056,13 +1076,17 @@ static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
     if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
         r = qemu_vfio_dma_reset_temporary(s->vfio);
         if (!r) {
+            /*
+             * Queue access is protected by the dma_map_lock, and all
+             * coroutines are woken in their own AioContext
+             */
             qemu_co_queue_restart_all(&s->dma_flush_queue);
         }
     }
     return r;
 }
 
-/* Called with s->dma_map_lock */
+/* Called with s->dma_map_lock, may be run in any AioContext */
 static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
                                           NVMeRequest *req, QEMUIOVector *qiov)
 {
@@ -1175,6 +1199,7 @@ typedef struct {
     int ret;
 } NVMeCoData;
 
+/* Put into NVMeRequest.cb, so runs in the BDS's main AioContext */
 static void nvme_rw_cb(void *opaque, int ret)
 {
     NVMeCoData *data = opaque;
-- 
2.47.3