The following changes since commit c6a5fc2ac76c5ab709896ee1b0edd33685a67ed1:

  decodetree: Add --output-null for meson testing (2023-05-31 19:56:42 -0700)

are available in the Git repository at:

  https://gitlab.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 98b126f5e3228a346c774e569e26689943b401dd:

  qapi: add '@fdset' feature for BlockdevOptionsVirtioBlkVhostVdpa (2023-06-01 11:08:21 -0400)

----------------------------------------------------------------
Pull request

- Stefano Garzarella's blkio block driver 'fd' parameter
- My thread-local blk_io_plug() series

----------------------------------------------------------------

Stefan Hajnoczi (6):
  block: add blk_io_plug_call() API
  block/nvme: convert to blk_io_plug_call() API
  block/blkio: convert to blk_io_plug_call() API
  block/io_uring: convert to blk_io_plug_call() API
  block/linux-aio: convert to blk_io_plug_call() API
  block: remove bdrv_co_io_plug() API

Stefano Garzarella (2):
  block/blkio: use qemu_open() to support fd passing for virtio-blk
  qapi: add '@fdset' feature for BlockdevOptionsVirtioBlkVhostVdpa

 MAINTAINERS                       |   1 +
 qapi/block-core.json              |   6 ++
 meson.build                       |   4 +
 include/block/block-io.h          |   3 -
 include/block/block_int-common.h  |  11 ---
 include/block/raw-aio.h           |  14 ---
 include/sysemu/block-backend-io.h |  13 +--
 block/blkio.c                     |  96 ++++++++++++------
 block/block-backend.c             |  22 -----
 block/file-posix.c                |  38 -------
 block/io.c                        |  37 -------
 block/io_uring.c                  |  44 ++++-----
 block/linux-aio.c                 |  41 +++-----
 block/nvme.c                      |  44 +++------
 block/plug.c                      | 159 ++++++++++++++++++++++++++++++
 hw/block/dataplane/xen-block.c    |   8 +-
 hw/block/virtio-blk.c             |   4 +-
 hw/scsi/virtio-scsi.c             |   6 +-
 block/meson.build                 |   1 +
 block/trace-events                |   6 +-
 20 files changed, 293 insertions(+), 265 deletions(-)
 create mode 100644 block/plug.c

-- 
2.40.1
Introduce a new API for thread-local blk_io_plug() that does not
traverse the block graph. The goal is to make blk_io_plug() multi-queue
friendly.

Instead of having block drivers track whether or not we're in a plugged
section, provide an API that allows them to defer a function call until
we're unplugged: blk_io_plug_call(fn, opaque). If blk_io_plug_call() is
called multiple times with the same fn/opaque pair, then fn() is only
called once, when the section is unplugged - resulting in batching.

This patch introduces the API and changes blk_io_plug()/blk_io_unplug().
blk_io_plug()/blk_io_unplug() no longer require a BlockBackend argument
because the plug state is now thread-local.

Later patches convert block drivers to blk_io_plug_call() and then we
can finally remove .bdrv_co_io_plug() once all block drivers have been
converted.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Acked-by: Kevin Wolf <kwolf@redhat.com>
Message-id: 20230530180959.1108766-2-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
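As a minimal sketch of the intended call pattern (MyQueue, MyRequest,
my_enqueue() and my_submit_pending() are hypothetical driver names used
for illustration only; they are not part of this series):

    /* Deferred until the outermost blk_io_unplug(), or run immediately
     * when called outside a plugged section. */
    static void my_unplug_fn(void *opaque)
    {
        MyQueue *q = opaque; /* hypothetical per-driver queue type */

        my_submit_pending(q); /* submit everything enqueued so far */
    }

    static void my_queue_request(MyQueue *q, MyRequest *req)
    {
        my_enqueue(q, req);

        /*
         * Duplicate fn/opaque pairs are coalesced, so many requests in
         * one plugged section result in a single my_unplug_fn(q) call.
         */
        blk_io_plug_call(my_unplug_fn, q);
    }
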
 MAINTAINERS                       |   1 +
 include/sysemu/block-backend-io.h |  13 +--
 block/block-backend.c             |  22 -----
 block/plug.c                      | 159 ++++++++++++++++++++++++++++++
 hw/block/dataplane/xen-block.c    |   8 +-
 hw/block/virtio-blk.c             |   4 +-
 hw/scsi/virtio-scsi.c             |   6 +-
 block/meson.build                 |   1 +
 8 files changed, 173 insertions(+), 41 deletions(-)
 create mode 100644 block/plug.c

diff --git a/MAINTAINERS b/MAINTAINERS
index XXXXXXX..XXXXXXX 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -XXX,XX +XXX,XX @@ F: util/aio-*.c
 F: util/aio-*.h
 F: util/fdmon-*.c
 F: block/io.c
+F: block/plug.c
 F: migration/block*
 F: include/block/aio.h
 F: include/block/aio-wait.h
diff --git a/include/sysemu/block-backend-io.h b/include/sysemu/block-backend-io.h
index XXXXXXX..XXXXXXX 100644
--- a/include/sysemu/block-backend-io.h
+++ b/include/sysemu/block-backend-io.h
@@ -XXX,XX +XXX,XX @@ void blk_iostatus_set_err(BlockBackend *blk, int error);
 int blk_get_max_iov(BlockBackend *blk);
 int blk_get_max_hw_iov(BlockBackend *blk);
 
-/*
- * blk_io_plug/unplug are thread-local operations. This means that multiple
- * IOThreads can simultaneously call plug/unplug, but the caller must ensure
- * that each unplug() is called in the same IOThread of the matching plug().
- */
-void coroutine_fn blk_co_io_plug(BlockBackend *blk);
-void co_wrapper blk_io_plug(BlockBackend *blk);
-
-void coroutine_fn blk_co_io_unplug(BlockBackend *blk);
-void co_wrapper blk_io_unplug(BlockBackend *blk);
+void blk_io_plug(void);
+void blk_io_unplug(void);
+void blk_io_plug_call(void (*fn)(void *), void *opaque);
 
 AioContext *blk_get_aio_context(BlockBackend *blk);
 BlockAcctStats *blk_get_stats(BlockBackend *blk);
diff --git a/block/block-backend.c b/block/block-backend.c
index XXXXXXX..XXXXXXX 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -XXX,XX +XXX,XX @@ void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
     notifier_list_add(&blk->insert_bs_notifiers, notify);
 }
 
-void coroutine_fn blk_co_io_plug(BlockBackend *blk)
-{
-    BlockDriverState *bs = blk_bs(blk);
-    IO_CODE();
-    GRAPH_RDLOCK_GUARD();
-
-    if (bs) {
-        bdrv_co_io_plug(bs);
-    }
-}
-
-void coroutine_fn blk_co_io_unplug(BlockBackend *blk)
-{
-    BlockDriverState *bs = blk_bs(blk);
-    IO_CODE();
-    GRAPH_RDLOCK_GUARD();
-
-    if (bs) {
-        bdrv_co_io_unplug(bs);
-    }
-}
-
 BlockAcctStats *blk_get_stats(BlockBackend *blk)
 {
     IO_CODE();
diff --git a/block/plug.c b/block/plug.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/block/plug.c
@@ -XXX,XX +XXX,XX @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Block I/O plugging
+ *
+ * Copyright Red Hat.
+ *
+ * This API defers a function call within a blk_io_plug()/blk_io_unplug()
+ * section, allowing multiple calls to batch up. This is a performance
+ * optimization that is used in the block layer to submit several I/O requests
+ * at once instead of individually:
+ *
+ *   blk_io_plug(); <-- start of plugged region
+ *   ...
+ *   blk_io_plug_call(my_func, my_obj); <-- deferred my_func(my_obj) call
+ *   blk_io_plug_call(my_func, my_obj); <-- another
+ *   blk_io_plug_call(my_func, my_obj); <-- another
+ *   ...
+ *   blk_io_unplug(); <-- end of plugged region, my_func(my_obj) is called once
+ *
+ * This code is actually generic and not tied to the block layer. If another
+ * subsystem needs this functionality, it could be renamed.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/coroutine-tls.h"
+#include "qemu/notify.h"
+#include "qemu/thread.h"
+#include "sysemu/block-backend.h"
+
+/* A function call that has been deferred until unplug() */
+typedef struct {
+    void (*fn)(void *);
+    void *opaque;
+} UnplugFn;
+
+/* Per-thread state */
+typedef struct {
+    unsigned count;       /* how many times has plug() been called? */
+    GArray *unplug_fns;   /* functions to call at unplug time */
+} Plug;
+
+/* Use get_ptr_plug() to fetch this thread-local value */
+QEMU_DEFINE_STATIC_CO_TLS(Plug, plug);
+
+/* Called at thread cleanup time */
+static void blk_io_plug_atexit(Notifier *n, void *value)
+{
+    Plug *plug = get_ptr_plug();
+    g_array_free(plug->unplug_fns, TRUE);
+}
+
+/* This won't involve coroutines, so use __thread */
+static __thread Notifier blk_io_plug_atexit_notifier;
+
+/**
+ * blk_io_plug_call:
+ * @fn: a function pointer to be invoked
+ * @opaque: a user-defined argument to @fn()
+ *
+ * Call @fn(@opaque) immediately if not within a blk_io_plug()/blk_io_unplug()
+ * section.
+ *
+ * Otherwise defer the call until the end of the outermost
+ * blk_io_plug()/blk_io_unplug() section in this thread. If the same
+ * @fn/@opaque pair has already been deferred, it will only be called once upon
+ * blk_io_unplug() so that accumulated calls are batched into a single call.
+ *
+ * The caller must ensure that @opaque is not freed before @fn() is invoked.
+ */
+void blk_io_plug_call(void (*fn)(void *), void *opaque)
+{
+    Plug *plug = get_ptr_plug();
+
+    /* Call immediately if we're not plugged */
+    if (plug->count == 0) {
+        fn(opaque);
+        return;
+    }
+
+    GArray *array = plug->unplug_fns;
+    if (!array) {
+        array = g_array_new(FALSE, FALSE, sizeof(UnplugFn));
+        plug->unplug_fns = array;
+        blk_io_plug_atexit_notifier.notify = blk_io_plug_atexit;
+        qemu_thread_atexit_add(&blk_io_plug_atexit_notifier);
+    }
+
+    UnplugFn *fns = (UnplugFn *)array->data;
+    UnplugFn new_fn = {
+        .fn = fn,
+        .opaque = opaque,
+    };
+
+    /*
+     * There won't be many, so do a linear search. If this becomes a bottleneck
+     * then a binary search (glib 2.62+) or different data structure could be
+     * used.
+     */
+    for (guint i = 0; i < array->len; i++) {
+        if (memcmp(&fns[i], &new_fn, sizeof(new_fn)) == 0) {
+            return; /* already exists */
+        }
+    }
+
+    g_array_append_val(array, new_fn);
+}
+
+/**
+ * blk_io_plug: Defer blk_io_plug_call() functions until blk_io_unplug()
+ *
+ * blk_io_plug/unplug are thread-local operations. This means that multiple
+ * threads can simultaneously call plug/unplug, but the caller must ensure that
+ * each unplug() is called in the same thread of the matching plug().
+ *
+ * Nesting is supported. blk_io_plug_call() functions are only called at the
+ * outermost blk_io_unplug().
+ */
+void blk_io_plug(void)
+{
+    Plug *plug = get_ptr_plug();
+
+    assert(plug->count < UINT32_MAX);
+
+    plug->count++;
+}
+
+/**
+ * blk_io_unplug: Run any pending blk_io_plug_call() functions
+ *
+ * There must have been a matching blk_io_plug() call in the same thread prior
+ * to this blk_io_unplug() call.
+ */
+void blk_io_unplug(void)
+{
+    Plug *plug = get_ptr_plug();
+
+    assert(plug->count > 0);
+
+    if (--plug->count > 0) {
+        return;
+    }
+
+    GArray *array = plug->unplug_fns;
+    if (!array) {
+        return;
+    }
+
+    UnplugFn *fns = (UnplugFn *)array->data;
+
+    for (guint i = 0; i < array->len; i++) {
+        fns[i].fn(fns[i].opaque);
+    }
+
+    /*
+     * This resets the array without freeing memory so that appending is cheap
+     * in the future.
+     */
+    g_array_set_size(array, 0);
+}
diff --git a/hw/block/dataplane/xen-block.c b/hw/block/dataplane/xen-block.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/block/dataplane/xen-block.c
+++ b/hw/block/dataplane/xen-block.c
@@ -XXX,XX +XXX,XX @@ static bool xen_block_handle_requests(XenBlockDataPlane *dataplane)
      * is below us.
      */
     if (inflight_atstart > IO_PLUG_THRESHOLD) {
-        blk_io_plug(dataplane->blk);
+        blk_io_plug();
     }
     while (rc != rp) {
         /* pull request from ring */
@@ -XXX,XX +XXX,XX @@ static bool xen_block_handle_requests(XenBlockDataPlane *dataplane)
 
         if (inflight_atstart > IO_PLUG_THRESHOLD &&
             batched >= inflight_atstart) {
-            blk_io_unplug(dataplane->blk);
+            blk_io_unplug();
         }
         xen_block_do_aio(request);
         if (inflight_atstart > IO_PLUG_THRESHOLD) {
             if (batched >= inflight_atstart) {
-                blk_io_plug(dataplane->blk);
+                blk_io_plug();
                 batched = 0;
             } else {
                 batched++;
@@ -XXX,XX +XXX,XX @@ static bool xen_block_handle_requests(XenBlockDataPlane *dataplane)
         }
     }
     if (inflight_atstart > IO_PLUG_THRESHOLD) {
-        blk_io_unplug(dataplane->blk);
+        blk_io_unplug();
     }
 
     return done_something;
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -XXX,XX +XXX,XX @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
     bool suppress_notifications = virtio_queue_get_notification(vq);
 
     aio_context_acquire(blk_get_aio_context(s->blk));
-    blk_io_plug(s->blk);
+    blk_io_plug();
 
     do {
         if (suppress_notifications) {
@@ -XXX,XX +XXX,XX @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
         virtio_blk_submit_multireq(s, &mrb);
     }
 
-    blk_io_unplug(s->blk);
+    blk_io_unplug();
     aio_context_release(blk_get_aio_context(s->blk));
 }
 
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -XXX,XX +XXX,XX @@ static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
         return -ENOBUFS;
     }
     scsi_req_ref(req->sreq);
-    blk_io_plug(d->conf.blk);
+    blk_io_plug();
     object_unref(OBJECT(d));
     return 0;
 }
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req)
     if (scsi_req_enqueue(sreq)) {
         scsi_req_continue(sreq);
     }
-    blk_io_unplug(sreq->dev->conf.blk);
+    blk_io_unplug();
     scsi_req_unref(sreq);
 }
 
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
     while (!QTAILQ_EMPTY(&reqs)) {
         req = QTAILQ_FIRST(&reqs);
         QTAILQ_REMOVE(&reqs, req, next);
-        blk_io_unplug(req->sreq->dev->conf.blk);
+        blk_io_unplug();
         scsi_req_unref(req->sreq);
         virtqueue_detach_element(req->vq, &req->elem, 0);
         virtio_scsi_free_req(req);
diff --git a/block/meson.build b/block/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/block/meson.build
+++ b/block/meson.build
@@ -XXX,XX +XXX,XX @@ block_ss.add(files(
   'mirror.c',
   'nbd.c',
   'null.c',
+  'plug.c',
   'qapi.c',
   'qcow2-bitmap.c',
   'qcow2-cache.c',
-- 
2.40.1
Stop using the .bdrv_co_io_plug() API because it is not multi-queue
block layer friendly. Use the new blk_io_plug_call() API to batch I/O
submission instead.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Acked-by: Kevin Wolf <kwolf@redhat.com>
Message-id: 20230530180959.1108766-3-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/nvme.c       | 44 ++++++++++++--------------------------------
 block/trace-events |  1 -
 2 files changed, 12 insertions(+), 33 deletions(-)

diff --git a/block/nvme.c b/block/nvme.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/vfio-helpers.h"
 #include "block/block-io.h"
 #include "block/block_int.h"
+#include "sysemu/block-backend.h"
 #include "sysemu/replay.h"
 #include "trace.h"
 
@@ -XXX,XX +XXX,XX @@ struct BDRVNVMeState {
     int blkshift;
 
     uint64_t max_transfer;
-    bool plugged;
 
     bool supports_write_zeroes;
     bool supports_discard;
@@ -XXX,XX +XXX,XX @@ static void nvme_kick(NVMeQueuePair *q)
 {
     BDRVNVMeState *s = q->s;
 
-    if (s->plugged || !q->need_kick) {
+    if (!q->need_kick) {
         return;
     }
     trace_nvme_kick(s, q->index);
@@ -XXX,XX +XXX,XX @@ static bool nvme_process_completion(NVMeQueuePair *q)
     NvmeCqe *c;
 
     trace_nvme_process_completion(s, q->index, q->inflight);
-    if (s->plugged) {
-        trace_nvme_process_completion_queue_plugged(s, q->index);
-        return false;
-    }
 
     /*
      * Support re-entrancy when a request cb() function invokes aio_poll().
@@ -XXX,XX +XXX,XX @@ static void nvme_trace_command(const NvmeCmd *cmd)
     }
 }
 
+static void nvme_unplug_fn(void *opaque)
+{
+    NVMeQueuePair *q = opaque;
+
+    QEMU_LOCK_GUARD(&q->lock);
+    nvme_kick(q);
+    nvme_process_completion(q);
+}
+
 static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
                                 NvmeCmd *cmd, BlockCompletionFunc cb,
                                 void *opaque)
@@ -XXX,XX +XXX,XX @@ static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
            q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
     q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
     q->need_kick++;
-    nvme_kick(q);
-    nvme_process_completion(q);
+    blk_io_plug_call(nvme_unplug_fn, q);
     qemu_mutex_unlock(&q->lock);
 }
 
@@ -XXX,XX +XXX,XX @@ static void nvme_attach_aio_context(BlockDriverState *bs,
     }
 }
 
-static void coroutine_fn nvme_co_io_plug(BlockDriverState *bs)
-{
-    BDRVNVMeState *s = bs->opaque;
-    assert(!s->plugged);
-    s->plugged = true;
-}
-
-static void coroutine_fn nvme_co_io_unplug(BlockDriverState *bs)
-{
-    BDRVNVMeState *s = bs->opaque;
-    assert(s->plugged);
-    s->plugged = false;
-    for (unsigned i = INDEX_IO(0); i < s->queue_count; i++) {
-        NVMeQueuePair *q = s->queues[i];
-        qemu_mutex_lock(&q->lock);
-        nvme_kick(q);
-        nvme_process_completion(q);
-        qemu_mutex_unlock(&q->lock);
-    }
-}
-
 static bool nvme_register_buf(BlockDriverState *bs, void *host, size_t size,
                               Error **errp)
 {
@@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_nvme = {
     .bdrv_detach_aio_context  = nvme_detach_aio_context,
     .bdrv_attach_aio_context  = nvme_attach_aio_context,
 
-    .bdrv_co_io_plug          = nvme_co_io_plug,
-    .bdrv_co_io_unplug        = nvme_co_io_unplug,
-
     .bdrv_register_buf        = nvme_register_buf,
     .bdrv_unregister_buf      = nvme_unregister_buf,
 };
diff --git a/block/trace-events b/block/trace-events
index XXXXXXX..XXXXXXX 100644
--- a/block/trace-events
+++ b/block/trace-events
@@ -XXX,XX +XXX,XX @@ nvme_kick(void *s, unsigned q_index) "s %p q #%u"
 nvme_dma_flush_queue_wait(void *s) "s %p"
 nvme_error(int cmd_specific, int sq_head, int sqid, int cid, int status) "cmd_specific %d sq_head %d sqid %d cid %d status 0x%x"
 nvme_process_completion(void *s, unsigned q_index, int inflight) "s %p q #%u inflight %d"
-nvme_process_completion_queue_plugged(void *s, unsigned q_index) "s %p q #%u"
 nvme_complete_command(void *s, unsigned q_index, int cid) "s %p q #%u cid %d"
 nvme_submit_command(void *s, unsigned q_index, int cid) "s %p q #%u cid %d"
 nvme_submit_command_raw(int c0, int c1, int c2, int c3, int c4, int c5, int c6, int c7) "%02x %02x %02x %02x %02x %02x %02x %02x"
-- 
2.40.1
Stop using the .bdrv_co_io_plug() API because it is not multi-queue
block layer friendly. Use the new blk_io_plug_call() API to batch I/O
submission instead.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Acked-by: Kevin Wolf <kwolf@redhat.com>
Message-id: 20230530180959.1108766-4-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
10 | |||
11 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
13 | Message-id: 20170222180725.28611-4-pbonzini@redhat.com | ||
14 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 10 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
15 | --- | 11 | --- |
16 | block/iscsi.c | 83 +++++++++++++++++++++++++++++++++++++++++++++-------------- | 12 | block/blkio.c | 43 ++++++++++++++++++++++++------------------- |
17 | 1 file changed, 64 insertions(+), 19 deletions(-) | 13 | 1 file changed, 24 insertions(+), 19 deletions(-) |
18 | 14 | ||
19 | diff --git a/block/iscsi.c b/block/iscsi.c | 15 | diff --git a/block/blkio.c b/block/blkio.c |
20 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/block/iscsi.c | 17 | --- a/block/blkio.c |
22 | +++ b/block/iscsi.c | 18 | +++ b/block/blkio.c |
23 | @@ -XXX,XX +XXX,XX @@ typedef struct IscsiLun { | 19 | @@ -XXX,XX +XXX,XX @@ |
24 | int events; | 20 | #include "qemu/error-report.h" |
25 | QEMUTimer *nop_timer; | 21 | #include "qapi/qmp/qdict.h" |
26 | QEMUTimer *event_timer; | 22 | #include "qemu/module.h" |
27 | + QemuMutex mutex; | 23 | +#include "sysemu/block-backend.h" |
28 | struct scsi_inquiry_logical_block_provisioning lbp; | 24 | #include "exec/memory.h" /* for ram_block_discard_disable() */ |
29 | struct scsi_inquiry_block_limits bl; | 25 | |
30 | unsigned char *zeroblock; | 26 | #include "block/block-io.h" |
31 | @@ -XXX,XX +XXX,XX @@ static int iscsi_translate_sense(struct scsi_sense *sense) | 27 | @@ -XXX,XX +XXX,XX @@ static void blkio_detach_aio_context(BlockDriverState *bs) |
32 | return ret; | 28 | NULL, NULL, NULL); |
33 | } | 29 | } |
34 | 30 | ||
35 | +/* Called (via iscsi_service) with QemuMutex held. */ | 31 | -/* Call with s->blkio_lock held to submit I/O after enqueuing a new request */ |
36 | static void | 32 | -static void blkio_submit_io(BlockDriverState *bs) |
37 | iscsi_co_generic_cb(struct iscsi_context *iscsi, int status, | 33 | +/* |
38 | void *command_data, void *opaque) | 34 | + * Called by blk_io_unplug() or immediately if not plugged. Called without |
39 | @@ -XXX,XX +XXX,XX @@ static const AIOCBInfo iscsi_aiocb_info = { | 35 | + * blkio_lock. |
40 | static void iscsi_process_read(void *arg); | 36 | + */ |
41 | static void iscsi_process_write(void *arg); | 37 | +static void blkio_unplug_fn(void *opaque) |
42 | |||
43 | +/* Called with QemuMutex held. */ | ||
44 | static void | ||
45 | iscsi_set_events(IscsiLun *iscsilun) | ||
46 | { | 38 | { |
47 | @@ -XXX,XX +XXX,XX @@ iscsi_process_read(void *arg) | 39 | - if (qatomic_read(&bs->io_plugged) == 0) { |
48 | IscsiLun *iscsilun = arg; | 40 | - BDRVBlkioState *s = bs->opaque; |
49 | struct iscsi_context *iscsi = iscsilun->iscsi; | 41 | + BDRVBlkioState *s = opaque; |
50 | 42 | ||
51 | - aio_context_acquire(iscsilun->aio_context); | 43 | + WITH_QEMU_LOCK_GUARD(&s->blkio_lock) { |
52 | + qemu_mutex_lock(&iscsilun->mutex); | 44 | blkioq_do_io(s->blkioq, NULL, 0, 0, NULL); |
53 | iscsi_service(iscsi, POLLIN); | 45 | } |
54 | iscsi_set_events(iscsilun); | ||
55 | - aio_context_release(iscsilun->aio_context); | ||
56 | + qemu_mutex_unlock(&iscsilun->mutex); | ||
57 | } | 46 | } |
58 | 47 | ||
59 | static void | 48 | +/* |
60 | @@ -XXX,XX +XXX,XX @@ iscsi_process_write(void *arg) | 49 | + * Schedule I/O submission after enqueuing a new request. Called without |
61 | IscsiLun *iscsilun = arg; | 50 | + * blkio_lock. |
62 | struct iscsi_context *iscsi = iscsilun->iscsi; | 51 | + */ |
63 | 52 | +static void blkio_submit_io(BlockDriverState *bs) | |
64 | - aio_context_acquire(iscsilun->aio_context); | 53 | +{ |
65 | + qemu_mutex_lock(&iscsilun->mutex); | 54 | + BDRVBlkioState *s = bs->opaque; |
66 | iscsi_service(iscsi, POLLOUT); | 55 | + |
67 | iscsi_set_events(iscsilun); | 56 | + blk_io_plug_call(blkio_unplug_fn, s); |
68 | - aio_context_release(iscsilun->aio_context); | 57 | +} |
69 | + qemu_mutex_unlock(&iscsilun->mutex); | 58 | + |
59 | static int coroutine_fn | ||
60 | blkio_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes) | ||
61 | { | ||
62 | @@ -XXX,XX +XXX,XX @@ blkio_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes) | ||
63 | |||
64 | WITH_QEMU_LOCK_GUARD(&s->blkio_lock) { | ||
65 | blkioq_discard(s->blkioq, offset, bytes, &cod, 0); | ||
66 | - blkio_submit_io(bs); | ||
67 | } | ||
68 | |||
69 | + blkio_submit_io(bs); | ||
70 | qemu_coroutine_yield(); | ||
71 | return cod.ret; | ||
70 | } | 72 | } |
71 | 73 | @@ -XXX,XX +XXX,XX @@ blkio_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, | |
72 | static int64_t sector_lun2qemu(int64_t sector, IscsiLun *iscsilun) | 74 | |
73 | @@ -XXX,XX +XXX,XX @@ iscsi_co_writev_flags(BlockDriverState *bs, int64_t sector_num, int nb_sectors, | 75 | WITH_QEMU_LOCK_GUARD(&s->blkio_lock) { |
74 | uint64_t lba; | 76 | blkioq_readv(s->blkioq, offset, iov, iovcnt, &cod, 0); |
75 | uint32_t num_sectors; | 77 | - blkio_submit_io(bs); |
76 | bool fua = flags & BDRV_REQ_FUA; | ||
77 | + int r = 0; | ||
78 | |||
79 | if (fua) { | ||
80 | assert(iscsilun->dpofua); | ||
81 | @@ -XXX,XX +XXX,XX @@ iscsi_co_writev_flags(BlockDriverState *bs, int64_t sector_num, int nb_sectors, | ||
82 | lba = sector_qemu2lun(sector_num, iscsilun); | ||
83 | num_sectors = sector_qemu2lun(nb_sectors, iscsilun); | ||
84 | iscsi_co_init_iscsitask(iscsilun, &iTask); | ||
85 | + qemu_mutex_lock(&iscsilun->mutex); | ||
86 | retry: | ||
87 | if (iscsilun->use_16_for_rw) { | ||
88 | #if LIBISCSI_API_VERSION >= (20160603) | ||
89 | @@ -XXX,XX +XXX,XX @@ retry: | ||
90 | #endif | ||
91 | while (!iTask.complete) { | ||
92 | iscsi_set_events(iscsilun); | ||
93 | + qemu_mutex_unlock(&iscsilun->mutex); | ||
94 | qemu_coroutine_yield(); | ||
95 | + qemu_mutex_lock(&iscsilun->mutex); | ||
96 | } | 78 | } |
97 | 79 | ||
98 | if (iTask.task != NULL) { | 80 | + blkio_submit_io(bs); |
99 | @@ -XXX,XX +XXX,XX @@ retry: | 81 | qemu_coroutine_yield(); |
100 | 82 | ||
101 | if (iTask.status != SCSI_STATUS_GOOD) { | 83 | if (use_bounce_buffer) { |
102 | iscsi_allocmap_set_invalid(iscsilun, sector_num, nb_sectors); | 84 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn blkio_co_pwritev(BlockDriverState *bs, int64_t offset, |
103 | - return iTask.err_code; | 85 | |
104 | + r = iTask.err_code; | 86 | WITH_QEMU_LOCK_GUARD(&s->blkio_lock) { |
105 | + goto out_unlock; | 87 | blkioq_writev(s->blkioq, offset, iov, iovcnt, &cod, blkio_flags); |
88 | - blkio_submit_io(bs); | ||
106 | } | 89 | } |
107 | 90 | ||
108 | iscsi_allocmap_set_allocated(iscsilun, sector_num, nb_sectors); | 91 | + blkio_submit_io(bs); |
109 | 92 | qemu_coroutine_yield(); | |
110 | - return 0; | 93 | |
111 | +out_unlock: | 94 | if (use_bounce_buffer) { |
112 | + qemu_mutex_unlock(&iscsilun->mutex); | 95 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn blkio_co_flush(BlockDriverState *bs) |
113 | + return r; | 96 | |
97 | WITH_QEMU_LOCK_GUARD(&s->blkio_lock) { | ||
98 | blkioq_flush(s->blkioq, &cod, 0); | ||
99 | - blkio_submit_io(bs); | ||
100 | } | ||
101 | |||
102 | + blkio_submit_io(bs); | ||
103 | qemu_coroutine_yield(); | ||
104 | return cod.ret; | ||
114 | } | 105 | } |
115 | 106 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn blkio_co_pwrite_zeroes(BlockDriverState *bs, | |
116 | 107 | ||
117 | @@ -XXX,XX +XXX,XX @@ static int64_t coroutine_fn iscsi_co_get_block_status(BlockDriverState *bs, | 108 | WITH_QEMU_LOCK_GUARD(&s->blkio_lock) { |
118 | goto out; | 109 | blkioq_write_zeroes(s->blkioq, offset, bytes, &cod, blkio_flags); |
110 | - blkio_submit_io(bs); | ||
119 | } | 111 | } |
120 | 112 | ||
121 | + qemu_mutex_lock(&iscsilun->mutex); | 113 | + blkio_submit_io(bs); |
122 | retry: | 114 | qemu_coroutine_yield(); |
123 | if (iscsi_get_lba_status_task(iscsilun->iscsi, iscsilun->lun, | 115 | return cod.ret; |
124 | sector_qemu2lun(sector_num, iscsilun), | ||
125 | 8 + 16, iscsi_co_generic_cb, | ||
126 | &iTask) == NULL) { | ||
127 | ret = -ENOMEM; | ||
128 | - goto out; | ||
129 | + goto out_unlock; | ||
130 | } | ||
131 | |||
132 | while (!iTask.complete) { | ||
133 | iscsi_set_events(iscsilun); | ||
134 | + qemu_mutex_unlock(&iscsilun->mutex); | ||
135 | qemu_coroutine_yield(); | ||
136 | + qemu_mutex_lock(&iscsilun->mutex); | ||
137 | } | ||
138 | |||
139 | if (iTask.do_retry) { | ||
140 | @@ -XXX,XX +XXX,XX @@ retry: | ||
141 | * because the device is busy or the cmd is not | ||
142 | * supported) we pretend all blocks are allocated | ||
143 | * for backwards compatibility */ | ||
144 | - goto out; | ||
145 | + goto out_unlock; | ||
146 | } | ||
147 | |||
148 | lbas = scsi_datain_unmarshall(iTask.task); | ||
149 | if (lbas == NULL) { | ||
150 | ret = -EIO; | ||
151 | - goto out; | ||
152 | + goto out_unlock; | ||
153 | } | ||
154 | |||
155 | lbasd = &lbas->descriptors[0]; | ||
156 | |||
157 | if (sector_qemu2lun(sector_num, iscsilun) != lbasd->lba) { | ||
158 | ret = -EIO; | ||
159 | - goto out; | ||
160 | + goto out_unlock; | ||
161 | } | ||
162 | |||
163 | *pnum = sector_lun2qemu(lbasd->num_blocks, iscsilun); | ||
164 | @@ -XXX,XX +XXX,XX @@ retry: | ||
165 | if (*pnum > nb_sectors) { | ||
166 | *pnum = nb_sectors; | ||
167 | } | ||
168 | +out_unlock: | ||
169 | + qemu_mutex_unlock(&iscsilun->mutex); | ||
170 | out: | ||
171 | if (iTask.task != NULL) { | ||
172 | scsi_free_scsi_task(iTask.task); | ||
173 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn iscsi_co_readv(BlockDriverState *bs, | ||
174 | num_sectors = sector_qemu2lun(nb_sectors, iscsilun); | ||
175 | |||
176 | iscsi_co_init_iscsitask(iscsilun, &iTask); | ||
177 | + qemu_mutex_lock(&iscsilun->mutex); | ||
178 | retry: | ||
179 | if (iscsilun->use_16_for_rw) { | ||
180 | #if LIBISCSI_API_VERSION >= (20160603) | ||
181 | @@ -XXX,XX +XXX,XX @@ retry: | ||
182 | #endif | ||
183 | while (!iTask.complete) { | ||
184 | iscsi_set_events(iscsilun); | ||
185 | + qemu_mutex_unlock(&iscsilun->mutex); | ||
186 | qemu_coroutine_yield(); | ||
187 | + qemu_mutex_lock(&iscsilun->mutex); | ||
188 | } | ||
189 | |||
190 | if (iTask.task != NULL) { | ||
191 | @@ -XXX,XX +XXX,XX @@ retry: | ||
192 | iTask.complete = 0; | ||
193 | goto retry; | ||
194 | } | ||
195 | + qemu_mutex_unlock(&iscsilun->mutex); | ||
196 | |||
197 | if (iTask.status != SCSI_STATUS_GOOD) { | ||
198 | return iTask.err_code; | ||
199 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn iscsi_co_flush(BlockDriverState *bs) | ||
200 | struct IscsiTask iTask; | ||
201 | |||
202 | iscsi_co_init_iscsitask(iscsilun, &iTask); | ||
203 | + qemu_mutex_lock(&iscsilun->mutex); | ||
204 | retry: | ||
205 | if (iscsi_synchronizecache10_task(iscsilun->iscsi, iscsilun->lun, 0, 0, 0, | ||
206 | 0, iscsi_co_generic_cb, &iTask) == NULL) { | ||
207 | @@ -XXX,XX +XXX,XX @@ retry: | ||
208 | |||
209 | while (!iTask.complete) { | ||
210 | iscsi_set_events(iscsilun); | ||
211 | + qemu_mutex_unlock(&iscsilun->mutex); | ||
212 | qemu_coroutine_yield(); | ||
213 | + qemu_mutex_lock(&iscsilun->mutex); | ||
214 | } | ||
215 | |||
216 | if (iTask.task != NULL) { | ||
217 | @@ -XXX,XX +XXX,XX @@ retry: | ||
218 | iTask.complete = 0; | ||
219 | goto retry; | ||
220 | } | ||
221 | + qemu_mutex_unlock(&iscsilun->mutex); | ||
222 | |||
223 | if (iTask.status != SCSI_STATUS_GOOD) { | ||
224 | return iTask.err_code; | ||
225 | @@ -XXX,XX +XXX,XX @@ retry: | ||
226 | } | 116 | } |
227 | 117 | ||
228 | #ifdef __linux__ | 118 | -static void coroutine_fn blkio_co_io_unplug(BlockDriverState *bs) |
229 | +/* Called (via iscsi_service) with QemuMutex held. */ | 119 | -{ |
230 | static void | 120 | - BDRVBlkioState *s = bs->opaque; |
231 | iscsi_aio_ioctl_cb(struct iscsi_context *iscsi, int status, | 121 | - |
232 | void *command_data, void *opaque) | 122 | - WITH_QEMU_LOCK_GUARD(&s->blkio_lock) { |
233 | @@ -XXX,XX +XXX,XX @@ static BlockAIOCB *iscsi_aio_ioctl(BlockDriverState *bs, | 123 | - blkio_submit_io(bs); |
234 | acb->task->expxferlen = acb->ioh->dxfer_len; | 124 | - } |
235 | 125 | -} | |
236 | data.size = 0; | 126 | - |
237 | + qemu_mutex_lock(&iscsilun->mutex); | 127 | typedef enum { |
238 | if (acb->task->xfer_dir == SCSI_XFER_WRITE) { | 128 | BMRR_OK, |
239 | if (acb->ioh->iovec_count == 0) { | 129 | BMRR_SKIP, |
240 | data.data = acb->ioh->dxferp; | 130 | @@ -XXX,XX +XXX,XX @@ static void blkio_refresh_limits(BlockDriverState *bs, Error **errp) |
241 | @@ -XXX,XX +XXX,XX @@ static BlockAIOCB *iscsi_aio_ioctl(BlockDriverState *bs, | 131 | .bdrv_co_pwritev = blkio_co_pwritev, \ |
242 | iscsi_aio_ioctl_cb, | 132 | .bdrv_co_flush_to_disk = blkio_co_flush, \ |
243 | (data.size > 0) ? &data : NULL, | 133 | .bdrv_co_pwrite_zeroes = blkio_co_pwrite_zeroes, \ |
244 | acb) != 0) { | 134 | - .bdrv_co_io_unplug = blkio_co_io_unplug, \ |
245 | + qemu_mutex_unlock(&iscsilun->mutex); | 135 | .bdrv_refresh_limits = blkio_refresh_limits, \ |
246 | scsi_free_scsi_task(acb->task); | 136 | .bdrv_register_buf = blkio_register_buf, \ |
247 | qemu_aio_unref(acb); | 137 | .bdrv_unregister_buf = blkio_unregister_buf, \ |
248 | return NULL; | ||
249 | @@ -XXX,XX +XXX,XX @@ static BlockAIOCB *iscsi_aio_ioctl(BlockDriverState *bs, | ||
250 | } | ||
251 | |||
252 | iscsi_set_events(iscsilun); | ||
253 | + qemu_mutex_unlock(&iscsilun->mutex); | ||
254 | |||
255 | return &acb->common; | ||
256 | } | ||
257 | @@ -XXX,XX +XXX,XX @@ coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int count) | ||
258 | IscsiLun *iscsilun = bs->opaque; | ||
259 | struct IscsiTask iTask; | ||
260 | struct unmap_list list; | ||
261 | + int r = 0; | ||
262 | |||
263 | if (!is_byte_request_lun_aligned(offset, count, iscsilun)) { | ||
264 | return -ENOTSUP; | ||
265 | @@ -XXX,XX +XXX,XX @@ coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int count) | ||
266 | list.num = count / iscsilun->block_size; | ||
267 | |||
268 | iscsi_co_init_iscsitask(iscsilun, &iTask); | ||
269 | + qemu_mutex_lock(&iscsilun->mutex); | ||
270 | retry: | ||
271 | if (iscsi_unmap_task(iscsilun->iscsi, iscsilun->lun, 0, 0, &list, 1, | ||
272 | iscsi_co_generic_cb, &iTask) == NULL) { | ||
273 | - return -ENOMEM; | ||
274 | + r = -ENOMEM; | ||
275 | + goto out_unlock; | ||
276 | } | ||
277 | |||
278 | while (!iTask.complete) { | ||
279 | iscsi_set_events(iscsilun); | ||
280 | + qemu_mutex_unlock(&iscsilun->mutex); | ||
281 | qemu_coroutine_yield(); | ||
282 | + qemu_mutex_lock(&iscsilun->mutex); | ||
283 | } | ||
284 | |||
285 | if (iTask.task != NULL) { | ||
286 | @@ -XXX,XX +XXX,XX @@ retry: | ||
287 | /* the target might fail with a check condition if it | ||
288 | is not happy with the alignment of the UNMAP request | ||
289 | we silently fail in this case */ | ||
290 | - return 0; | ||
291 | + goto out_unlock; | ||
292 | } | ||
293 | |||
294 | if (iTask.status != SCSI_STATUS_GOOD) { | ||
295 | - return iTask.err_code; | ||
296 | + r = iTask.err_code; | ||
297 | + goto out_unlock; | ||
298 | } | ||
299 | |||
300 | iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS, | ||
301 | count >> BDRV_SECTOR_BITS); | ||
302 | |||
303 | - return 0; | ||
304 | +out_unlock: | ||
305 | + qemu_mutex_unlock(&iscsilun->mutex); | ||
306 | + return r; | ||
307 | } | ||
308 | |||
309 | static int | ||
310 | @@ -XXX,XX +XXX,XX @@ coroutine_fn iscsi_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, | ||
311 | uint64_t lba; | ||
312 | uint32_t nb_blocks; | ||
313 | bool use_16_for_ws = iscsilun->use_16_for_rw; | ||
314 | + int r = 0; | ||
315 | |||
316 | if (!is_byte_request_lun_aligned(offset, count, iscsilun)) { | ||
317 | return -ENOTSUP; | ||
318 | @@ -XXX,XX +XXX,XX @@ coroutine_fn iscsi_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, | ||
319 | } | ||
320 | } | ||
321 | |||
322 | + qemu_mutex_lock(&iscsilun->mutex); | ||
323 | iscsi_co_init_iscsitask(iscsilun, &iTask); | ||
324 | retry: | ||
325 | if (use_16_for_ws) { | ||
326 | @@ -XXX,XX +XXX,XX @@ retry: | ||
327 | |||
328 | while (!iTask.complete) { | ||
329 | iscsi_set_events(iscsilun); | ||
330 | + qemu_mutex_unlock(&iscsilun->mutex); | ||
331 | qemu_coroutine_yield(); | ||
332 | + qemu_mutex_lock(&iscsilun->mutex); | ||
333 | } | ||
334 | |||
335 | if (iTask.status == SCSI_STATUS_CHECK_CONDITION && | ||
336 | @@ -XXX,XX +XXX,XX @@ retry: | ||
337 | /* WRITE SAME is not supported by the target */ | ||
338 | iscsilun->has_write_same = false; | ||
339 | scsi_free_scsi_task(iTask.task); | ||
340 | - return -ENOTSUP; | ||
341 | + r = -ENOTSUP; | ||
342 | + goto out_unlock; | ||
343 | } | ||
344 | |||
345 | if (iTask.task != NULL) { | ||
346 | @@ -XXX,XX +XXX,XX @@ retry: | ||
347 | if (iTask.status != SCSI_STATUS_GOOD) { | ||
348 | iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS, | ||
349 | count >> BDRV_SECTOR_BITS); | ||
350 | - return iTask.err_code; | ||
351 | + r = iTask.err_code; | ||
352 | + goto out_unlock; | ||
353 | } | ||
354 | |||
355 | if (flags & BDRV_REQ_MAY_UNMAP) { | ||
356 | @@ -XXX,XX +XXX,XX @@ retry: | ||
357 | count >> BDRV_SECTOR_BITS); | ||
358 | } | ||
359 | |||
360 | - return 0; | ||
361 | +out_unlock: | ||
362 | + qemu_mutex_unlock(&iscsilun->mutex); | ||
363 | + return r; | ||
364 | } | ||
365 | |||
366 | static void apply_chap(struct iscsi_context *iscsi, QemuOpts *opts, | ||
367 | @@ -XXX,XX +XXX,XX @@ static void iscsi_nop_timed_event(void *opaque) | ||
368 | { | ||
369 | IscsiLun *iscsilun = opaque; | ||
370 | |||
371 | - aio_context_acquire(iscsilun->aio_context); | ||
372 | + qemu_mutex_lock(&iscsilun->mutex); | ||
373 | if (iscsi_get_nops_in_flight(iscsilun->iscsi) >= MAX_NOP_FAILURES) { | ||
374 | error_report("iSCSI: NOP timeout. Reconnecting..."); | ||
375 | iscsilun->request_timed_out = true; | ||
376 | @@ -XXX,XX +XXX,XX @@ static void iscsi_nop_timed_event(void *opaque) | ||
377 | iscsi_set_events(iscsilun); | ||
378 | |||
379 | out: | ||
380 | - aio_context_release(iscsilun->aio_context); | ||
381 | + qemu_mutex_unlock(&iscsilun->mutex); | ||
382 | } | ||
383 | |||
384 | static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp) | ||
385 | @@ -XXX,XX +XXX,XX @@ static int iscsi_open(BlockDriverState *bs, QDict *options, int flags, | ||
386 | scsi_free_scsi_task(task); | ||
387 | task = NULL; | ||
388 | |||
389 | + qemu_mutex_init(&iscsilun->mutex); | ||
390 | iscsi_attach_aio_context(bs, iscsilun->aio_context); | ||
391 | |||
392 | /* Guess the internal cluster (page) size of the iscsi target by the means | ||
393 | @@ -XXX,XX +XXX,XX @@ static void iscsi_close(BlockDriverState *bs) | ||
394 | iscsi_destroy_context(iscsi); | ||
395 | g_free(iscsilun->zeroblock); | ||
396 | iscsi_allocmap_free(iscsilun); | ||
397 | + qemu_mutex_destroy(&iscsilun->mutex); | ||
398 | memset(iscsilun, 0, sizeof(IscsiLun)); | ||
399 | } | ||
400 | |||
401 | -- | 138 | -- |
402 | 2.9.3 | 139 | 2.40.1 |
Stop using the .bdrv_co_io_plug() API because it is not multi-queue
block layer friendly. Use the new blk_io_plug_call() API to batch I/O
submission instead.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Acked-by: Kevin Wolf <kwolf@redhat.com>
Message-id: 20230530180959.1108766-5-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/block/raw-aio.h |  7 -------
 block/file-posix.c      | 10 ----------
 block/io_uring.c        | 44 ++++++++++++++++------------------------
 block/trace-events      |  5 ++---
 4 files changed, 19 insertions(+), 47 deletions(-)

diff --git a/include/block/raw-aio.h b/include/block/raw-aio.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/raw-aio.h
+++ b/include/block/raw-aio.h
@@ -XXX,XX +XXX,XX @@ int coroutine_fn luring_co_submit(BlockDriverState *bs, int fd, uint64_t offset,
                                   QEMUIOVector *qiov, int type);
 void luring_detach_aio_context(LuringState *s, AioContext *old_context);
 void luring_attach_aio_context(LuringState *s, AioContext *new_context);
-
-/*
- * luring_io_plug/unplug work in the thread's current AioContext, therefore the
- * caller must ensure that they are paired in the same IOThread.
- */
-void luring_io_plug(void);
-void luring_io_unplug(void);
 #endif
 
 #ifdef _WIN32
diff --git a/block/file-posix.c b/block/file-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/block/file-posix.c
+++ b/block/file-posix.c
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn raw_co_io_plug(BlockDriverState *bs)
         laio_io_plug();
     }
 #endif
-#ifdef CONFIG_LINUX_IO_URING
-    if (s->use_linux_io_uring) {
-        luring_io_plug();
-    }
-#endif
 }
 
 static void coroutine_fn raw_co_io_unplug(BlockDriverState *bs)
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn raw_co_io_unplug(BlockDriverState *bs)
         laio_io_unplug(s->aio_max_batch);
     }
 #endif
-#ifdef CONFIG_LINUX_IO_URING
-    if (s->use_linux_io_uring) {
-        luring_io_unplug();
-    }
-#endif
 }
 
 static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs)
diff --git a/block/io_uring.c b/block/io_uring.c
index XXXXXXX..XXXXXXX 100644
--- a/block/io_uring.c
+++ b/block/io_uring.c
@@ -XXX,XX +XXX,XX @@
 #include "block/raw-aio.h"
 #include "qemu/coroutine.h"
 #include "qapi/error.h"
+#include "sysemu/block-backend.h"
 #include "trace.h"
 
 /* Only used for assertions. */
@@ -XXX,XX +XXX,XX @@ typedef struct LuringAIOCB {
 } LuringAIOCB;
 
 typedef struct LuringQueue {
-    int plugged;
     unsigned int in_queue;
     unsigned int in_flight;
     bool blocked;
@@ -XXX,XX +XXX,XX @@ static void luring_process_completions_and_submit(LuringState *s)
 {
     luring_process_completions(s);
 
-    if (!s->io_q.plugged && s->io_q.in_queue > 0) {
+    if (s->io_q.in_queue > 0) {
         ioq_submit(s);
     }
 }
@@ -XXX,XX +XXX,XX @@ static void qemu_luring_poll_ready(void *opaque)
 static void ioq_init(LuringQueue *io_q)
 {
     QSIMPLEQ_INIT(&io_q->submit_queue);
-    io_q->plugged = 0;
     io_q->in_queue = 0;
     io_q->in_flight = 0;
     io_q->blocked = false;
 }
 
-void luring_io_plug(void)
+static void luring_unplug_fn(void *opaque)
 {
-    AioContext *ctx = qemu_get_current_aio_context();
-    LuringState *s = aio_get_linux_io_uring(ctx);
-    trace_luring_io_plug(s);
-    s->io_q.plugged++;
-}
-
-void luring_io_unplug(void)
-{
-    AioContext *ctx = qemu_get_current_aio_context();
-    LuringState *s = aio_get_linux_io_uring(ctx);
-    assert(s->io_q.plugged);
-    trace_luring_io_unplug(s, s->io_q.blocked, s->io_q.plugged,
-                           s->io_q.in_queue, s->io_q.in_flight);
-    if (--s->io_q.plugged == 0 &&
-        !s->io_q.blocked && s->io_q.in_queue > 0) {
+    LuringState *s = opaque;
+    trace_luring_unplug_fn(s, s->io_q.blocked, s->io_q.in_queue,
+                           s->io_q.in_flight);
+    if (!s->io_q.blocked && s->io_q.in_queue > 0) {
         ioq_submit(s);
     }
 }
@@ -XXX,XX +XXX,XX @@ static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s,
 
     QSIMPLEQ_INSERT_TAIL(&s->io_q.submit_queue, luringcb, next);
     s->io_q.in_queue++;
-    trace_luring_do_submit(s, s->io_q.blocked, s->io_q.plugged,
-                           s->io_q.in_queue, s->io_q.in_flight);
-    if (!s->io_q.blocked &&
-        (!s->io_q.plugged ||
-         s->io_q.in_flight + s->io_q.in_queue >= MAX_ENTRIES)) {
-        ret = ioq_submit(s);
-        trace_luring_do_submit_done(s, ret);
-        return ret;
+    trace_luring_do_submit(s, s->io_q.blocked, s->io_q.in_queue,
+                           s->io_q.in_flight);
+    if (!s->io_q.blocked) {
+        if (s->io_q.in_flight + s->io_q.in_queue >= MAX_ENTRIES) {
+            ret = ioq_submit(s);
+            trace_luring_do_submit_done(s, ret);
+            return ret;
+        }
+
+        blk_io_plug_call(luring_unplug_fn, s);
     }
     return 0;
 }
diff --git a/block/trace-events b/block/trace-events
index XXXXXXX..XXXXXXX 100644
--- a/block/trace-events
+++ b/block/trace-events
@@ -XXX,XX +XXX,XX @@ file_paio_submit(void *acb, void *opaque, int64_t offset, int count, int type) "
 # io_uring.c
 luring_init_state(void *s, size_t size) "s %p size %zu"
 luring_cleanup_state(void *s) "%p freed"
-luring_io_plug(void *s) "LuringState %p plug"
-luring_io_unplug(void *s, int blocked, int plugged, int queued, int inflight) "LuringState %p blocked %d plugged %d queued %d inflight %d"
-luring_do_submit(void *s, int blocked, int plugged, int queued, int inflight) "LuringState %p blocked %d plugged %d queued %d inflight %d"
+luring_unplug_fn(void *s, int blocked, int queued, int inflight) "LuringState %p blocked %d queued %d inflight %d"
+luring_do_submit(void *s, int blocked, int queued, int inflight) "LuringState %p blocked %d queued %d inflight %d"
 luring_do_submit_done(void *s, int ret) "LuringState %p submitted to kernel %d"
 luring_co_submit(void *bs, void *s, void *luringcb, int fd, uint64_t offset, size_t nbytes, int type) "bs %p s %p luringcb %p fd %d offset %" PRId64 " nbytes %zd type %d"
 luring_process_completion(void *s, void *aiocb, int ret) "LuringState %p luringcb %p ret %d"
-- 
2.40.1
Stop using the .bdrv_co_io_plug() API because it is not multi-queue
block layer friendly. Use the new blk_io_plug_call() API to batch I/O
submission instead.

Note that a dev_max_batch check is dropped in laio_io_unplug() because
the semantics of unplug_fn() are different from .bdrv_co_io_unplug():
1. unplug_fn() is only called when the last blk_io_unplug() call occurs,
   not every time blk_io_unplug() is called.
2. unplug_fn() is per-thread, not per-BlockDriverState, so there is no
   way to get per-BlockDriverState fields like dev_max_batch.

Therefore this condition cannot be moved to laio_unplug_fn(). It is not
obvious that this condition affects performance in practice, so I am
removing it instead of trying to come up with a more complex mechanism
to preserve the condition.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Acked-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Message-id: 20230530180959.1108766-6-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
24 | include/block/raw-aio.h | 7 ------- | ||
25 | block/file-posix.c | 28 ---------------------------- | ||
26 | block/linux-aio.c | 41 +++++++++++------------------------------ | ||
27 | 3 files changed, 11 insertions(+), 65 deletions(-) | ||
28 | |||
29 | diff --git a/include/block/raw-aio.h b/include/block/raw-aio.h | ||
30 | index XXXXXXX..XXXXXXX 100644 | ||
31 | --- a/include/block/raw-aio.h | ||
32 | +++ b/include/block/raw-aio.h | ||
33 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn laio_co_submit(int fd, uint64_t offset, QEMUIOVector *qiov, | ||
34 | |||
35 | void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context); | ||
36 | void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context); | ||
37 | - | ||
38 | -/* | ||
39 | - * laio_io_plug/unplug work in the thread's current AioContext, therefore the | ||
40 | - * caller must ensure that they are paired in the same IOThread. | ||
41 | - */ | ||
42 | -void laio_io_plug(void); | ||
43 | -void laio_io_unplug(uint64_t dev_max_batch); | ||
44 | #endif | ||
45 | /* io_uring.c - Linux io_uring implementation */ | ||
46 | #ifdef CONFIG_LINUX_IO_URING | ||
47 | diff --git a/block/file-posix.c b/block/file-posix.c | ||
48 | index XXXXXXX..XXXXXXX 100644 | ||
49 | --- a/block/file-posix.c | ||
50 | +++ b/block/file-posix.c | ||
51 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, int64_t offset, | ||
52 | return raw_co_prw(bs, offset, bytes, qiov, QEMU_AIO_WRITE); | ||
53 | } | ||
54 | |||
55 | -static void coroutine_fn raw_co_io_plug(BlockDriverState *bs) | ||
56 | -{ | ||
57 | - BDRVRawState __attribute__((unused)) *s = bs->opaque; | ||
58 | -#ifdef CONFIG_LINUX_AIO | ||
59 | - if (s->use_linux_aio) { | ||
60 | - laio_io_plug(); | ||
61 | - } | ||
62 | -#endif | ||
63 | -} | ||
64 | - | ||
65 | -static void coroutine_fn raw_co_io_unplug(BlockDriverState *bs) | ||
66 | -{ | ||
67 | - BDRVRawState __attribute__((unused)) *s = bs->opaque; | ||
68 | -#ifdef CONFIG_LINUX_AIO | ||
69 | - if (s->use_linux_aio) { | ||
70 | - laio_io_unplug(s->aio_max_batch); | ||
71 | - } | ||
72 | -#endif | ||
73 | -} | ||
74 | - | ||
75 | static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs) | ||
76 | { | ||
77 | BDRVRawState *s = bs->opaque; | ||
78 | @@ -XXX,XX +XXX,XX @@ BlockDriver bdrv_file = { | ||
79 | .bdrv_co_copy_range_from = raw_co_copy_range_from, | ||
80 | .bdrv_co_copy_range_to = raw_co_copy_range_to, | ||
81 | .bdrv_refresh_limits = raw_refresh_limits, | ||
82 | - .bdrv_co_io_plug = raw_co_io_plug, | ||
83 | - .bdrv_co_io_unplug = raw_co_io_unplug, | ||
84 | .bdrv_attach_aio_context = raw_aio_attach_aio_context, | ||
85 | |||
86 | .bdrv_co_truncate = raw_co_truncate, | ||
87 | @@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_host_device = { | ||
88 | .bdrv_co_copy_range_from = raw_co_copy_range_from, | ||
89 | .bdrv_co_copy_range_to = raw_co_copy_range_to, | ||
90 | .bdrv_refresh_limits = raw_refresh_limits, | ||
91 | - .bdrv_co_io_plug = raw_co_io_plug, | ||
92 | - .bdrv_co_io_unplug = raw_co_io_unplug, | ||
93 | .bdrv_attach_aio_context = raw_aio_attach_aio_context, | ||
94 | |||
95 | .bdrv_co_truncate = raw_co_truncate, | ||
96 | @@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_host_cdrom = { | ||
97 | .bdrv_co_pwritev = raw_co_pwritev, | ||
98 | .bdrv_co_flush_to_disk = raw_co_flush_to_disk, | ||
99 | .bdrv_refresh_limits = cdrom_refresh_limits, | ||
100 | - .bdrv_co_io_plug = raw_co_io_plug, | ||
101 | - .bdrv_co_io_unplug = raw_co_io_unplug, | ||
102 | .bdrv_attach_aio_context = raw_aio_attach_aio_context, | ||
103 | |||
104 | .bdrv_co_truncate = raw_co_truncate, | ||
105 | @@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_host_cdrom = { | ||
106 | .bdrv_co_pwritev = raw_co_pwritev, | ||
107 | .bdrv_co_flush_to_disk = raw_co_flush_to_disk, | ||
108 | .bdrv_refresh_limits = cdrom_refresh_limits, | ||
109 | - .bdrv_co_io_plug = raw_co_io_plug, | ||
110 | - .bdrv_co_io_unplug = raw_co_io_unplug, | ||
111 | .bdrv_attach_aio_context = raw_aio_attach_aio_context, | ||
112 | |||
113 | .bdrv_co_truncate = raw_co_truncate, | ||
114 | diff --git a/block/linux-aio.c b/block/linux-aio.c | ||
115 | index XXXXXXX..XXXXXXX 100644 | ||
116 | --- a/block/linux-aio.c | ||
117 | +++ b/block/linux-aio.c | ||
118 | @@ -XXX,XX +XXX,XX @@ | ||
119 | #include "qemu/event_notifier.h" | ||
120 | #include "qemu/coroutine.h" | ||
121 | #include "qapi/error.h" | ||
122 | +#include "sysemu/block-backend.h" | ||
123 | |||
124 | /* Only used for assertions. */ | ||
125 | #include "qemu/coroutine_int.h" | ||
126 | @@ -XXX,XX +XXX,XX @@ struct qemu_laiocb { | ||
127 | }; | ||
128 | |||
129 | typedef struct { | ||
130 | - int plugged; | ||
131 | unsigned int in_queue; | ||
132 | unsigned int in_flight; | ||
133 | bool blocked; | ||
134 | @@ -XXX,XX +XXX,XX @@ static void qemu_laio_process_completions_and_submit(LinuxAioState *s) | ||
135 | { | ||
136 | qemu_laio_process_completions(s); | ||
137 | |||
138 | - if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) { | ||
139 | + if (!QSIMPLEQ_EMPTY(&s->io_q.pending)) { | ||
140 | ioq_submit(s); | ||
141 | } | ||
142 | } | ||
143 | @@ -XXX,XX +XXX,XX @@ static void qemu_laio_poll_ready(EventNotifier *opaque) | ||
144 | static void ioq_init(LaioQueue *io_q) | ||
145 | { | ||
146 | QSIMPLEQ_INIT(&io_q->pending); | ||
147 | - io_q->plugged = 0; | ||
148 | io_q->in_queue = 0; | ||
149 | io_q->in_flight = 0; | ||
150 | io_q->blocked = false; | ||
151 | @@ -XXX,XX +XXX,XX @@ static uint64_t laio_max_batch(LinuxAioState *s, uint64_t dev_max_batch) | ||
152 | return max_batch; | ||
153 | } | ||
154 | |||
155 | -void laio_io_plug(void) | ||
156 | +static void laio_unplug_fn(void *opaque) | ||
157 | { | ||
158 | - AioContext *ctx = qemu_get_current_aio_context(); | ||
159 | - LinuxAioState *s = aio_get_linux_aio(ctx); | ||
160 | + LinuxAioState *s = opaque; | ||
161 | |||
162 | - s->io_q.plugged++; | ||
163 | -} | ||
164 | - | ||
165 | -void laio_io_unplug(uint64_t dev_max_batch) | ||
166 | -{ | ||
167 | - AioContext *ctx = qemu_get_current_aio_context(); | ||
168 | - LinuxAioState *s = aio_get_linux_aio(ctx); | ||
169 | - | ||
170 | - assert(s->io_q.plugged); | ||
171 | - s->io_q.plugged--; | ||
172 | - | ||
173 | - /* | ||
174 | - * Why max batch checking is performed here: | ||
175 | - * Another BDS may have queued requests with a higher dev_max_batch and | ||
176 | - * therefore in_queue could now exceed our dev_max_batch. Re-check the max | ||
177 | - * batch so we can honor our device's dev_max_batch. | ||
178 | - */ | ||
179 | - if (s->io_q.in_queue >= laio_max_batch(s, dev_max_batch) || | ||
180 | - (!s->io_q.plugged && | ||
181 | - !s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending))) { | ||
182 | + if (!s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending)) { | ||
183 | ioq_submit(s); | ||
184 | } | ||
185 | } | ||
186 | @@ -XXX,XX +XXX,XX @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset, | ||
187 | |||
188 | QSIMPLEQ_INSERT_TAIL(&s->io_q.pending, laiocb, next); | ||
189 | s->io_q.in_queue++; | ||
190 | - if (!s->io_q.blocked && | ||
191 | - (!s->io_q.plugged || | ||
192 | - s->io_q.in_queue >= laio_max_batch(s, dev_max_batch))) { | ||
193 | - ioq_submit(s); | ||
194 | + if (!s->io_q.blocked) { | ||
195 | + if (s->io_q.in_queue >= laio_max_batch(s, dev_max_batch)) { | ||
196 | + ioq_submit(s); | ||
197 | + } else { | ||
198 | + blk_io_plug_call(laio_unplug_fn, s); | ||
199 | + } | ||
200 | } | ||
201 | |||
202 | return 0; | ||
203 | -- | ||
204 | 2.40.1
1 | From: Paolo Bonzini <pbonzini@redhat.com> | 1 | No block driver implements .bdrv_co_io_plug() anymore. Get rid of the |
---|---|---|---|
2 | function pointers. | ||
2 | 3 | ||
3 | Now that all bottom halves and callbacks take care of taking the | 4 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
4 | AioContext lock, we can migrate some users away from it and to a | 5 | Reviewed-by: Eric Blake <eblake@redhat.com> |
5 | specific QemuMutex or CoMutex. | 6 | Reviewed-by: Stefano Garzarella <sgarzare@redhat.com> |
6 | 7 | Acked-by: Kevin Wolf <kwolf@redhat.com> | |
7 | Protect libnfs calls with a QemuMutex. Callbacks are invoked | 8 | Message-id: 20230530180959.1108766-7-stefanha@redhat.com |
8 | using bottom halves, so we don't even have to drop it around | ||
9 | callback invocations. | ||
10 | |||
11 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
12 | Message-id: 20170222180725.28611-3-pbonzini@redhat.com | ||
13 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 9 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
14 | --- | 10 | --- |
15 | block/nfs.c | 23 +++++++++++++++++++---- | 11 | include/block/block-io.h | 3 --- |
16 | 1 file changed, 19 insertions(+), 4 deletions(-) | 12 | include/block/block_int-common.h | 11 ---------- |
13 | block/io.c | 37 -------------------------------- | ||
14 | 3 files changed, 51 deletions(-) | ||
17 | 15 | ||
18 | diff --git a/block/nfs.c b/block/nfs.c | 16 | diff --git a/include/block/block-io.h b/include/block/block-io.h |
19 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
20 | --- a/block/nfs.c | 18 | --- a/include/block/block-io.h |
21 | +++ b/block/nfs.c | 19 | +++ b/include/block/block-io.h |
22 | @@ -XXX,XX +XXX,XX @@ typedef struct NFSClient { | 20 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn bdrv_co_leave(BlockDriverState *bs, AioContext *old_ctx); |
23 | int events; | 21 | |
24 | bool has_zero_init; | 22 | AioContext *child_of_bds_get_parent_aio_context(BdrvChild *c); |
25 | AioContext *aio_context; | 23 | |
26 | + QemuMutex mutex; | 24 | -void coroutine_fn GRAPH_RDLOCK bdrv_co_io_plug(BlockDriverState *bs); |
27 | blkcnt_t st_blocks; | 25 | -void coroutine_fn GRAPH_RDLOCK bdrv_co_io_unplug(BlockDriverState *bs); |
28 | bool cache_used; | 26 | - |
29 | NFSServer *server; | 27 | bool coroutine_fn GRAPH_RDLOCK |
30 | @@ -XXX,XX +XXX,XX @@ static void nfs_parse_filename(const char *filename, QDict *options, | 28 | bdrv_co_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name, |
31 | static void nfs_process_read(void *arg); | 29 | uint32_t granularity, Error **errp); |
32 | static void nfs_process_write(void *arg); | 30 | diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h |
33 | 31 | index XXXXXXX..XXXXXXX 100644 | |
34 | +/* Called with QemuMutex held. */ | 32 | --- a/include/block/block_int-common.h |
35 | static void nfs_set_events(NFSClient *client) | 33 | +++ b/include/block/block_int-common.h |
36 | { | 34 | @@ -XXX,XX +XXX,XX @@ struct BlockDriver { |
37 | int ev = nfs_which_events(client->context); | 35 | void coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_debug_event)( |
38 | @@ -XXX,XX +XXX,XX @@ static void nfs_process_read(void *arg) | 36 | BlockDriverState *bs, BlkdebugEvent event); |
39 | { | 37 | |
40 | NFSClient *client = arg; | 38 | - /* io queue for linux-aio */ |
41 | 39 | - void coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_io_plug)(BlockDriverState *bs); | |
42 | - aio_context_acquire(client->aio_context); | 40 | - void coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_io_unplug)( |
43 | + qemu_mutex_lock(&client->mutex); | 41 | - BlockDriverState *bs); |
44 | nfs_service(client->context, POLLIN); | 42 | - |
45 | nfs_set_events(client); | 43 | bool (*bdrv_supports_persistent_dirty_bitmap)(BlockDriverState *bs); |
46 | - aio_context_release(client->aio_context); | 44 | |
47 | + qemu_mutex_unlock(&client->mutex); | 45 | bool coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_can_store_new_dirty_bitmap)( |
46 | @@ -XXX,XX +XXX,XX @@ struct BlockDriverState { | ||
47 | unsigned int in_flight; | ||
48 | unsigned int serialising_in_flight; | ||
49 | |||
50 | - /* | ||
51 | - * counter for nested bdrv_io_plug. | ||
52 | - * Accessed with atomic ops. | ||
53 | - */ | ||
54 | - unsigned io_plugged; | ||
55 | - | ||
56 | /* do we need to tell the quest if we have a volatile write cache? */ | ||
57 | int enable_write_cache; | ||
58 | |||
59 | diff --git a/block/io.c b/block/io.c | ||
60 | index XXXXXXX..XXXXXXX 100644 | ||
61 | --- a/block/io.c | ||
62 | +++ b/block/io.c | ||
63 | @@ -XXX,XX +XXX,XX @@ void *qemu_try_blockalign0(BlockDriverState *bs, size_t size) | ||
64 | return mem; | ||
48 | } | 65 | } |
49 | 66 | ||
50 | static void nfs_process_write(void *arg) | 67 | -void coroutine_fn bdrv_co_io_plug(BlockDriverState *bs) |
51 | { | 68 | -{ |
52 | NFSClient *client = arg; | 69 | - BdrvChild *child; |
53 | 70 | - IO_CODE(); | |
54 | - aio_context_acquire(client->aio_context); | 71 | - assert_bdrv_graph_readable(); |
55 | + qemu_mutex_lock(&client->mutex); | 72 | - |
56 | nfs_service(client->context, POLLOUT); | 73 | - QLIST_FOREACH(child, &bs->children, next) { |
57 | nfs_set_events(client); | 74 | - bdrv_co_io_plug(child->bs); |
58 | - aio_context_release(client->aio_context); | 75 | - } |
59 | + qemu_mutex_unlock(&client->mutex); | 76 | - |
60 | } | 77 | - if (qatomic_fetch_inc(&bs->io_plugged) == 0) { |
61 | 78 | - BlockDriver *drv = bs->drv; | |
62 | static void nfs_co_init_task(BlockDriverState *bs, NFSRPC *task) | 79 | - if (drv && drv->bdrv_co_io_plug) { |
63 | @@ -XXX,XX +XXX,XX @@ static void nfs_co_generic_bh_cb(void *opaque) | 80 | - drv->bdrv_co_io_plug(bs); |
64 | aio_co_wake(task->co); | 81 | - } |
65 | } | 82 | - } |
66 | 83 | -} | |
67 | +/* Called (via nfs_service) with QemuMutex held. */ | 84 | - |
68 | static void | 85 | -void coroutine_fn bdrv_co_io_unplug(BlockDriverState *bs) |
69 | nfs_co_generic_cb(int ret, struct nfs_context *nfs, void *data, | 86 | -{ |
70 | void *private_data) | 87 | - BdrvChild *child; |
71 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, uint64_t offset, | 88 | - IO_CODE(); |
72 | nfs_co_init_task(bs, &task); | 89 | - assert_bdrv_graph_readable(); |
73 | task.iov = iov; | 90 | - |
74 | 91 | - assert(bs->io_plugged); | |
75 | + qemu_mutex_lock(&client->mutex); | 92 | - if (qatomic_fetch_dec(&bs->io_plugged) == 1) { |
76 | if (nfs_pread_async(client->context, client->fh, | 93 | - BlockDriver *drv = bs->drv; |
77 | offset, bytes, nfs_co_generic_cb, &task) != 0) { | 94 | - if (drv && drv->bdrv_co_io_unplug) { |
78 | + qemu_mutex_unlock(&client->mutex); | 95 | - drv->bdrv_co_io_unplug(bs); |
79 | return -ENOMEM; | 96 | - } |
80 | } | 97 | - } |
81 | 98 | - | |
82 | nfs_set_events(client); | 99 | - QLIST_FOREACH(child, &bs->children, next) { |
83 | + qemu_mutex_unlock(&client->mutex); | 100 | - bdrv_co_io_unplug(child->bs); |
84 | while (!task.complete) { | 101 | - } |
85 | qemu_coroutine_yield(); | 102 | -} |
86 | } | 103 | - |
87 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, uint64_t offset, | 104 | /* Helper that undoes bdrv_register_buf() when it fails partway through */ |
88 | buf = iov->iov[0].iov_base; | 105 | static void GRAPH_RDLOCK |
89 | } | 106 | bdrv_register_buf_rollback(BlockDriverState *bs, void *host, size_t size, |
90 | |||
91 | + qemu_mutex_lock(&client->mutex); | ||
92 | if (nfs_pwrite_async(client->context, client->fh, | ||
93 | offset, bytes, buf, | ||
94 | nfs_co_generic_cb, &task) != 0) { | ||
95 | + qemu_mutex_unlock(&client->mutex); | ||
96 | if (my_buffer) { | ||
97 | g_free(buf); | ||
98 | } | ||
99 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, uint64_t offset, | ||
100 | } | ||
101 | |||
102 | nfs_set_events(client); | ||
103 | + qemu_mutex_unlock(&client->mutex); | ||
104 | while (!task.complete) { | ||
105 | qemu_coroutine_yield(); | ||
106 | } | ||
107 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn nfs_co_flush(BlockDriverState *bs) | ||
108 | |||
109 | nfs_co_init_task(bs, &task); | ||
110 | |||
111 | + qemu_mutex_lock(&client->mutex); | ||
112 | if (nfs_fsync_async(client->context, client->fh, nfs_co_generic_cb, | ||
113 | &task) != 0) { | ||
114 | + qemu_mutex_unlock(&client->mutex); | ||
115 | return -ENOMEM; | ||
116 | } | ||
117 | |||
118 | nfs_set_events(client); | ||
119 | + qemu_mutex_unlock(&client->mutex); | ||
120 | while (!task.complete) { | ||
121 | qemu_coroutine_yield(); | ||
122 | } | ||
123 | @@ -XXX,XX +XXX,XX @@ static void nfs_file_close(BlockDriverState *bs) | ||
124 | { | ||
125 | NFSClient *client = bs->opaque; | ||
126 | nfs_client_close(client); | ||
127 | + qemu_mutex_destroy(&client->mutex); | ||
128 | } | ||
129 | |||
130 | static NFSServer *nfs_config(QDict *options, Error **errp) | ||
131 | @@ -XXX,XX +XXX,XX @@ static int nfs_file_open(BlockDriverState *bs, QDict *options, int flags, | ||
132 | if (ret < 0) { | ||
133 | return ret; | ||
134 | } | ||
135 | + qemu_mutex_init(&client->mutex); | ||
136 | bs->total_sectors = ret; | ||
137 | ret = 0; | ||
138 | return ret; | ||
139 | @@ -XXX,XX +XXX,XX @@ static int nfs_has_zero_init(BlockDriverState *bs) | ||
140 | return client->has_zero_init; | ||
141 | } | ||
142 | |||
143 | +/* Called (via nfs_service) with QemuMutex held. */ | ||
144 | static void | ||
145 | nfs_get_allocated_file_size_cb(int ret, struct nfs_context *nfs, void *data, | ||
146 | void *private_data) | ||
147 | -- | 107 | -- |
148 | 2.9.3 | 108 | 2.40.1 |
149 | |||
1 | From: Paolo Bonzini <pbonzini@redhat.com> | 1 | From: Stefano Garzarella <sgarzare@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | nodes[id].next is written by other threads. If atomic_read is not used | 3 | Some virtio-blk drivers (e.g. virtio-blk-vhost-vdpa) supports the fd |
4 | (matching atomic_set in mcs_mutex_lock!) the compiler can optimize the | 4 | passing. Let's expose this to the user, so the management layer |
5 | whole "if" away! | 5 | can pass the file descriptor of an already opened path. |
6 | 6 | ||
7 | Reported-by: Alex Bennée <alex.bennee@linaro.org> | 7 | If the libblkio virtio-blk driver supports fd passing, let's always |
8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | 8 | use qemu_open() to open the `path`, so we can handle fd passing |
9 | Tested-by: Greg Kurz <groug@kaod.org> | 9 | from the management layer through the "/dev/fdset/N" special path. |
10 | Message-id: 20170227111726.9237-1-pbonzini@redhat.com | 10 | |
11 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> | ||
13 | Message-id: 20230530071941.8954-2-sgarzare@redhat.com | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 14 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
12 | --- | 15 | --- |
13 | tests/test-aio-multithread.c | 4 ++-- | 16 | block/blkio.c | 53 ++++++++++++++++++++++++++++++++++++++++++--------- |
14 | 1 file changed, 2 insertions(+), 2 deletions(-) | 17 | 1 file changed, 44 insertions(+), 9 deletions(-) |
15 | 18 | ||
16 | diff --git a/tests/test-aio-multithread.c b/tests/test-aio-multithread.c | 19 | diff --git a/block/blkio.c b/block/blkio.c |
17 | index XXXXXXX..XXXXXXX 100644 | 20 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/tests/test-aio-multithread.c | 21 | --- a/block/blkio.c |
19 | +++ b/tests/test-aio-multithread.c | 22 | +++ b/block/blkio.c |
20 | @@ -XXX,XX +XXX,XX @@ static void mcs_mutex_lock(void) | 23 | @@ -XXX,XX +XXX,XX @@ static int blkio_virtio_blk_common_open(BlockDriverState *bs, |
21 | static void mcs_mutex_unlock(void) | ||
22 | { | 24 | { |
23 | int next; | 25 | const char *path = qdict_get_try_str(options, "path"); |
24 | - if (nodes[id].next == -1) { | 26 | BDRVBlkioState *s = bs->opaque; |
25 | + if (atomic_read(&nodes[id].next) == -1) { | 27 | - int ret; |
26 | if (atomic_read(&mutex_head) == id && | 28 | + bool fd_supported = false; |
27 | atomic_cmpxchg(&mutex_head, id, -1) == id) { | 29 | + int fd, ret; |
28 | /* Last item in the list, exit. */ | 30 | |
29 | @@ -XXX,XX +XXX,XX @@ static void mcs_mutex_unlock(void) | 31 | if (!path) { |
32 | error_setg(errp, "missing 'path' option"); | ||
33 | return -EINVAL; | ||
30 | } | 34 | } |
31 | 35 | ||
32 | /* Wake up the next in line. */ | 36 | - ret = blkio_set_str(s->blkio, "path", path); |
33 | - next = nodes[id].next; | 37 | - qdict_del(options, "path"); |
34 | + next = atomic_read(&nodes[id].next); | 38 | - if (ret < 0) { |
35 | nodes[next].locked = 0; | 39 | - error_setg_errno(errp, -ret, "failed to set path: %s", |
36 | qemu_futex_wake(&nodes[next].locked, 1); | 40 | - blkio_get_error_msg()); |
41 | - return ret; | ||
42 | - } | ||
43 | - | ||
44 | if (!(flags & BDRV_O_NOCACHE)) { | ||
45 | error_setg(errp, "cache.direct=off is not supported"); | ||
46 | return -EINVAL; | ||
47 | } | ||
48 | + | ||
49 | + if (blkio_get_int(s->blkio, "fd", &fd) == 0) { | ||
50 | + fd_supported = true; | ||
51 | + } | ||
52 | + | ||
53 | + /* | ||
54 | + * If the libblkio driver supports fd passing, let's always use qemu_open() | ||
55 | + * to open the `path`, so we can handle fd passing from the management | ||
56 | + * layer through the "/dev/fdset/N" special path. | ||
57 | + */ | ||
58 | + if (fd_supported) { | ||
59 | + int open_flags; | ||
60 | + | ||
61 | + if (flags & BDRV_O_RDWR) { | ||
62 | + open_flags = O_RDWR; | ||
63 | + } else { | ||
64 | + open_flags = O_RDONLY; | ||
65 | + } | ||
66 | + | ||
67 | + fd = qemu_open(path, open_flags, errp); | ||
68 | + if (fd < 0) { | ||
69 | + return -EINVAL; | ||
70 | + } | ||
71 | + | ||
72 | + ret = blkio_set_int(s->blkio, "fd", fd); | ||
73 | + if (ret < 0) { | ||
74 | + error_setg_errno(errp, -ret, "failed to set fd: %s", | ||
75 | + blkio_get_error_msg()); | ||
76 | + qemu_close(fd); | ||
77 | + return ret; | ||
78 | + } | ||
79 | + } else { | ||
80 | + ret = blkio_set_str(s->blkio, "path", path); | ||
81 | + if (ret < 0) { | ||
82 | + error_setg_errno(errp, -ret, "failed to set path: %s", | ||
83 | + blkio_get_error_msg()); | ||
84 | + return ret; | ||
85 | + } | ||
86 | + } | ||
87 | + | ||
88 | + qdict_del(options, "path"); | ||
89 | + | ||
90 | return 0; | ||
37 | } | 91 | } |
92 | |||
38 | -- | 93 | -- |
39 | 2.9.3 | 94 | 2.40.1 |
40 | |||
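To see what this enables end to end, here is a hypothetical invocation; the vhost-vdpa device path, fd number and fd set ID are made-up examples, and it assumes a libblkio build whose virtio-blk driver exposes the 'fd' property. cache.direct=on is required because the driver rejects cache.direct=off, as the hunk above shows.

```
# The invoking process opens the character device and QEMU inherits it
# as fd 3; -add-fd places fd 3 into fd set 1, which qemu_open() then
# resolves through the special path /dev/fdset/1.
qemu-system-x86_64 \
    -add-fd fd=3,set=1,opaque=vhost-vdpa \
    -blockdev node-name=drive0,driver=virtio-blk-vhost-vdpa,path=/dev/fdset/1,cache.direct=on \
    -device virtio-blk-pci,drive=drive0 \
    3<>/dev/vhost-vdpa-0
```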
New patch | |||
---|---|---|---|
1 | From: Stefano Garzarella <sgarzare@redhat.com> | ||
1 | 2 | ||
3 | The virtio-blk-vhost-vdpa driver in libblkio 1.3.0 supports fd | ||
4 | passing through the new 'fd' property. | ||
5 | |||
6 | Since we now use qemu_open() on '@path' when the virtio-blk driver | ||
7 | supports fd passing, let's announce it. | ||
8 | In this way, the management layer can pass the file descriptor of an | ||
9 | already opened vhost-vdpa character device. This is especially useful | ||
10 | when the device can only be accessed with certain privileges. | ||
11 | |||
12 | Add the '@fdset' feature only when the virtio-blk-vhost-vdpa driver | ||
13 | in libblkio supports it. | ||
14 | |||
15 | Suggested-by: Markus Armbruster <armbru@redhat.com> | ||
16 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
17 | Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> | ||
18 | Message-id: 20230530071941.8954-3-sgarzare@redhat.com | ||
19 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
20 | --- | ||
21 | qapi/block-core.json | 6 ++++++ | ||
22 | meson.build | 4 ++++ | ||
23 | 2 files changed, 10 insertions(+) | ||
24 | |||
25 | diff --git a/qapi/block-core.json b/qapi/block-core.json | ||
26 | index XXXXXXX..XXXXXXX 100644 | ||
27 | --- a/qapi/block-core.json | ||
28 | +++ b/qapi/block-core.json | ||
29 | @@ -XXX,XX +XXX,XX @@ | ||
30 | # | ||
31 | # @path: path to the vhost-vdpa character device. | ||
32 | # | ||
33 | +# Features: | ||
34 | +# @fdset: Member @path supports the special "/dev/fdset/N" path | ||
35 | +# (since 8.1) | ||
36 | +# | ||
37 | # Since: 7.2 | ||
38 | ## | ||
39 | { 'struct': 'BlockdevOptionsVirtioBlkVhostVdpa', | ||
40 | 'data': { 'path': 'str' }, | ||
41 | + 'features': [ { 'name': 'fdset', | ||
42 | + 'if': 'CONFIG_BLKIO_VHOST_VDPA_FD' } ], | ||
43 | 'if': 'CONFIG_BLKIO' } | ||
44 | |||
45 | ## | ||
46 | diff --git a/meson.build b/meson.build | ||
47 | index XXXXXXX..XXXXXXX 100644 | ||
48 | --- a/meson.build | ||
49 | +++ b/meson.build | ||
50 | @@ -XXX,XX +XXX,XX @@ config_host_data.set('CONFIG_LZO', lzo.found()) | ||
51 | config_host_data.set('CONFIG_MPATH', mpathpersist.found()) | ||
52 | config_host_data.set('CONFIG_MPATH_NEW_API', mpathpersist_new_api) | ||
53 | config_host_data.set('CONFIG_BLKIO', blkio.found()) | ||
54 | +if blkio.found() | ||
55 | + config_host_data.set('CONFIG_BLKIO_VHOST_VDPA_FD', | ||
56 | + blkio.version().version_compare('>=1.3.0')) | ||
57 | +endif | ||
58 | config_host_data.set('CONFIG_CURL', curl.found()) | ||
59 | config_host_data.set('CONFIG_CURSES', curses.found()) | ||
60 | config_host_data.set('CONFIG_GBM', gbm.found()) | ||
61 | -- | ||
62 | 2.40.1
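Because the feature is compiled in only against libblkio >= 1.3.0, management software should probe for it rather than key off the QEMU version. A rough sketch of that probe over QMP follows; the reply is abridged and real query-qmp-schema output uses generated type names, so treat the shape, not the literal names, as the takeaway.

```
-> { "execute": "query-qmp-schema" }
<- { "return": [
       ...,
       { "meta-type": "object",
         "name": "<generated name for BlockdevOptionsVirtioBlkVhostVdpa>",
         "members": [ { "name": "path", "type": "str" } ],
         "features": [ "fdset" ] },
       ...
     ] }
```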