The following changes since commit 3521ade3510eb5cefb2e27a101667f25dad89935:

  Merge remote-tracking branch 'remotes/thuth-gitlab/tags/pull-request-2021-07-29' into staging (2021-07-29 13:17:20 +0100)

are available in the Git repository at:

  https://gitlab.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to cc8eecd7f105a1dff5876adeb238a14696061a4a:

  MAINTAINERS: Added myself as a reviewer for the NVMe Block Driver (2021-07-29 17:17:34 +0100)

----------------------------------------------------------------
Pull request

The main fix here is for io_uring. Spurious -EAGAIN errors can happen and the
request needs to be resubmitted.

The MAINTAINERS changes carry no risk and we might as well include them in QEMU
6.1.

----------------------------------------------------------------

Fabian Ebner (1):
  block/io_uring: resubmit when result is -EAGAIN

Philippe Mathieu-Daudé (1):
  MAINTAINERS: Added myself as a reviewer for the NVMe Block Driver

Stefano Garzarella (1):
  MAINTAINERS: add Stefano Garzarella as io_uring reviewer

 MAINTAINERS      |  2 ++
 block/io_uring.c | 16 +++++++++++++++-
 2 files changed, 17 insertions(+), 1 deletion(-)

--
2.31.1

The following changes since commit 801f3db7564dcce8a37a70833c0abe40ec19f8ce:

  Merge remote-tracking branch 'remotes/philmd/tags/kconfig-20210720' into staging (2021-07-20 19:30:28 +0100)

are available in the Git repository at:

  https://gitlab.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to d7ddd0a1618a75b31dc308bb37365ce1da972154:

  linux-aio: limit the batch size using `aio-max-batch` parameter (2021-07-21 13:47:50 +0100)

----------------------------------------------------------------
Pull request

Stefano's performance regression fix for commit 2558cb8dd4 ("linux-aio:
increasing MAX_EVENTS to a larger hardcoded value").

----------------------------------------------------------------

Stefano Garzarella (3):
  iothread: generalize iothread_set_param/iothread_get_param
  iothread: add aio-max-batch parameter
  linux-aio: limit the batch size using `aio-max-batch` parameter

 qapi/misc.json            |  6 ++-
 qapi/qom.json             |  7 +++-
 include/block/aio.h       | 12 ++++++
 include/sysemu/iothread.h |  3 ++
 block/linux-aio.c         |  9 ++++-
 iothread.c                | 82 ++++++++++++++++++++++++++++++++++-----
 monitor/hmp-cmds.c        |  2 +
 util/aio-posix.c          | 12 ++++++
 util/aio-win32.c          |  5 +++
 util/async.c              |  2 +
 qemu-options.hx           |  8 +++-
 11 files changed, 134 insertions(+), 14 deletions(-)

--
2.31.1
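
The io_uring fix summarized in the first of the two cover letters above boils down to treating a spurious -EAGAIN completion like -EINTR and submitting the request again. As a purely illustrative aside (this is not code from the series, which resubmits through QEMU's own request queue), here is a minimal standalone liburing sketch of that retry pattern; it assumes liburing is installed and that a readable file path is passed as the only argument:

/* resubmit-demo.c: retry a readv whose completion reports -EINTR or a
 * spurious -EAGAIN. Illustration only, not the QEMU implementation.
 * Build (assumption): gcc resubmit-demo.c -o resubmit-demo -luring */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>
#include <liburing.h>

int main(int argc, char **argv)
{
    struct io_uring ring;
    struct io_uring_cqe *cqe;
    char buf[4096];
    struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
    int fd, ret;

    if (argc != 2) {
        fprintf(stderr, "usage: %s FILE\n", argv[0]);
        return 1;
    }
    fd = open(argv[1], O_RDONLY);
    if (fd < 0 || io_uring_queue_init(8, &ring, 0) < 0) {
        perror("setup");
        return 1;
    }

    for (;;) {
        /* Submit one readv and wait for its completion. */
        struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
        io_uring_prep_readv(sqe, fd, &iov, 1, 0);
        io_uring_submit(&ring);
        io_uring_wait_cqe(&ring, &cqe);
        ret = cqe->res;
        io_uring_cqe_seen(&ring, cqe);

        /* -EINTR, and per this pull request spurious -EAGAIN, both mean
         * "submit the same request again". */
        if (ret == -EINTR || ret == -EAGAIN) {
            continue;
        }
        break;
    }

    if (ret < 0) {
        fprintf(stderr, "readv failed: %s\n", strerror(-ret));
    } else {
        printf("read %d bytes\n", ret);
    }
    io_uring_queue_exit(&ring);
    close(fd);
    return ret < 0;
}

QEMU's block/io_uring.c applies the same idea asynchronously inside luring_process_completions(), as the block/io_uring.c patch further below shows.
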
From: Stefano Garzarella <sgarzare@redhat.com>

I've been working with io_uring for a while so I'd like to help
with reviews.

Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
Message-Id: <20210728131515.131045-1-sgarzare@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 MAINTAINERS | 1 +
 1 file changed, 1 insertion(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index XXXXXXX..XXXXXXX 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -XXX,XX +XXX,XX @@ Linux io_uring
 M: Aarushi Mehta <mehta.aaru20@gmail.com>
 M: Julia Suvorova <jusual@redhat.com>
 M: Stefan Hajnoczi <stefanha@redhat.com>
+R: Stefano Garzarella <sgarzare@redhat.com>
 L: qemu-block@nongnu.org
 S: Maintained
 F: block/io_uring.c

--
2.31.1

From: Stefano Garzarella <sgarzare@redhat.com>

When there are multiple queues attached to the same AIO context,
some requests may experience high latency, since in the worst case
the AIO engine queue is only flushed when it is full (MAX_EVENTS) or
there are no more queues plugged.

Commit 2558cb8dd4 ("linux-aio: increasing MAX_EVENTS to a larger
hardcoded value") changed MAX_EVENTS from 128 to 1024, to increase
the number of in-flight requests. But this change also increased
the potential maximum batch to 1024 elements.

When there is a single queue attached to the AIO context, the issue
is mitigated by laio_io_unplug(), which flushes the queue every time
it is invoked, since no other queue can be plugged.

Let's use the new `aio-max-batch` IOThread parameter to mitigate
this issue, limiting the number of requests in a batch.

We also define a default value (32): this value was obtained by
running some benchmarks and it represents a good tradeoff between
the latency increase while a request is queued and the cost of the
io_submit(2) system call.

Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
Message-id: 20210721094211.69853-4-sgarzare@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/linux-aio.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/block/linux-aio.c b/block/linux-aio.c
index XXXXXXX..XXXXXXX 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -XXX,XX +XXX,XX @@
  */
 #define MAX_EVENTS 1024

+/* Maximum number of requests in a batch. (default value) */
+#define DEFAULT_MAX_BATCH 32
+
 struct qemu_laiocb {
     Coroutine *co;
     LinuxAioState *ctx;
@@ -XXX,XX +XXX,XX @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
     LinuxAioState *s = laiocb->ctx;
     struct iocb *iocbs = &laiocb->iocb;
     QEMUIOVector *qiov = laiocb->qiov;
+    int64_t max_batch = s->aio_context->aio_max_batch ?: DEFAULT_MAX_BATCH;
+
+    /* limit the batch with the number of available events */
+    max_batch = MIN_NON_ZERO(MAX_EVENTS - s->io_q.in_flight, max_batch);

     switch (type) {
     case QEMU_AIO_WRITE:
@@ -XXX,XX +XXX,XX @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
     s->io_q.in_queue++;
     if (!s->io_q.blocked &&
         (!s->io_q.plugged ||
-         s->io_q.in_flight + s->io_q.in_queue >= MAX_EVENTS)) {
+         s->io_q.in_queue >= max_batch)) {
         ioq_submit(s);
     }

--
2.31.1
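
To make the new flush condition concrete, the following is a small standalone model of the decision laio_do_submit() makes after the linux-aio patch above: requests accumulate until either the configured batch limit (32 by default when aio-max-batch is 0) or the number of free event slots is reached, whichever is smaller. MAX_EVENTS and DEFAULT_MAX_BATCH mirror the patch, while should_flush() and min_non_zero() are invented stand-ins (min_non_zero() reproduces the intent of QEMU's MIN_NON_ZERO macro), and the io_q.blocked check of the real condition is left out:

/* batch-demo.c: standalone model of the flush decision introduced above.
 * Illustration only, not the QEMU code.
 * Build (assumption): gcc batch-demo.c -o batch-demo */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_EVENTS        1024  /* maximum in-flight requests of the context */
#define DEFAULT_MAX_BATCH 32    /* default batch size defined by the patch */

/* Minimum of a and b, where 0 means "no limit". */
static int64_t min_non_zero(int64_t a, int64_t b)
{
    if (a == 0) {
        return b;
    }
    if (b == 0) {
        return a;
    }
    return a < b ? a : b;
}

/* Would laio_do_submit() flush the queue now? (blocked-queue case omitted) */
static int should_flush(int64_t aio_max_batch, int64_t in_flight,
                        int64_t in_queue, int plugged)
{
    int64_t max_batch = aio_max_batch ? aio_max_batch : DEFAULT_MAX_BATCH;

    /* never batch more than the free event slots */
    max_batch = min_non_zero(MAX_EVENTS - in_flight, max_batch);

    return !plugged || in_queue >= max_batch;
}

int main(void)
{
    int64_t in_queue;

    /* With the default limit, a plugged queue flushes after 32 queued
     * requests instead of waiting for up to MAX_EVENTS (1024). */
    for (in_queue = 1; in_queue <= MAX_EVENTS; in_queue++) {
        if (should_flush(0, 0, in_queue, 1)) {
            printf("flush at in_queue=%" PRId64 "\n", in_queue);
            break;
        }
    }
    return 0;
}

That difference, 32 queued requests versus up to 1024 before a flush, is exactly the latency regression the commit message describes.
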
From: Fabian Ebner <f.ebner@proxmox.com>

Linux SCSI can throw spurious -EAGAIN in some corner cases in its
completion path, which will end up being the result in the completed
io_uring request.

Resubmitting such requests should allow block jobs to complete, even
if such spurious errors are encountered.

Co-authored-by: Stefan Hajnoczi <stefanha@gmail.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
Message-id: 20210729091029.65369-1-f.ebner@proxmox.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/io_uring.c | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/block/io_uring.c b/block/io_uring.c
index XXXXXXX..XXXXXXX 100644
--- a/block/io_uring.c
+++ b/block/io_uring.c
@@ -XXX,XX +XXX,XX @@ static void luring_process_completions(LuringState *s)
         total_bytes = ret + luringcb->total_read;

         if (ret < 0) {
-            if (ret == -EINTR) {
+            /*
+             * Only writev/readv/fsync requests on regular files or host block
+             * devices are submitted. Therefore -EAGAIN is not expected but it's
+             * known to happen sometimes with Linux SCSI. Submit again and hope
+             * the request completes successfully.
+             *
+             * For more information, see:
+             * https://lore.kernel.org/io-uring/20210727165811.284510-3-axboe@kernel.dk/T/#u
+             *
+             * If the code is changed to submit other types of requests in the
+             * future, then this workaround may need to be extended to deal with
+             * genuine -EAGAIN results that should not be resubmitted
+             * immediately.
+             */
+            if (ret == -EINTR || ret == -EAGAIN) {
                 luring_resubmit(s, luringcb);
                 continue;
             }

--
2.31.1

From: Stefano Garzarella <sgarzare@redhat.com>

The `aio-max-batch` parameter will be propagated to AIO engines
and it will be used to control the maximum number of queued requests.

When the number of queued requests reaches `aio-max-batch`, the
engine invokes the system call to forward the requests to the kernel.

This parameter allows us to control the maximum batch size in order
to reduce the latency that requests accumulate while queued in the
AIO engine queue.

If `aio-max-batch` is equal to 0 (default value), the AIO engine will
use its default maximum batch size value.

Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
Message-id: 20210721094211.69853-3-sgarzare@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 qapi/misc.json            |  6 ++++-
 qapi/qom.json             |  7 ++++-
 include/block/aio.h       | 12 +++++++
 include/sysemu/iothread.h |  3 +++
 iothread.c                | 55 +++++++++++++++++++++++++++++++++----
 monitor/hmp-cmds.c        |  2 ++
 util/aio-posix.c          | 12 +++++++
 util/aio-win32.c          |  5 ++++
 util/async.c              |  2 ++
 qemu-options.hx           |  8 ++++--
 10 files changed, 103 insertions(+), 9 deletions(-)

diff --git a/qapi/misc.json b/qapi/misc.json
index XXXXXXX..XXXXXXX 100644
--- a/qapi/misc.json
+++ b/qapi/misc.json
@@ -XXX,XX +XXX,XX @@
 # @poll-shrink: how many ns will be removed from polling time, 0 means that
 #               it's not configured (since 2.9)
 #
+# @aio-max-batch: maximum number of requests in a batch for the AIO engine,
+#                 0 means that the engine will use its default (since 6.1)
+#
 # Since: 2.0
 ##
 { 'struct': 'IOThreadInfo',
@@ -XXX,XX +XXX,XX @@
            'thread-id': 'int',
            'poll-max-ns': 'int',
            'poll-grow': 'int',
-           'poll-shrink': 'int' } }
+           'poll-shrink': 'int',
+           'aio-max-batch': 'int' } }

 ##
 # @query-iothreads:
diff --git a/qapi/qom.json b/qapi/qom.json
index XXXXXXX..XXXXXXX 100644
--- a/qapi/qom.json
+++ b/qapi/qom.json
@@ -XXX,XX +XXX,XX @@
 #     algorithm detects it is spending too long polling without
 #     encountering events. 0 selects a default behaviour (default: 0)
 #
+# @aio-max-batch: maximum number of requests in a batch for the AIO engine,
+#                 0 means that the engine will use its default
+#                 (default:0, since 6.1)
+#
 # Since: 2.0
 ##
 { 'struct': 'IothreadProperties',
   'data': { '*poll-max-ns': 'int',
             '*poll-grow': 'int',
-            '*poll-shrink': 'int' } }
+            '*poll-shrink': 'int',
+            '*aio-max-batch': 'int' } }

 ##
 # @MemoryBackendProperties:
diff --git a/include/block/aio.h b/include/block/aio.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -XXX,XX +XXX,XX @@ struct AioContext {
     int64_t poll_grow;      /* polling time growth factor */
     int64_t poll_shrink;    /* polling time shrink factor */

+    /* AIO engine parameters */
+    int64_t aio_max_batch;  /* maximum number of requests in a batch */
+
     /*
      * List of handlers participating in userspace polling. Protected by
      * ctx->list_lock. Iterated and modified mostly by the event loop thread
@@ -XXX,XX +XXX,XX @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                  int64_t grow, int64_t shrink,
                                  Error **errp);

+/**
+ * aio_context_set_aio_params:
+ * @ctx: the aio context
+ * @max_batch: maximum number of requests in a batch, 0 means that the
+ *             engine will use its default
+ */
+void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
+                                Error **errp);
+
 #endif
diff --git a/include/sysemu/iothread.h b/include/sysemu/iothread.h
index XXXXXXX..XXXXXXX 100644
--- a/include/sysemu/iothread.h
+++ b/include/sysemu/iothread.h
@@ -XXX,XX +XXX,XX @@ struct IOThread {
     int64_t poll_max_ns;
    int64_t poll_grow;
     int64_t poll_shrink;
+
+    /* AioContext AIO engine parameters */
+    int64_t aio_max_batch;
 };
 typedef struct IOThread IOThread;

diff --git a/iothread.c b/iothread.c
index XXXXXXX..XXXXXXX 100644
--- a/iothread.c
+++ b/iothread.c
@@ -XXX,XX +XXX,XX @@ static void iothread_init_gcontext(IOThread *iothread)
     iothread->main_loop = g_main_loop_new(iothread->worker_context, TRUE);
 }

+static void iothread_set_aio_context_params(IOThread *iothread, Error **errp)
+{
+    ERRP_GUARD();
+
+    aio_context_set_poll_params(iothread->ctx,
+                                iothread->poll_max_ns,
+                                iothread->poll_grow,
+                                iothread->poll_shrink,
+                                errp);
+    if (*errp) {
+        return;
+    }
+
+    aio_context_set_aio_params(iothread->ctx,
+                               iothread->aio_max_batch,
+                               errp);
+}
+
 static void iothread_complete(UserCreatable *obj, Error **errp)
 {
     Error *local_error = NULL;
@@ -XXX,XX +XXX,XX @@ static void iothread_complete(UserCreatable *obj, Error **errp)
      */
     iothread_init_gcontext(iothread);

-    aio_context_set_poll_params(iothread->ctx,
-                                iothread->poll_max_ns,
-                                iothread->poll_grow,
-                                iothread->poll_shrink,
-                                &local_error);
+    iothread_set_aio_context_params(iothread, &local_error);
     if (local_error) {
         error_propagate(errp, local_error);
         aio_context_unref(iothread->ctx);
@@ -XXX,XX +XXX,XX @@ static PollParamInfo poll_grow_info = {
 static PollParamInfo poll_shrink_info = {
     "poll-shrink", offsetof(IOThread, poll_shrink),
 };
+static PollParamInfo aio_max_batch_info = {
+    "aio-max-batch", offsetof(IOThread, aio_max_batch),
+};

 static void iothread_get_param(Object *obj, Visitor *v,
         const char *name, void *opaque, Error **errp)
@@ -XXX,XX +XXX,XX @@ static void iothread_set_poll_param(Object *obj, Visitor *v,
     }
 }

+static void iothread_get_aio_param(Object *obj, Visitor *v,
+        const char *name, void *opaque, Error **errp)
+{
+
+    iothread_get_param(obj, v, name, opaque, errp);
+}
+
+static void iothread_set_aio_param(Object *obj, Visitor *v,
+        const char *name, void *opaque, Error **errp)
+{
+    IOThread *iothread = IOTHREAD(obj);
+
+    if (!iothread_set_param(obj, v, name, opaque, errp)) {
+        return;
+    }
+
+    if (iothread->ctx) {
+        aio_context_set_aio_params(iothread->ctx,
+                                   iothread->aio_max_batch,
+                                   errp);
+    }
+}
+
 static void iothread_class_init(ObjectClass *klass, void *class_data)
 {
     UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
@@ -XXX,XX +XXX,XX @@ static void iothread_class_init(ObjectClass *klass, void *class_data)
                               iothread_get_poll_param,
                               iothread_set_poll_param,
                               NULL, &poll_shrink_info);
+    object_class_property_add(klass, "aio-max-batch", "int",
+                              iothread_get_aio_param,
+                              iothread_set_aio_param,
+                              NULL, &aio_max_batch_info);
 }

 static const TypeInfo iothread_info = {
@@ -XXX,XX +XXX,XX @@ static int query_one_iothread(Object *object, void *opaque)
     info->poll_max_ns = iothread->poll_max_ns;
     info->poll_grow = iothread->poll_grow;
     info->poll_shrink = iothread->poll_shrink;
+    info->aio_max_batch = iothread->aio_max_batch;

     QAPI_LIST_APPEND(*tail, info);
     return 0;
diff --git a/monitor/hmp-cmds.c b/monitor/hmp-cmds.c
index XXXXXXX..XXXXXXX 100644
--- a/monitor/hmp-cmds.c
+++ b/monitor/hmp-cmds.c
@@ -XXX,XX +XXX,XX @@ void hmp_info_iothreads(Monitor *mon, const QDict *qdict)
         monitor_printf(mon, "  poll-max-ns=%" PRId64 "\n", value->poll_max_ns);
         monitor_printf(mon, "  poll-grow=%" PRId64 "\n", value->poll_grow);
         monitor_printf(mon, "  poll-shrink=%" PRId64 "\n", value->poll_shrink);
+        monitor_printf(mon, "  aio-max-batch=%" PRId64 "\n",
+                       value->aio_max_batch);
     }

     qapi_free_IOThreadInfoList(info_list);
diff --git a/util/aio-posix.c b/util/aio-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -XXX,XX +XXX,XX @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,

     aio_notify(ctx);
 }
+
+void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
+                                Error **errp)
+{
+    /*
+     * No thread synchronization here, it doesn't matter if an incorrect value
+     * is used once.
+     */
+    ctx->aio_max_batch = max_batch;
+
+    aio_notify(ctx);
+}
diff --git a/util/aio-win32.c b/util/aio-win32.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-win32.c
+++ b/util/aio-win32.c
@@ -XXX,XX +XXX,XX @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
         error_setg(errp, "AioContext polling is not implemented on Windows");
     }
 }
+
+void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
+                                Error **errp)
+{
+}
diff --git a/util/async.c b/util/async.c
index XXXXXXX..XXXXXXX 100644
--- a/util/async.c
+++ b/util/async.c
@@ -XXX,XX +XXX,XX @@ AioContext *aio_context_new(Error **errp)
     ctx->poll_grow = 0;
     ctx->poll_shrink = 0;

+    ctx->aio_max_batch = 0;
+
     return ctx;
 fail:
     g_source_destroy(&ctx->source);
diff --git a/qemu-options.hx b/qemu-options.hx
index XXXXXXX..XXXXXXX 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -XXX,XX +XXX,XX @@ SRST

         CN=laptop.example.com,O=Example Home,L=London,ST=London,C=GB

-    ``-object iothread,id=id,poll-max-ns=poll-max-ns,poll-grow=poll-grow,poll-shrink=poll-shrink``
+    ``-object iothread,id=id,poll-max-ns=poll-max-ns,poll-grow=poll-grow,poll-shrink=poll-shrink,aio-max-batch=aio-max-batch``
         Creates a dedicated event loop thread that devices can be
         assigned to. This is known as an IOThread. By default device
         emulation happens in vCPU threads or the main event loop thread.
@@ -XXX,XX +XXX,XX @@ SRST
         the polling time when the algorithm detects it is spending too
         long polling without encountering events.

-        The polling parameters can be modified at run-time using the
+        The ``aio-max-batch`` parameter is the maximum number of requests
+        in a batch for the AIO engine, 0 means that the engine will use
+        its default.
+
+        The IOThread parameters can be modified at run-time using the
         ``qom-set`` command (where ``iothread1`` is the IOThread's
         ``id``):

--
2.31.1
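
One convention in the patch above is worth spelling out: aio-max-batch defaults to 0, and 0 means that the AIO engine falls back to its own default batch size. The IOThread property is simply copied into the AioContext, and each engine interprets the value when it builds a batch. The sketch below illustrates that convention with invented stand-in names (demo_aio_context, demo_set_aio_params, demo_effective_batch); it is not the QEMU API:

/* aio-max-batch-demo.c: sketch of the "0 means engine default" convention.
 * All names are stand-ins, not QEMU's.
 * Build (assumption): gcc aio-max-batch-demo.c -o aio-max-batch-demo */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ENGINE_DEFAULT_BATCH 32      /* e.g. DEFAULT_MAX_BATCH in linux-aio */

struct demo_aio_context {
    int64_t aio_max_batch;           /* 0 = use the engine's default */
};

/* Like aio_context_set_aio_params() in the patch, this only records the
 * value (the real function also wakes the event loop with aio_notify()). */
static void demo_set_aio_params(struct demo_aio_context *ctx, int64_t max_batch)
{
    ctx->aio_max_batch = max_batch;
}

/* What an AIO engine would do when deciding how large a batch may grow. */
static int64_t demo_effective_batch(const struct demo_aio_context *ctx)
{
    return ctx->aio_max_batch ? ctx->aio_max_batch : ENGINE_DEFAULT_BATCH;
}

int main(void)
{
    struct demo_aio_context ctx = { 0 };

    /* Unset (0): the engine default applies. */
    assert(demo_effective_batch(&ctx) == ENGINE_DEFAULT_BATCH);

    /* e.g. -object iothread,id=iothread1,aio-max-batch=8 */
    demo_set_aio_params(&ctx, 8);
    assert(demo_effective_batch(&ctx) == 8);

    /* Setting it back to 0 restores the engine default. */
    demo_set_aio_params(&ctx, 0);
    assert(demo_effective_batch(&ctx) == ENGINE_DEFAULT_BATCH);

    printf("ok\n");
    return 0;
}

At the user level this is the same value that the qemu-options.hx text above documents for -object iothread and that qom-set can change at run time.
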
From: Philippe Mathieu-Daudé <philmd@redhat.com>

I'm interested in following the activity around the NVMe bdrv.

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20210728183340.2018313-1-philmd@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 MAINTAINERS | 1 +
 1 file changed, 1 insertion(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index XXXXXXX..XXXXXXX 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -XXX,XX +XXX,XX @@ F: block/null.c
 NVMe Block Driver
 M: Stefan Hajnoczi <stefanha@redhat.com>
 R: Fam Zheng <fam@euphon.net>
+R: Philippe Mathieu-Daudé <philmd@redhat.com>
 L: qemu-block@nongnu.org
 S: Supported
 F: block/nvme*

--
2.31.1

From: Stefano Garzarella <sgarzare@redhat.com>

These changes prepare for the next patches, where we add a new
parameter not related to the poll mechanism.

Let's add two new generic functions (iothread_set_param and
iothread_get_param) that we use to set and get IOThread
parameters.

Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
Message-id: 20210721094211.69853-2-sgarzare@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 iothread.c | 27 +++++++++++++++++++++++----
 1 file changed, 23 insertions(+), 4 deletions(-)

diff --git a/iothread.c b/iothread.c
index XXXXXXX..XXXXXXX 100644
--- a/iothread.c
+++ b/iothread.c
@@ -XXX,XX +XXX,XX @@ static PollParamInfo poll_shrink_info = {
     "poll-shrink", offsetof(IOThread, poll_shrink),
 };

-static void iothread_get_poll_param(Object *obj, Visitor *v,
+static void iothread_get_param(Object *obj, Visitor *v,
         const char *name, void *opaque, Error **errp)
 {
     IOThread *iothread = IOTHREAD(obj);
@@ -XXX,XX +XXX,XX @@ static void iothread_get_poll_param(Object *obj, Visitor *v,
     visit_type_int64(v, name, field, errp);
 }

-static void iothread_set_poll_param(Object *obj, Visitor *v,
+static bool iothread_set_param(Object *obj, Visitor *v,
         const char *name, void *opaque, Error **errp)
 {
     IOThread *iothread = IOTHREAD(obj);
@@ -XXX,XX +XXX,XX @@ static void iothread_set_poll_param(Object *obj, Visitor *v,
     int64_t value;

     if (!visit_type_int64(v, name, &value, errp)) {
-        return;
+        return false;
     }

     if (value < 0) {
         error_setg(errp, "%s value must be in range [0, %" PRId64 "]",
                    info->name, INT64_MAX);
-        return;
+        return false;
     }

     *field = value;

+    return true;
+}
+
+static void iothread_get_poll_param(Object *obj, Visitor *v,
+        const char *name, void *opaque, Error **errp)
+{
+
+    iothread_get_param(obj, v, name, opaque, errp);
+}
+
+static void iothread_set_poll_param(Object *obj, Visitor *v,
+        const char *name, void *opaque, Error **errp)
+{
+    IOThread *iothread = IOTHREAD(obj);
+
+    if (!iothread_set_param(obj, v, name, opaque, errp)) {
+        return;
+    }
+
     if (iothread->ctx) {
         aio_context_set_poll_params(iothread->ctx,
                                     iothread->poll_max_ns,

--
2.31.1
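
The refactoring above keeps one generic getter/setter pair that locates the right IOThread field through a small {name, offset} table (PollParamInfo), with thin per-property wrappers on top; iothread_set_param() returns bool so a wrapper can skip its follow-up work when validation fails. Here is a self-contained sketch of that pattern with simplified stand-in types (demo_iothread, demo_param_info, demo_get_param, demo_set_param) instead of the QOM/Visitor machinery:

/* param-table-demo.c: sketch of the get/set-by-offset pattern used by
 * iothread_get_param()/iothread_set_param() above. The types are simplified
 * stand-ins, not QEMU's.
 * Build (assumption): gcc param-table-demo.c -o param-table-demo */
#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_iothread {
    int64_t poll_max_ns;
    int64_t aio_max_batch;
};

/* One entry per property: its external name and where it lives. */
struct demo_param_info {
    const char *name;
    size_t offset;
};

static const struct demo_param_info poll_max_ns_info = {
    "poll-max-ns", offsetof(struct demo_iothread, poll_max_ns),
};
static const struct demo_param_info aio_max_batch_info = {
    "aio-max-batch", offsetof(struct demo_iothread, aio_max_batch),
};

static int64_t demo_get_param(const struct demo_iothread *t,
                              const struct demo_param_info *info)
{
    return *(const int64_t *)((const char *)t + info->offset);
}

/* Generic setter: validate, store, and report success so that callers can
 * decide whether to apply follow-up side effects (as the QEMU wrappers do). */
static bool demo_set_param(struct demo_iothread *t,
                           const struct demo_param_info *info, int64_t value)
{
    int64_t *field = (int64_t *)((char *)t + info->offset);

    if (value < 0) {
        fprintf(stderr, "%s must be non-negative\n", info->name);
        return false;
    }
    *field = value;
    return true;
}

int main(void)
{
    struct demo_iothread t = { 0 };

    demo_set_param(&t, &poll_max_ns_info, 32768);
    demo_set_param(&t, &aio_max_batch_info, 16);
    demo_set_param(&t, &aio_max_batch_info, -1);   /* rejected, old value kept */

    printf("poll-max-ns=%" PRId64 " aio-max-batch=%" PRId64 "\n",
           demo_get_param(&t, &poll_max_ns_info),
           demo_get_param(&t, &aio_max_batch_info));
    return 0;
}

The aio-max-batch patch shown earlier in this document builds on exactly this shape: it adds an aio_max_batch_info entry plus thin iothread_get_aio_param()/iothread_set_aio_param() wrappers without duplicating the validation logic.
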