The following changes since commit 91f5f7a5df1fda8c34677a7c49ee8a4bb5b56a36:

  Merge remote-tracking branch 'remotes/lvivier-gitlab/tags/linux-user-for-7.0-pull-request' into staging (2022-01-12 11:51:47 +0000)

are available in the Git repository at:

  https://gitlab.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to db608fb78444c58896db69495729e4458eeaace1:

  virtio: unify dataplane and non-dataplane ->handle_output() (2022-01-12 17:09:39 +0000)

----------------------------------------------------------------
Pull request

----------------------------------------------------------------

Stefan Hajnoczi (6):
  aio-posix: split poll check from ready handler
  virtio: get rid of VirtIOHandleAIOOutput
  virtio-blk: drop unused virtio_blk_handle_vq() return value
  virtio-scsi: prepare virtio_scsi_handle_cmd for dataplane
  virtio: use ->handle_output() instead of ->handle_aio_output()
  virtio: unify dataplane and non-dataplane ->handle_output()

 include/block/aio.h             |  4 +-
 include/hw/virtio/virtio-blk.h  |  2 +-
 include/hw/virtio/virtio.h      |  5 +-
 util/aio-posix.h                |  1 +
 block/curl.c                    | 11 ++--
 block/export/fuse.c             |  4 +-
 block/io_uring.c                | 19 ++---
 block/iscsi.c                   |  4 +-
 block/linux-aio.c               | 16 +++---
 block/nfs.c                     |  6 +--
 block/nvme.c                    | 51 ++++++++++-------
 block/ssh.c                     |  4 +-
 block/win32-aio.c               |  4 +-
 hw/block/dataplane/virtio-blk.c | 16 +-----
 hw/block/virtio-blk.c           | 14 ++----
 hw/scsi/virtio-scsi-dataplane.c | 60 +++-------------
 hw/scsi/virtio-scsi.c           |  2 +-
 hw/virtio/virtio.c              | 73 +++++++++------
 hw/xen/xen-bus.c                |  6 +--
 io/channel-command.c            |  6 ++-
 io/channel-file.c               |  3 +-
 io/channel-socket.c             |  3 +-
 migration/rdma.c                |  8 +--
 tests/unit/test-aio.c           |  4 +-
 tests/unit/test-fdmon-epoll.c   |  4 +-
 util/aio-posix.c                | 89 +++++++++++++++++++++--------
 util/aio-win32.c                |  4 +-
 util/async.c                    | 10 +++-
 util/main-loop.c                |  4 +-
 util/qemu-coroutine-io.c        |  5 +-
 util/vhost-user-server.c        | 11 ++--
 31 files changed, 221 insertions(+), 232 deletions(-)

--
2.34.1

Adaptive polling measures the execution time of the polling check plus
handlers called when a polled event becomes ready. Handlers can take a
significant amount of time, making it look like polling was running for
a long time when in fact the event handler was running for a long time.

For example, on Linux the io_submit(2) syscall invoked when a virtio-blk
device's virtqueue becomes ready can take tens of microseconds. This
can exceed the default polling interval (32 microseconds) and cause
adaptive polling to stop polling.

By excluding the handler's execution time from the polling check we make
the adaptive polling calculation more accurate. As a result, the event
loop now stays in polling mode where previously it would have fallen
back to file descriptor monitoring.

The following data was collected with virtio-blk num-queues=2
event_idx=off using an IOThread. Before:

168k IOPS, IOThread syscalls:

  9837.115 ( 0.020 ms): IO iothread1/620155 io_submit(ctx_id: 140512552468480, nr: 16, iocbpp: 0x7fcb9f937db0) = 16
  9837.158 ( 0.002 ms): IO iothread1/620155 write(fd: 103, buf: 0x556a2ef71b88, count: 8) = 8
  9837.161 ( 0.001 ms): IO iothread1/620155 write(fd: 104, buf: 0x556a2ef71b88, count: 8) = 8
  9837.163 ( 0.001 ms): IO iothread1/620155 ppoll(ufds: 0x7fcb90002800, nfds: 4, tsp: 0x7fcb9f1342d0, sigsetsize: 8) = 3
  9837.164 ( 0.001 ms): IO iothread1/620155 read(fd: 107, buf: 0x7fcb9f939cc0, count: 512) = 8
  9837.174 ( 0.001 ms): IO iothread1/620155 read(fd: 105, buf: 0x7fcb9f939cc0, count: 512) = 8
  9837.176 ( 0.001 ms): IO iothread1/620155 read(fd: 106, buf: 0x7fcb9f939cc0, count: 512) = 8
  9837.209 ( 0.035 ms): IO iothread1/620155 io_submit(ctx_id: 140512552468480, nr: 32, iocbpp: 0x7fca7d0cebe0) = 32

After:

174k IOPS (+3.6%), IOThread syscalls:

  9809.566 ( 0.036 ms): IO iothread1/623061 io_submit(ctx_id: 140539805028352, nr: 32, iocbpp: 0x7fd0cdd62be0) = 32
  9809.625 ( 0.001 ms): IO iothread1/623061 write(fd: 103, buf: 0x5647cfba5f58, count: 8) = 8
  9809.627 ( 0.002 ms): IO iothread1/623061 write(fd: 104, buf: 0x5647cfba5f58, count: 8) = 8
  9809.663 ( 0.036 ms): IO iothread1/623061 io_submit(ctx_id: 140539805028352, nr: 32, iocbpp: 0x7fd0d0388b50) = 32

Notice that ppoll(2) and eventfd read(2) syscalls are eliminated because
the IOThread stays in polling mode instead of falling back to file
descriptor monitoring.

As usual, polling is not implemented on Windows so this patch ignores
the new io_poll_ready() callback in aio-win32.c.
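
[Illustration, not part of the patch: after this change a user of the
polling API registers a cheap check plus a separate ready handler. The
names MyState, my_read() and process_completions() below are
hypothetical placeholders:

  static bool my_poll(void *opaque)
  {
      MyState *s = opaque;
      /* Pure check, no side effects and no slow syscalls */
      return s->completions_pending;
  }

  static void my_poll_ready(void *opaque)
  {
      MyState *s = opaque;
      /* The slow work lives here; its runtime is no longer charged
       * against the adaptive polling interval. */
      process_completions(s);
  }

  aio_set_fd_handler(ctx, fd, false,
                     my_read, NULL,           /* io_read, io_write */
                     my_poll, my_poll_ready,  /* check + ready handler */
                     s);

--Ed.]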

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Message-id: 20211207132336.36627-2-stefanha@redhat.com

[Fixed up aio_set_event_notifier() calls in
tests/unit/test-fdmon-epoll.c added after this series was queued.
--Stefan]

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/block/aio.h           |  4 +-
 util/aio-posix.h              |  1 +
 block/curl.c                  | 11 +++--
 block/export/fuse.c           |  4 +-
 block/io_uring.c              | 19 ++----
 block/iscsi.c                 |  4 +-
 block/linux-aio.c             | 16 ++---
 block/nfs.c                   |  6 +--
 block/nvme.c                  | 51 +++++++++++-------
 block/ssh.c                   |  4 +-
 block/win32-aio.c             |  4 +-
 hw/virtio/virtio.c            | 16 ++---
 hw/xen/xen-bus.c              |  6 +--
 io/channel-command.c          |  6 ++-
 io/channel-file.c             |  3 +-
 io/channel-socket.c           |  3 +-
 migration/rdma.c              |  8 ++--
 tests/unit/test-aio.c         |  4 +-
 tests/unit/test-fdmon-epoll.c |  4 +-
 util/aio-posix.c              | 89 ++++++++++++++++++++++---------
 util/aio-win32.c              |  4 +-
 util/async.c                  | 10 +++-
 util/main-loop.c              |  4 +-
 util/qemu-coroutine-io.c      |  5 +-
 util/vhost-user-server.c      | 11 +++--
 25 files changed, 193 insertions(+), 104 deletions(-)

diff --git a/include/block/aio.h b/include/block/aio.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx,
IOHandler *io_read,
IOHandler *io_write,
AioPollFn *io_poll,
+ IOHandler *io_poll_ready,
void *opaque);

/* Set polling begin/end callbacks for a file descriptor that has already been
@@ -XXX,XX +XXX,XX @@ void aio_set_event_notifier(AioContext *ctx,
EventNotifier *notifier,
bool is_external,
EventNotifierHandler *io_read,
- AioPollFn *io_poll);
+ AioPollFn *io_poll,
+ EventNotifierHandler *io_poll_ready);

/* Set polling begin/end callbacks for an event notifier that has already been
* registered with aio_set_event_notifier. Do nothing if the event notifier is
diff --git a/util/aio-posix.h b/util/aio-posix.h
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-posix.h
+++ b/util/aio-posix.h
@@ -XXX,XX +XXX,XX @@ struct AioHandler {
IOHandler *io_read;
IOHandler *io_write;
AioPollFn *io_poll;
+ IOHandler *io_poll_ready;
IOHandler *io_poll_begin;
IOHandler *io_poll_end;
void *opaque;
diff --git a/block/curl.c b/block/curl.c
index XXXXXXX..XXXXXXX 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -XXX,XX +XXX,XX @@ static gboolean curl_drop_socket(void *key, void *value, void *opaque)
BDRVCURLState *s = socket->s;

aio_set_fd_handler(s->aio_context, socket->fd, false,
- NULL, NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL, NULL);
return true;
}

@@ -XXX,XX +XXX,XX @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
switch (action) {
case CURL_POLL_IN:
aio_set_fd_handler(s->aio_context, fd, false,
- curl_multi_do, NULL, NULL, socket);
+ curl_multi_do, NULL, NULL, NULL, socket);
break;
case CURL_POLL_OUT:
aio_set_fd_handler(s->aio_context, fd, false,
- NULL, curl_multi_do, NULL, socket);
+ NULL, curl_multi_do, NULL, NULL, socket);
break;
case CURL_POLL_INOUT:
aio_set_fd_handler(s->aio_context, fd, false,
- curl_multi_do, curl_multi_do, NULL, socket);
+ curl_multi_do, curl_multi_do,
+ NULL, NULL, socket);
break;
case CURL_POLL_REMOVE:
aio_set_fd_handler(s->aio_context, fd, false,
- NULL, NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL, NULL);
break;
}

diff --git a/block/export/fuse.c b/block/export/fuse.c
index XXXXXXX..XXXXXXX 100644
--- a/block/export/fuse.c
+++ b/block/export/fuse.c
@@ -XXX,XX +XXX,XX @@ static int setup_fuse_export(FuseExport *exp, const char *mountpoint,

aio_set_fd_handler(exp->common.ctx,
fuse_session_fd(exp->fuse_session), true,
- read_from_fuse_export, NULL, NULL, exp);
+ read_from_fuse_export, NULL, NULL, NULL, exp);
exp->fd_handler_set_up = true;

return 0;
@@ -XXX,XX +XXX,XX @@ static void fuse_export_shutdown(BlockExport *blk_exp)
if (exp->fd_handler_set_up) {
aio_set_fd_handler(exp->common.ctx,
fuse_session_fd(exp->fuse_session), true,
- NULL, NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL, NULL);
exp->fd_handler_set_up = false;
}
}
diff --git a/block/io_uring.c b/block/io_uring.c
index XXXXXXX..XXXXXXX 100644
--- a/block/io_uring.c
+++ b/block/io_uring.c
@@ -XXX,XX +XXX,XX @@ static bool qemu_luring_poll_cb(void *opaque)
{
LuringState *s = opaque;

- if (io_uring_cq_ready(&s->ring)) {
- luring_process_completions_and_submit(s);
- return true;
- }
+ return io_uring_cq_ready(&s->ring);
+}

- return false;
+static void qemu_luring_poll_ready(void *opaque)
+{
+ LuringState *s = opaque;
+
+ luring_process_completions_and_submit(s);
}

static void ioq_init(LuringQueue *io_q)
@@ -XXX,XX +XXX,XX @@ int coroutine_fn luring_co_submit(BlockDriverState *bs, LuringState *s, int fd,

void luring_detach_aio_context(LuringState *s, AioContext *old_context)
{
- aio_set_fd_handler(old_context, s->ring.ring_fd, false, NULL, NULL, NULL,
- s);
+ aio_set_fd_handler(old_context, s->ring.ring_fd, false,
+ NULL, NULL, NULL, NULL, s);
qemu_bh_delete(s->completion_bh);
s->aio_context = NULL;
}
@@ -XXX,XX +XXX,XX @@ void luring_attach_aio_context(LuringState *s, AioContext *new_context)
s->aio_context = new_context;
s->completion_bh = aio_bh_new(new_context, qemu_luring_completion_bh, s);
aio_set_fd_handler(s->aio_context, s->ring.ring_fd, false,
- qemu_luring_completion_cb, NULL, qemu_luring_poll_cb, s);
+ qemu_luring_completion_cb, NULL,
+ qemu_luring_poll_cb, qemu_luring_poll_ready, s);
}

LuringState *luring_init(Error **errp)
diff --git a/block/iscsi.c b/block/iscsi.c
index XXXXXXX..XXXXXXX 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -XXX,XX +XXX,XX @@ iscsi_set_events(IscsiLun *iscsilun)
false,
(ev & POLLIN) ? iscsi_process_read : NULL,
(ev & POLLOUT) ? iscsi_process_write : NULL,
- NULL,
+ NULL, NULL,
iscsilun);
iscsilun->events = ev;
}
@@ -XXX,XX +XXX,XX @@ static void iscsi_detach_aio_context(BlockDriverState *bs)
IscsiLun *iscsilun = bs->opaque;

aio_set_fd_handler(iscsilun->aio_context, iscsi_get_fd(iscsilun->iscsi),
- false, NULL, NULL, NULL, NULL);
+ false, NULL, NULL, NULL, NULL, NULL);
iscsilun->events = 0;

if (iscsilun->nop_timer) {
diff --git a/block/linux-aio.c b/block/linux-aio.c
index XXXXXXX..XXXXXXX 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -XXX,XX +XXX,XX @@ static bool qemu_laio_poll_cb(void *opaque)
LinuxAioState *s = container_of(e, LinuxAioState, e);
struct io_event *events;

- if (!io_getevents_peek(s->ctx, &events)) {
- return false;
- }
+ return io_getevents_peek(s->ctx, &events);
+}
+
+static void qemu_laio_poll_ready(EventNotifier *opaque)
+{
+ EventNotifier *e = opaque;
+ LinuxAioState *s = container_of(e, LinuxAioState, e);

qemu_laio_process_completions_and_submit(s);
- return true;
}

static void ioq_init(LaioQueue *io_q)
@@ -XXX,XX +XXX,XX @@ int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,

void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
{
- aio_set_event_notifier(old_context, &s->e, false, NULL, NULL);
+ aio_set_event_notifier(old_context, &s->e, false, NULL, NULL, NULL);
qemu_bh_delete(s->completion_bh);
s->aio_context = NULL;
}
@@ -XXX,XX +XXX,XX @@ void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
aio_set_event_notifier(new_context, &s->e, false,
qemu_laio_completion_cb,
- qemu_laio_poll_cb);
+ qemu_laio_poll_cb,
+ qemu_laio_poll_ready);
}

LinuxAioState *laio_init(Error **errp)
diff --git a/block/nfs.c b/block/nfs.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -XXX,XX +XXX,XX @@ static void nfs_set_events(NFSClient *client)
false,
(ev & POLLIN) ? nfs_process_read : NULL,
(ev & POLLOUT) ? nfs_process_write : NULL,
- NULL, client);
+ NULL, NULL, client);

}
client->events = ev;
@@ -XXX,XX +XXX,XX @@ static void nfs_detach_aio_context(BlockDriverState *bs)
NFSClient *client = bs->opaque;

aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
- false, NULL, NULL, NULL, NULL);
+ false, NULL, NULL, NULL, NULL, NULL);
client->events = 0;
}

@@ -XXX,XX +XXX,XX @@ static void nfs_client_close(NFSClient *client)
if (client->context) {
qemu_mutex_lock(&client->mutex);
aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
- false, NULL, NULL, NULL, NULL);
+ false, NULL, NULL, NULL, NULL, NULL);
qemu_mutex_unlock(&client->mutex);
if (client->fh) {
nfs_close(client->context, client->fh);
diff --git a/block/nvme.c b/block/nvme.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -XXX,XX +XXX,XX @@ out:
return ret;
}

-static bool nvme_poll_queue(NVMeQueuePair *q)
+static void nvme_poll_queue(NVMeQueuePair *q)
{
- bool progress = false;
-
const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];

@@ -XXX,XX +XXX,XX @@ static bool nvme_poll_queue(NVMeQueuePair *q)
* cannot race with itself.
*/
if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
- return false;
+ return;
}

qemu_mutex_lock(&q->lock);
while (nvme_process_completion(q)) {
/* Keep polling */
- progress = true;
}
qemu_mutex_unlock(&q->lock);
-
- return progress;
}

-static bool nvme_poll_queues(BDRVNVMeState *s)
+static void nvme_poll_queues(BDRVNVMeState *s)
{
- bool progress = false;
int i;

for (i = 0; i < s->queue_count; i++) {
- if (nvme_poll_queue(s->queues[i])) {
- progress = true;
- }
+ nvme_poll_queue(s->queues[i]);
}
- return progress;
}

static void nvme_handle_event(EventNotifier *n)
@@ -XXX,XX +XXX,XX @@ static bool nvme_poll_cb(void *opaque)
EventNotifier *e = opaque;
BDRVNVMeState *s = container_of(e, BDRVNVMeState,
irq_notifier[MSIX_SHARED_IRQ_IDX]);
+ int i;

- return nvme_poll_queues(s);
+ for (i = 0; i < s->queue_count; i++) {
+ NVMeQueuePair *q = s->queues[i];
+ const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
+ NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];
+
+ /*
+ * q->lock isn't needed because nvme_process_completion() only runs in
+ * the event loop thread and cannot race with itself.
+ */
+ if ((le16_to_cpu(cqe->status) & 0x1) != q->cq_phase) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static void nvme_poll_ready(EventNotifier *e)
+{
+ BDRVNVMeState *s = container_of(e, BDRVNVMeState,
+ irq_notifier[MSIX_SHARED_IRQ_IDX]);
+
+ nvme_poll_queues(s);
}

static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
@@ -XXX,XX +XXX,XX @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
}
aio_set_event_notifier(bdrv_get_aio_context(bs),
&s->irq_notifier[MSIX_SHARED_IRQ_IDX],
- false, nvme_handle_event, nvme_poll_cb);
+ false, nvme_handle_event, nvme_poll_cb,
+ nvme_poll_ready);

if (!nvme_identify(bs, namespace, errp)) {
ret = -EIO;
@@ -XXX,XX +XXX,XX @@ static void nvme_close(BlockDriverState *bs)
g_free(s->queues);
aio_set_event_notifier(bdrv_get_aio_context(bs),
&s->irq_notifier[MSIX_SHARED_IRQ_IDX],
- false, NULL, NULL);
+ false, NULL, NULL, NULL);
event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
qemu_vfio_pci_unmap_bar(s->vfio, 0, s->bar0_wo_map,
0, sizeof(NvmeBar) + NVME_DOORBELL_SIZE);
@@ -XXX,XX +XXX,XX @@ static void nvme_detach_aio_context(BlockDriverState *bs)

aio_set_event_notifier(bdrv_get_aio_context(bs),
&s->irq_notifier[MSIX_SHARED_IRQ_IDX],
- false, NULL, NULL);
+ false, NULL, NULL, NULL);
}

static void nvme_attach_aio_context(BlockDriverState *bs,
@@ -XXX,XX +XXX,XX @@ static void nvme_attach_aio_context(BlockDriverState *bs,

s->aio_context = new_context;
aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
- false, nvme_handle_event, nvme_poll_cb);
+ false, nvme_handle_event, nvme_poll_cb,
+ nvme_poll_ready);

for (unsigned i = 0; i < s->queue_count; i++) {
NVMeQueuePair *q = s->queues[i];
diff --git a/block/ssh.c b/block/ssh.c
index XXXXXXX..XXXXXXX 100644
--- a/block/ssh.c
+++ b/block/ssh.c
@@ -XXX,XX +XXX,XX @@ static void restart_coroutine(void *opaque)
AioContext *ctx = bdrv_get_aio_context(bs);

trace_ssh_restart_coroutine(restart->co);
- aio_set_fd_handler(ctx, s->sock, false, NULL, NULL, NULL, NULL);
+ aio_set_fd_handler(ctx, s->sock, false, NULL, NULL, NULL, NULL, NULL);

aio_co_wake(restart->co);
}
@@ -XXX,XX +XXX,XX @@ static coroutine_fn void co_yield(BDRVSSHState *s, BlockDriverState *bs)
trace_ssh_co_yield(s->sock, rd_handler, wr_handler);

aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock,
- false, rd_handler, wr_handler, NULL, &restart);
+ false, rd_handler, wr_handler, NULL, NULL, &restart);
qemu_coroutine_yield();
trace_ssh_co_yield_back(s->sock);
}
diff --git a/block/win32-aio.c b/block/win32-aio.c
index XXXXXXX..XXXXXXX 100644
--- a/block/win32-aio.c
+++ b/block/win32-aio.c
@@ -XXX,XX +XXX,XX @@ int win32_aio_attach(QEMUWin32AIOState *aio, HANDLE hfile)
void win32_aio_detach_aio_context(QEMUWin32AIOState *aio,
AioContext *old_context)
{
- aio_set_event_notifier(old_context, &aio->e, false, NULL, NULL);
+ aio_set_event_notifier(old_context, &aio->e, false, NULL, NULL, NULL);
aio->aio_ctx = NULL;
}

@@ -XXX,XX +XXX,XX @@ void win32_aio_attach_aio_context(QEMUWin32AIOState *aio,
{
aio->aio_ctx = new_context;
aio_set_event_notifier(new_context, &aio->e, false,
- win32_aio_completion_cb, NULL);
+ win32_aio_completion_cb, NULL, NULL);
}

QEMUWin32AIOState *win32_aio_init(void)
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -XXX,XX +XXX,XX @@ static bool virtio_queue_host_notifier_aio_poll(void *opaque)
EventNotifier *n = opaque;
VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

- if (!vq->vring.desc || virtio_queue_empty(vq)) {
- return false;
- }
+ return vq->vring.desc && !virtio_queue_empty(vq);
+}

- return virtio_queue_notify_aio_vq(vq);
+static void virtio_queue_host_notifier_aio_poll_ready(EventNotifier *n)
+{
+ VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
+
+ virtio_queue_notify_aio_vq(vq);
}

static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
@@ -XXX,XX +XXX,XX @@ void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
vq->handle_aio_output = handle_output;
aio_set_event_notifier(ctx, &vq->host_notifier, true,
virtio_queue_host_notifier_aio_read,
- virtio_queue_host_notifier_aio_poll);
+ virtio_queue_host_notifier_aio_poll,
+ virtio_queue_host_notifier_aio_poll_ready);
aio_set_event_notifier_poll(ctx, &vq->host_notifier,
virtio_queue_host_notifier_aio_poll_begin,
virtio_queue_host_notifier_aio_poll_end);
} else {
- aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL);
+ aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL, NULL);
/* Test and clear notifier before after disabling event,
* in case poll callback didn't have time to run. */
virtio_queue_host_notifier_aio_read(&vq->host_notifier);
diff --git a/hw/xen/xen-bus.c b/hw/xen/xen-bus.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/xen/xen-bus.c
+++ b/hw/xen/xen-bus.c
@@ -XXX,XX +XXX,XX @@ void xen_device_set_event_channel_context(XenDevice *xendev,

if (channel->ctx)
aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), true,
- NULL, NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL, NULL);

channel->ctx = ctx;
aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), true,
- xen_device_event, NULL, xen_device_poll, channel);
+ xen_device_event, NULL, xen_device_poll, NULL, channel);
}

XenEventChannel *xen_device_bind_event_channel(XenDevice *xendev,
@@ -XXX,XX +XXX,XX @@ void xen_device_unbind_event_channel(XenDevice *xendev,
QLIST_REMOVE(channel, list);

aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), true,
- NULL, NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL, NULL);

if (xenevtchn_unbind(channel->xeh, channel->local_port) < 0) {
error_setg_errno(errp, errno, "xenevtchn_unbind failed");
diff --git a/io/channel-command.c b/io/channel-command.c
index XXXXXXX..XXXXXXX 100644
--- a/io/channel-command.c
+++ b/io/channel-command.c
@@ -XXX,XX +XXX,XX @@ static void qio_channel_command_set_aio_fd_handler(QIOChannel *ioc,
void *opaque)
{
QIOChannelCommand *cioc = QIO_CHANNEL_COMMAND(ioc);
- aio_set_fd_handler(ctx, cioc->readfd, false, io_read, NULL, NULL, opaque);
- aio_set_fd_handler(ctx, cioc->writefd, false, NULL, io_write, NULL, opaque);
+ aio_set_fd_handler(ctx, cioc->readfd, false,
+ io_read, NULL, NULL, NULL, opaque);
+ aio_set_fd_handler(ctx, cioc->writefd, false,
+ NULL, io_write, NULL, NULL, opaque);
}


diff --git a/io/channel-file.c b/io/channel-file.c
index XXXXXXX..XXXXXXX 100644
--- a/io/channel-file.c
+++ b/io/channel-file.c
@@ -XXX,XX +XXX,XX @@ static void qio_channel_file_set_aio_fd_handler(QIOChannel *ioc,
void *opaque)
{
QIOChannelFile *fioc = QIO_CHANNEL_FILE(ioc);
- aio_set_fd_handler(ctx, fioc->fd, false, io_read, io_write, NULL, opaque);
+ aio_set_fd_handler(ctx, fioc->fd, false, io_read, io_write,
+ NULL, NULL, opaque);
}

static GSource *qio_channel_file_create_watch(QIOChannel *ioc,
diff --git a/io/channel-socket.c b/io/channel-socket.c
index XXXXXXX..XXXXXXX 100644
--- a/io/channel-socket.c
+++ b/io/channel-socket.c
@@ -XXX,XX +XXX,XX @@ static void qio_channel_socket_set_aio_fd_handler(QIOChannel *ioc,
void *opaque)
{
QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
- aio_set_fd_handler(ctx, sioc->fd, false, io_read, io_write, NULL, opaque);
+ aio_set_fd_handler(ctx, sioc->fd, false,
+ io_read, io_write, NULL, NULL, opaque);
}

static GSource *qio_channel_socket_create_watch(QIOChannel *ioc,
diff --git a/migration/rdma.c b/migration/rdma.c
index XXXXXXX..XXXXXXX 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -XXX,XX +XXX,XX @@ static void qio_channel_rdma_set_aio_fd_handler(QIOChannel *ioc,
QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
if (io_read) {
aio_set_fd_handler(ctx, rioc->rdmain->recv_comp_channel->fd,
- false, io_read, io_write, NULL, opaque);
+ false, io_read, io_write, NULL, NULL, opaque);
aio_set_fd_handler(ctx, rioc->rdmain->send_comp_channel->fd,
- false, io_read, io_write, NULL, opaque);
+ false, io_read, io_write, NULL, NULL, opaque);
} else {
aio_set_fd_handler(ctx, rioc->rdmaout->recv_comp_channel->fd,
- false, io_read, io_write, NULL, opaque);
+ false, io_read, io_write, NULL, NULL, opaque);
aio_set_fd_handler(ctx, rioc->rdmaout->send_comp_channel->fd,
- false, io_read, io_write, NULL, opaque);
+ false, io_read, io_write, NULL, NULL, opaque);
}
}

diff --git a/tests/unit/test-aio.c b/tests/unit/test-aio.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/unit/test-aio.c
+++ b/tests/unit/test-aio.c
@@ -XXX,XX +XXX,XX @@ static void *test_acquire_thread(void *opaque)
static void set_event_notifier(AioContext *ctx, EventNotifier *notifier,
EventNotifierHandler *handler)
{
- aio_set_event_notifier(ctx, notifier, false, handler, NULL);
+ aio_set_event_notifier(ctx, notifier, false, handler, NULL, NULL);
}

static void dummy_notifier_read(EventNotifier *n)
@@ -XXX,XX +XXX,XX @@ static void test_aio_external_client(void)
for (i = 1; i < 3; i++) {
EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
event_notifier_init(&data.e, false);
- aio_set_event_notifier(ctx, &data.e, true, event_ready_cb, NULL);
+ aio_set_event_notifier(ctx, &data.e, true, event_ready_cb, NULL, NULL);
event_notifier_set(&data.e);
for (j = 0; j < i; j++) {
aio_disable_external(ctx);
diff --git a/tests/unit/test-fdmon-epoll.c b/tests/unit/test-fdmon-epoll.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/unit/test-fdmon-epoll.c
+++ b/tests/unit/test-fdmon-epoll.c
@@ -XXX,XX +XXX,XX @@ static void add_event_notifiers(EventNotifier *notifiers, size_t n)
for (size_t i = 0; i < n; i++) {
event_notifier_init(&notifiers[i], false);
aio_set_event_notifier(ctx, &notifiers[i], false,
- dummy_fd_handler, NULL);
+ dummy_fd_handler, NULL, NULL);
}
}

static void remove_event_notifiers(EventNotifier *notifiers, size_t n)
{
for (size_t i = 0; i < n; i++) {
- aio_set_event_notifier(ctx, &notifiers[i], false, NULL, NULL);
+ aio_set_event_notifier(ctx, &notifiers[i], false, NULL, NULL, NULL);
event_notifier_cleanup(&notifiers[i]);
}
}
diff --git a/util/aio-posix.c b/util/aio-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -XXX,XX +XXX,XX @@
#include "trace.h"
#include "aio-posix.h"

+/*
+ * G_IO_IN and G_IO_OUT are not appropriate revents values for polling, since
+ * the handler may not need to access the file descriptor. For example, the
+ * handler doesn't need to read from an EventNotifier if it polled a memory
+ * location and a read syscall would be slow. Define our own unique revents
+ * value to indicate that polling determined this AioHandler is ready.
+ */
+#define REVENTS_POLL_READY 0
+
/* Stop userspace polling on a handler if it isn't active for some time */
#define POLL_IDLE_INTERVAL_NS (7 * NANOSECONDS_PER_SECOND)

@@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx,
IOHandler *io_read,
IOHandler *io_write,
AioPollFn *io_poll,
+ IOHandler *io_poll_ready,
void *opaque)
{
AioHandler *node;
@@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx,
bool deleted = false;
int poll_disable_change;

+ if (io_poll && !io_poll_ready) {
+ io_poll = NULL; /* polling only makes sense if there is a handler */
+ }
+
qemu_lockcnt_lock(&ctx->list_lock);

node = find_aio_handler(ctx, fd);
@@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx,
new_node->io_read = io_read;
new_node->io_write = io_write;
new_node->io_poll = io_poll;
+ new_node->io_poll_ready = io_poll_ready;
new_node->opaque = opaque;
new_node->is_external = is_external;

@@ -XXX,XX +XXX,XX @@ void aio_set_event_notifier(AioContext *ctx,
EventNotifier *notifier,
bool is_external,
EventNotifierHandler *io_read,
- AioPollFn *io_poll)
+ AioPollFn *io_poll,
+ EventNotifierHandler *io_poll_ready)
{
aio_set_fd_handler(ctx, event_notifier_get_fd(notifier), is_external,
- (IOHandler *)io_read, NULL, io_poll, notifier);
+ (IOHandler *)io_read, NULL, io_poll,
+ (IOHandler *)io_poll_ready, notifier);
}

void aio_set_event_notifier_poll(AioContext *ctx,
@@ -XXX,XX +XXX,XX @@ void aio_set_event_notifier_poll(AioContext *ctx,
(IOHandler *)io_poll_end);
}

-static bool poll_set_started(AioContext *ctx, bool started)
+static bool poll_set_started(AioContext *ctx, AioHandlerList *ready_list,
+ bool started)
{
AioHandler *node;
bool progress = false;
@@ -XXX,XX +XXX,XX @@ static bool poll_set_started(AioContext *ctx, bool started)
}

/* Poll one last time in case ->io_poll_end() raced with the event */
- if (!started) {
- progress = node->io_poll(node->opaque) || progress;
+ if (!started && node->io_poll(node->opaque)) {
+ aio_add_ready_handler(ready_list, node, REVENTS_POLL_READY);
+ progress = true;
}
}
qemu_lockcnt_dec(&ctx->list_lock);
@@ -XXX,XX +XXX,XX @@ static bool poll_set_started(AioContext *ctx, bool started)

bool aio_prepare(AioContext *ctx)
{
+ AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list);
+
/* Poll mode cannot be used with glib's event loop, disable it. */
- poll_set_started(ctx, false);
+ poll_set_started(ctx, &ready_list, false);
+ /* TODO what to do with this list? */

return false;
}
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node)
}
QLIST_INSERT_HEAD(&ctx->poll_aio_handlers, node, node_poll);
}
+ if (!QLIST_IS_INSERTED(node, node_deleted) &&
+ revents == 0 &&
+ aio_node_check(ctx, node->is_external) &&
+ node->io_poll_ready) {
+ node->io_poll_ready(node->opaque);
+
+ /*
+ * Return early since revents was zero. aio_notify() does not count as
+ * progress.
+ */
+ return node->opaque != &ctx->notifier;
+ }

if (!QLIST_IS_INSERTED(node, node_deleted) &&
(revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
@@ -XXX,XX +XXX,XX @@ void aio_dispatch(AioContext *ctx)
}

static bool run_poll_handlers_once(AioContext *ctx,
+ AioHandlerList *ready_list,
int64_t now,
int64_t *timeout)
{
@@ -XXX,XX +XXX,XX @@ static bool run_poll_handlers_once(AioContext *ctx,
QLIST_FOREACH_SAFE(node, &ctx->poll_aio_handlers, node_poll, tmp) {
if (aio_node_check(ctx, node->is_external) &&
node->io_poll(node->opaque)) {
+ aio_add_ready_handler(ready_list, node, REVENTS_POLL_READY);
+
node->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS;

/*
@@ -XXX,XX +XXX,XX @@ static bool fdmon_supports_polling(AioContext *ctx)
return ctx->fdmon_ops->need_wait != aio_poll_disabled;
}

-static bool remove_idle_poll_handlers(AioContext *ctx, int64_t now)
+static bool remove_idle_poll_handlers(AioContext *ctx,
+ AioHandlerList *ready_list,
+ int64_t now)
{
AioHandler *node;
AioHandler *tmp;
@@ -XXX,XX +XXX,XX @@ static bool remove_idle_poll_handlers(AioContext *ctx, int64_t now)
* Nevermind about re-adding the handler in the rare case where
* this causes progress.
*/
- progress = node->io_poll(node->opaque) || progress;
+ if (node->io_poll(node->opaque)) {
+ aio_add_ready_handler(ready_list, node,
+ REVENTS_POLL_READY);
+ progress = true;
+ }
}
}
}
@@ -XXX,XX +XXX,XX @@ static bool remove_idle_poll_handlers(AioContext *ctx, int64_t now)

/* run_poll_handlers:
* @ctx: the AioContext
+ * @ready_list: the list to place ready handlers on
* @max_ns: maximum time to poll for, in nanoseconds
*
* Polls for a given time.
@@ -XXX,XX +XXX,XX @@ static bool remove_idle_poll_handlers(AioContext *ctx, int64_t now)
*
* Returns: true if progress was made, false otherwise
*/
-static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)
+static bool run_poll_handlers(AioContext *ctx, AioHandlerList *ready_list,
+ int64_t max_ns, int64_t *timeout)
{
bool progress;
int64_t start_time, elapsed_time;
@@ -XXX,XX +XXX,XX @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)

start_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
do {
- progress = run_poll_handlers_once(ctx, start_time, timeout);
+ progress = run_poll_handlers_once(ctx, ready_list,
+ start_time, timeout);
elapsed_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_time;
max_ns = qemu_soonest_timeout(*timeout, max_ns);
assert(!(max_ns && progress));
} while (elapsed_time < max_ns && !ctx->fdmon_ops->need_wait(ctx));

- if (remove_idle_poll_handlers(ctx, start_time + elapsed_time)) {
+ if (remove_idle_poll_handlers(ctx, ready_list,
+ start_time + elapsed_time)) {
*timeout = 0;
progress = true;
}
@@ -XXX,XX +XXX,XX @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)

/* try_poll_mode:
* @ctx: the AioContext
+ * @ready_list: list to add handlers that need to be run
* @timeout: timeout for blocking wait, computed by the caller and updated if
* polling succeeds.
*
@@ -XXX,XX +XXX,XX @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)
*
* Returns: true if progress was made, false otherwise
*/
-static bool try_poll_mode(AioContext *ctx, int64_t *timeout)
+static bool try_poll_mode(AioContext *ctx, AioHandlerList *ready_list,
+ int64_t *timeout)
{
int64_t max_ns;

@@ -XXX,XX +XXX,XX @@ static bool try_poll_mode(AioContext *ctx, int64_t *timeout)

max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns);
if (max_ns && !ctx->fdmon_ops->need_wait(ctx)) {
- poll_set_started(ctx, true);
+ poll_set_started(ctx, ready_list, true);

- if (run_poll_handlers(ctx, max_ns, timeout)) {
+ if (run_poll_handlers(ctx, ready_list, max_ns, timeout)) {
return true;
}
}

- if (poll_set_started(ctx, false)) {
+ if (poll_set_started(ctx, ready_list, false)) {
*timeout = 0;
return true;
}
@@ -XXX,XX +XXX,XX @@ static bool try_poll_mode(AioContext *ctx, int64_t *timeout)
bool aio_poll(AioContext *ctx, bool blocking)
{
AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list);
- int ret = 0;
bool progress;
bool use_notify_me;
int64_t timeout;
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
}

timeout = blocking ? aio_compute_timeout(ctx) : 0;
- progress = try_poll_mode(ctx, &timeout);
+ progress = try_poll_mode(ctx, &ready_list, &timeout);
assert(!(timeout && progress));

/*
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
* system call---a single round of run_poll_handlers_once suffices.
*/
if (timeout || ctx->fdmon_ops->need_wait(ctx)) {
- ret = ctx->fdmon_ops->wait(ctx, &ready_list, timeout);
+ ctx->fdmon_ops->wait(ctx, &ready_list, timeout);
}

if (use_notify_me) {
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
}

progress |= aio_bh_poll(ctx);
-
- if (ret > 0) {
- progress |= aio_dispatch_ready_handlers(ctx, &ready_list);
- }
+ progress |= aio_dispatch_ready_handlers(ctx, &ready_list);

aio_free_deleted_handlers(ctx);

diff --git a/util/aio-win32.c b/util/aio-win32.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-win32.c
+++ b/util/aio-win32.c
@@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx,
IOHandler *io_read,
IOHandler *io_write,
AioPollFn *io_poll,
+ IOHandler *io_poll_ready,
void *opaque)
{
/* fd is a SOCKET in our case */
@@ -XXX,XX +XXX,XX @@ void aio_set_event_notifier(AioContext *ctx,
EventNotifier *e,
bool is_external,
EventNotifierHandler *io_notify,
- AioPollFn *io_poll)
+ AioPollFn *io_poll,
+ EventNotifierHandler *io_poll_ready)
{
AioHandler *node;

diff --git a/util/async.c b/util/async.c
index XXXXXXX..XXXXXXX 100644
--- a/util/async.c
+++ b/util/async.c
@@ -XXX,XX +XXX,XX @@ aio_ctx_finalize(GSource *source)
g_free(bh);
}

- aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL);
+ aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL, NULL);
event_notifier_cleanup(&ctx->notifier);
qemu_rec_mutex_destroy(&ctx->lock);
qemu_lockcnt_destroy(&ctx->list_lock);
@@ -XXX,XX +XXX,XX @@ static bool aio_context_notifier_poll(void *opaque)
return qatomic_read(&ctx->notified);
}

+static void aio_context_notifier_poll_ready(EventNotifier *e)
+{
+ /* Do nothing, we just wanted to kick the event loop */
+}
+
static void co_schedule_bh_cb(void *opaque)
{
AioContext *ctx = opaque;
@@ -XXX,XX +XXX,XX @@ AioContext *aio_context_new(Error **errp)
aio_set_event_notifier(ctx, &ctx->notifier,
false,
aio_context_notifier_cb,
- aio_context_notifier_poll);
+ aio_context_notifier_poll,
+ aio_context_notifier_poll_ready);
#ifdef CONFIG_LINUX_AIO
ctx->linux_aio = NULL;
#endif
diff --git a/util/main-loop.c b/util/main-loop.c
index XXXXXXX..XXXXXXX 100644
--- a/util/main-loop.c
+++ b/util/main-loop.c
@@ -XXX,XX +XXX,XX @@ void qemu_set_fd_handler(int fd,
{
iohandler_init();
aio_set_fd_handler(iohandler_ctx, fd, false,
- fd_read, fd_write, NULL, opaque);
+ fd_read, fd_write, NULL, NULL, opaque);
}

void event_notifier_set_handler(EventNotifier *e,
@@ -XXX,XX +XXX,XX @@ void event_notifier_set_handler(EventNotifier *e,
{
iohandler_init();
aio_set_event_notifier(iohandler_ctx, e, false,
- handler, NULL);
+ handler, NULL, NULL);
}
diff --git a/util/qemu-coroutine-io.c b/util/qemu-coroutine-io.c
index XXXXXXX..XXXXXXX 100644
--- a/util/qemu-coroutine-io.c
+++ b/util/qemu-coroutine-io.c
@@ -XXX,XX +XXX,XX @@ typedef struct {
static void fd_coroutine_enter(void *opaque)
{
FDYieldUntilData *data = opaque;
- aio_set_fd_handler(data->ctx, data->fd, false, NULL, NULL, NULL, NULL);
+ aio_set_fd_handler(data->ctx, data->fd, false,
+ NULL, NULL, NULL, NULL, NULL);
qemu_coroutine_enter(data->co);
}

@@ -XXX,XX +XXX,XX @@ void coroutine_fn yield_until_fd_readable(int fd)
data.co = qemu_coroutine_self();
data.fd = fd;
aio_set_fd_handler(
- data.ctx, fd, false, fd_coroutine_enter, NULL, NULL, &data);
+ data.ctx, fd, false, fd_coroutine_enter, NULL, NULL, NULL, &data);
qemu_coroutine_yield();
}
diff --git a/util/vhost-user-server.c b/util/vhost-user-server.c
index XXXXXXX..XXXXXXX 100644
--- a/util/vhost-user-server.c
+++ b/util/vhost-user-server.c
@@ -XXX,XX +XXX,XX @@ set_watch(VuDev *vu_dev, int fd, int vu_evt,
vu_fd_watch->cb = cb;
qemu_set_nonblock(fd);
aio_set_fd_handler(server->ioc->ctx, fd, true, kick_handler,
- NULL, NULL, vu_fd_watch);
+ NULL, NULL, NULL, vu_fd_watch);
vu_fd_watch->vu_dev = vu_dev;
vu_fd_watch->pvt = pvt;
}
@@ -XXX,XX +XXX,XX @@ static void remove_watch(VuDev *vu_dev, int fd)
if (!vu_fd_watch) {
return;
}
- aio_set_fd_handler(server->ioc->ctx, fd, true, NULL, NULL, NULL, NULL);
+ aio_set_fd_handler(server->ioc->ctx, fd, true,
+ NULL, NULL, NULL, NULL, NULL);

QTAILQ_REMOVE(&server->vu_fd_watches, vu_fd_watch, next);
g_free(vu_fd_watch);
@@ -XXX,XX +XXX,XX @@ void vhost_user_server_stop(VuServer *server)

QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
aio_set_fd_handler(server->ctx, vu_fd_watch->fd, true,
- NULL, NULL, NULL, vu_fd_watch);
+ NULL, NULL, NULL, NULL, vu_fd_watch);
}

qio_channel_shutdown(server->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
@@ -XXX,XX +XXX,XX @@ void vhost_user_server_attach_aio_context(VuServer *server, AioContext *ctx)

QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
aio_set_fd_handler(ctx, vu_fd_watch->fd, true, kick_handler, NULL,
- NULL, vu_fd_watch);
+ NULL, NULL, vu_fd_watch);
}

aio_co_schedule(ctx, server->co_trip);
@@ -XXX,XX +XXX,XX @@ void vhost_user_server_detach_aio_context(VuServer *server)

QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
aio_set_fd_handler(server->ctx, vu_fd_watch->fd, true,
- NULL, NULL, NULL, vu_fd_watch);
+ NULL, NULL, NULL, NULL, vu_fd_watch);
}

qio_channel_detach_aio_context(server->ioc);
--
2.34.1

The virtqueue host notifier API
virtio_queue_aio_set_host_notifier_handler() polls the virtqueue for new
buffers. AioContext previously required a bool progress return value
indicating whether an event was handled or not. This is no longer
necessary because the AioContext polling API has been split into a poll
check function and an event handler function. The event handler is only
run when we know there is work to do, so it doesn't return bool.

The VirtIOHandleAIOOutput function signature is now the same as
VirtIOHandleOutput. Get rid of the bool return value.

Further simplifications will be made for virtio-blk and virtio-scsi in
the next patch.
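
[Illustration, not part of the patch: with the split API a hypothetical
device handler changes from reporting progress

  static bool my_handle_output(VirtIODevice *vdev, VirtQueue *vq)
  {
      return process_virtqueue(vdev, vq); /* did we do any work? */
  }

to simply doing the work, since the caller only invokes it once the
poll check has already said the virtqueue is ready:

  static void my_handle_output(VirtIODevice *vdev, VirtQueue *vq)
  {
      process_virtqueue(vdev, vq);
  }

my_handle_output() and process_virtqueue() are placeholder names.
--Ed.]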

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Message-id: 20211207132336.36627-3-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/hw/virtio/virtio.h      |  3 +--
 hw/block/dataplane/virtio-blk.c |  4 ++--
 hw/scsi/virtio-scsi-dataplane.c | 18 ++++++------------
 hw/virtio/virtio.c              | 12 ++++--------
 4 files changed, 13 insertions(+), 24 deletions(-)

diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -XXX,XX +XXX,XX @@ void virtio_error(VirtIODevice *vdev, const char *fmt, ...) GCC_FMT_ATTR(2, 3);
void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name);

typedef void (*VirtIOHandleOutput)(VirtIODevice *, VirtQueue *);
-typedef bool (*VirtIOHandleAIOOutput)(VirtIODevice *, VirtQueue *);

VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
VirtIOHandleOutput handle_output);
@@ -XXX,XX +XXX,XX @@ EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled);
void virtio_queue_host_notifier_read(EventNotifier *n);
void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
- VirtIOHandleAIOOutput handle_output);
+ VirtIOHandleOutput handle_output);
VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector);
VirtQueue *virtio_vector_next_queue(VirtQueue *vq);

diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -XXX,XX +XXX,XX @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
g_free(s);
}

-static bool virtio_blk_data_plane_handle_output(VirtIODevice *vdev,
+static void virtio_blk_data_plane_handle_output(VirtIODevice *vdev,
VirtQueue *vq)
{
VirtIOBlock *s = (VirtIOBlock *)vdev;
@@ -XXX,XX +XXX,XX @@ static bool virtio_blk_data_plane_handle_output(VirtIODevice *vdev,
assert(s->dataplane);
assert(s->dataplane_started);

- return virtio_blk_handle_vq(s, vq);
+ virtio_blk_handle_vq(s, vq);
}

/* Context: QEMU global mutex held */
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/scsi/virtio-scsi-dataplane.c
+++ b/hw/scsi/virtio-scsi-dataplane.c
@@ -XXX,XX +XXX,XX @@ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
}
}

-static bool virtio_scsi_data_plane_handle_cmd(VirtIODevice *vdev,
+static void virtio_scsi_data_plane_handle_cmd(VirtIODevice *vdev,
VirtQueue *vq)
{
- bool progress = false;
VirtIOSCSI *s = VIRTIO_SCSI(vdev);

virtio_scsi_acquire(s);
if (!s->dataplane_fenced) {
assert(s->ctx && s->dataplane_started);
- progress = virtio_scsi_handle_cmd_vq(s, vq);
+ virtio_scsi_handle_cmd_vq(s, vq);
}
virtio_scsi_release(s);
- return progress;
}

-static bool virtio_scsi_data_plane_handle_ctrl(VirtIODevice *vdev,
+static void virtio_scsi_data_plane_handle_ctrl(VirtIODevice *vdev,
VirtQueue *vq)
{
- bool progress = false;
VirtIOSCSI *s = VIRTIO_SCSI(vdev);

virtio_scsi_acquire(s);
if (!s->dataplane_fenced) {
assert(s->ctx && s->dataplane_started);
- progress = virtio_scsi_handle_ctrl_vq(s, vq);
+ virtio_scsi_handle_ctrl_vq(s, vq);
}
virtio_scsi_release(s);
- return progress;
}

-static bool virtio_scsi_data_plane_handle_event(VirtIODevice *vdev,
+static void virtio_scsi_data_plane_handle_event(VirtIODevice *vdev,
VirtQueue *vq)
{
- bool progress = false;
VirtIOSCSI *s = VIRTIO_SCSI(vdev);

virtio_scsi_acquire(s);
if (!s->dataplane_fenced) {
assert(s->ctx && s->dataplane_started);
- progress = virtio_scsi_handle_event_vq(s, vq);
+ virtio_scsi_handle_event_vq(s, vq);
}
virtio_scsi_release(s);
- return progress;
}

static int virtio_scsi_set_host_notifier(VirtIOSCSI *s, VirtQueue *vq, int n)
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -XXX,XX +XXX,XX @@ struct VirtQueue

uint16_t vector;
VirtIOHandleOutput handle_output;
- VirtIOHandleAIOOutput handle_aio_output;
+ VirtIOHandleOutput handle_aio_output;
VirtIODevice *vdev;
EventNotifier guest_notifier;
EventNotifier host_notifier;
@@ -XXX,XX +XXX,XX @@ void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
}
}

-static bool virtio_queue_notify_aio_vq(VirtQueue *vq)
+static void virtio_queue_notify_aio_vq(VirtQueue *vq)
{
- bool ret = false;
-
if (vq->vring.desc && vq->handle_aio_output) {
VirtIODevice *vdev = vq->vdev;

trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
- ret = vq->handle_aio_output(vdev, vq);
+ vq->handle_aio_output(vdev, vq);

if (unlikely(vdev->start_on_kick)) {
virtio_set_started(vdev, true);
}
}
-
- return ret;
}

static void virtio_queue_notify_vq(VirtQueue *vq)
@@ -XXX,XX +XXX,XX @@ static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
}

void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
- VirtIOHandleAIOOutput handle_output)
+ VirtIOHandleOutput handle_output)
{
if (handle_output) {
vq->handle_aio_output = handle_output;
--
2.34.1

The return value of virtio_blk_handle_vq() is no longer used. Get rid of
it. This is a step towards unifying the dataplane and non-dataplane
virtqueue handler functions.

Prepare virtio_blk_handle_output() to be used by both dataplane and
non-dataplane by making the condition for starting ioeventfd more
specific. This way it won't trigger when dataplane has already been
started.
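
[Illustration, not part of the patch: the sequence the stricter
condition guards against. Without the !s->dataplane_started check, a
guest kick arriving while dataplane is already running would try to
start ioeventfd again:

  virtio_blk_handle_output()             guest kick, dataplane not started
    -> virtio_device_start_ioeventfd()   dataplane takes over the vq
  virtio_blk_handle_output()             later kick, dataplane running
    -> virtio_blk_handle_vq()            handled directly, no restart

--Ed.]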
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
10
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
8
Reviewed-by: Alberto Garcia <berto@igalia.com>
11
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
9
Message-id: 20170301115026.22621-3-stefanha@redhat.com
12
Message-id: 20211207132336.36627-4-stefanha@redhat.com
10
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
13
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
11
---
14
---
12
tests/test-throttle.c | 8 ++++----
15
include/hw/virtio/virtio-blk.h | 2 +-
13
1 file changed, 4 insertions(+), 4 deletions(-)
16
hw/block/virtio-blk.c | 14 +++-----------
17
2 files changed, 4 insertions(+), 12 deletions(-)
14
18
15
diff --git a/tests/test-throttle.c b/tests/test-throttle.c
19
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
16
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
17
--- a/tests/test-throttle.c
21
--- a/include/hw/virtio/virtio-blk.h
18
+++ b/tests/test-throttle.c
22
+++ b/include/hw/virtio/virtio-blk.h
19
@@ -XXX,XX +XXX,XX @@ static void test_config_functions(void)
23
@@ -XXX,XX +XXX,XX @@ typedef struct MultiReqBuffer {
20
orig_cfg.buckets[THROTTLE_OPS_READ].avg = 69;
24
bool is_write;
21
orig_cfg.buckets[THROTTLE_OPS_WRITE].avg = 23;
25
} MultiReqBuffer;
22
26
23
- orig_cfg.buckets[THROTTLE_BPS_TOTAL].max = 0; /* should be corrected */
27
-bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq);
24
- orig_cfg.buckets[THROTTLE_BPS_READ].max = 1; /* should not be corrected */
28
+void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq);
25
+ orig_cfg.buckets[THROTTLE_BPS_TOTAL].max = 0; /* should be corrected */
29
void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh);
26
+ orig_cfg.buckets[THROTTLE_BPS_READ].max = 56; /* should not be corrected */
30
27
orig_cfg.buckets[THROTTLE_BPS_WRITE].max = 120;
31
#endif
28
32
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
29
orig_cfg.buckets[THROTTLE_OPS_TOTAL].max = 150;
33
index XXXXXXX..XXXXXXX 100644
30
@@ -XXX,XX +XXX,XX @@ static void test_config_functions(void)
34
--- a/hw/block/virtio-blk.c
31
g_assert(final_cfg.buckets[THROTTLE_OPS_READ].avg == 69);
35
+++ b/hw/block/virtio-blk.c
32
g_assert(final_cfg.buckets[THROTTLE_OPS_WRITE].avg == 23);
36
@@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
33
37
return 0;
34
- g_assert(final_cfg.buckets[THROTTLE_BPS_TOTAL].max == 15.3);/* fixed */
38
}
35
- g_assert(final_cfg.buckets[THROTTLE_BPS_READ].max == 1); /* not fixed */
39
36
+ g_assert(final_cfg.buckets[THROTTLE_BPS_TOTAL].max == 15.3); /* fixed */
40
-bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
37
+ g_assert(final_cfg.buckets[THROTTLE_BPS_READ].max == 56); /* not fixed */
41
+void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
38
g_assert(final_cfg.buckets[THROTTLE_BPS_WRITE].max == 120);
42
{
39
43
VirtIOBlockReq *req;
40
g_assert(final_cfg.buckets[THROTTLE_OPS_TOTAL].max == 150);
44
MultiReqBuffer mrb = {};
45
bool suppress_notifications = virtio_queue_get_notification(vq);
46
- bool progress = false;
47
48
aio_context_acquire(blk_get_aio_context(s->blk));
49
blk_io_plug(s->blk);
50
@@ -XXX,XX +XXX,XX @@ bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
51
}
52
53
while ((req = virtio_blk_get_request(s, vq))) {
54
- progress = true;
55
if (virtio_blk_handle_request(req, &mrb)) {
56
virtqueue_detach_element(req->vq, &req->elem, 0);
57
virtio_blk_free_request(req);
58
@@ -XXX,XX +XXX,XX @@ bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
59
60
blk_io_unplug(s->blk);
61
aio_context_release(blk_get_aio_context(s->blk));
62
- return progress;
63
-}
64
-
65
-static void virtio_blk_handle_output_do(VirtIOBlock *s, VirtQueue *vq)
66
-{
67
- virtio_blk_handle_vq(s, vq);
68
}
69
70
static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
71
{
72
VirtIOBlock *s = (VirtIOBlock *)vdev;
73
74
- if (s->dataplane) {
75
+ if (s->dataplane && !s->dataplane_started) {
76
/* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
77
* dataplane here instead of waiting for .set_status().
78
*/
79
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
80
return;
81
}
82
}
83
- virtio_blk_handle_output_do(s, vq);
84
+ virtio_blk_handle_vq(s, vq);
85
}
86
87
void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh)
41
--
88
--
42
2.9.3
89
2.34.1
43
90
44
91
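For context on why avg = 56 with max = 1 is invalid: a leaky bucket is only
self-consistent when the burst value (max) is at least the sustained rate
(avg), because the burst budget has to cover at least one second of
average-rate I/O. A minimal sketch of that invariant, using illustrative
types rather than the util/throttle.c API:

    /* Illustrative only: a max (burst) of 0 means "unset"; any non-zero
     * burst below avg would be exhausted before one second of sustained
     * I/O, which is why the test now uses max = 56. */
    typedef struct {
        double avg; /* sustained rate */
        double max; /* burst rate, 0 = unset */
    } ExampleBucket;

    static bool example_bucket_is_valid(const ExampleBucket *b)
    {
        return b->max == 0 || b->max >= b->avg;
    }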
1
From: Changlong Xie <xiecl.fnst@cn.fujitsu.com>
1
Prepare virtio_scsi_handle_cmd() to be used by both dataplane and
2
non-dataplane by making the condition for starting ioeventfd more
3
specific. This way it won't trigger when dataplane has already been
4
started.
2
5
3
Update Wen's email address so that he gets CC'ed on future patches and bugs for this feature.
6
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
4
7
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
5
Signed-off-by: Changlong Xie <xiecl.fnst@cn.fujitsu.com>
8
Message-id: 20211207132336.36627-5-stefanha@redhat.com
6
Message-id: 1492484893-23435-1-git-send-email-xiecl.fnst@cn.fujitsu.com
7
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
9
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
8
---
10
---
9
MAINTAINERS | 2 +-
11
hw/scsi/virtio-scsi.c | 2 +-
10
1 file changed, 1 insertion(+), 1 deletion(-)
12
1 file changed, 1 insertion(+), 1 deletion(-)
11
13
12
diff --git a/MAINTAINERS b/MAINTAINERS
14
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
13
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
14
--- a/MAINTAINERS
16
--- a/hw/scsi/virtio-scsi.c
15
+++ b/MAINTAINERS
17
+++ b/hw/scsi/virtio-scsi.c
16
@@ -XXX,XX +XXX,XX @@ S: Supported
18
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
17
F: tests/image-fuzzer/
19
/* use non-QOM casts in the data path */
18
20
VirtIOSCSI *s = (VirtIOSCSI *)vdev;
19
Replication
21
20
-M: Wen Congyang <wency@cn.fujitsu.com>
22
- if (s->ctx) {
21
+M: Wen Congyang <wencongyang2@huawei.com>
23
+ if (s->ctx && !s->dataplane_started) {
22
M: Changlong Xie <xiecl.fnst@cn.fujitsu.com>
24
virtio_device_start_ioeventfd(vdev);
23
S: Supported
25
if (!s->dataplane_fenced) {
24
F: replication*
26
return;
25
--
27
--
26
2.9.3
28
2.34.1
27
29
28
30
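The ioeventfd guard added here mirrors the virtio-blk change earlier in this
series. Condensed, the kick path both devices now share looks roughly like
the sketch below; device_has_dataplane(), dataplane_started() and handle_vq()
are placeholders for the device-specific fields and handlers, not real QEMU
functions:

    /* First kick with dataplane configured but not yet running: start
     * ioeventfd and let it redeliver the kick. Later kicks, and
     * configurations without dataplane, take the ordinary handler. */
    static void handle_output(VirtIODevice *vdev, VirtQueue *vq)
    {
        if (device_has_dataplane(vdev) && !dataplane_started(vdev)) {
            virtio_device_start_ioeventfd(vdev);
            return;
        }
        handle_vq(vdev, vq); /* unified dataplane/non-dataplane path */
    }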
1
Throttling has a weird property that throttle_get_config() does not
1
The difference between ->handle_output() and ->handle_aio_output() was
2
always return the same throttling settings that were given with
2
that ->handle_aio_output() returned a bool value indicating
3
throttle_config(). In other words, the set and get functions aren't
3
progress. This was needed by the old polling API but now that the bool
4
symmetric.
4
return value is gone, the two functions can be unified.
5
5
6
If .max is 0 then the throttling code assigns a default value of .avg /
7
10 in throttle_config(). This is an implementation detail of the
8
throttling algorithm. When throttle_get_config() is called the .max
9
value returned should still be 0.
10
11
Users are exposed to this quirk via "info block" or "query-block"
12
monitor commands. This has caused confusion because it looks like a bug
13
when an unexpected value is reported.
14
15
This patch hides the .max value adjustment in throttle_get_config() and
16
updates test-throttle.c appropriately.
17
18
Reported-by: Nini Gu <ngu@redhat.com>
19
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
6
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
20
Reviewed-by: Alberto Garcia <berto@igalia.com>
7
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
21
Message-id: 20170301115026.22621-4-stefanha@redhat.com
8
Message-id: 20211207132336.36627-6-stefanha@redhat.com
22
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
9
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
23
---
10
---
24
tests/test-throttle.c | 8 ++++----
11
hw/virtio/virtio.c | 33 +++------------------------------
25
util/throttle.c | 14 ++++++++++++++
12
1 file changed, 3 insertions(+), 30 deletions(-)
26
2 files changed, 18 insertions(+), 4 deletions(-)
27
13
28
diff --git a/tests/test-throttle.c b/tests/test-throttle.c
14
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
29
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
30
--- a/tests/test-throttle.c
16
--- a/hw/virtio/virtio.c
31
+++ b/tests/test-throttle.c
17
+++ b/hw/virtio/virtio.c
32
@@ -XXX,XX +XXX,XX @@ static void test_config_functions(void)
18
@@ -XXX,XX +XXX,XX @@ struct VirtQueue
33
orig_cfg.buckets[THROTTLE_OPS_READ].avg = 69;
19
34
orig_cfg.buckets[THROTTLE_OPS_WRITE].avg = 23;
20
uint16_t vector;
35
21
VirtIOHandleOutput handle_output;
36
- orig_cfg.buckets[THROTTLE_BPS_TOTAL].max = 0; /* should be corrected */
22
- VirtIOHandleOutput handle_aio_output;
37
- orig_cfg.buckets[THROTTLE_BPS_READ].max = 56; /* should not be corrected */
23
VirtIODevice *vdev;
38
+ orig_cfg.buckets[THROTTLE_BPS_TOTAL].max = 0;
24
EventNotifier guest_notifier;
39
+ orig_cfg.buckets[THROTTLE_BPS_READ].max = 56;
25
EventNotifier host_notifier;
40
orig_cfg.buckets[THROTTLE_BPS_WRITE].max = 120;
26
@@ -XXX,XX +XXX,XX @@ void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
41
42
orig_cfg.buckets[THROTTLE_OPS_TOTAL].max = 150;
43
@@ -XXX,XX +XXX,XX @@ static void test_config_functions(void)
44
g_assert(final_cfg.buckets[THROTTLE_OPS_READ].avg == 69);
45
g_assert(final_cfg.buckets[THROTTLE_OPS_WRITE].avg == 23);
46
47
- g_assert(final_cfg.buckets[THROTTLE_BPS_TOTAL].max == 15.3); /* fixed */
48
- g_assert(final_cfg.buckets[THROTTLE_BPS_READ].max == 56); /* not fixed */
49
+ g_assert(final_cfg.buckets[THROTTLE_BPS_TOTAL].max == 0);
50
+ g_assert(final_cfg.buckets[THROTTLE_BPS_READ].max == 56);
51
g_assert(final_cfg.buckets[THROTTLE_BPS_WRITE].max == 120);
52
53
g_assert(final_cfg.buckets[THROTTLE_OPS_TOTAL].max == 150);
54
diff --git a/util/throttle.c b/util/throttle.c
55
index XXXXXXX..XXXXXXX 100644
56
--- a/util/throttle.c
57
+++ b/util/throttle.c
58
@@ -XXX,XX +XXX,XX @@ static void throttle_fix_bucket(LeakyBucket *bkt)
59
}
27
}
60
}
28
}
61
29
62
+/* undo internal bucket parameter changes (see throttle_fix_bucket()) */
30
-static void virtio_queue_notify_aio_vq(VirtQueue *vq)
63
+static void throttle_unfix_bucket(LeakyBucket *bkt)
31
-{
64
+{
32
- if (vq->vring.desc && vq->handle_aio_output) {
65
+ if (bkt->max < bkt->avg) {
33
- VirtIODevice *vdev = vq->vdev;
66
+ bkt->max = 0;
34
-
67
+ }
35
- trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
68
+}
36
- vq->handle_aio_output(vdev, vq);
69
+
37
-
70
/* take care of canceling a timer */
38
- if (unlikely(vdev->start_on_kick)) {
71
static void throttle_cancel_timer(QEMUTimer *timer)
39
- virtio_set_started(vdev, true);
40
- }
41
- }
42
-}
43
-
44
static void virtio_queue_notify_vq(VirtQueue *vq)
72
{
45
{
73
@@ -XXX,XX +XXX,XX @@ void throttle_config(ThrottleState *ts,
46
if (vq->vring.desc && vq->handle_output) {
74
*/
47
@@ -XXX,XX +XXX,XX @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
75
void throttle_get_config(ThrottleState *ts, ThrottleConfig *cfg)
48
vdev->vq[i].vring.num_default = queue_size;
49
vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
50
vdev->vq[i].handle_output = handle_output;
51
- vdev->vq[i].handle_aio_output = NULL;
52
vdev->vq[i].used_elems = g_malloc0(sizeof(VirtQueueElement) *
53
queue_size);
54
55
@@ -XXX,XX +XXX,XX @@ void virtio_delete_queue(VirtQueue *vq)
56
vq->vring.num = 0;
57
vq->vring.num_default = 0;
58
vq->handle_output = NULL;
59
- vq->handle_aio_output = NULL;
60
g_free(vq->used_elems);
61
vq->used_elems = NULL;
62
virtio_virtqueue_reset_region_cache(vq);
63
@@ -XXX,XX +XXX,XX @@ EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
64
return &vq->guest_notifier;
65
}
66
67
-static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
68
-{
69
- VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
70
- if (event_notifier_test_and_clear(n)) {
71
- virtio_queue_notify_aio_vq(vq);
72
- }
73
-}
74
-
75
static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
76
{
76
{
77
+ int i;
77
VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
78
+
78
@@ -XXX,XX +XXX,XX @@ static void virtio_queue_host_notifier_aio_poll_ready(EventNotifier *n)
79
*cfg = ts->cfg;
79
{
80
+
80
VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
81
+ for (i = 0; i < BUCKETS_COUNT; i++) {
81
82
+ throttle_unfix_bucket(&cfg->buckets[i]);
82
- virtio_queue_notify_aio_vq(vq);
83
+ }
83
+ virtio_queue_notify_vq(vq);
84
}
84
}
85
85
86
static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
87
@@ -XXX,XX +XXX,XX @@ void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
88
VirtIOHandleOutput handle_output)
89
{
90
if (handle_output) {
91
- vq->handle_aio_output = handle_output;
92
aio_set_event_notifier(ctx, &vq->host_notifier, true,
93
- virtio_queue_host_notifier_aio_read,
94
+ virtio_queue_host_notifier_read,
95
virtio_queue_host_notifier_aio_poll,
96
virtio_queue_host_notifier_aio_poll_ready);
97
aio_set_event_notifier_poll(ctx, &vq->host_notifier,
98
@@ -XXX,XX +XXX,XX @@ void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
99
aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL, NULL);
100
/* Test and clear notifier before after disabling event,
101
* in case poll callback didn't have time to run. */
102
- virtio_queue_host_notifier_aio_read(&vq->host_notifier);
103
- vq->handle_aio_output = NULL;
104
+ virtio_queue_host_notifier_read(&vq->host_notifier);
105
}
106
}
86
107
87
--
108
--
88
2.9.3
109
2.34.1
89
110
90
111
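The restored symmetry is easiest to see as a round trip. In the sketch below,
set_config() and get_config() stand in for throttle_config() and
throttle_get_config(), whose timer-plumbing arguments are elided:

    ThrottleConfig in = { 0 }, out = { 0 };

    in.buckets[THROTTLE_BPS_TOTAL].avg = 153;
    in.buckets[THROTTLE_BPS_TOTAL].max = 0;  /* no burst configured */

    set_config(ts, &in);   /* internally bumps max to avg / 10 = 15.3 */
    get_config(ts, &out);  /* throttle_unfix_bucket() undoes the bump */

    assert(out.buckets[THROTTLE_BPS_TOTAL].max == 0); /* was 15.3 before */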
1
The disk I/O throttling options have been listed for a long time but
1
Now that virtio-blk and virtio-scsi are ready, get rid of
2
never explained on the QEMU man page.
2
the handle_aio_output() callback. It's no longer needed.
3
3
4
Suggested-by: Nini Gu <ngu@redhat.com>
5
Cc: Alberto Garcia <berto@igalia.com>
6
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
4
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
7
Reviewed-by: Alberto Garcia <berto@igalia.com>
5
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
8
Reviewed-by: Greg Kurz <groug@kaod.org>
6
Message-id: 20211207132336.36627-7-stefanha@redhat.com
9
Message-id: 20170301115026.22621-2-stefanha@redhat.com
10
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
7
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
11
---
8
---
12
qemu-options.hx | 24 ++++++++++++++++++++++++
9
include/hw/virtio/virtio.h | 4 +--
13
1 file changed, 24 insertions(+)
10
hw/block/dataplane/virtio-blk.c | 16 ++--------
14
11
hw/scsi/virtio-scsi-dataplane.c | 54 ++++-----------------------------
15
diff --git a/qemu-options.hx b/qemu-options.hx
12
hw/virtio/virtio.c | 32 +++++++++----------
16
index XXXXXXX..XXXXXXX 100644
13
4 files changed, 26 insertions(+), 80 deletions(-)
17
--- a/qemu-options.hx
14
18
+++ b/qemu-options.hx
15
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
19
@@ -XXX,XX +XXX,XX @@ file sectors into the image file.
16
index XXXXXXX..XXXXXXX 100644
20
conversion of plain zero writes by the OS to driver specific optimized
17
--- a/include/hw/virtio/virtio.h
21
zero write commands. You may even choose "unmap" if @var{discard} is set
18
+++ b/include/hw/virtio/virtio.h
22
to "unmap" to allow a zero write to be converted to an UNMAP operation.
19
@@ -XXX,XX +XXX,XX @@ bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev);
23
+@item bps=@var{b},bps_rd=@var{r},bps_wr=@var{w}
20
EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
24
+Specify bandwidth throttling limits in bytes per second, either for all request
21
void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled);
25
+types or for reads or writes only. Small values can lead to timeouts or hangs
22
void virtio_queue_host_notifier_read(EventNotifier *n);
26
+inside the guest. A safe minimum for disks is 2 MB/s.
23
-void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
27
+@item bps_max=@var{bm},bps_rd_max=@var{rm},bps_wr_max=@var{wm}
24
- VirtIOHandleOutput handle_output);
28
+Specify bursts in bytes per second, either for all request types or for reads
25
+void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx);
29
+or writes only. Bursts allow the guest I/O to spike above the limit
26
+void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx);
30
+temporarily.
27
VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector);
31
+@item iops=@var{i},iops_rd=@var{r},iops_wr=@var{w}
28
VirtQueue *virtio_vector_next_queue(VirtQueue *vq);
32
+Specify request rate limits in requests per second, either for all request
29
33
+types or for reads or writes only.
30
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
34
+@item iops_max=@var{bm},iops_rd_max=@var{rm},iops_wr_max=@var{wm}
31
index XXXXXXX..XXXXXXX 100644
35
+Specify bursts in requests per second, either for all request types or for reads
32
--- a/hw/block/dataplane/virtio-blk.c
36
+or writes only. Bursts allow the guest I/O to spike above the limit
33
+++ b/hw/block/dataplane/virtio-blk.c
37
+temporarily.
34
@@ -XXX,XX +XXX,XX @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
38
+@item iops_size=@var{is}
35
g_free(s);
39
+Let every @var{is} bytes of a request count as a new request for iops
36
}
40
+throttling purposes. Use this option to prevent guests from circumventing iops
37
41
+limits by sending fewer but larger requests.
38
-static void virtio_blk_data_plane_handle_output(VirtIODevice *vdev,
42
+@item group=@var{g}
39
- VirtQueue *vq)
43
+Join a throttling quota group with given name @var{g}. All drives that are
40
-{
44
+members of the same group are accounted for together. Use this option to
41
- VirtIOBlock *s = (VirtIOBlock *)vdev;
45
+prevent guests from circumventing throttling limits by using many small disks
42
-
46
+instead of a single larger disk.
43
- assert(s->dataplane);
47
@end table
44
- assert(s->dataplane_started);
48
45
-
49
By default, the @option{cache=writeback} mode is used. It will report data
46
- virtio_blk_handle_vq(s, vq);
47
-}
48
-
49
/* Context: QEMU global mutex held */
50
int virtio_blk_data_plane_start(VirtIODevice *vdev)
51
{
52
@@ -XXX,XX +XXX,XX @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
53
for (i = 0; i < nvqs; i++) {
54
VirtQueue *vq = virtio_get_queue(s->vdev, i);
55
56
- virtio_queue_aio_set_host_notifier_handler(vq, s->ctx,
57
- virtio_blk_data_plane_handle_output);
58
+ virtio_queue_aio_attach_host_notifier(vq, s->ctx);
59
}
60
aio_context_release(s->ctx);
61
return 0;
62
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_data_plane_stop_bh(void *opaque)
63
for (i = 0; i < s->conf->num_queues; i++) {
64
VirtQueue *vq = virtio_get_queue(s->vdev, i);
65
66
- virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, NULL);
67
+ virtio_queue_aio_detach_host_notifier(vq, s->ctx);
68
}
69
}
70
71
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
72
index XXXXXXX..XXXXXXX 100644
73
--- a/hw/scsi/virtio-scsi-dataplane.c
74
+++ b/hw/scsi/virtio-scsi-dataplane.c
75
@@ -XXX,XX +XXX,XX @@ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
76
}
77
}
78
79
-static void virtio_scsi_data_plane_handle_cmd(VirtIODevice *vdev,
80
- VirtQueue *vq)
81
-{
82
- VirtIOSCSI *s = VIRTIO_SCSI(vdev);
83
-
84
- virtio_scsi_acquire(s);
85
- if (!s->dataplane_fenced) {
86
- assert(s->ctx && s->dataplane_started);
87
- virtio_scsi_handle_cmd_vq(s, vq);
88
- }
89
- virtio_scsi_release(s);
90
-}
91
-
92
-static void virtio_scsi_data_plane_handle_ctrl(VirtIODevice *vdev,
93
- VirtQueue *vq)
94
-{
95
- VirtIOSCSI *s = VIRTIO_SCSI(vdev);
96
-
97
- virtio_scsi_acquire(s);
98
- if (!s->dataplane_fenced) {
99
- assert(s->ctx && s->dataplane_started);
100
- virtio_scsi_handle_ctrl_vq(s, vq);
101
- }
102
- virtio_scsi_release(s);
103
-}
104
-
105
-static void virtio_scsi_data_plane_handle_event(VirtIODevice *vdev,
106
- VirtQueue *vq)
107
-{
108
- VirtIOSCSI *s = VIRTIO_SCSI(vdev);
109
-
110
- virtio_scsi_acquire(s);
111
- if (!s->dataplane_fenced) {
112
- assert(s->ctx && s->dataplane_started);
113
- virtio_scsi_handle_event_vq(s, vq);
114
- }
115
- virtio_scsi_release(s);
116
-}
117
-
118
static int virtio_scsi_set_host_notifier(VirtIOSCSI *s, VirtQueue *vq, int n)
119
{
120
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
121
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_dataplane_stop_bh(void *opaque)
122
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
123
int i;
124
125
- virtio_queue_aio_set_host_notifier_handler(vs->ctrl_vq, s->ctx, NULL);
126
- virtio_queue_aio_set_host_notifier_handler(vs->event_vq, s->ctx, NULL);
127
+ virtio_queue_aio_detach_host_notifier(vs->ctrl_vq, s->ctx);
128
+ virtio_queue_aio_detach_host_notifier(vs->event_vq, s->ctx);
129
for (i = 0; i < vs->conf.num_queues; i++) {
130
- virtio_queue_aio_set_host_notifier_handler(vs->cmd_vqs[i], s->ctx, NULL);
131
+ virtio_queue_aio_detach_host_notifier(vs->cmd_vqs[i], s->ctx);
132
}
133
}
134
135
@@ -XXX,XX +XXX,XX @@ int virtio_scsi_dataplane_start(VirtIODevice *vdev)
136
memory_region_transaction_commit();
137
138
aio_context_acquire(s->ctx);
139
- virtio_queue_aio_set_host_notifier_handler(vs->ctrl_vq, s->ctx,
140
- virtio_scsi_data_plane_handle_ctrl);
141
- virtio_queue_aio_set_host_notifier_handler(vs->event_vq, s->ctx,
142
- virtio_scsi_data_plane_handle_event);
143
+ virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, s->ctx);
144
+ virtio_queue_aio_attach_host_notifier(vs->event_vq, s->ctx);
145
146
for (i = 0; i < vs->conf.num_queues; i++) {
147
- virtio_queue_aio_set_host_notifier_handler(vs->cmd_vqs[i], s->ctx,
148
- virtio_scsi_data_plane_handle_cmd);
149
+ virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], s->ctx);
150
}
151
152
s->dataplane_starting = false;
153
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
154
index XXXXXXX..XXXXXXX 100644
155
--- a/hw/virtio/virtio.c
156
+++ b/hw/virtio/virtio.c
157
@@ -XXX,XX +XXX,XX @@ static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
158
virtio_queue_set_notification(vq, 1);
159
}
160
161
-void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
162
- VirtIOHandleOutput handle_output)
163
+void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx)
164
{
165
- if (handle_output) {
166
- aio_set_event_notifier(ctx, &vq->host_notifier, true,
167
- virtio_queue_host_notifier_read,
168
- virtio_queue_host_notifier_aio_poll,
169
- virtio_queue_host_notifier_aio_poll_ready);
170
- aio_set_event_notifier_poll(ctx, &vq->host_notifier,
171
- virtio_queue_host_notifier_aio_poll_begin,
172
- virtio_queue_host_notifier_aio_poll_end);
173
- } else {
174
- aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL, NULL);
175
- /* Test and clear notifier before after disabling event,
176
- * in case poll callback didn't have time to run. */
177
- virtio_queue_host_notifier_read(&vq->host_notifier);
178
- }
179
+ aio_set_event_notifier(ctx, &vq->host_notifier, true,
180
+ virtio_queue_host_notifier_read,
181
+ virtio_queue_host_notifier_aio_poll,
182
+ virtio_queue_host_notifier_aio_poll_ready);
183
+ aio_set_event_notifier_poll(ctx, &vq->host_notifier,
184
+ virtio_queue_host_notifier_aio_poll_begin,
185
+ virtio_queue_host_notifier_aio_poll_end);
186
+}
187
+
188
+void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx)
189
+{
190
+ aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL, NULL);
191
+ /* Test and clear notifier before after disabling event,
192
+ * in case poll callback didn't have time to run. */
193
+ virtio_queue_host_notifier_read(&vq->host_notifier);
194
}
195
196
void virtio_queue_host_notifier_read(EventNotifier *n)
50
--
197
--
51
2.9.3
198
2.34.1
52
199
53
200
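As a usage illustration for the options documented above (file names and
numbers are arbitrary): each drive below is limited to 2 MB/s and 100
requests per second, and both share a single quota through the "disks" group:

    qemu-system-x86_64 \
        -drive file=a.qcow2,if=virtio,bps=2097152,iops=100,iops_size=4096,group=disks \
        -drive file=b.qcow2,if=virtio,bps=2097152,iops=100,iops_size=4096,group=disks

With iops_size=4096, a single 64 KiB request counts as 16 I/O operations
against the iops budget, so large requests cannot sidestep the limit.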