The following changes since commit 411ad78115ebeb3411cf4b7622784b93dfabe259:

  Merge remote-tracking branch 'remotes/stefanberger/tags/pull-tpm-2017-12-15-1' into staging (2017-12-17 15:27:41 +0000)

are available in the git repository at:

  git://github.com/codyprime/qemu-kvm-jtc.git tags/block-pull-request

for you to fetch changes up to 996922de45299878cdc4c15b72b19edf2bc618a4:

  block/curl: fix minor memory leaks (2017-12-18 15:44:39 -0500)

----------------------------------------------------------------
Blockjob and protocol patches
----------------------------------------------------------------

Jeff Cody (4):
  block/sheepdog: remove spurious NULL check
  block/sheepdog: code beautification
  block/curl: check error return of curl_global_init()
  block/curl: fix minor memory leaks

John Snow (1):
  blockjob: kick jobs on set-speed

Vladimir Sementsov-Ogievskiy (5):
  hbitmap: add next_zero function
  backup: move from done_bitmap to copy_bitmap
  backup: init copy_bitmap from sync_bitmap for incremental
  backup: simplify non-dirty bits progress processing
  backup: use copy_bitmap in incremental backup

 block/backup.c               | 116 +++++++++++++++++-------
 block/curl.c                 |  24 +++++--
 block/dirty-bitmap.c         |   5 ++
 block/sheepdog.c             | 166 +++++++++++++++++++++----------------------
 blockjob.c                   |  30 +++++++-
 include/block/dirty-bitmap.h |   1 +
 include/qemu/hbitmap.h       |   8 +++
 tests/test-hbitmap.c         |  61 ++++++++++++++++
 util/hbitmap.c               |  39 ++++++++++
 9 files changed, 309 insertions(+), 141 deletions(-)

--
2.9.5

The following changes since commit 91f5f7a5df1fda8c34677a7c49ee8a4bb5b56a36:

  Merge remote-tracking branch 'remotes/lvivier-gitlab/tags/linux-user-for-7.0-pull-request' into staging (2022-01-12 11:51:47 +0000)

are available in the Git repository at:

  https://gitlab.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to db608fb78444c58896db69495729e4458eeaace1:

  virtio: unify dataplane and non-dataplane ->handle_output() (2022-01-12 17:09:39 +0000)

----------------------------------------------------------------
Pull request
----------------------------------------------------------------

Stefan Hajnoczi (6):
  aio-posix: split poll check from ready handler
  virtio: get rid of VirtIOHandleAIOOutput
  virtio-blk: drop unused virtio_blk_handle_vq() return value
  virtio-scsi: prepare virtio_scsi_handle_cmd for dataplane
  virtio: use ->handle_output() instead of ->handle_aio_output()
  virtio: unify dataplane and non-dataplane ->handle_output()

 include/block/aio.h             |  4 +-
 include/hw/virtio/virtio-blk.h  |  2 +-
 include/hw/virtio/virtio.h      |  5 +-
 util/aio-posix.h                |  1 +
 block/curl.c                    | 11 ++--
 block/export/fuse.c             |  4 +-
 block/io_uring.c                | 19 ++---
 block/iscsi.c                   |  4 +-
 block/linux-aio.c               | 16 +++---
 block/nfs.c                     |  6 +--
 block/nvme.c                    | 51 ++++++++++++-------
 block/ssh.c                     |  4 +-
 block/win32-aio.c               |  4 +-
 hw/block/dataplane/virtio-blk.c | 16 +-----
 hw/block/virtio-blk.c           | 14 ++----
 hw/scsi/virtio-scsi-dataplane.c | 60 +++-------
 hw/scsi/virtio-scsi.c           |  2 +-
 hw/virtio/virtio.c              | 73 +++++++------------
 hw/xen/xen-bus.c                |  6 +--
 io/channel-command.c            |  6 ++-
 io/channel-file.c               |  3 +-
 io/channel-socket.c             |  3 +-
 migration/rdma.c                |  8 +--
 tests/unit/test-aio.c           |  4 +-
 tests/unit/test-fdmon-epoll.c   |  4 +-
 util/aio-posix.c                | 89 +++++++++++++++++++++++++--------
 util/aio-win32.c                |  4 +-
 util/async.c                    | 10 +++-
 util/main-loop.c                |  4 +-
 util/qemu-coroutine-io.c        |  5 +-
 util/vhost-user-server.c        | 11 ++--
 31 files changed, 221 insertions(+), 232 deletions(-)

--
2.34.1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>

We should not copy non-dirty clusters in write notifiers. So,
initialize copy_bitmap from sync_bitmap.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20171012135313.227864-4-vsementsov@virtuozzo.com
Signed-off-by: Jeff Cody <jcody@redhat.com>
---
 block/backup.c | 44 +++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 43 insertions(+), 1 deletion(-)

diff --git a/block/backup.c b/block/backup.c
index XXXXXXX..XXXXXXX 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -XXX,XX +XXX,XX @@ out:
     return ret;
 }

+/* init copy_bitmap from sync_bitmap */
+static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
+{
+    BdrvDirtyBitmapIter *dbi;
+    int64_t offset;
+    int64_t end = DIV_ROUND_UP(bdrv_dirty_bitmap_size(job->sync_bitmap),
+                               job->cluster_size);
+
+    dbi = bdrv_dirty_iter_new(job->sync_bitmap);
+    while ((offset = bdrv_dirty_iter_next(dbi)) != -1) {
+        int64_t cluster = offset / job->cluster_size;
+        int64_t next_cluster;
+
+        offset += bdrv_dirty_bitmap_granularity(job->sync_bitmap);
+        if (offset >= bdrv_dirty_bitmap_size(job->sync_bitmap)) {
+            hbitmap_set(job->copy_bitmap, cluster, end - cluster);
+            break;
+        }
+
+        offset = bdrv_dirty_bitmap_next_zero(job->sync_bitmap, offset);
+        if (offset == -1) {
+            hbitmap_set(job->copy_bitmap, cluster, end - cluster);
+            break;
+        }
+
+        next_cluster = DIV_ROUND_UP(offset, job->cluster_size);
+        hbitmap_set(job->copy_bitmap, cluster, next_cluster - cluster);
+        if (next_cluster >= end) {
+            break;
+        }
+
+        bdrv_set_dirty_iter(dbi, next_cluster * job->cluster_size);
+    }
+
+    bdrv_dirty_iter_free(dbi);
+}
+
 static void coroutine_fn backup_run(void *opaque)
 {
     BackupBlockJob *job = opaque;
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn backup_run(void *opaque)

     nb_clusters = DIV_ROUND_UP(job->common.len, job->cluster_size);
     job->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
-    hbitmap_set(job->copy_bitmap, 0, nb_clusters);
+    if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
+        backup_incremental_init_copy_bitmap(job);
+    } else {
+        hbitmap_set(job->copy_bitmap, 0, nb_clusters);
+    }
+

     job->before_write.notify = backup_before_write_notify;
     bdrv_add_before_write_notifier(bs, &job->before_write);
--
2.9.5

Adaptive polling measures the execution time of the polling check plus
handlers called when a polled event becomes ready. Handlers can take a
significant amount of time, making it look like polling was running for
a long time when in fact the event handler was running for a long time.

For example, on Linux the io_submit(2) syscall invoked when a virtio-blk
device's virtqueue becomes ready can take 10s of microseconds. This
can exceed the default polling interval (32 microseconds) and cause
adaptive polling to stop polling.

By excluding the handler's execution time from the polling check we make
the adaptive polling calculation more accurate. As a result, the event
loop now stays in polling mode where previously it would have fallen
back to file descriptor monitoring.

The following data was collected with virtio-blk num-queues=2
event_idx=off using an IOThread. Before:

168k IOPS, IOThread syscalls:

  9837.115 ( 0.020 ms): IO iothread1/620155 io_submit(ctx_id: 140512552468480, nr: 16, iocbpp: 0x7fcb9f937db0) = 16
  9837.158 ( 0.002 ms): IO iothread1/620155 write(fd: 103, buf: 0x556a2ef71b88, count: 8) = 8
  9837.161 ( 0.001 ms): IO iothread1/620155 write(fd: 104, buf: 0x556a2ef71b88, count: 8) = 8
  9837.163 ( 0.001 ms): IO iothread1/620155 ppoll(ufds: 0x7fcb90002800, nfds: 4, tsp: 0x7fcb9f1342d0, sigsetsize: 8) = 3
  9837.164 ( 0.001 ms): IO iothread1/620155 read(fd: 107, buf: 0x7fcb9f939cc0, count: 512) = 8
  9837.174 ( 0.001 ms): IO iothread1/620155 read(fd: 105, buf: 0x7fcb9f939cc0, count: 512) = 8
  9837.176 ( 0.001 ms): IO iothread1/620155 read(fd: 106, buf: 0x7fcb9f939cc0, count: 512) = 8
  9837.209 ( 0.035 ms): IO iothread1/620155 io_submit(ctx_id: 140512552468480, nr: 32, iocbpp: 0x7fca7d0cebe0) = 32

174k IOPS (+3.6%), IOThread syscalls:

  9809.566 ( 0.036 ms): IO iothread1/623061 io_submit(ctx_id: 140539805028352, nr: 32, iocbpp: 0x7fd0cdd62be0) = 32
  9809.625 ( 0.001 ms): IO iothread1/623061 write(fd: 103, buf: 0x5647cfba5f58, count: 8) = 8
  9809.627 ( 0.002 ms): IO iothread1/623061 write(fd: 104, buf: 0x5647cfba5f58, count: 8) = 8
  9809.663 ( 0.036 ms): IO iothread1/623061 io_submit(ctx_id: 140539805028352, nr: 32, iocbpp: 0x7fd0d0388b50) = 32

Notice that ppoll(2) and eventfd read(2) syscalls are eliminated because
the IOThread stays in polling mode instead of falling back to file
descriptor monitoring.

As usual, polling is not implemented on Windows so this patch ignores
the new io_poll_ready() callback in aio-win32.c.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Message-id: 20211207132336.36627-2-stefanha@redhat.com

[Fixed up aio_set_event_notifier() calls in
tests/unit/test-fdmon-epoll.c added after this series was queued.
--Stefan]

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/block/aio.h           |  4 +-
 util/aio-posix.h              |  1 +
 block/curl.c                  | 11 +++--
 block/export/fuse.c           |  4 +-
 block/io_uring.c              | 19 ++----
 block/iscsi.c                 |  4 +-
 block/linux-aio.c             | 16 ++---
 block/nfs.c                   |  6 +--
 block/nvme.c                  | 51 +++++++++++-------
 block/ssh.c                   |  4 +-
 block/win32-aio.c             |  4 +-
 hw/virtio/virtio.c            | 16 ++---
 hw/xen/xen-bus.c              |  6 +--
 io/channel-command.c          |  6 ++-
 io/channel-file.c             |  3 +-
 io/channel-socket.c           |  3 +-
 migration/rdma.c              |  8 ++--
 tests/unit/test-aio.c         |  4 +-
 tests/unit/test-fdmon-epoll.c |  4 +-
 util/aio-posix.c              | 89 ++++++++++++++++++++++++++---------
 util/aio-win32.c              |  4 +-
 util/async.c                  | 10 +++-
 util/main-loop.c              |  4 +-
 util/qemu-coroutine-io.c      |  5 +-
 util/vhost-user-server.c      | 11 +++--
 25 files changed, 193 insertions(+), 104 deletions(-)

diff --git a/include/block/aio.h b/include/block/aio.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx,
                         IOHandler *io_read,
                         IOHandler *io_write,
                         AioPollFn *io_poll,
+                        IOHandler *io_poll_ready,
                         void *opaque);

 /* Set polling begin/end callbacks for a file descriptor that has already been
@@ -XXX,XX +XXX,XX @@ void aio_set_event_notifier(AioContext *ctx,
                             EventNotifier *notifier,
                             bool is_external,
                             EventNotifierHandler *io_read,
-                            AioPollFn *io_poll);
+                            AioPollFn *io_poll,
+                            EventNotifierHandler *io_poll_ready);

 /* Set polling begin/end callbacks for an event notifier that has already been
  * registered with aio_set_event_notifier. Do nothing if the event notifier is
diff --git a/util/aio-posix.h b/util/aio-posix.h
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-posix.h
+++ b/util/aio-posix.h
@@ -XXX,XX +XXX,XX @@ struct AioHandler {
     IOHandler *io_read;
     IOHandler *io_write;
     AioPollFn *io_poll;
+    IOHandler *io_poll_ready;
     IOHandler *io_poll_begin;
     IOHandler *io_poll_end;
     void *opaque;
diff --git a/block/curl.c b/block/curl.c
index XXXXXXX..XXXXXXX 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -XXX,XX +XXX,XX @@ static gboolean curl_drop_socket(void *key, void *value, void *opaque)
     BDRVCURLState *s = socket->s;

     aio_set_fd_handler(s->aio_context, socket->fd, false,
-                       NULL, NULL, NULL, NULL);
+                       NULL, NULL, NULL, NULL, NULL);
     return true;
 }

@@ -XXX,XX +XXX,XX @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
     switch (action) {
     case CURL_POLL_IN:
         aio_set_fd_handler(s->aio_context, fd, false,
-                           curl_multi_do, NULL, NULL, socket);
+                           curl_multi_do, NULL, NULL, NULL, socket);
         break;
     case CURL_POLL_OUT:
         aio_set_fd_handler(s->aio_context, fd, false,
-                           NULL, curl_multi_do, NULL, socket);
+                           NULL, curl_multi_do, NULL, NULL, socket);
         break;
     case CURL_POLL_INOUT:
         aio_set_fd_handler(s->aio_context, fd, false,
-                           curl_multi_do, curl_multi_do, NULL, socket);
+                           curl_multi_do, curl_multi_do,
+                           NULL, NULL, socket);
         break;
     case CURL_POLL_REMOVE:
         aio_set_fd_handler(s->aio_context, fd, false,
-                           NULL, NULL, NULL, NULL);
+                           NULL, NULL, NULL, NULL, NULL);
         break;
     }

diff --git a/block/export/fuse.c b/block/export/fuse.c
index XXXXXXX..XXXXXXX 100644
--- a/block/export/fuse.c
+++ b/block/export/fuse.c
@@ -XXX,XX +XXX,XX @@ static int setup_fuse_export(FuseExport *exp, const char *mountpoint,

     aio_set_fd_handler(exp->common.ctx,
                        fuse_session_fd(exp->fuse_session), true,
-                       read_from_fuse_export, NULL, NULL, exp);
+                       read_from_fuse_export, NULL, NULL, NULL, exp);
     exp->fd_handler_set_up = true;

     return 0;
@@ -XXX,XX +XXX,XX @@ static void fuse_export_shutdown(BlockExport *blk_exp)
     if (exp->fd_handler_set_up) {
         aio_set_fd_handler(exp->common.ctx,
                            fuse_session_fd(exp->fuse_session), true,
-                           NULL, NULL, NULL, NULL);
+                           NULL, NULL, NULL, NULL, NULL);
         exp->fd_handler_set_up = false;
     }
 }
diff --git a/block/io_uring.c b/block/io_uring.c
index XXXXXXX..XXXXXXX 100644
--- a/block/io_uring.c
+++ b/block/io_uring.c
@@ -XXX,XX +XXX,XX @@ static bool qemu_luring_poll_cb(void *opaque)
 {
     LuringState *s = opaque;

-    if (io_uring_cq_ready(&s->ring)) {
-        luring_process_completions_and_submit(s);
-        return true;
-    }
+    return io_uring_cq_ready(&s->ring);
+}

-    return false;
+static void qemu_luring_poll_ready(void *opaque)
+{
+    LuringState *s = opaque;
+
+    luring_process_completions_and_submit(s);
 }

 static void ioq_init(LuringQueue *io_q)
@@ -XXX,XX +XXX,XX @@ int coroutine_fn luring_co_submit(BlockDriverState *bs, LuringState *s, int fd,

 void luring_detach_aio_context(LuringState *s, AioContext *old_context)
 {
-    aio_set_fd_handler(old_context, s->ring.ring_fd, false, NULL, NULL, NULL,
-                       s);
+    aio_set_fd_handler(old_context, s->ring.ring_fd, false,
+                       NULL, NULL, NULL, NULL, s);
     qemu_bh_delete(s->completion_bh);
     s->aio_context = NULL;
 }
@@ -XXX,XX +XXX,XX @@ void luring_attach_aio_context(LuringState *s, AioContext *new_context)
     s->aio_context = new_context;
     s->completion_bh = aio_bh_new(new_context, qemu_luring_completion_bh, s);
     aio_set_fd_handler(s->aio_context, s->ring.ring_fd, false,
-                       qemu_luring_completion_cb, NULL, qemu_luring_poll_cb, s);
+                       qemu_luring_completion_cb, NULL,
+                       qemu_luring_poll_cb, qemu_luring_poll_ready, s);
 }

 LuringState *luring_init(Error **errp)
diff --git a/block/iscsi.c b/block/iscsi.c
index XXXXXXX..XXXXXXX 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -XXX,XX +XXX,XX @@ iscsi_set_events(IscsiLun *iscsilun)
                            false,
                            (ev & POLLIN) ? iscsi_process_read : NULL,
                            (ev & POLLOUT) ? iscsi_process_write : NULL,
-                           NULL,
+                           NULL, NULL,
                            iscsilun);
         iscsilun->events = ev;
     }
@@ -XXX,XX +XXX,XX @@ static void iscsi_detach_aio_context(BlockDriverState *bs)
     IscsiLun *iscsilun = bs->opaque;

     aio_set_fd_handler(iscsilun->aio_context, iscsi_get_fd(iscsilun->iscsi),
-                       false, NULL, NULL, NULL, NULL);
+                       false, NULL, NULL, NULL, NULL, NULL);
     iscsilun->events = 0;

     if (iscsilun->nop_timer) {
diff --git a/block/linux-aio.c b/block/linux-aio.c
index XXXXXXX..XXXXXXX 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -XXX,XX +XXX,XX @@ static bool qemu_laio_poll_cb(void *opaque)
     LinuxAioState *s = container_of(e, LinuxAioState, e);
     struct io_event *events;

-    if (!io_getevents_peek(s->ctx, &events)) {
-        return false;
-    }
+    return io_getevents_peek(s->ctx, &events);
+}
+
+static void qemu_laio_poll_ready(EventNotifier *opaque)
+{
+    EventNotifier *e = opaque;
+    LinuxAioState *s = container_of(e, LinuxAioState, e);

     qemu_laio_process_completions_and_submit(s);
-    return true;
 }

 static void ioq_init(LaioQueue *io_q)
@@ -XXX,XX +XXX,XX @@ int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,

 void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
 {
-    aio_set_event_notifier(old_context, &s->e, false, NULL, NULL);
+    aio_set_event_notifier(old_context, &s->e, false, NULL, NULL, NULL);
     qemu_bh_delete(s->completion_bh);
     s->aio_context = NULL;
 }
@@ -XXX,XX +XXX,XX @@ void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
     s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
     aio_set_event_notifier(new_context, &s->e, false,
                            qemu_laio_completion_cb,
-                           qemu_laio_poll_cb);
+                           qemu_laio_poll_cb,
+                           qemu_laio_poll_ready);
 }

 LinuxAioState *laio_init(Error **errp)
diff --git a/block/nfs.c b/block/nfs.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -XXX,XX +XXX,XX @@ static void nfs_set_events(NFSClient *client)
                            false,
                            (ev & POLLIN) ? nfs_process_read : NULL,
                            (ev & POLLOUT) ? nfs_process_write : NULL,
-                           NULL, client);
+                           NULL, NULL, client);

     }
     client->events = ev;
@@ -XXX,XX +XXX,XX @@ static void nfs_detach_aio_context(BlockDriverState *bs)
     NFSClient *client = bs->opaque;

     aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
-                       false, NULL, NULL, NULL, NULL);
+                       false, NULL, NULL, NULL, NULL, NULL);
     client->events = 0;
 }

@@ -XXX,XX +XXX,XX @@ static void nfs_client_close(NFSClient *client)
     if (client->context) {
         qemu_mutex_lock(&client->mutex);
         aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
-                           false, NULL, NULL, NULL, NULL);
+                           false, NULL, NULL, NULL, NULL, NULL);
         qemu_mutex_unlock(&client->mutex);
         if (client->fh) {
             nfs_close(client->context, client->fh);
diff --git a/block/nvme.c b/block/nvme.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -XXX,XX +XXX,XX @@ out:
     return ret;
 }

-static bool nvme_poll_queue(NVMeQueuePair *q)
+static void nvme_poll_queue(NVMeQueuePair *q)
 {
-    bool progress = false;
-
     const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
     NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];

@@ -XXX,XX +XXX,XX @@ static bool nvme_poll_queue(NVMeQueuePair *q)
      * cannot race with itself.
      */
     if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
-        return false;
+        return;
     }

     qemu_mutex_lock(&q->lock);
     while (nvme_process_completion(q)) {
         /* Keep polling */
-        progress = true;
     }
     qemu_mutex_unlock(&q->lock);
-
-    return progress;
 }

-static bool nvme_poll_queues(BDRVNVMeState *s)
+static void nvme_poll_queues(BDRVNVMeState *s)
 {
-    bool progress = false;
     int i;

     for (i = 0; i < s->queue_count; i++) {
-        if (nvme_poll_queue(s->queues[i])) {
-            progress = true;
-        }
+        nvme_poll_queue(s->queues[i]);
     }
-    return progress;
 }

 static void nvme_handle_event(EventNotifier *n)
@@ -XXX,XX +XXX,XX @@ static bool nvme_poll_cb(void *opaque)
     EventNotifier *e = opaque;
     BDRVNVMeState *s = container_of(e, BDRVNVMeState,
                                     irq_notifier[MSIX_SHARED_IRQ_IDX]);
+    int i;

-    return nvme_poll_queues(s);
+    for (i = 0; i < s->queue_count; i++) {
+        NVMeQueuePair *q = s->queues[i];
+        const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
+        NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];
+
+        /*
+         * q->lock isn't needed because nvme_process_completion() only runs in
+         * the event loop thread and cannot race with itself.
+         */
+        if ((le16_to_cpu(cqe->status) & 0x1) != q->cq_phase) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static void nvme_poll_ready(EventNotifier *e)
+{
+    BDRVNVMeState *s = container_of(e, BDRVNVMeState,
+                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);
+
+    nvme_poll_queues(s);
 }

 static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
@@ -XXX,XX +XXX,XX @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
     }
     aio_set_event_notifier(bdrv_get_aio_context(bs),
                            &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
-                           false, nvme_handle_event, nvme_poll_cb);
+                           false, nvme_handle_event, nvme_poll_cb,
+                           nvme_poll_ready);

     if (!nvme_identify(bs, namespace, errp)) {
         ret = -EIO;
@@ -XXX,XX +XXX,XX @@ static void nvme_close(BlockDriverState *bs)
     g_free(s->queues);
     aio_set_event_notifier(bdrv_get_aio_context(bs),
                            &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
-                           false, NULL, NULL);
+                           false, NULL, NULL, NULL);
     event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
     qemu_vfio_pci_unmap_bar(s->vfio, 0, s->bar0_wo_map,
                             0, sizeof(NvmeBar) + NVME_DOORBELL_SIZE);
@@ -XXX,XX +XXX,XX @@ static void nvme_detach_aio_context(BlockDriverState *bs)

     aio_set_event_notifier(bdrv_get_aio_context(bs),
                            &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
-                           false, NULL, NULL);
+                           false, NULL, NULL, NULL);
 }

 static void nvme_attach_aio_context(BlockDriverState *bs,
@@ -XXX,XX +XXX,XX @@ static void nvme_attach_aio_context(BlockDriverState *bs,

     s->aio_context = new_context;
     aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
-                           false, nvme_handle_event, nvme_poll_cb);
+                           false, nvme_handle_event, nvme_poll_cb,
+                           nvme_poll_ready);

     for (unsigned i = 0; i < s->queue_count; i++) {
         NVMeQueuePair *q = s->queues[i];
diff --git a/block/ssh.c b/block/ssh.c
index XXXXXXX..XXXXXXX 100644
--- a/block/ssh.c
+++ b/block/ssh.c
@@ -XXX,XX +XXX,XX @@ static void restart_coroutine(void *opaque)
     AioContext *ctx = bdrv_get_aio_context(bs);

     trace_ssh_restart_coroutine(restart->co);
-    aio_set_fd_handler(ctx, s->sock, false, NULL, NULL, NULL, NULL);
+    aio_set_fd_handler(ctx, s->sock, false, NULL, NULL, NULL, NULL, NULL);

     aio_co_wake(restart->co);
 }
@@ -XXX,XX +XXX,XX @@ static coroutine_fn void co_yield(BDRVSSHState *s, BlockDriverState *bs)
     trace_ssh_co_yield(s->sock, rd_handler, wr_handler);

     aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock,
-                       false, rd_handler, wr_handler, NULL, &restart);
+                       false, rd_handler, wr_handler, NULL, NULL, &restart);
     qemu_coroutine_yield();
     trace_ssh_co_yield_back(s->sock);
 }
diff --git a/block/win32-aio.c b/block/win32-aio.c
index XXXXXXX..XXXXXXX 100644
--- a/block/win32-aio.c
+++ b/block/win32-aio.c
@@ -XXX,XX +XXX,XX @@ int win32_aio_attach(QEMUWin32AIOState *aio, HANDLE hfile)
 void win32_aio_detach_aio_context(QEMUWin32AIOState *aio,
                                   AioContext *old_context)
 {
-    aio_set_event_notifier(old_context, &aio->e, false, NULL, NULL);
+    aio_set_event_notifier(old_context, &aio->e, false, NULL, NULL, NULL);
     aio->aio_ctx = NULL;
 }

@@ -XXX,XX +XXX,XX @@ void win32_aio_attach_aio_context(QEMUWin32AIOState *aio,
 {
     aio->aio_ctx = new_context;
     aio_set_event_notifier(new_context, &aio->e, false,
-                           win32_aio_completion_cb, NULL);
+                           win32_aio_completion_cb, NULL, NULL);
 }

 QEMUWin32AIOState *win32_aio_init(void)
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -XXX,XX +XXX,XX @@ static bool virtio_queue_host_notifier_aio_poll(void *opaque)
     EventNotifier *n = opaque;
     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

-    if (!vq->vring.desc || virtio_queue_empty(vq)) {
-        return false;
-    }
+    return vq->vring.desc && !virtio_queue_empty(vq);
+}

-    return virtio_queue_notify_aio_vq(vq);
+static void virtio_queue_host_notifier_aio_poll_ready(EventNotifier *n)
+{
+    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
+
+    virtio_queue_notify_aio_vq(vq);
 }

 static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
@@ -XXX,XX +XXX,XX @@ void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
         vq->handle_aio_output = handle_output;
         aio_set_event_notifier(ctx, &vq->host_notifier, true,
                                virtio_queue_host_notifier_aio_read,
-                               virtio_queue_host_notifier_aio_poll);
+                               virtio_queue_host_notifier_aio_poll,
+                               virtio_queue_host_notifier_aio_poll_ready);
         aio_set_event_notifier_poll(ctx, &vq->host_notifier,
                                     virtio_queue_host_notifier_aio_poll_begin,
                                     virtio_queue_host_notifier_aio_poll_end);
     } else {
-        aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL);
+        aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL, NULL);
         /* Test and clear notifier before after disabling event,
          * in case poll callback didn't have time to run. */
         virtio_queue_host_notifier_aio_read(&vq->host_notifier);
diff --git a/hw/xen/xen-bus.c b/hw/xen/xen-bus.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/xen/xen-bus.c
+++ b/hw/xen/xen-bus.c
@@ -XXX,XX +XXX,XX @@ void xen_device_set_event_channel_context(XenDevice *xendev,

     if (channel->ctx)
         aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), true,
-                           NULL, NULL, NULL, NULL);
+                           NULL, NULL, NULL, NULL, NULL);

     channel->ctx = ctx;
     aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), true,
-                       xen_device_event, NULL, xen_device_poll, channel);
+                       xen_device_event, NULL, xen_device_poll, NULL, channel);
 }

 XenEventChannel *xen_device_bind_event_channel(XenDevice *xendev,
@@ -XXX,XX +XXX,XX @@ void xen_device_unbind_event_channel(XenDevice *xendev,
     QLIST_REMOVE(channel, list);

     aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), true,
-                       NULL, NULL, NULL, NULL);
+                       NULL, NULL, NULL, NULL, NULL);

     if (xenevtchn_unbind(channel->xeh, channel->local_port) < 0) {
         error_setg_errno(errp, errno, "xenevtchn_unbind failed");
diff --git a/io/channel-command.c b/io/channel-command.c
index XXXXXXX..XXXXXXX 100644
--- a/io/channel-command.c
+++ b/io/channel-command.c
@@ -XXX,XX +XXX,XX @@ static void qio_channel_command_set_aio_fd_handler(QIOChannel *ioc,
                                                    void *opaque)
 {
     QIOChannelCommand *cioc = QIO_CHANNEL_COMMAND(ioc);
-    aio_set_fd_handler(ctx, cioc->readfd, false, io_read, NULL, NULL, opaque);
-    aio_set_fd_handler(ctx, cioc->writefd, false, NULL, io_write, NULL, opaque);
+    aio_set_fd_handler(ctx, cioc->readfd, false,
+                       io_read, NULL, NULL, NULL, opaque);
+    aio_set_fd_handler(ctx, cioc->writefd, false,
+                       NULL, io_write, NULL, NULL, opaque);
 }


diff --git a/io/channel-file.c b/io/channel-file.c
index XXXXXXX..XXXXXXX 100644
--- a/io/channel-file.c
+++ b/io/channel-file.c
@@ -XXX,XX +XXX,XX @@ static void qio_channel_file_set_aio_fd_handler(QIOChannel *ioc,
                                                 void *opaque)
 {
     QIOChannelFile *fioc = QIO_CHANNEL_FILE(ioc);
-    aio_set_fd_handler(ctx, fioc->fd, false, io_read, io_write, NULL, opaque);
+    aio_set_fd_handler(ctx, fioc->fd, false, io_read, io_write,
+                       NULL, NULL, opaque);
 }

 static GSource *qio_channel_file_create_watch(QIOChannel *ioc,
diff --git a/io/channel-socket.c b/io/channel-socket.c
index XXXXXXX..XXXXXXX 100644
--- a/io/channel-socket.c
+++ b/io/channel-socket.c
@@ -XXX,XX +XXX,XX @@ static void qio_channel_socket_set_aio_fd_handler(QIOChannel *ioc,
                                                   void *opaque)
 {
     QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
-    aio_set_fd_handler(ctx, sioc->fd, false, io_read, io_write, NULL, opaque);
+    aio_set_fd_handler(ctx, sioc->fd, false,
+                       io_read, io_write, NULL, NULL, opaque);
 }

 static GSource *qio_channel_socket_create_watch(QIOChannel *ioc,
diff --git a/migration/rdma.c b/migration/rdma.c
index XXXXXXX..XXXXXXX 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -XXX,XX +XXX,XX @@ static void qio_channel_rdma_set_aio_fd_handler(QIOChannel *ioc,
     QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
     if (io_read) {
         aio_set_fd_handler(ctx, rioc->rdmain->recv_comp_channel->fd,
-                           false, io_read, io_write, NULL, opaque);
+                           false, io_read, io_write, NULL, NULL, opaque);
         aio_set_fd_handler(ctx, rioc->rdmain->send_comp_channel->fd,
-                           false, io_read, io_write, NULL, opaque);
+                           false, io_read, io_write, NULL, NULL, opaque);
     } else {
         aio_set_fd_handler(ctx, rioc->rdmaout->recv_comp_channel->fd,
-                           false, io_read, io_write, NULL, opaque);
+                           false, io_read, io_write, NULL, NULL, opaque);
         aio_set_fd_handler(ctx, rioc->rdmaout->send_comp_channel->fd,
-                           false, io_read, io_write, NULL, opaque);
+                           false, io_read, io_write, NULL, NULL, opaque);
     }
 }

diff --git a/tests/unit/test-aio.c b/tests/unit/test-aio.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/unit/test-aio.c
+++ b/tests/unit/test-aio.c
@@ -XXX,XX +XXX,XX @@ static void *test_acquire_thread(void *opaque)
 static void set_event_notifier(AioContext *ctx, EventNotifier *notifier,
                                EventNotifierHandler *handler)
 {
-    aio_set_event_notifier(ctx, notifier, false, handler, NULL);
+    aio_set_event_notifier(ctx, notifier, false, handler, NULL, NULL);
 }

 static void dummy_notifier_read(EventNotifier *n)
@@ -XXX,XX +XXX,XX @@ static void test_aio_external_client(void)
     for (i = 1; i < 3; i++) {
         EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
         event_notifier_init(&data.e, false);
-        aio_set_event_notifier(ctx, &data.e, true, event_ready_cb, NULL);
+        aio_set_event_notifier(ctx, &data.e, true, event_ready_cb, NULL, NULL);
         event_notifier_set(&data.e);
         for (j = 0; j < i; j++) {
             aio_disable_external(ctx);
diff --git a/tests/unit/test-fdmon-epoll.c b/tests/unit/test-fdmon-epoll.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/unit/test-fdmon-epoll.c
+++ b/tests/unit/test-fdmon-epoll.c
@@ -XXX,XX +XXX,XX @@ static void add_event_notifiers(EventNotifier *notifiers, size_t n)
     for (size_t i = 0; i < n; i++) {
         event_notifier_init(&notifiers[i], false);
         aio_set_event_notifier(ctx, &notifiers[i], false,
-                               dummy_fd_handler, NULL);
+                               dummy_fd_handler, NULL, NULL);
     }
 }

 static void remove_event_notifiers(EventNotifier *notifiers, size_t n)
 {
     for (size_t i = 0; i < n; i++) {
-        aio_set_event_notifier(ctx, &notifiers[i], false, NULL, NULL);
+        aio_set_event_notifier(ctx, &notifiers[i], false, NULL, NULL, NULL);
         event_notifier_cleanup(&notifiers[i]);
     }
 }
diff --git a/util/aio-posix.c b/util/aio-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -XXX,XX +XXX,XX @@
 #include "trace.h"
 #include "aio-posix.h"

+/*
+ * G_IO_IN and G_IO_OUT are not appropriate revents values for polling, since
+ * the handler may not need to access the file descriptor. For example, the
+ * handler doesn't need to read from an EventNotifier if it polled a memory
+ * location and a read syscall would be slow. Define our own unique revents
+ * value to indicate that polling determined this AioHandler is ready.
+ */
+#define REVENTS_POLL_READY 0
+
 /* Stop userspace polling on a handler if it isn't active for some time */
 #define POLL_IDLE_INTERVAL_NS (7 * NANOSECONDS_PER_SECOND)

@@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx,
                         IOHandler *io_read,
                         IOHandler *io_write,
                         AioPollFn *io_poll,
+                        IOHandler *io_poll_ready,
                         void *opaque)
 {
     AioHandler *node;
@@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx,
     bool deleted = false;
     int poll_disable_change;

+    if (io_poll && !io_poll_ready) {
+        io_poll = NULL; /* polling only makes sense if there is a handler */
+    }
+
     qemu_lockcnt_lock(&ctx->list_lock);

     node = find_aio_handler(ctx, fd);
@@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx,
         new_node->io_read = io_read;
         new_node->io_write = io_write;
         new_node->io_poll = io_poll;
+        new_node->io_poll_ready = io_poll_ready;
         new_node->opaque = opaque;
         new_node->is_external = is_external;

@@ -XXX,XX +XXX,XX @@ void aio_set_event_notifier(AioContext *ctx,
                             EventNotifier *notifier,
                             bool is_external,
                             EventNotifierHandler *io_read,
-                            AioPollFn *io_poll)
+                            AioPollFn *io_poll,
+                            EventNotifierHandler *io_poll_ready)
 {
     aio_set_fd_handler(ctx, event_notifier_get_fd(notifier), is_external,
-                       (IOHandler *)io_read, NULL, io_poll, notifier);
+                       (IOHandler *)io_read, NULL, io_poll,
+                       (IOHandler *)io_poll_ready, notifier);
 }

 void aio_set_event_notifier_poll(AioContext *ctx,
@@ -XXX,XX +XXX,XX @@ void aio_set_event_notifier_poll(AioContext *ctx,
                     (IOHandler *)io_poll_end);
 }

-static bool poll_set_started(AioContext *ctx, bool started)
+static bool poll_set_started(AioContext *ctx, AioHandlerList *ready_list,
+                             bool started)
 {
     AioHandler *node;
     bool progress = false;
@@ -XXX,XX +XXX,XX @@ static bool poll_set_started(AioContext *ctx, bool started)
         }

         /* Poll one last time in case ->io_poll_end() raced with the event */
-        if (!started) {
-            progress = node->io_poll(node->opaque) || progress;
+        if (!started && node->io_poll(node->opaque)) {
+            aio_add_ready_handler(ready_list, node, REVENTS_POLL_READY);
+            progress = true;
         }
     }
     qemu_lockcnt_dec(&ctx->list_lock);
@@ -XXX,XX +XXX,XX @@ static bool poll_set_started(AioContext *ctx, bool started)

 bool aio_prepare(AioContext *ctx)
 {
+    AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list);
+
     /* Poll mode cannot be used with glib's event loop, disable it. */
-    poll_set_started(ctx, false);
+    poll_set_started(ctx, &ready_list, false);
+    /* TODO what to do with this list? */

     return false;
 }
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node)
         }
         QLIST_INSERT_HEAD(&ctx->poll_aio_handlers, node, node_poll);
     }
+    if (!QLIST_IS_INSERTED(node, node_deleted) &&
+        revents == 0 &&
+        aio_node_check(ctx, node->is_external) &&
+        node->io_poll_ready) {
+        node->io_poll_ready(node->opaque);
+
+        /*
+         * Return early since revents was zero. aio_notify() does not count as
+         * progress.
+         */
+        return node->opaque != &ctx->notifier;
+    }

     if (!QLIST_IS_INSERTED(node, node_deleted) &&
         (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
@@ -XXX,XX +XXX,XX @@ void aio_dispatch(AioContext *ctx)
 }

 static bool run_poll_handlers_once(AioContext *ctx,
+                                   AioHandlerList *ready_list,
                                    int64_t now,
                                    int64_t *timeout)
 {
@@ -XXX,XX +XXX,XX @@ static bool run_poll_handlers_once(AioContext *ctx,
     QLIST_FOREACH_SAFE(node, &ctx->poll_aio_handlers, node_poll, tmp) {
         if (aio_node_check(ctx, node->is_external) &&
             node->io_poll(node->opaque)) {
+            aio_add_ready_handler(ready_list, node, REVENTS_POLL_READY);
+
             node->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS;

             /*
@@ -XXX,XX +XXX,XX @@ static bool fdmon_supports_polling(AioContext *ctx)
     return ctx->fdmon_ops->need_wait != aio_poll_disabled;
 }

-static bool remove_idle_poll_handlers(AioContext *ctx, int64_t now)
+static bool remove_idle_poll_handlers(AioContext *ctx,
+                                      AioHandlerList *ready_list,
+                                      int64_t now)
 {
     AioHandler *node;
     AioHandler *tmp;
@@ -XXX,XX +XXX,XX @@ static bool remove_idle_poll_handlers(AioContext *ctx, int64_t now)
              * Nevermind about re-adding the handler in the rare case where
              * this causes progress.
              */
-            progress = node->io_poll(node->opaque) || progress;
+            if (node->io_poll(node->opaque)) {
+                aio_add_ready_handler(ready_list, node,
+                                      REVENTS_POLL_READY);
+                progress = true;
+            }
         }
     }
 }
@@ -XXX,XX +XXX,XX @@ static bool remove_idle_poll_handlers(AioContext *ctx, int64_t now)

 /* run_poll_handlers:
  * @ctx: the AioContext
+ * @ready_list: the list to place ready handlers on
  * @max_ns: maximum time to poll for, in nanoseconds
  *
  * Polls for a given time.
@@ -XXX,XX +XXX,XX @@ static bool remove_idle_poll_handlers(AioContext *ctx, int64_t now)
  *
  * Returns: true if progress was made, false otherwise
  */
-static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)
+static bool run_poll_handlers(AioContext *ctx, AioHandlerList *ready_list,
+                              int64_t max_ns, int64_t *timeout)
 {
     bool progress;
     int64_t start_time, elapsed_time;
@@ -XXX,XX +XXX,XX @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)

     start_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
     do {
-        progress = run_poll_handlers_once(ctx, start_time, timeout);
+        progress = run_poll_handlers_once(ctx, ready_list,
+                                          start_time, timeout);
         elapsed_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_time;
         max_ns = qemu_soonest_timeout(*timeout, max_ns);
         assert(!(max_ns && progress));
     } while (elapsed_time < max_ns && !ctx->fdmon_ops->need_wait(ctx));

-    if (remove_idle_poll_handlers(ctx, start_time + elapsed_time)) {
+    if (remove_idle_poll_handlers(ctx, ready_list,
+                                  start_time + elapsed_time)) {
         *timeout = 0;
         progress = true;
     }
@@ -XXX,XX +XXX,XX @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)

 /* try_poll_mode:
  * @ctx: the AioContext
+ * @ready_list: list to add handlers that need to be run
  * @timeout: timeout for blocking wait, computed by the caller and updated if
  *            polling succeeds.
  *
@@ -XXX,XX +XXX,XX @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)
  *
  * Returns: true if progress was made, false otherwise
  */
-static bool try_poll_mode(AioContext *ctx, int64_t *timeout)
+static bool try_poll_mode(AioContext *ctx, AioHandlerList *ready_list,
+                          int64_t *timeout)
 {
     int64_t max_ns;

@@ -XXX,XX +XXX,XX @@ static bool try_poll_mode(AioContext *ctx, int64_t *timeout)

     max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns);
     if (max_ns && !ctx->fdmon_ops->need_wait(ctx)) {
-        poll_set_started(ctx, true);
+        poll_set_started(ctx, ready_list, true);

-        if (run_poll_handlers(ctx, max_ns, timeout)) {
+        if (run_poll_handlers(ctx, ready_list, max_ns, timeout)) {
             return true;
         }
     }

-    if (poll_set_started(ctx, false)) {
+    if (poll_set_started(ctx, ready_list, false)) {
         *timeout = 0;
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool try_poll_mode(AioContext *ctx, int64_t *timeout)
 bool aio_poll(AioContext *ctx, bool blocking)
 {
     AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list);
-    int ret = 0;
     bool progress;
     bool use_notify_me;
     int64_t timeout;
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
     }

     timeout = blocking ? aio_compute_timeout(ctx) : 0;
-    progress = try_poll_mode(ctx, &timeout);
+    progress = try_poll_mode(ctx, &ready_list, &timeout);
     assert(!(timeout && progress));

     /*
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
      * system call---a single round of run_poll_handlers_once suffices.
      */
     if (timeout || ctx->fdmon_ops->need_wait(ctx)) {
-        ret = ctx->fdmon_ops->wait(ctx, &ready_list, timeout);
+        ctx->fdmon_ops->wait(ctx, &ready_list, timeout);
     }

     if (use_notify_me) {
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
     }

     progress |= aio_bh_poll(ctx);
-
-    if (ret > 0) {
-        progress |= aio_dispatch_ready_handlers(ctx, &ready_list);
-    }
+    progress |= aio_dispatch_ready_handlers(ctx, &ready_list);

     aio_free_deleted_handlers(ctx);

diff --git a/util/aio-win32.c b/util/aio-win32.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-win32.c
+++ b/util/aio-win32.c
@@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx,
                         IOHandler *io_read,
                         IOHandler *io_write,
                         AioPollFn *io_poll,
+                        IOHandler *io_poll_ready,
                         void *opaque)
 {
     /* fd is a SOCKET in our case */
@@ -XXX,XX +XXX,XX @@ void aio_set_event_notifier(AioContext *ctx,
                             EventNotifier *e,
                             bool is_external,
                             EventNotifierHandler *io_notify,
-                            AioPollFn *io_poll)
+                            AioPollFn *io_poll,
+                            EventNotifierHandler *io_poll_ready)
 {
     AioHandler *node;

diff --git a/util/async.c b/util/async.c
index XXXXXXX..XXXXXXX 100644
--- a/util/async.c
+++ b/util/async.c
@@ -XXX,XX +XXX,XX @@ aio_ctx_finalize(GSource *source)
         g_free(bh);
     }

-    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL);
+    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL, NULL);
     event_notifier_cleanup(&ctx->notifier);
     qemu_rec_mutex_destroy(&ctx->lock);
     qemu_lockcnt_destroy(&ctx->list_lock);
@@ -XXX,XX +XXX,XX @@ static bool aio_context_notifier_poll(void *opaque)
     return qatomic_read(&ctx->notified);
 }

+static void aio_context_notifier_poll_ready(EventNotifier *e)
+{
+    /* Do nothing, we just wanted to kick the event loop */
+}
+
 static void co_schedule_bh_cb(void *opaque)
 {
     AioContext *ctx = opaque;
@@ -XXX,XX +XXX,XX @@ AioContext *aio_context_new(Error **errp)
     aio_set_event_notifier(ctx, &ctx->notifier,
                            false,
                            aio_context_notifier_cb,
-                           aio_context_notifier_poll);
+                           aio_context_notifier_poll,
+                           aio_context_notifier_poll_ready);
 #ifdef CONFIG_LINUX_AIO
     ctx->linux_aio = NULL;
 #endif
diff --git a/util/main-loop.c b/util/main-loop.c
index XXXXXXX..XXXXXXX 100644
--- a/util/main-loop.c
+++ b/util/main-loop.c
@@ -XXX,XX +XXX,XX @@ void qemu_set_fd_handler(int fd,
 {
     iohandler_init();
     aio_set_fd_handler(iohandler_ctx, fd, false,
-                       fd_read, fd_write, NULL, opaque);
+                       fd_read, fd_write, NULL, NULL, opaque);
 }

 void event_notifier_set_handler(EventNotifier *e,
@@ -XXX,XX +XXX,XX @@ void event_notifier_set_handler(EventNotifier *e,
 {
     iohandler_init();
     aio_set_event_notifier(iohandler_ctx, e, false,
-                           handler, NULL);
+                           handler, NULL, NULL);
 }
diff --git a/util/qemu-coroutine-io.c b/util/qemu-coroutine-io.c
index XXXXXXX..XXXXXXX 100644
--- a/util/qemu-coroutine-io.c
+++ b/util/qemu-coroutine-io.c
@@ -XXX,XX +XXX,XX @@ typedef struct {
 static void fd_coroutine_enter(void *opaque)
 {
     FDYieldUntilData *data = opaque;
-    aio_set_fd_handler(data->ctx, data->fd, false, NULL, NULL, NULL, NULL);
+    aio_set_fd_handler(data->ctx, data->fd, false,
+                       NULL, NULL, NULL, NULL, NULL);
     qemu_coroutine_enter(data->co);
 }

@@ -XXX,XX +XXX,XX @@ void coroutine_fn yield_until_fd_readable(int fd)
     data.co = qemu_coroutine_self();
     data.fd = fd;
     aio_set_fd_handler(
-        data.ctx, fd, false, fd_coroutine_enter, NULL, NULL, &data);
+        data.ctx, fd, false, fd_coroutine_enter, NULL, NULL, NULL, &data);
     qemu_coroutine_yield();
 }
diff --git a/util/vhost-user-server.c b/util/vhost-user-server.c
index XXXXXXX..XXXXXXX 100644
--- a/util/vhost-user-server.c
+++ b/util/vhost-user-server.c
@@ -XXX,XX +XXX,XX @@ set_watch(VuDev *vu_dev, int fd, int vu_evt,
         vu_fd_watch->cb = cb;
         qemu_set_nonblock(fd);
         aio_set_fd_handler(server->ioc->ctx, fd, true, kick_handler,
-                           NULL, NULL, vu_fd_watch);
+                           NULL, NULL, NULL, vu_fd_watch);
         vu_fd_watch->vu_dev = vu_dev;
         vu_fd_watch->pvt = pvt;
     }
@@ -XXX,XX +XXX,XX @@ static void remove_watch(VuDev *vu_dev, int fd)
     if (!vu_fd_watch) {
         return;
     }
-    aio_set_fd_handler(server->ioc->ctx, fd, true, NULL, NULL, NULL, NULL);
+    aio_set_fd_handler(server->ioc->ctx, fd, true,
+                       NULL, NULL, NULL, NULL, NULL);

     QTAILQ_REMOVE(&server->vu_fd_watches, vu_fd_watch, next);
     g_free(vu_fd_watch);
@@ -XXX,XX +XXX,XX @@ void vhost_user_server_stop(VuServer *server)

         QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
             aio_set_fd_handler(server->ctx, vu_fd_watch->fd, true,
-                               NULL, NULL, NULL, vu_fd_watch);
+                               NULL, NULL, NULL, NULL, vu_fd_watch);
         }

         qio_channel_shutdown(server->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
@@ -XXX,XX +XXX,XX @@ void vhost_user_server_attach_aio_context(VuServer *server, AioContext *ctx)

     QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
         aio_set_fd_handler(ctx, vu_fd_watch->fd, true, kick_handler, NULL,
-                           NULL, vu_fd_watch);
+                           NULL, NULL, vu_fd_watch);
     }

     aio_co_schedule(ctx, server->co_trip);
@@ -XXX,XX +XXX,XX @@ void vhost_user_server_detach_aio_context(VuServer *server)

     QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
         aio_set_fd_handler(server->ctx, vu_fd_watch->fd, true,
-                           NULL, NULL, NULL, vu_fd_watch);
+                           NULL, NULL, NULL, NULL, vu_fd_watch);
     }

     qio_channel_detach_aio_context(server->ioc);
--
2.34.1
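The following standalone C sketch illustrates the poll-check/ready-handler split described in the commit message above. It is not QEMU code: the AioHandler layout, the fixed-size ready array, and the counter "event source" are simplified assumptions for illustration. The point is that only the side-effect-free io_poll() check runs inside the timed polling window, while io_poll_ready() runs afterwards, so handler execution time no longer distorts the adaptive polling measurement.

/*
 * Standalone sketch, not QEMU code: handler names, the fixed-size
 * ready list, and the memory-location "event source" are assumptions
 * made for illustration only.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

typedef bool AioPollFn(void *opaque); /* cheap check, no side effects */
typedef void IOHandler(void *opaque); /* potentially expensive work */

typedef struct {
    AioPollFn *io_poll;
    IOHandler *io_poll_ready;
    void *opaque;
} AioHandler;

static int64_t now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

/*
 * Poll for up to max_ns. Ready handlers are collected during the timed
 * loop but dispatched after it, so only the checks are charged against
 * the polling budget.
 */
static bool run_poll_handlers(AioHandler *handlers, size_t n, int64_t max_ns)
{
    AioHandler *ready[16];
    size_t n_ready = 0;
    int64_t start = now_ns();

    while (n_ready == 0 && now_ns() - start < max_ns) {
        for (size_t i = 0; i < n && n_ready < 16; i++) {
            if (handlers[i].io_poll(handlers[i].opaque)) {
                ready[n_ready++] = &handlers[i]; /* defer the work */
            }
        }
    }

    for (size_t i = 0; i < n_ready; i++) {
        ready[i]->io_poll_ready(ready[i]->opaque); /* expensive part */
    }
    return n_ready > 0;
}

/* Example handler pair: an int acts as the polled memory location. */
static bool counter_poll(void *opaque)
{
    return *(int *)opaque > 0;
}

static void counter_ready(void *opaque)
{
    *(int *)opaque = 0;
    puts("event handled outside the timed polling window");
}

int main(void)
{
    int pending = 1;
    AioHandler h = { counter_poll, counter_ready, &pending };

    run_poll_handlers(&h, 1, 32000 /* 32 microsecond budget */);
    return 0;
}

The same separation is what lets the real aio-posix.c queue handlers on a ready_list with REVENTS_POLL_READY during polling and dispatch them later through the ordinary aio_dispatch_ready_handlers() path.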
From: John Snow <jsnow@redhat.com>

If users set an unreasonably low speed (like one byte per second), the
calculated delay may exceed many hours. While we like to punish users
for asking for stupid things, we do also like to allow users to correct
their wicked ways.

When a user provides a new speed, kick the job to allow it to recalculate
its delay.

Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20171213204611.26276-1-jsnow@redhat.com
Signed-off-by: Jeff Cody <jcody@redhat.com>
---
 blockjob.c | 30 +++++++++++++++++++++++++++++-
 1 file changed, 29 insertions(+), 1 deletion(-)

diff --git a/blockjob.c b/blockjob.c
index XXXXXXX..XXXXXXX 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -XXX,XX +XXX,XX @@ static void __attribute__((__constructor__)) block_job_init(void)

 static void block_job_event_cancelled(BlockJob *job);
 static void block_job_event_completed(BlockJob *job, const char *msg);
+static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job));

 /* Transactional group of block jobs */
 struct BlockJobTxn {
@@ -XXX,XX +XXX,XX @@ static void block_job_completed_txn_success(BlockJob *job)
     }
 }

+/* Assumes the block_job_mutex is held */
+static bool block_job_timer_pending(BlockJob *job)
+{
+    return timer_pending(&job->sleep_timer);
+}
+
 void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
 {
     Error *local_err = NULL;
+    int64_t old_speed = job->speed;

     if (!job->driver->set_speed) {
         error_setg(errp, QERR_UNSUPPORTED);
@@ -XXX,XX +XXX,XX @@ void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
     }

     job->speed = speed;
+    if (speed <= old_speed) {
+        return;
+    }
+
+    /* kick only if a timer is pending */
+    block_job_enter_cond(job, block_job_timer_pending);
 }

 void block_job_complete(BlockJob *job, Error **errp)
@@ -XXX,XX +XXX,XX @@ void block_job_resume_all(void)
     }
 }

-void block_job_enter(BlockJob *job)
+/*
+ * Conditionally enter a block_job pending a call to fn() while
+ * under the block_job_lock critical section.
+ */
+static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job))
 {
     if (!block_job_started(job)) {
         return;
@@ -XXX,XX +XXX,XX @@ void block_job_enter(BlockJob *job)
         return;
     }

+    if (fn && !fn(job)) {
+        block_job_unlock();
+        return;
+    }
+
     assert(!job->deferred_to_main_loop);
     timer_del(&job->sleep_timer);
     job->busy = true;
@@ -XXX,XX +XXX,XX @@ void block_job_enter(BlockJob *job)
     aio_co_wake(job->co);
 }

+void block_job_enter(BlockJob *job)
+{
+    block_job_enter_cond(job, NULL);
+}
+
 bool block_job_is_cancelled(BlockJob *job)
 {
     return job->cancelled;
--
2.9.5

The virtqueue host notifier API
virtio_queue_aio_set_host_notifier_handler() polls the virtqueue for new
buffers. AioContext previously required a bool progress return value
indicating whether an event was handled or not. This is no longer
necessary because the AioContext polling API has been split into a poll
check function and an event handler function. The event handler is only
run when we know there is work to do, so it doesn't return bool.

The VirtIOHandleAIOOutput function signature is now the same as
VirtIOHandleOutput. Get rid of the bool return value.

Further simplifications will be made for virtio-blk and virtio-scsi in
the next patch.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Message-id: 20211207132336.36627-3-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/hw/virtio/virtio.h      |  3 +--
 hw/block/dataplane/virtio-blk.c |  4 ++--
 hw/scsi/virtio-scsi-dataplane.c | 18 ++++++------------
 hw/virtio/virtio.c              | 12 ++++--------
 4 files changed, 13 insertions(+), 24 deletions(-)

diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -XXX,XX +XXX,XX @@ void virtio_error(VirtIODevice *vdev, const char *fmt, ...) GCC_FMT_ATTR(2, 3);
 void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name);

 typedef void (*VirtIOHandleOutput)(VirtIODevice *, VirtQueue *);
-typedef bool (*VirtIOHandleAIOOutput)(VirtIODevice *, VirtQueue *);

 VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                             VirtIOHandleOutput handle_output);
@@ -XXX,XX +XXX,XX @@ EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
 void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled);
 void virtio_queue_host_notifier_read(EventNotifier *n);
 void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
-                                                VirtIOHandleAIOOutput handle_output);
+                                                VirtIOHandleOutput handle_output);
 VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector);
 VirtQueue *virtio_vector_next_queue(VirtQueue *vq);

diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -XXX,XX +XXX,XX @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
     g_free(s);
 }

-static bool virtio_blk_data_plane_handle_output(VirtIODevice *vdev,
+static void virtio_blk_data_plane_handle_output(VirtIODevice *vdev,
                                                 VirtQueue *vq)
 {
     VirtIOBlock *s = (VirtIOBlock *)vdev;
@@ -XXX,XX +XXX,XX @@ static bool virtio_blk_data_plane_handle_output(VirtIODevice *vdev,
     assert(s->dataplane);
     assert(s->dataplane_started);

-    return virtio_blk_handle_vq(s, vq);
+    virtio_blk_handle_vq(s, vq);
 }

 /* Context: QEMU global mutex held */
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/scsi/virtio-scsi-dataplane.c
+++ b/hw/scsi/virtio-scsi-dataplane.c
@@ -XXX,XX +XXX,XX @@ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
     }
 }

-static bool virtio_scsi_data_plane_handle_cmd(VirtIODevice *vdev,
+static void virtio_scsi_data_plane_handle_cmd(VirtIODevice *vdev,
                                               VirtQueue *vq)
 {
-    bool progress = false;
     VirtIOSCSI *s = VIRTIO_SCSI(vdev);

     virtio_scsi_acquire(s);
     if (!s->dataplane_fenced) {
         assert(s->ctx && s->dataplane_started);
-        progress = virtio_scsi_handle_cmd_vq(s, vq);
+        virtio_scsi_handle_cmd_vq(s, vq);
     }
     virtio_scsi_release(s);
-    return progress;
 }

-static bool virtio_scsi_data_plane_handle_ctrl(VirtIODevice *vdev,
+static void virtio_scsi_data_plane_handle_ctrl(VirtIODevice *vdev,
                                                VirtQueue *vq)
 {
-    bool progress = false;
     VirtIOSCSI *s = VIRTIO_SCSI(vdev);

     virtio_scsi_acquire(s);
     if (!s->dataplane_fenced) {
         assert(s->ctx && s->dataplane_started);
-        progress = virtio_scsi_handle_ctrl_vq(s, vq);
+        virtio_scsi_handle_ctrl_vq(s, vq);
     }
     virtio_scsi_release(s);
-    return progress;
 }

-static bool virtio_scsi_data_plane_handle_event(VirtIODevice *vdev,
+static void virtio_scsi_data_plane_handle_event(VirtIODevice *vdev,
                                                 VirtQueue *vq)
 {
-    bool progress = false;
     VirtIOSCSI *s = VIRTIO_SCSI(vdev);

     virtio_scsi_acquire(s);
     if (!s->dataplane_fenced) {
         assert(s->ctx && s->dataplane_started);
-        progress = virtio_scsi_handle_event_vq(s, vq);
+        virtio_scsi_handle_event_vq(s, vq);
     }
     virtio_scsi_release(s);
-    return progress;
 }

 static int virtio_scsi_set_host_notifier(VirtIOSCSI *s, VirtQueue *vq, int n)
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -XXX,XX +XXX,XX @@ struct VirtQueue

     uint16_t vector;
     VirtIOHandleOutput handle_output;
-    VirtIOHandleAIOOutput handle_aio_output;
+    VirtIOHandleOutput handle_aio_output;
     VirtIODevice *vdev;
     EventNotifier guest_notifier;
     EventNotifier host_notifier;
@@ -XXX,XX +XXX,XX @@ void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
     }
 }

-static bool virtio_queue_notify_aio_vq(VirtQueue *vq)
+static void virtio_queue_notify_aio_vq(VirtQueue *vq)
 {
-    bool ret = false;
-
     if (vq->vring.desc && vq->handle_aio_output) {
         VirtIODevice *vdev = vq->vdev;

         trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
-        ret = vq->handle_aio_output(vdev, vq);
+        vq->handle_aio_output(vdev, vq);

         if (unlikely(vdev->start_on_kick)) {
             virtio_set_started(vdev, true);
         }
     }
-
-    return ret;
 }

 static void virtio_queue_notify_vq(VirtQueue *vq)
@@ -XXX,XX +XXX,XX @@ static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
 }

 void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
-                                                VirtIOHandleAIOOutput handle_output)
+                                                VirtIOHandleOutput handle_output)
 {
     if (handle_output) {
         vq->handle_aio_output = handle_output;
--
2.34.1
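A toy model of the set-speed kick from John Snow's patch above, not the QEMU implementation: Job, timer_pending() and the lock handling are simplified stand-ins for BlockJob, sleep_timer and block_job_lock(). The idea is to wake a sleeping job only when a predicate says the wakeup is useful, so busy jobs are not spuriously re-entered.

/*
 * Toy model, not the QEMU implementation: Job, timer_pending() and the
 * missing lock handling are simplified stand-ins for BlockJob,
 * sleep_timer and block_job_lock().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct Job {
    int64_t speed;          /* throttle, bytes per second */
    bool sleep_timer_armed; /* job is sleeping out its current delay */
} Job;

typedef bool JobPredicate(Job *job);

static bool timer_pending(Job *job)
{
    return job->sleep_timer_armed;
}

/* Wake the job only if the predicate says waking it is useful. */
static void job_enter_cond(Job *job, JobPredicate *fn)
{
    if (fn && !fn(job)) {
        return; /* busy jobs pick up the new speed by themselves */
    }
    job->sleep_timer_armed = false;
    printf("job kicked; delay recalculated at %lld B/s\n",
           (long long)job->speed);
}

static void job_set_speed(Job *job, int64_t speed)
{
    int64_t old_speed = job->speed;

    job->speed = speed;
    if (speed <= old_speed) {
        return; /* lowering the limit never shortens a pending delay */
    }
    job_enter_cond(job, timer_pending); /* kick only sleeping jobs */
}

int main(void)
{
    Job job = { .speed = 1, .sleep_timer_armed = true };

    job_set_speed(&job, 10 * 1024 * 1024); /* correct a 1 B/s mistake */
    return 0;
}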
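A second sketch, with assumed simplified types (VDev/VQueue are placeholders, not QEMU's VirtIODevice/VirtQueue), for why dropping the bool return collapses VirtIOHandleAIOOutput into VirtIOHandleOutput: once the ready handler is only invoked when the poll check has already reported work, its "progress" result has no consumer, and one signature can serve both notification paths.

/*
 * Sketch with assumed, simplified types: VDev and VQueue are
 * placeholders, not QEMU's VirtIODevice and VirtQueue.
 */
#include <stdio.h>

typedef struct VQueue {
    int pending; /* buffers the guest has queued */
} VQueue;

typedef struct VDev {
    VQueue vq;
} VDev;

/* One signature now serves dataplane and non-dataplane paths alike. */
typedef void HandleOutput(VDev *vdev, VQueue *vq);

static void handle_cmd(VDev *vdev, VQueue *vq)
{
    (void)vdev;
    while (vq->pending > 0) {
        vq->pending--; /* process one request */
    }
    /* No bool return: the event loop already knows work was pending. */
}

static void notify(VDev *vdev, VQueue *vq, HandleOutput *handler)
{
    handler(vdev, vq); /* same call site for either path */
}

int main(void)
{
    VDev dev = { .vq = { .pending = 3 } };

    notify(&dev, &dev.vq, handle_cmd);
    printf("pending after notify: %d\n", dev.vq.pending);
    return 0;
}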
Signed-off-by: Jeff Cody <jcody@redhat.com>
Reviewed-by: Richard W.M. Jones <rjones@redhat.com>
Signed-off-by: Jeff Cody <jcody@redhat.com>
---
 block/curl.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/block/curl.c b/block/curl.c
index XXXXXXX..XXXXXXX 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -XXX,XX +XXX,XX @@ out_noclean:
     qemu_mutex_destroy(&s->mutex);
     g_free(s->cookie);
     g_free(s->url);
+    g_free(s->username);
+    g_free(s->proxyusername);
+    g_free(s->proxypassword);
     qemu_opts_del(opts);
     return -EINVAL;
 }
@@ -XXX,XX +XXX,XX @@ static void curl_close(BlockDriverState *bs)

     g_free(s->cookie);
     g_free(s->url);
+    g_free(s->username);
+    g_free(s->proxyusername);
+    g_free(s->proxypassword);
 }

 static int64_t curl_getlength(BlockDriverState *bs)
--
2.9.5

The return value of virtio_blk_handle_vq() is no longer used. Get rid of
it. This is a step towards unifying the dataplane and non-dataplane
virtqueue handler functions.

Prepare virtio_blk_handle_output() to be used by both dataplane and
non-dataplane by making the condition for starting ioeventfd more
specific. This way it won't trigger when dataplane has already been
started.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Message-id: 20211207132336.36627-4-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/hw/virtio/virtio-blk.h |  2 +-
 hw/block/virtio-blk.c          | 14 +++-----------
 2 files changed, 4 insertions(+), 12 deletions(-)

diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -XXX,XX +XXX,XX @@ typedef struct MultiReqBuffer {
     bool is_write;
 } MultiReqBuffer;

-bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq);
+void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq);
 void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh);

 #endif
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
     return 0;
 }

-bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
+void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
 {
     VirtIOBlockReq *req;
     MultiReqBuffer mrb = {};
     bool suppress_notifications = virtio_queue_get_notification(vq);
-    bool progress = false;

     aio_context_acquire(blk_get_aio_context(s->blk));
     blk_io_plug(s->blk);
@@ -XXX,XX +XXX,XX @@ bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
         }

         while ((req = virtio_blk_get_request(s, vq))) {
-            progress = true;
             if (virtio_blk_handle_request(req, &mrb)) {
                 virtqueue_detach_element(req->vq, &req->elem, 0);
                 virtio_blk_free_request(req);
@@ -XXX,XX +XXX,XX @@ bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)

     blk_io_unplug(s->blk);
     aio_context_release(blk_get_aio_context(s->blk));
-    return progress;
-}
-
-static void virtio_blk_handle_output_do(VirtIOBlock *s, VirtQueue *vq)
-{
-    virtio_blk_handle_vq(s, vq);
 }

 static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 {
     VirtIOBlock *s = (VirtIOBlock *)vdev;

-    if (s->dataplane) {
+    if (s->dataplane && !s->dataplane_started) {
         /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
          * dataplane here instead of waiting for .set_status().
          */
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
             return;
         }
     }
-    virtio_blk_handle_output_do(s, vq);
+    virtio_blk_handle_vq(s, vq);
 }

 void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh)
--
2.34.1
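A sketch of the guard logic described above, with hypothetical stand-in fields rather than QEMU's real structures: only kick off the ioeventfd path when a dataplane is configured and has not already been started.

#include <stdbool.h>
#include <stdio.h>

struct dev {
    bool have_dataplane;
    bool dataplane_started;
};

static void start_ioeventfd(struct dev *d)
{
    d->dataplane_started = true;
    puts("ioeventfd started");
}

static void handle_output(struct dev *d)
{
    if (d->have_dataplane && !d->dataplane_started) {
        start_ioeventfd(d);
        return; /* the notifier will re-invoke the handler */
    }
    puts("process requests directly");
}

int main(void)
{
    struct dev d = { .have_dataplane = true, .dataplane_started = false };
    handle_output(&d); /* starts ioeventfd */
    handle_output(&d); /* already started: processes directly */
    return 0;
}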
diff view generated by jsdifflib
'tag' is already checked in the lines immediately preceding this check,
and set to non-NULL if NULL. No need to check again; it hasn't changed.

Signed-off-by: Jeff Cody <jcody@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Signed-off-by: Jeff Cody <jcody@redhat.com>
---
 block/sheepdog.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/sheepdog.c b/block/sheepdog.c
index XXXXXXX..XXXXXXX 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -XXX,XX +XXX,XX @@ static int sd_open(BlockDriverState *bs, QDict *options, int flags,
     if (!tag) {
         tag = "";
     }
-    if (tag && strlen(tag) >= SD_MAX_VDI_TAG_LEN) {
+    if (strlen(tag) >= SD_MAX_VDI_TAG_LEN) {
         error_setg(errp, "value of parameter 'tag' is too long");
         ret = -EINVAL;
         goto err_no_fd;
--
2.9.5

Prepare virtio_scsi_handle_cmd() to be used by both dataplane and
non-dataplane by making the condition for starting ioeventfd more
specific. This way it won't trigger when dataplane has already been
started.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Message-id: 20211207132336.36627-5-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 hw/scsi/virtio-scsi.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
     /* use non-QOM casts in the data path */
     VirtIOSCSI *s = (VirtIOSCSI *)vdev;

-    if (s->ctx) {
+    if (s->ctx && !s->dataplane_started) {
         virtio_device_start_ioeventfd(vdev);
         if (!s->dataplane_fenced) {
             return;
--
2.34.1
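A sketch of the normalize-then-validate pattern behind the sheepdog fix above, with hypothetical names (MAX_TAG_LEN is illustrative): once a NULL tag has been replaced by "", a second NULL test is dead code and only the length check remains meaningful.

#include <stdio.h>
#include <string.h>

#define MAX_TAG_LEN 256

static int validate_tag(const char *tag)
{
    if (!tag) {
        tag = "";            /* normalize: tag is non-NULL from here on */
    }
    if (strlen(tag) >= MAX_TAG_LEN) {  /* no redundant "tag &&" needed */
        return -1;
    }
    return 0;
}

int main(void)
{
    printf("%d %d\n", validate_tag(NULL), validate_tag("snapshot-a"));
    return 0;
}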
diff view generated by jsdifflib
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>

The function searches for the next zero bit.
Also add an interface for BdrvDirtyBitmap and a unit test.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Message-id: 20171012135313.227864-2-vsementsov@virtuozzo.com
Signed-off-by: Jeff Cody <jcody@redhat.com>
---
 block/dirty-bitmap.c         |  5 ++++
 include/block/dirty-bitmap.h |  1 +
 include/qemu/hbitmap.h       |  8 ++++++
 tests/test-hbitmap.c         | 61 ++++++++++++++++++++++++++++++++++++++++++++
 util/hbitmap.c               | 39 ++++++++++++++++++++++++++++
 5 files changed, 114 insertions(+)

diff --git a/block/dirty-bitmap.c b/block/dirty-bitmap.c
index XXXXXXX..XXXXXXX 100644
--- a/block/dirty-bitmap.c
+++ b/block/dirty-bitmap.c
@@ -XXX,XX +XXX,XX @@ char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp)
 {
     return hbitmap_sha256(bitmap->bitmap, errp);
 }
+
+int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t offset)
+{
+    return hbitmap_next_zero(bitmap->bitmap, offset);
+}
diff --git a/include/block/dirty-bitmap.h b/include/block/dirty-bitmap.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/dirty-bitmap.h
+++ b/include/block/dirty-bitmap.h
@@ -XXX,XX +XXX,XX @@ bool bdrv_has_changed_persistent_bitmaps(BlockDriverState *bs);
 BdrvDirtyBitmap *bdrv_dirty_bitmap_next(BlockDriverState *bs,
                                         BdrvDirtyBitmap *bitmap);
 char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp);
+int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t start);

 #endif
diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/hbitmap.h
+++ b/include/qemu/hbitmap.h
@@ -XXX,XX +XXX,XX @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first);
 */
 unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi);

+/* hbitmap_next_zero:
+ * @hb: The HBitmap to operate on
+ * @start: The bit to start from.
+ *
+ * Find next not dirty bit.
+ */
+int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start);
+
 /* hbitmap_create_meta:
 * Create a "meta" hbitmap to track dirtiness of the bits in this HBitmap.
 * The caller owns the created bitmap and must call hbitmap_free_meta(hb) to
diff --git a/tests/test-hbitmap.c b/tests/test-hbitmap.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/test-hbitmap.c
+++ b/tests/test-hbitmap.c
@@ -XXX,XX +XXX,XX @@ static void test_hbitmap_iter_and_reset(TestHBitmapData *data,
     hbitmap_iter_next(&hbi);
 }

+static void test_hbitmap_next_zero_check(TestHBitmapData *data, int64_t start)
+{
+    int64_t ret1 = hbitmap_next_zero(data->hb, start);
+    int64_t ret2 = start;
+    for ( ; ret2 < data->size && hbitmap_get(data->hb, ret2); ret2++) {
+        ;
+    }
+    if (ret2 == data->size) {
+        ret2 = -1;
+    }
+
+    g_assert_cmpint(ret1, ==, ret2);
+}
+
+static void test_hbitmap_next_zero_do(TestHBitmapData *data, int granularity)
+{
+    hbitmap_test_init(data, L3, granularity);
+    test_hbitmap_next_zero_check(data, 0);
+    test_hbitmap_next_zero_check(data, L3 - 1);
+
+    hbitmap_set(data->hb, L2, 1);
+    test_hbitmap_next_zero_check(data, 0);
+    test_hbitmap_next_zero_check(data, L2 - 1);
+    test_hbitmap_next_zero_check(data, L2);
+    test_hbitmap_next_zero_check(data, L2 + 1);
+
+    hbitmap_set(data->hb, L2 + 5, L1);
+    test_hbitmap_next_zero_check(data, 0);
+    test_hbitmap_next_zero_check(data, L2 + 1);
+    test_hbitmap_next_zero_check(data, L2 + 2);
+    test_hbitmap_next_zero_check(data, L2 + 5);
+    test_hbitmap_next_zero_check(data, L2 + L1 - 1);
+    test_hbitmap_next_zero_check(data, L2 + L1);
+
+    hbitmap_set(data->hb, L2 * 2, L3 - L2 * 2);
+    test_hbitmap_next_zero_check(data, L2 * 2 - L1);
+    test_hbitmap_next_zero_check(data, L2 * 2 - 2);
+    test_hbitmap_next_zero_check(data, L2 * 2 - 1);
+    test_hbitmap_next_zero_check(data, L2 * 2);
+    test_hbitmap_next_zero_check(data, L3 - 1);
+
+    hbitmap_set(data->hb, 0, L3);
+    test_hbitmap_next_zero_check(data, 0);
+}
+
+static void test_hbitmap_next_zero_0(TestHBitmapData *data, const void *unused)
+{
+    test_hbitmap_next_zero_do(data, 0);
+}
+
+static void test_hbitmap_next_zero_4(TestHBitmapData *data, const void *unused)
+{
+    test_hbitmap_next_zero_do(data, 4);
+}
+
 int main(int argc, char **argv)
 {
     g_test_init(&argc, &argv, NULL);
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)

     hbitmap_test_add("/hbitmap/iter/iter_and_reset",
                      test_hbitmap_iter_and_reset);
+
+    hbitmap_test_add("/hbitmap/next_zero/next_zero_0",
+                     test_hbitmap_next_zero_0);
+    hbitmap_test_add("/hbitmap/next_zero/next_zero_4",
+                     test_hbitmap_next_zero_4);
+
     g_test_run();

     return 0;
diff --git a/util/hbitmap.c b/util/hbitmap.c
index XXXXXXX..XXXXXXX 100644
--- a/util/hbitmap.c
+++ b/util/hbitmap.c
@@ -XXX,XX +XXX,XX @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
     }
 }

+int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start)
+{
+    size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
+    unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
+    uint64_t sz = hb->sizes[HBITMAP_LEVELS - 1];
+    unsigned long cur = last_lev[pos];
+    unsigned start_bit_offset =
+        (start >> hb->granularity) & (BITS_PER_LONG - 1);
+    int64_t res;
+
+    cur |= (1UL << start_bit_offset) - 1;
+    assert((start >> hb->granularity) < hb->size);
+
+    if (cur == (unsigned long)-1) {
+        do {
+            pos++;
+        } while (pos < sz && last_lev[pos] == (unsigned long)-1);
+
+        if (pos >= sz) {
+            return -1;
+        }
+
+        cur = last_lev[pos];
+    }
+
+    res = (pos << BITS_PER_LEVEL) + ctol(cur);
+    if (res >= hb->size) {
+        return -1;
+    }
+
+    res = res << hb->granularity;
+    if (res < start) {
+        assert(((start - res) >> hb->granularity) == 0);
+        return start;
+    }
+
+    return res;
+}
+
 bool hbitmap_empty(const HBitmap *hb)
 {
     return hb->count == 0;
--
2.9.5

The difference between ->handle_output() and ->handle_aio_output() was
that ->handle_aio_output() returned a bool return value indicating
progress. This was needed by the old polling API but now that the bool
return value is gone, the two functions can be unified.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Message-id: 20211207132336.36627-6-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 hw/virtio/virtio.c | 33 +++------------------------------
 1 file changed, 3 insertions(+), 30 deletions(-)

diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -XXX,XX +XXX,XX @@ struct VirtQueue

     uint16_t vector;
     VirtIOHandleOutput handle_output;
-    VirtIOHandleOutput handle_aio_output;
     VirtIODevice *vdev;
     EventNotifier guest_notifier;
     EventNotifier host_notifier;
@@ -XXX,XX +XXX,XX @@ void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
     }
 }

-static void virtio_queue_notify_aio_vq(VirtQueue *vq)
-{
-    if (vq->vring.desc && vq->handle_aio_output) {
-        VirtIODevice *vdev = vq->vdev;
-
-        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
-        vq->handle_aio_output(vdev, vq);
-
-        if (unlikely(vdev->start_on_kick)) {
-            virtio_set_started(vdev, true);
-        }
-    }
-}
-
 static void virtio_queue_notify_vq(VirtQueue *vq)
 {
     if (vq->vring.desc && vq->handle_output) {
@@ -XXX,XX +XXX,XX @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
     vdev->vq[i].vring.num_default = queue_size;
     vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
     vdev->vq[i].handle_output = handle_output;
-    vdev->vq[i].handle_aio_output = NULL;
     vdev->vq[i].used_elems = g_malloc0(sizeof(VirtQueueElement) *
                                        queue_size);

@@ -XXX,XX +XXX,XX @@ void virtio_delete_queue(VirtQueue *vq)
     vq->vring.num = 0;
     vq->vring.num_default = 0;
     vq->handle_output = NULL;
-    vq->handle_aio_output = NULL;
     g_free(vq->used_elems);
     vq->used_elems = NULL;
     virtio_virtqueue_reset_region_cache(vq);
@@ -XXX,XX +XXX,XX @@ EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
     return &vq->guest_notifier;
 }

-static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
-{
-    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
-    if (event_notifier_test_and_clear(n)) {
-        virtio_queue_notify_aio_vq(vq);
-    }
-}
-
 static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
 {
     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
@@ -XXX,XX +XXX,XX @@ static void virtio_queue_host_notifier_aio_poll_ready(EventNotifier *n)
 {
     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

-    virtio_queue_notify_aio_vq(vq);
+    virtio_queue_notify_vq(vq);
 }

 static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
@@ -XXX,XX +XXX,XX @@ void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
                                                 VirtIOHandleOutput handle_output)
 {
     if (handle_output) {
-        vq->handle_aio_output = handle_output;
         aio_set_event_notifier(ctx, &vq->host_notifier, true,
-                               virtio_queue_host_notifier_aio_read,
+                               virtio_queue_host_notifier_read,
                                virtio_queue_host_notifier_aio_poll,
                                virtio_queue_host_notifier_aio_poll_ready);
         aio_set_event_notifier_poll(ctx, &vq->host_notifier,
@@ -XXX,XX +XXX,XX @@ void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
         aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL, NULL);
         /* Test and clear notifier before after disabling event,
          * in case poll callback didn't have time to run. */
-        virtio_queue_host_notifier_aio_read(&vq->host_notifier);
-        vq->handle_aio_output = NULL;
+        virtio_queue_host_notifier_read(&vq->host_notifier);
     }
 }
--
2.34.1
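A minimal sketch of the next-zero-bit search over a flat word array; the real hbitmap_next_zero() above works on the bottom level of a multi-level bitmap and also handles granularity, which this sketch omits. It relies on the GCC/Clang __builtin_ctzll() intrinsic.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_WORD 64

static int64_t next_zero(const uint64_t *words, uint64_t nbits, uint64_t start)
{
    size_t pos = start / BITS_PER_WORD;
    uint64_t cur = words[pos];

    assert(start < nbits);
    /* Pretend the bits below 'start' are all ones so they are skipped. */
    cur |= (1ULL << (start % BITS_PER_WORD)) - 1;

    while (cur == UINT64_MAX) {           /* word full of ones: skip it */
        if (++pos >= (nbits + BITS_PER_WORD - 1) / BITS_PER_WORD) {
            return -1;                    /* no zero bit before the end */
        }
        cur = words[pos];
    }
    /* Counting trailing ones of 'cur' == trailing zeros of its complement. */
    int64_t res = (int64_t)(pos * BITS_PER_WORD) + __builtin_ctzll(~cur);
    return res < (int64_t)nbits ? res : -1;
}

int main(void)
{
    uint64_t words[2] = { UINT64_MAX, 0x0f };  /* first zero at bit 68 */
    printf("%lld\n", (long long)next_zero(words, 128, 0));
    return 0;
}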
diff view generated by jsdifflib
Deleted patch
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>

Use HBitmap copy_bitmap instead of done_bitmap. This is needed to
improve incremental backup in the following patches and to unify the
backup loop for full/incremental modes in future patches.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Message-id: 20171012135313.227864-3-vsementsov@virtuozzo.com
Signed-off-by: Jeff Cody <jcody@redhat.com>
---
 block/backup.c | 23 ++++++++++++++---------
 1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/block/backup.c b/block/backup.c
index XXXXXXX..XXXXXXX 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -XXX,XX +XXX,XX @@ typedef struct BackupBlockJob {
     BlockdevOnError on_target_error;
     CoRwlock flush_rwlock;
     uint64_t bytes_read;
-    unsigned long *done_bitmap;
     int64_t cluster_size;
     bool compress;
     NotifierWithReturn before_write;
     QLIST_HEAD(, CowRequest) inflight_reqs;
+
+    HBitmap *copy_bitmap;
 } BackupBlockJob;

 /* See if in-flight requests overlap and wait for them to complete */
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
     cow_request_begin(&cow_request, job, start, end);

     for (; start < end; start += job->cluster_size) {
-        if (test_bit(start / job->cluster_size, job->done_bitmap)) {
+        if (!hbitmap_get(job->copy_bitmap, start / job->cluster_size)) {
             trace_backup_do_cow_skip(job, start);
             continue; /* already copied */
         }
+        hbitmap_reset(job->copy_bitmap, start / job->cluster_size, 1);

         trace_backup_do_cow_process(job, start);

@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
             if (error_is_read) {
                 *error_is_read = true;
             }
+            hbitmap_set(job->copy_bitmap, start / job->cluster_size, 1);
             goto out;
         }

@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
             if (error_is_read) {
                 *error_is_read = false;
             }
+            hbitmap_set(job->copy_bitmap, start / job->cluster_size, 1);
             goto out;
         }

-        set_bit(start / job->cluster_size, job->done_bitmap);
-
         /* Publish progress, guest I/O counts as progress too. Note that the
          * offset field is an opaque progress value, it is not a disk offset.
          */
@@ -XXX,XX +XXX,XX @@ void backup_do_checkpoint(BlockJob *job, Error **errp)
     }

     len = DIV_ROUND_UP(backup_job->common.len, backup_job->cluster_size);
-    bitmap_zero(backup_job->done_bitmap, len);
+    hbitmap_set(backup_job->copy_bitmap, 0, len);
 }

 void backup_wait_for_overlapping_requests(BlockJob *job, int64_t offset,
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn backup_run(void *opaque)
     BackupBlockJob *job = opaque;
     BackupCompleteData *data;
     BlockDriverState *bs = blk_bs(job->common.blk);
-    int64_t offset;
+    int64_t offset, nb_clusters;
     int ret = 0;

     QLIST_INIT(&job->inflight_reqs);
     qemu_co_rwlock_init(&job->flush_rwlock);

-    job->done_bitmap = bitmap_new(DIV_ROUND_UP(job->common.len,
-                                               job->cluster_size));
+    nb_clusters = DIV_ROUND_UP(job->common.len, job->cluster_size);
+    job->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
+    hbitmap_set(job->copy_bitmap, 0, nb_clusters);

     job->before_write.notify = backup_before_write_notify;
     bdrv_add_before_write_notifier(bs, &job->before_write);

     if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
+        /* All bits are set in copy_bitmap to allow any cluster to be copied.
+         * This does not actually require them to be copied. */
         while (!block_job_is_cancelled(&job->common)) {
             /* Yield until the job is cancelled. We just let our before_write
              * notify callback service CoW requests. */
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn backup_run(void *opaque)
     /* wait until pending backup_do_cow() calls have completed */
     qemu_co_rwlock_wrlock(&job->flush_rwlock);
     qemu_co_rwlock_unlock(&job->flush_rwlock);
-    g_free(job->done_bitmap);
+    hbitmap_free(job->copy_bitmap);

     data = g_malloc(sizeof(*data));
     data->ret = ret;
--
2.9.5
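A sketch of the copy_bitmap protocol this patch introduces, with hypothetical helpers rather than the real block/backup.c code: a set bit means "still needs copying". The bit is cleared before the copy so concurrent requests skip the cluster, and set again if the copy fails so it can be retried.

#include <stdbool.h>
#include <stdio.h>

#define NB_CLUSTERS 8

static bool copy_bitmap[NB_CLUSTERS]; /* stand-in for an HBitmap */

static int copy_cluster(int c) { return c == 5 ? -1 : 0; /* fake I/O */ }

static void do_cow(int cluster)
{
    if (!copy_bitmap[cluster]) {
        return;                        /* already copied (or in flight) */
    }
    copy_bitmap[cluster] = false;      /* claim it before copying */
    if (copy_cluster(cluster) < 0) {
        copy_bitmap[cluster] = true;   /* failed: mark for retry */
        printf("cluster %d failed, will retry\n", cluster);
    }
}

int main(void)
{
    for (int c = 0; c < NB_CLUSTERS; c++) {
        copy_bitmap[c] = true;         /* everything starts dirty */
    }
    for (int c = 0; c < NB_CLUSTERS; c++) {
        do_cow(c);
    }
    return 0;
}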
diff view generated by jsdifflib
Deleted patch
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>

Set fake progress for non-dirty clusters during copy_bitmap
initialization. This simplifies the code and allows further
refactoring.

This patch changes the user's view of backup progress, but formally
nothing has changed: the progress hops are just moved to the beginning.

It is really just a point of view: when do we actually skip clusters?
We can say at the very beginning that we skip these clusters, and not
think about them later.

Of course, if we went through the disk sequentially, it would be
logical to say that we skip the clusters between copied portions to
their left and right. But even now the copying progress is not
sequential because of write notifiers. Future patches will introduce a
new backup architecture that copies in several coroutines in parallel,
so it would make no sense to publish fake progress piecemeal in
parallel with other copying requests.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Message-id: 20171012135313.227864-5-vsementsov@virtuozzo.com
Signed-off-by: Jeff Cody <jcody@redhat.com>
---
 block/backup.c | 18 +++---------------
 1 file changed, 3 insertions(+), 15 deletions(-)

diff --git a/block/backup.c b/block/backup.c
index XXXXXXX..XXXXXXX 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
     int64_t offset;
     int64_t cluster;
     int64_t end;
-    int64_t last_cluster = -1;
     BdrvDirtyBitmapIter *dbi;

     granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap);
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
     while ((offset = bdrv_dirty_iter_next(dbi)) >= 0) {
         cluster = offset / job->cluster_size;

-        /* Fake progress updates for any clusters we skipped */
-        if (cluster != last_cluster + 1) {
-            job->common.offset += ((cluster - last_cluster - 1) *
-                                   job->cluster_size);
-        }
-
         for (end = cluster + clusters_per_iter; cluster < end; cluster++) {
             do {
                 if (yield_and_check(job)) {
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
         if (granularity < job->cluster_size) {
             bdrv_set_dirty_iter(dbi, cluster * job->cluster_size);
         }
-
-        last_cluster = cluster - 1;
-    }
-
-    /* Play some final catchup with the progress meter */
-    end = DIV_ROUND_UP(job->common.len, job->cluster_size);
-    if (last_cluster + 1 < end) {
-        job->common.offset += ((end - last_cluster - 1) * job->cluster_size);
     }

 out:
@@ -XXX,XX +XXX,XX @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
         bdrv_set_dirty_iter(dbi, next_cluster * job->cluster_size);
     }

+    job->common.offset = job->common.len -
+                         hbitmap_count(job->copy_bitmap) * job->cluster_size;
+
     bdrv_dirty_iter_free(dbi);
 }
--
2.9.5
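The up-front progress computation amounts to one subtraction; a tiny worked example with illustrative field names (not the real job structure):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t len = 64 * 1024 * 1024;       /* job->common.len */
    uint64_t cluster_size = 64 * 1024;     /* job->cluster_size */
    uint64_t dirty_clusters = 100;         /* hbitmap_count(copy_bitmap) */

    /* Everything that is not dirty counts as already-done progress. */
    uint64_t offset = len - dirty_clusters * cluster_size;

    printf("initial progress: %llu of %llu bytes\n",
           (unsigned long long)offset, (unsigned long long)len);
    return 0;
}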
diff view generated by jsdifflib
Deleted patch
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>

We can use copy_bitmap instead of sync_bitmap. copy_bitmap is
initialized from sync_bitmap and it is more informative: we will not
try to process data that is already in flight (claimed by the write
notifier).

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Message-id: 20171012135313.227864-6-vsementsov@virtuozzo.com
Signed-off-by: Jeff Cody <jcody@redhat.com>
---
 block/backup.c | 55 +++++++++++++++++--------------------------------------
 1 file changed, 17 insertions(+), 38 deletions(-)

diff --git a/block/backup.c b/block/backup.c
index XXXXXXX..XXXXXXX 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -XXX,XX +XXX,XX @@ static bool coroutine_fn yield_and_check(BackupBlockJob *job)

 static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
 {
+    int ret;
     bool error_is_read;
-    int ret = 0;
-    int clusters_per_iter;
-    uint32_t granularity;
-    int64_t offset;
     int64_t cluster;
-    int64_t end;
-    BdrvDirtyBitmapIter *dbi;
+    HBitmapIter hbi;

-    granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap);
-    clusters_per_iter = MAX((granularity / job->cluster_size), 1);
-    dbi = bdrv_dirty_iter_new(job->sync_bitmap);
-
-    /* Find the next dirty sector(s) */
-    while ((offset = bdrv_dirty_iter_next(dbi)) >= 0) {
-        cluster = offset / job->cluster_size;
-
-        for (end = cluster + clusters_per_iter; cluster < end; cluster++) {
-            do {
-                if (yield_and_check(job)) {
-                    goto out;
-                }
-                ret = backup_do_cow(job, cluster * job->cluster_size,
-                                    job->cluster_size, &error_is_read,
-                                    false);
-                if ((ret < 0) &&
-                    backup_error_action(job, error_is_read, -ret) ==
-                    BLOCK_ERROR_ACTION_REPORT) {
-                    goto out;
-                }
-            } while (ret < 0);
-        }
-
-        /* If the bitmap granularity is smaller than the backup granularity,
-         * we need to advance the iterator pointer to the next cluster. */
-        if (granularity < job->cluster_size) {
-            bdrv_set_dirty_iter(dbi, cluster * job->cluster_size);
-        }
+    hbitmap_iter_init(&hbi, job->copy_bitmap, 0);
+    while ((cluster = hbitmap_iter_next(&hbi)) != -1) {
+        do {
+            if (yield_and_check(job)) {
+                return 0;
+            }
+            ret = backup_do_cow(job, cluster * job->cluster_size,
+                                job->cluster_size, &error_is_read, false);
+            if (ret < 0 && backup_error_action(job, error_is_read, -ret) ==
+                           BLOCK_ERROR_ACTION_REPORT)
+            {
+                return ret;
+            }
+        } while (ret < 0);
     }

-out:
-    bdrv_dirty_iter_free(dbi);
-    return ret;
+    return 0;
 }

 /* init copy_bitmap from sync_bitmap */
--
2.9.5
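A sketch of the iterate-and-retry loop this patch settles on, using a trivial array in place of an HBitmap iterator: each dirty cluster is retried until the copy succeeds (the real code additionally consults the error-action policy before retrying).

#include <stdio.h>

#define NB_CLUSTERS 4

static int attempts_left[NB_CLUSTERS] = { 1, 3, 1, 1 }; /* cluster 1 flaky */

static int backup_do_cow(int cluster)
{
    return --attempts_left[cluster] > 0 ? -1 : 0; /* fail until last try */
}

int main(void)
{
    for (int cluster = 0; cluster < NB_CLUSTERS; cluster++) {
        int ret;
        do {
            ret = backup_do_cow(cluster);
            if (ret < 0) {
                printf("cluster %d failed, retrying\n", cluster);
            }
        } while (ret < 0);
    }
    puts("all clusters copied");
    return 0;
}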
diff view generated by jsdifflib
No functional changes, just whitespace manipulation.

Signed-off-by: Jeff Cody <jcody@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Signed-off-by: Jeff Cody <jcody@redhat.com>
---
 block/sheepdog.c | 164 +++++++++++++++++++++++++++----------------------------
 1 file changed, 82 insertions(+), 82 deletions(-)

diff --git a/block/sheepdog.c b/block/sheepdog.c
index XXXXXXX..XXXXXXX 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -XXX,XX +XXX,XX @@ typedef struct BDRVSheepdogReopenState {
     int cache_flags;
 } BDRVSheepdogReopenState;

-static const char * sd_strerror(int err)
+static const char *sd_strerror(int err)
 {
     int i;

@@ -XXX,XX +XXX,XX @@ static QemuOptsList sd_create_opts = {
 };

 static BlockDriver bdrv_sheepdog = {
-    .format_name = "sheepdog",
-    .protocol_name = "sheepdog",
-    .instance_size = sizeof(BDRVSheepdogState),
-    .bdrv_parse_filename = sd_parse_filename,
-    .bdrv_file_open = sd_open,
-    .bdrv_reopen_prepare = sd_reopen_prepare,
-    .bdrv_reopen_commit = sd_reopen_commit,
-    .bdrv_reopen_abort = sd_reopen_abort,
-    .bdrv_close = sd_close,
-    .bdrv_create = sd_create,
-    .bdrv_has_zero_init = bdrv_has_zero_init_1,
-    .bdrv_getlength = sd_getlength,
+    .format_name                  = "sheepdog",
+    .protocol_name                = "sheepdog",
+    .instance_size                = sizeof(BDRVSheepdogState),
+    .bdrv_parse_filename          = sd_parse_filename,
+    .bdrv_file_open               = sd_open,
+    .bdrv_reopen_prepare          = sd_reopen_prepare,
+    .bdrv_reopen_commit           = sd_reopen_commit,
+    .bdrv_reopen_abort            = sd_reopen_abort,
+    .bdrv_close                   = sd_close,
+    .bdrv_create                  = sd_create,
+    .bdrv_has_zero_init           = bdrv_has_zero_init_1,
+    .bdrv_getlength               = sd_getlength,
     .bdrv_get_allocated_file_size = sd_get_allocated_file_size,
-    .bdrv_truncate = sd_truncate,
+    .bdrv_truncate                = sd_truncate,

-    .bdrv_co_readv = sd_co_readv,
-    .bdrv_co_writev = sd_co_writev,
-    .bdrv_co_flush_to_disk = sd_co_flush_to_disk,
-    .bdrv_co_pdiscard = sd_co_pdiscard,
-    .bdrv_co_get_block_status = sd_co_get_block_status,
+    .bdrv_co_readv                = sd_co_readv,
+    .bdrv_co_writev               = sd_co_writev,
+    .bdrv_co_flush_to_disk        = sd_co_flush_to_disk,
+    .bdrv_co_pdiscard             = sd_co_pdiscard,
+    .bdrv_co_get_block_status     = sd_co_get_block_status,

-    .bdrv_snapshot_create = sd_snapshot_create,
-    .bdrv_snapshot_goto = sd_snapshot_goto,
-    .bdrv_snapshot_delete = sd_snapshot_delete,
-    .bdrv_snapshot_list = sd_snapshot_list,
+    .bdrv_snapshot_create         = sd_snapshot_create,
+    .bdrv_snapshot_goto           = sd_snapshot_goto,
+    .bdrv_snapshot_delete         = sd_snapshot_delete,
+    .bdrv_snapshot_list           = sd_snapshot_list,

-    .bdrv_save_vmstate = sd_save_vmstate,
-    .bdrv_load_vmstate = sd_load_vmstate,
+    .bdrv_save_vmstate            = sd_save_vmstate,
+    .bdrv_load_vmstate            = sd_load_vmstate,

-    .bdrv_detach_aio_context = sd_detach_aio_context,
-    .bdrv_attach_aio_context = sd_attach_aio_context,
+    .bdrv_detach_aio_context      = sd_detach_aio_context,
+    .bdrv_attach_aio_context      = sd_attach_aio_context,

-    .create_opts = &sd_create_opts,
+    .create_opts                  = &sd_create_opts,
 };

 static BlockDriver bdrv_sheepdog_tcp = {
-    .format_name = "sheepdog",
-    .protocol_name = "sheepdog+tcp",
-    .instance_size = sizeof(BDRVSheepdogState),
-    .bdrv_parse_filename = sd_parse_filename,
-    .bdrv_file_open = sd_open,
-    .bdrv_reopen_prepare = sd_reopen_prepare,
-    .bdrv_reopen_commit = sd_reopen_commit,
-    .bdrv_reopen_abort = sd_reopen_abort,
-    .bdrv_close = sd_close,
-    .bdrv_create = sd_create,
-    .bdrv_has_zero_init = bdrv_has_zero_init_1,
-    .bdrv_getlength = sd_getlength,
+    .format_name                  = "sheepdog",
+    .protocol_name                = "sheepdog+tcp",
+    .instance_size                = sizeof(BDRVSheepdogState),
+    .bdrv_parse_filename          = sd_parse_filename,
+    .bdrv_file_open               = sd_open,
+    .bdrv_reopen_prepare          = sd_reopen_prepare,
+    .bdrv_reopen_commit           = sd_reopen_commit,
+    .bdrv_reopen_abort            = sd_reopen_abort,
+    .bdrv_close                   = sd_close,
+    .bdrv_create                  = sd_create,
+    .bdrv_has_zero_init           = bdrv_has_zero_init_1,
+    .bdrv_getlength               = sd_getlength,
     .bdrv_get_allocated_file_size = sd_get_allocated_file_size,
-    .bdrv_truncate = sd_truncate,
+    .bdrv_truncate                = sd_truncate,

-    .bdrv_co_readv = sd_co_readv,
-    .bdrv_co_writev = sd_co_writev,
-    .bdrv_co_flush_to_disk = sd_co_flush_to_disk,
-    .bdrv_co_pdiscard = sd_co_pdiscard,
-    .bdrv_co_get_block_status = sd_co_get_block_status,
+    .bdrv_co_readv                = sd_co_readv,
+    .bdrv_co_writev               = sd_co_writev,
+    .bdrv_co_flush_to_disk        = sd_co_flush_to_disk,
+    .bdrv_co_pdiscard             = sd_co_pdiscard,
+    .bdrv_co_get_block_status     = sd_co_get_block_status,

-    .bdrv_snapshot_create = sd_snapshot_create,
-    .bdrv_snapshot_goto = sd_snapshot_goto,
-    .bdrv_snapshot_delete = sd_snapshot_delete,
-    .bdrv_snapshot_list = sd_snapshot_list,
+    .bdrv_snapshot_create         = sd_snapshot_create,
+    .bdrv_snapshot_goto           = sd_snapshot_goto,
+    .bdrv_snapshot_delete         = sd_snapshot_delete,
+    .bdrv_snapshot_list           = sd_snapshot_list,

-    .bdrv_save_vmstate = sd_save_vmstate,
-    .bdrv_load_vmstate = sd_load_vmstate,
+    .bdrv_save_vmstate            = sd_save_vmstate,
+    .bdrv_load_vmstate            = sd_load_vmstate,

-    .bdrv_detach_aio_context = sd_detach_aio_context,
-    .bdrv_attach_aio_context = sd_attach_aio_context,
+    .bdrv_detach_aio_context      = sd_detach_aio_context,
+    .bdrv_attach_aio_context      = sd_attach_aio_context,

-    .create_opts = &sd_create_opts,
+    .create_opts                  = &sd_create_opts,
 };

 static BlockDriver bdrv_sheepdog_unix = {
-    .format_name = "sheepdog",
-    .protocol_name = "sheepdog+unix",
-    .instance_size = sizeof(BDRVSheepdogState),
-    .bdrv_parse_filename = sd_parse_filename,
-    .bdrv_file_open = sd_open,
-    .bdrv_reopen_prepare = sd_reopen_prepare,
-    .bdrv_reopen_commit = sd_reopen_commit,
-    .bdrv_reopen_abort = sd_reopen_abort,
-    .bdrv_close = sd_close,
-    .bdrv_create = sd_create,
-    .bdrv_has_zero_init = bdrv_has_zero_init_1,
-    .bdrv_getlength = sd_getlength,
+    .format_name                  = "sheepdog",
+    .protocol_name                = "sheepdog+unix",
+    .instance_size                = sizeof(BDRVSheepdogState),
+    .bdrv_parse_filename          = sd_parse_filename,
+    .bdrv_file_open               = sd_open,
+    .bdrv_reopen_prepare          = sd_reopen_prepare,
+    .bdrv_reopen_commit           = sd_reopen_commit,
+    .bdrv_reopen_abort            = sd_reopen_abort,
+    .bdrv_close                   = sd_close,
+    .bdrv_create                  = sd_create,
+    .bdrv_has_zero_init           = bdrv_has_zero_init_1,
+    .bdrv_getlength               = sd_getlength,
     .bdrv_get_allocated_file_size = sd_get_allocated_file_size,
-    .bdrv_truncate = sd_truncate,
+    .bdrv_truncate                = sd_truncate,

-    .bdrv_co_readv = sd_co_readv,
-    .bdrv_co_writev = sd_co_writev,
-    .bdrv_co_flush_to_disk = sd_co_flush_to_disk,
-    .bdrv_co_pdiscard = sd_co_pdiscard,
-    .bdrv_co_get_block_status = sd_co_get_block_status,
+    .bdrv_co_readv                = sd_co_readv,
+    .bdrv_co_writev               = sd_co_writev,
+    .bdrv_co_flush_to_disk        = sd_co_flush_to_disk,
+    .bdrv_co_pdiscard             = sd_co_pdiscard,
+    .bdrv_co_get_block_status     = sd_co_get_block_status,

-    .bdrv_snapshot_create = sd_snapshot_create,
-    .bdrv_snapshot_goto = sd_snapshot_goto,
-    .bdrv_snapshot_delete = sd_snapshot_delete,
-    .bdrv_snapshot_list = sd_snapshot_list,
+    .bdrv_snapshot_create         = sd_snapshot_create,
+    .bdrv_snapshot_goto           = sd_snapshot_goto,
+    .bdrv_snapshot_delete         = sd_snapshot_delete,
+    .bdrv_snapshot_list           = sd_snapshot_list,

-    .bdrv_save_vmstate = sd_save_vmstate,
-    .bdrv_load_vmstate = sd_load_vmstate,
+    .bdrv_save_vmstate            = sd_save_vmstate,
+    .bdrv_load_vmstate            = sd_load_vmstate,

-    .bdrv_detach_aio_context = sd_detach_aio_context,
-    .bdrv_attach_aio_context = sd_attach_aio_context,
+    .bdrv_detach_aio_context      = sd_detach_aio_context,
+    .bdrv_attach_aio_context      = sd_attach_aio_context,

-    .create_opts = &sd_create_opts,
+    .create_opts                  = &sd_create_opts,
 };

 static void bdrv_sheepdog_init(void)
--
2.9.5

Now that virtio-blk and virtio-scsi are ready, get rid of
the handle_aio_output() callback. It's no longer needed.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Message-id: 20211207132336.36627-7-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/hw/virtio/virtio.h      |  4 +--
 hw/block/dataplane/virtio-blk.c | 16 ++--------
 hw/scsi/virtio-scsi-dataplane.c | 54 ++++-----------------------------
 hw/virtio/virtio.c              | 32 +++++++++----------
 4 files changed, 26 insertions(+), 80 deletions(-)

diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -XXX,XX +XXX,XX @@ bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev);
 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
 void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled);
 void virtio_queue_host_notifier_read(EventNotifier *n);
-void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
-                                                VirtIOHandleOutput handle_output);
+void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx);
+void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx);
 VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector);
 VirtQueue *virtio_vector_next_queue(VirtQueue *vq);

diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -XXX,XX +XXX,XX @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
     g_free(s);
 }

-static void virtio_blk_data_plane_handle_output(VirtIODevice *vdev,
-                                                VirtQueue *vq)
-{
-    VirtIOBlock *s = (VirtIOBlock *)vdev;
-
-    assert(s->dataplane);
-    assert(s->dataplane_started);
-
-    virtio_blk_handle_vq(s, vq);
-}
-
 /* Context: QEMU global mutex held */
 int virtio_blk_data_plane_start(VirtIODevice *vdev)
 {
@@ -XXX,XX +XXX,XX @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
     for (i = 0; i < nvqs; i++) {
         VirtQueue *vq = virtio_get_queue(s->vdev, i);

-        virtio_queue_aio_set_host_notifier_handler(vq, s->ctx,
-                virtio_blk_data_plane_handle_output);
+        virtio_queue_aio_attach_host_notifier(vq, s->ctx);
     }
     aio_context_release(s->ctx);
     return 0;
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_data_plane_stop_bh(void *opaque)
     for (i = 0; i < s->conf->num_queues; i++) {
         VirtQueue *vq = virtio_get_queue(s->vdev, i);

-        virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, NULL);
+        virtio_queue_aio_detach_host_notifier(vq, s->ctx);
     }
 }

diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/scsi/virtio-scsi-dataplane.c
+++ b/hw/scsi/virtio-scsi-dataplane.c
@@ -XXX,XX +XXX,XX @@ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
     }
 }

-static void virtio_scsi_data_plane_handle_cmd(VirtIODevice *vdev,
-                                              VirtQueue *vq)
-{
-    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
-
-    virtio_scsi_acquire(s);
-    if (!s->dataplane_fenced) {
-        assert(s->ctx && s->dataplane_started);
-        virtio_scsi_handle_cmd_vq(s, vq);
-    }
-    virtio_scsi_release(s);
-}
-
-static void virtio_scsi_data_plane_handle_ctrl(VirtIODevice *vdev,
-                                               VirtQueue *vq)
-{
-    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
-
-    virtio_scsi_acquire(s);
-    if (!s->dataplane_fenced) {
-        assert(s->ctx && s->dataplane_started);
-        virtio_scsi_handle_ctrl_vq(s, vq);
-    }
-    virtio_scsi_release(s);
-}
-
-static void virtio_scsi_data_plane_handle_event(VirtIODevice *vdev,
-                                                VirtQueue *vq)
-{
-    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
-
-    virtio_scsi_acquire(s);
-    if (!s->dataplane_fenced) {
-        assert(s->ctx && s->dataplane_started);
-        virtio_scsi_handle_event_vq(s, vq);
-    }
-    virtio_scsi_release(s);
-}
-
 static int virtio_scsi_set_host_notifier(VirtIOSCSI *s, VirtQueue *vq, int n)
 {
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_dataplane_stop_bh(void *opaque)
     VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
     int i;

-    virtio_queue_aio_set_host_notifier_handler(vs->ctrl_vq, s->ctx, NULL);
-    virtio_queue_aio_set_host_notifier_handler(vs->event_vq, s->ctx, NULL);
+    virtio_queue_aio_detach_host_notifier(vs->ctrl_vq, s->ctx);
+    virtio_queue_aio_detach_host_notifier(vs->event_vq, s->ctx);
     for (i = 0; i < vs->conf.num_queues; i++) {
-        virtio_queue_aio_set_host_notifier_handler(vs->cmd_vqs[i], s->ctx, NULL);
+        virtio_queue_aio_detach_host_notifier(vs->cmd_vqs[i], s->ctx);
     }
 }

@@ -XXX,XX +XXX,XX @@ int virtio_scsi_dataplane_start(VirtIODevice *vdev)
     memory_region_transaction_commit();

     aio_context_acquire(s->ctx);
-    virtio_queue_aio_set_host_notifier_handler(vs->ctrl_vq, s->ctx,
-                                               virtio_scsi_data_plane_handle_ctrl);
-    virtio_queue_aio_set_host_notifier_handler(vs->event_vq, s->ctx,
-                                               virtio_scsi_data_plane_handle_event);
+    virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, s->ctx);
+    virtio_queue_aio_attach_host_notifier(vs->event_vq, s->ctx);

     for (i = 0; i < vs->conf.num_queues; i++) {
-        virtio_queue_aio_set_host_notifier_handler(vs->cmd_vqs[i], s->ctx,
-                                                   virtio_scsi_data_plane_handle_cmd);
+        virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], s->ctx);
     }

     s->dataplane_starting = false;
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -XXX,XX +XXX,XX @@ static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
     virtio_queue_set_notification(vq, 1);
 }

-void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
-                                                VirtIOHandleOutput handle_output)
+void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx)
 {
-    if (handle_output) {
-        aio_set_event_notifier(ctx, &vq->host_notifier, true,
-                               virtio_queue_host_notifier_read,
-                               virtio_queue_host_notifier_aio_poll,
-                               virtio_queue_host_notifier_aio_poll_ready);
-        aio_set_event_notifier_poll(ctx, &vq->host_notifier,
-                                    virtio_queue_host_notifier_aio_poll_begin,
-                                    virtio_queue_host_notifier_aio_poll_end);
-    } else {
-        aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL, NULL);
-        /* Test and clear notifier before after disabling event,
-         * in case poll callback didn't have time to run. */
-        virtio_queue_host_notifier_read(&vq->host_notifier);
-    }
+    aio_set_event_notifier(ctx, &vq->host_notifier, true,
+                           virtio_queue_host_notifier_read,
+                           virtio_queue_host_notifier_aio_poll,
+                           virtio_queue_host_notifier_aio_poll_ready);
+    aio_set_event_notifier_poll(ctx, &vq->host_notifier,
+                                virtio_queue_host_notifier_aio_poll_begin,
+                                virtio_queue_host_notifier_aio_poll_end);
+}
+
+void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx)
+{
+    aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL, NULL);
+    /* Test and clear notifier before after disabling event,
+     * in case poll callback didn't have time to run. */
+    virtio_queue_host_notifier_read(&vq->host_notifier);
 }

 void virtio_queue_host_notifier_read(EventNotifier *n)
--
2.34.1
diff view generated by jsdifflib
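A sketch of the attach/detach API split performed by the patch above, with stand-in types rather than QEMU's: replacing one "set handler or NULL" entry point with two explicitly named operations makes call sites self-documenting.

#include <stdbool.h>
#include <stdio.h>

typedef struct Notifier { bool attached; } Notifier;

static void notifier_attach(Notifier *n) { n->attached = true; }

static void notifier_detach(Notifier *n)
{
    n->attached = false;
    /* flush any event that raced with the detach */
    puts("drained pending notification");
}

int main(void)
{
    Notifier n = { false };
    notifier_attach(&n);   /* was: set_handler(&n, handler) */
    notifier_detach(&n);   /* was: set_handler(&n, NULL)    */
    return 0;
}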
Deleted patch
If curl_global_init() fails, per the documentation no other curl
functions may be called, so make sure to check the return value.

Also, some minor changes to the initialization latch variable 'inited':

- Make it static in the file, for clarity
- Change the name for clarity
- Make it a bool

Signed-off-by: Jeff Cody <jcody@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Richard W.M. Jones <rjones@redhat.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Signed-off-by: Jeff Cody <jcody@redhat.com>
---
 block/curl.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git a/block/curl.c b/block/curl.c
index XXXXXXX..XXXXXXX 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -XXX,XX +XXX,XX @@ static CURLMcode __curl_multi_socket_action(CURLM *multi_handle,

 struct BDRVCURLState;

+static bool libcurl_initialized;
+
 typedef struct CURLAIOCB {
     Coroutine *co;
     QEMUIOVector *qiov;
@@ -XXX,XX +XXX,XX @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
     double d;
     const char *secretid;
     const char *protocol_delimiter;
+    int ret;

-    static int inited = 0;

     if (flags & BDRV_O_RDWR) {
         error_setg(errp, "curl block device does not support writes");
         return -EROFS;
     }

+    if (!libcurl_initialized) {
+        ret = curl_global_init(CURL_GLOBAL_ALL);
+        if (ret) {
+            error_setg(errp, "libcurl initialization failed with %d", ret);
+            return -EIO;
+        }
+        libcurl_initialized = true;
+    }
+
     qemu_mutex_init(&s->mutex);
     opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
     qemu_opts_absorb_qdict(opts, options, &local_err);
@@ -XXX,XX +XXX,XX @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
         }
     }

-    if (!inited) {
-        curl_global_init(CURL_GLOBAL_ALL);
-        inited = 1;
-    }
-
     DPRINTF("CURL: Opening %s\n", file);
     QSIMPLEQ_INIT(&s->free_state_waitq);
     s->aio_context = bdrv_get_aio_context(bs);
--
2.9.5
diff view generated by jsdifflib
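A sketch of the checked one-time initialization pattern above. This compiles against real libcurl (link with -lcurl); the static bool latch mirrors the patch: initialize once, and refuse to continue if curl_global_init() reports failure.

#include <stdbool.h>
#include <stdio.h>
#include <curl/curl.h>

static bool libcurl_initialized;

static int ensure_libcurl(void)
{
    if (!libcurl_initialized) {
        CURLcode ret = curl_global_init(CURL_GLOBAL_ALL);
        if (ret != CURLE_OK) {
            fprintf(stderr, "libcurl initialization failed with %d\n",
                    (int)ret);
            return -1;  /* no other curl function may be called now */
        }
        libcurl_initialized = true;
    }
    return 0;
}

int main(void)
{
    if (ensure_libcurl() < 0) {
        return 1;
    }
    puts("libcurl ready");
    curl_global_cleanup();
    return 0;
}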