The following changes since commit 56f9e46b841c7be478ca038d8d4085d776ab4b0d:

  Merge remote-tracking branch 'remotes/armbru/tags/pull-qapi-2017-02-20' into staging (2017-02-20 17:42:47 +0000)

are available in the git repository at:

  git://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to a7b91d35bab97a2d3e779d0c64c9b837b52a6cf7:

  coroutine-lock: make CoRwlock thread-safe and fair (2017-02-21 11:39:40 +0000)

----------------------------------------------------------------
Pull request

v2:
 * Rebased to resolve scsi conflicts

----------------------------------------------------------------

The following changes since commit 75ee62ac606bfc9eb59310b9446df3434bf6e8c2:

  Merge remote-tracking branch 'remotes/ehabkost-gl/tags/x86-next-pull-request' into staging (2020-12-17 18:53:36 +0000)

are available in the Git repository at:

  https://github.com/XanClic/qemu.git tags/pull-block-2020-12-18

for you to fetch changes up to 0e72078128229bf9efb542e396ab44bf91b91340:

  iotests: Fix _send_qemu_cmd with bash 5.1 (2020-12-18 12:47:38 +0100)

----------------------------------------------------------------
Block patches:
- New block filter: preallocate (which, on writes beyond an image file's
  end, allocates big chunks of data so that such post-EOF writes will
  occur less frequently)
- write-zeroes and block-status support for Quorum
- Implementation of truncate for the nvme block driver, similar to the
  existing implementations for host block devices and iSCSI devices
- Block layer refactoring: drop the tighten_restrictions concept in the
  block permission functions
- iotest fixes

----------------------------------------------------------------
26
Alberto Garcia (2):
27
quorum: Implement bdrv_co_block_status()
28
quorum: Implement bdrv_co_pwrite_zeroes()
29
20
30
Max Reitz (2):
21
Paolo Bonzini (24):
31
iotests/102: Pass $QEMU_HANDLE to _send_qemu_cmd
22
block: move AioContext, QEMUTimer, main-loop to libqemuutil
32
iotests: Fix _send_qemu_cmd with bash 5.1
23
aio: introduce aio_co_schedule and aio_co_wake
24
block-backend: allow blk_prw from coroutine context
25
test-thread-pool: use generic AioContext infrastructure
26
io: add methods to set I/O handlers on AioContext
27
io: make qio_channel_yield aware of AioContexts
28
nbd: convert to use qio_channel_yield
29
coroutine-lock: reschedule coroutine on the AioContext it was running
30
on
31
blkdebug: reschedule coroutine on the AioContext it is running on
32
qed: introduce qed_aio_start_io and qed_aio_next_io_cb
33
aio: push aio_context_acquire/release down to dispatching
34
block: explicitly acquire aiocontext in timers that need it
35
block: explicitly acquire aiocontext in callbacks that need it
36
block: explicitly acquire aiocontext in bottom halves that need it
37
block: explicitly acquire aiocontext in aio callbacks that need it
38
aio-posix: partially inline aio_dispatch into aio_poll
39
async: remove unnecessary inc/dec pairs
40
block: document fields protected by AioContext lock
41
coroutine-lock: make CoMutex thread-safe
42
coroutine-lock: add limited spinning to CoMutex
43
test-aio-multithread: add performance comparison with thread-based
44
mutexes
45
coroutine-lock: place CoMutex before CoQueue in header
46
coroutine-lock: add mutex argument to CoQueue APIs
47
coroutine-lock: make CoRwlock thread-safe and fair
33
48
34
Philippe Mathieu-Daudé (1):
49
Makefile.objs | 4 -
35
block/nvme: Implement fake truncate() coroutine
50
stubs/Makefile.objs | 1 +
36
51
tests/Makefile.include | 19 +-
37
Vladimir Sementsov-Ogievskiy (25):
52
util/Makefile.objs | 6 +-
38
block: add bdrv_refresh_perms() helper
53
block/nbd-client.h | 2 +-
39
block: bdrv_set_perm() drop redundant parameters.
54
block/qed.h | 3 +
40
block: bdrv_child_set_perm() drop redundant parameters.
55
include/block/aio.h | 38 ++-
41
block: drop tighten_restrictions
56
include/block/block_int.h | 64 +++--
42
block: simplify comment to BDRV_REQ_SERIALISING
57
include/io/channel.h | 72 +++++-
43
block/io.c: drop assertion on double waiting for request serialisation
58
include/qemu/coroutine.h | 84 ++++---
44
block/io: split out bdrv_find_conflicting_request
59
include/qemu/coroutine_int.h | 11 +-
45
block/io: bdrv_wait_serialising_requests_locked: drop extra bs arg
60
include/sysemu/block-backend.h | 14 +-
46
block: bdrv_mark_request_serialising: split non-waiting function
61
tests/iothread.h | 25 ++
47
block: introduce BDRV_REQ_NO_WAIT flag
62
block/backup.c | 2 +-
48
block: bdrv_check_perm(): process children anyway
63
block/blkdebug.c | 9 +-
49
block: introduce preallocate filter
64
block/blkreplay.c | 2 +-
50
qemu-io: add preallocate mode parameter for truncate command
65
block/block-backend.c | 13 +-
51
iotests: qemu_io_silent: support --image-opts
66
block/curl.c | 44 +++-
52
iotests.py: execute_setup_common(): add required_fmts argument
67
block/gluster.c | 9 +-
53
iotests: add 298 to test new preallocate filter driver
68
block/io.c | 42 +---
54
scripts/simplebench: fix grammar: s/successed/succeeded/
69
block/iscsi.c | 15 +-
55
scripts/simplebench: support iops
70
block/linux-aio.c | 10 +-
56
scripts/simplebench: use standard deviation for +- error
71
block/mirror.c | 12 +-
57
simplebench: rename ascii() to results_to_text()
72
block/nbd-client.c | 119 +++++----
58
simplebench: move results_to_text() into separate file
73
block/nfs.c | 9 +-
59
simplebench/results_to_text: improve view of the table
74
block/qcow2-cluster.c | 4 +-
60
simplebench/results_to_text: add difference line to the table
75
block/qed-cluster.c | 2 +
61
simplebench/results_to_text: make executable
76
block/qed-table.c | 12 +-
62
scripts/simplebench: add bench_prealloc.py
77
block/qed.c | 58 +++--
63
78
block/sheepdog.c | 31 +--
64
docs/system/qemu-block-drivers.rst.inc | 26 ++
79
block/ssh.c | 29 +--
65
qapi/block-core.json | 20 +-
80
block/throttle-groups.c | 4 +-
66
include/block/block.h | 20 +-
81
block/win32-aio.c | 9 +-
67
include/block/block_int.h | 3 +-
82
dma-helpers.c | 2 +
68
block.c | 185 +++-----
83
hw/9pfs/9p.c | 2 +-
69
block/file-posix.c | 2 +-
84
hw/block/virtio-blk.c | 19 +-
70
block/io.c | 130 +++---
85
hw/scsi/scsi-bus.c | 2 +
71
block/nvme.c | 24 ++
86
hw/scsi/scsi-disk.c | 15 ++
72
block/preallocate.c | 559 +++++++++++++++++++++++++
87
hw/scsi/scsi-generic.c | 20 +-
73
block/quorum.c | 88 +++-
88
hw/scsi/virtio-scsi.c | 7 +
74
qemu-io-cmds.c | 46 +-
89
io/channel-command.c | 13 +
75
block/meson.build | 1 +
90
io/channel-file.c | 11 +
76
scripts/simplebench/bench-example.py | 3 +-
91
io/channel-socket.c | 16 +-
77
scripts/simplebench/bench_prealloc.py | 132 ++++++
92
io/channel-tls.c | 12 +
78
scripts/simplebench/bench_write_req.py | 3 +-
93
io/channel-watch.c | 6 +
79
scripts/simplebench/results_to_text.py | 126 ++++++
94
io/channel.c | 97 ++++++--
80
scripts/simplebench/simplebench.py | 66 ++-
95
nbd/client.c | 2 +-
81
tests/qemu-iotests/085.out | 167 ++++++--
96
nbd/common.c | 9 +-
82
tests/qemu-iotests/094.out | 10 +-
97
nbd/server.c | 94 +++-----
83
tests/qemu-iotests/095.out | 4 +-
98
stubs/linux-aio.c | 32 +++
84
tests/qemu-iotests/102 | 2 +-
99
stubs/set-fd-handler.c | 11 -
85
tests/qemu-iotests/102.out | 2 +-
100
tests/iothread.c | 91 +++++++
86
tests/qemu-iotests/109.out | 88 +++-
101
tests/test-aio-multithread.c | 463 ++++++++++++++++++++++++++++++++++++
87
tests/qemu-iotests/117.out | 13 +-
102
tests/test-thread-pool.c | 12 +-
88
tests/qemu-iotests/127.out | 12 +-
103
aio-posix.c => util/aio-posix.c | 62 ++---
89
tests/qemu-iotests/140.out | 10 +-
104
aio-win32.c => util/aio-win32.c | 30 +--
90
tests/qemu-iotests/141.out | 128 ++++--
105
util/aiocb.c | 55 +++++
91
tests/qemu-iotests/143.out | 4 +-
106
async.c => util/async.c | 84 ++++++-
92
tests/qemu-iotests/144.out | 28 +-
107
iohandler.c => util/iohandler.c | 0
93
tests/qemu-iotests/153.out | 18 +-
108
main-loop.c => util/main-loop.c | 0
94
tests/qemu-iotests/156.out | 39 +-
109
util/qemu-coroutine-lock.c | 254 ++++++++++++++++++--
95
tests/qemu-iotests/161.out | 18 +-
110
util/qemu-coroutine-sleep.c | 2 +-
96
tests/qemu-iotests/173.out | 25 +-
111
util/qemu-coroutine.c | 8 +
97
tests/qemu-iotests/182.out | 42 +-
112
qemu-timer.c => util/qemu-timer.c | 0
98
tests/qemu-iotests/183.out | 19 +-
113
thread-pool.c => util/thread-pool.c | 8 +-
99
tests/qemu-iotests/185.out | 45 +-
114
trace-events | 11 -
100
tests/qemu-iotests/191.out | 12 +-
115
util/trace-events | 17 +-
101
tests/qemu-iotests/223.out | 92 ++--
116
67 files changed, 1712 insertions(+), 533 deletions(-)
102
tests/qemu-iotests/229.out | 13 +-
117
create mode 100644 tests/iothread.h
103
tests/qemu-iotests/249.out | 16 +-
118
create mode 100644 stubs/linux-aio.c
104
tests/qemu-iotests/298 | 186 ++++++++
119
create mode 100644 tests/iothread.c
105
tests/qemu-iotests/298.out | 5 +
120
create mode 100644 tests/test-aio-multithread.c
106
tests/qemu-iotests/308.out | 103 ++++-
121
rename aio-posix.c => util/aio-posix.c (94%)
107
tests/qemu-iotests/312 | 159 +++++++
122
rename aio-win32.c => util/aio-win32.c (95%)
108
tests/qemu-iotests/312.out | 81 ++++
123
create mode 100644 util/aiocb.c
109
tests/qemu-iotests/common.qemu | 11 +-
124
rename async.c => util/async.c (82%)
110
tests/qemu-iotests/group | 2 +
125
rename iohandler.c => util/iohandler.c (100%)
111
tests/qemu-iotests/iotests.py | 16 +-
126
rename main-loop.c => util/main-loop.c (100%)
112
48 files changed, 2357 insertions(+), 447 deletions(-)
127
rename qemu-timer.c => util/qemu-timer.c (100%)
113
create mode 100644 block/preallocate.c
128
rename thread-pool.c => util/thread-pool.c (97%)
114
create mode 100755 scripts/simplebench/bench_prealloc.py
115
create mode 100755 scripts/simplebench/results_to_text.py
116
create mode 100644 tests/qemu-iotests/298
117
create mode 100644 tests/qemu-iotests/298.out
118
create mode 100755 tests/qemu-iotests/312
119
create mode 100644 tests/qemu-iotests/312.out
120
129
121
--
2.29.2

--
2.9.3
From: Paolo Bonzini <pbonzini@redhat.com>

AioContext is fairly self-contained; the only dependency is QEMUTimer, but
that in turn doesn't need anything else. So move them out of block-obj-y
to avoid introducing a dependency from io/ to block-obj-y.

main-loop and its dependency iohandler also need to be moved, because
later in this series io/ will call iohandler_get_aio_context.

[Changed copyright "the QEMU team" to "other QEMU contributors" as
suggested by Daniel Berrange and agreed by Paolo.
--Stefan]

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Message-id: 20170213135235.12274-2-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---

From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>

bs is linked in req, so there is no need to pass it separately. Most of
the tracked-requests API doesn't have a bs argument. Actually, after this
patch only tracked_request_begin still has one, and that is intentional.

While we are here, also add a comment about what "_locked" means.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20201021145859.11201-5-vsementsov@virtuozzo.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
block/io.c | 10 +++++-----
19
Makefile.objs | 4 ---
15
1 file changed, 5 insertions(+), 5 deletions(-)
20
stubs/Makefile.objs | 1 +
16
21
tests/Makefile.include | 11 ++++----
22
util/Makefile.objs | 6 +++-
23
block/io.c | 29 -------------------
24
stubs/linux-aio.c | 32 +++++++++++++++++++++
25
stubs/set-fd-handler.c | 11 --------
26
aio-posix.c => util/aio-posix.c | 2 +-
27
aio-win32.c => util/aio-win32.c | 0
28
util/aiocb.c | 55 +++++++++++++++++++++++++++++++++++++
29
async.c => util/async.c | 3 +-
30
iohandler.c => util/iohandler.c | 0
31
main-loop.c => util/main-loop.c | 0
32
qemu-timer.c => util/qemu-timer.c | 0
33
thread-pool.c => util/thread-pool.c | 2 +-
34
trace-events | 11 --------
35
util/trace-events | 11 ++++++++
36
17 files changed, 114 insertions(+), 64 deletions(-)
37
create mode 100644 stubs/linux-aio.c
38
rename aio-posix.c => util/aio-posix.c (99%)
39
rename aio-win32.c => util/aio-win32.c (100%)
40
create mode 100644 util/aiocb.c
41
rename async.c => util/async.c (99%)
42
rename iohandler.c => util/iohandler.c (100%)
43
rename main-loop.c => util/main-loop.c (100%)
44
rename qemu-timer.c => util/qemu-timer.c (100%)
45
rename thread-pool.c => util/thread-pool.c (99%)
46
47
diff --git a/Makefile.objs b/Makefile.objs
48
index XXXXXXX..XXXXXXX 100644
49
--- a/Makefile.objs
50
+++ b/Makefile.objs
51
@@ -XXX,XX +XXX,XX @@ chardev-obj-y = chardev/
52
#######################################################################
53
# block-obj-y is code used by both qemu system emulation and qemu-img
54
55
-block-obj-y = async.o thread-pool.o
56
block-obj-y += nbd/
57
block-obj-y += block.o blockjob.o
58
-block-obj-y += main-loop.o iohandler.o qemu-timer.o
59
-block-obj-$(CONFIG_POSIX) += aio-posix.o
60
-block-obj-$(CONFIG_WIN32) += aio-win32.o
61
block-obj-y += block/
62
block-obj-y += qemu-io-cmds.o
63
block-obj-$(CONFIG_REPLICATION) += replication.o
64
diff --git a/stubs/Makefile.objs b/stubs/Makefile.objs
65
index XXXXXXX..XXXXXXX 100644
66
--- a/stubs/Makefile.objs
67
+++ b/stubs/Makefile.objs
68
@@ -XXX,XX +XXX,XX @@ stub-obj-y += get-vm-name.o
69
stub-obj-y += iothread.o
70
stub-obj-y += iothread-lock.o
71
stub-obj-y += is-daemonized.o
72
+stub-obj-$(CONFIG_LINUX_AIO) += linux-aio.o
73
stub-obj-y += machine-init-done.o
74
stub-obj-y += migr-blocker.o
75
stub-obj-y += monitor.o
76
diff --git a/tests/Makefile.include b/tests/Makefile.include
77
index XXXXXXX..XXXXXXX 100644
78
--- a/tests/Makefile.include
79
+++ b/tests/Makefile.include
80
@@ -XXX,XX +XXX,XX @@ check-unit-y += tests/test-visitor-serialization$(EXESUF)
81
check-unit-y += tests/test-iov$(EXESUF)
82
gcov-files-test-iov-y = util/iov.c
83
check-unit-y += tests/test-aio$(EXESUF)
84
+gcov-files-test-aio-y = util/async.c util/qemu-timer.o
85
+gcov-files-test-aio-$(CONFIG_WIN32) += util/aio-win32.c
86
+gcov-files-test-aio-$(CONFIG_POSIX) += util/aio-posix.c
87
check-unit-y += tests/test-throttle$(EXESUF)
88
gcov-files-test-aio-$(CONFIG_WIN32) = aio-win32.c
89
gcov-files-test-aio-$(CONFIG_POSIX) = aio-posix.c
90
@@ -XXX,XX +XXX,XX @@ tests/check-qjson$(EXESUF): tests/check-qjson.o $(test-util-obj-y)
91
tests/check-qom-interface$(EXESUF): tests/check-qom-interface.o $(test-qom-obj-y)
92
tests/check-qom-proplist$(EXESUF): tests/check-qom-proplist.o $(test-qom-obj-y)
93
94
-tests/test-char$(EXESUF): tests/test-char.o qemu-timer.o \
95
-    $(test-util-obj-y) $(qtest-obj-y) $(test-block-obj-y) $(chardev-obj-y)
96
+tests/test-char$(EXESUF): tests/test-char.o $(test-util-obj-y) $(qtest-obj-y) $(test-io-obj-y) $(chardev-obj-y)
97
tests/test-coroutine$(EXESUF): tests/test-coroutine.o $(test-block-obj-y)
98
tests/test-aio$(EXESUF): tests/test-aio.o $(test-block-obj-y)
99
tests/test-throttle$(EXESUF): tests/test-throttle.o $(test-block-obj-y)
100
@@ -XXX,XX +XXX,XX @@ tests/test-vmstate$(EXESUF): tests/test-vmstate.o \
101
    migration/vmstate.o migration/qemu-file.o \
102
migration/qemu-file-channel.o migration/qjson.o \
103
    $(test-io-obj-y)
104
-tests/test-timed-average$(EXESUF): tests/test-timed-average.o qemu-timer.o \
105
-    $(test-util-obj-y)
106
+tests/test-timed-average$(EXESUF): tests/test-timed-average.o $(test-util-obj-y)
107
tests/test-base64$(EXESUF): tests/test-base64.o \
108
    libqemuutil.a libqemustub.a
109
tests/ptimer-test$(EXESUF): tests/ptimer-test.o tests/ptimer-test-stubs.o hw/core/ptimer.o libqemustub.a
110
@@ -XXX,XX +XXX,XX @@ tests/usb-hcd-ehci-test$(EXESUF): tests/usb-hcd-ehci-test.o $(libqos-usb-obj-y)
111
tests/usb-hcd-xhci-test$(EXESUF): tests/usb-hcd-xhci-test.o $(libqos-usb-obj-y)
112
tests/pc-cpu-test$(EXESUF): tests/pc-cpu-test.o
113
tests/postcopy-test$(EXESUF): tests/postcopy-test.o
114
-tests/vhost-user-test$(EXESUF): tests/vhost-user-test.o qemu-timer.o \
115
+tests/vhost-user-test$(EXESUF): tests/vhost-user-test.o $(test-util-obj-y) \
116
    $(qtest-obj-y) $(test-io-obj-y) $(libqos-virtio-obj-y) $(libqos-pc-obj-y) \
117
    $(chardev-obj-y)
118
tests/qemu-iotests/socket_scm_helper$(EXESUF): tests/qemu-iotests/socket_scm_helper.o
119
diff --git a/util/Makefile.objs b/util/Makefile.objs
120
index XXXXXXX..XXXXXXX 100644
121
--- a/util/Makefile.objs
122
+++ b/util/Makefile.objs
123
@@ -XXX,XX +XXX,XX @@
124
util-obj-y = osdep.o cutils.o unicode.o qemu-timer-common.o
125
util-obj-y += bufferiszero.o
126
util-obj-y += lockcnt.o
127
+util-obj-y += aiocb.o async.o thread-pool.o qemu-timer.o
128
+util-obj-y += main-loop.o iohandler.o
129
+util-obj-$(CONFIG_POSIX) += aio-posix.o
130
util-obj-$(CONFIG_POSIX) += compatfd.o
131
util-obj-$(CONFIG_POSIX) += event_notifier-posix.o
132
util-obj-$(CONFIG_POSIX) += mmap-alloc.o
133
util-obj-$(CONFIG_POSIX) += oslib-posix.o
134
util-obj-$(CONFIG_POSIX) += qemu-openpty.o
135
util-obj-$(CONFIG_POSIX) += qemu-thread-posix.o
136
-util-obj-$(CONFIG_WIN32) += event_notifier-win32.o
137
util-obj-$(CONFIG_POSIX) += memfd.o
138
+util-obj-$(CONFIG_WIN32) += aio-win32.o
139
+util-obj-$(CONFIG_WIN32) += event_notifier-win32.o
140
util-obj-$(CONFIG_WIN32) += oslib-win32.o
141
util-obj-$(CONFIG_WIN32) += qemu-thread-win32.o
142
util-obj-y += envlist.o path.o module.o
17
diff --git a/block/io.c b/block/io.c
143
diff --git a/block/io.c b/block/io.c
18
index XXXXXXX..XXXXXXX 100644
144
index XXXXXXX..XXXXXXX 100644
19
--- a/block/io.c
145
--- a/block/io.c
20
+++ b/block/io.c
146
+++ b/block/io.c
21
@@ -XXX,XX +XXX,XX @@ bdrv_find_conflicting_request(BdrvTrackedRequest *self)
147
@@ -XXX,XX +XXX,XX @@ BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
22
return NULL;
148
return &acb->common;
23
}
149
}
24
150
25
+/* Called with self->bs->reqs_lock held */
151
-void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
26
static bool coroutine_fn
152
- BlockCompletionFunc *cb, void *opaque)
27
-bdrv_wait_serialising_requests_locked(BlockDriverState *bs,
153
-{
28
- BdrvTrackedRequest *self)
154
- BlockAIOCB *acb;
29
+bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
155
-
156
- acb = g_malloc(aiocb_info->aiocb_size);
157
- acb->aiocb_info = aiocb_info;
158
- acb->bs = bs;
159
- acb->cb = cb;
160
- acb->opaque = opaque;
161
- acb->refcnt = 1;
162
- return acb;
163
-}
164
-
165
-void qemu_aio_ref(void *p)
166
-{
167
- BlockAIOCB *acb = p;
168
- acb->refcnt++;
169
-}
170
-
171
-void qemu_aio_unref(void *p)
172
-{
173
- BlockAIOCB *acb = p;
174
- assert(acb->refcnt > 0);
175
- if (--acb->refcnt == 0) {
176
- g_free(acb);
177
- }
178
-}
179
-
180
/**************************************************************/
181
/* Coroutine block device emulation */
182
183
diff --git a/stubs/linux-aio.c b/stubs/linux-aio.c
184
new file mode 100644
185
index XXXXXXX..XXXXXXX
186
--- /dev/null
187
+++ b/stubs/linux-aio.c
188
@@ -XXX,XX +XXX,XX @@
189
+/*
190
+ * Linux native AIO support.
191
+ *
192
+ * Copyright (C) 2009 IBM, Corp.
193
+ * Copyright (C) 2009 Red Hat, Inc.
194
+ *
195
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
196
+ * See the COPYING file in the top-level directory.
197
+ */
198
+#include "qemu/osdep.h"
199
+#include "block/aio.h"
200
+#include "block/raw-aio.h"
201
+
202
+void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
203
+{
204
+ abort();
205
+}
206
+
207
+void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
208
+{
209
+ abort();
210
+}
211
+
212
+LinuxAioState *laio_init(void)
213
+{
214
+ abort();
215
+}
216
+
217
+void laio_cleanup(LinuxAioState *s)
218
+{
219
+ abort();
220
+}
221
diff --git a/stubs/set-fd-handler.c b/stubs/set-fd-handler.c
222
index XXXXXXX..XXXXXXX 100644
223
--- a/stubs/set-fd-handler.c
224
+++ b/stubs/set-fd-handler.c
225
@@ -XXX,XX +XXX,XX @@ void qemu_set_fd_handler(int fd,
30
{
226
{
31
BdrvTrackedRequest *req;
227
abort();
32
bool waited = false;
33
34
while ((req = bdrv_find_conflicting_request(self))) {
35
self->waiting_for = req;
36
- qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
37
+ qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
38
self->waiting_for = NULL;
39
waited = true;
40
}
41
@@ -XXX,XX +XXX,XX @@ bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
42
43
req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
44
req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
45
- waited = bdrv_wait_serialising_requests_locked(bs, req);
46
+ waited = bdrv_wait_serialising_requests_locked(req);
47
qemu_co_mutex_unlock(&bs->reqs_lock);
48
return waited;
49
}
228
}
50
@@ -XXX,XX +XXX,XX @@ static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self
229
-
51
}
230
-void aio_set_fd_handler(AioContext *ctx,
52
231
- int fd,
53
qemu_co_mutex_lock(&bs->reqs_lock);
232
- bool is_external,
54
- waited = bdrv_wait_serialising_requests_locked(bs, self);
233
- IOHandler *io_read,
55
+ waited = bdrv_wait_serialising_requests_locked(self);
234
- IOHandler *io_write,
56
qemu_co_mutex_unlock(&bs->reqs_lock);
235
- AioPollFn *io_poll,
57
236
- void *opaque)
58
return waited;
237
-{
238
- abort();
239
-}
240
diff --git a/aio-posix.c b/util/aio-posix.c
241
similarity index 99%
242
rename from aio-posix.c
243
rename to util/aio-posix.c
244
index XXXXXXX..XXXXXXX 100644
245
--- a/aio-posix.c
246
+++ b/util/aio-posix.c
247
@@ -XXX,XX +XXX,XX @@
248
#include "qemu/rcu_queue.h"
249
#include "qemu/sockets.h"
250
#include "qemu/cutils.h"
251
-#include "trace-root.h"
252
+#include "trace.h"
253
#ifdef CONFIG_EPOLL_CREATE1
254
#include <sys/epoll.h>
255
#endif
256
diff --git a/aio-win32.c b/util/aio-win32.c
257
similarity index 100%
258
rename from aio-win32.c
259
rename to util/aio-win32.c
260
diff --git a/util/aiocb.c b/util/aiocb.c
261
new file mode 100644
262
index XXXXXXX..XXXXXXX
263
--- /dev/null
264
+++ b/util/aiocb.c
265
@@ -XXX,XX +XXX,XX @@
266
+/*
267
+ * BlockAIOCB allocation
268
+ *
269
+ * Copyright (c) 2003-2017 Fabrice Bellard and other QEMU contributors
270
+ *
271
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
272
+ * of this software and associated documentation files (the "Software"), to deal
273
+ * in the Software without restriction, including without limitation the rights
274
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
275
+ * copies of the Software, and to permit persons to whom the Software is
276
+ * furnished to do so, subject to the following conditions:
277
+ *
278
+ * The above copyright notice and this permission notice shall be included in
279
+ * all copies or substantial portions of the Software.
280
+ *
281
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
282
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
283
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
284
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
285
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
286
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
287
+ * THE SOFTWARE.
288
+ */
289
+
290
+#include "qemu/osdep.h"
291
+#include "block/aio.h"
292
+
293
+void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
294
+ BlockCompletionFunc *cb, void *opaque)
295
+{
296
+ BlockAIOCB *acb;
297
+
298
+ acb = g_malloc(aiocb_info->aiocb_size);
299
+ acb->aiocb_info = aiocb_info;
300
+ acb->bs = bs;
301
+ acb->cb = cb;
302
+ acb->opaque = opaque;
303
+ acb->refcnt = 1;
304
+ return acb;
305
+}
306
+
307
+void qemu_aio_ref(void *p)
308
+{
309
+ BlockAIOCB *acb = p;
310
+ acb->refcnt++;
311
+}
312
+
313
+void qemu_aio_unref(void *p)
314
+{
315
+ BlockAIOCB *acb = p;
316
+ assert(acb->refcnt > 0);
317
+ if (--acb->refcnt == 0) {
318
+ g_free(acb);
319
+ }
320
+}
321
diff --git a/async.c b/util/async.c
322
similarity index 99%
323
rename from async.c
324
rename to util/async.c
325
index XXXXXXX..XXXXXXX 100644
326
--- a/async.c
327
+++ b/util/async.c
328
@@ -XXX,XX +XXX,XX @@
329
/*
330
- * QEMU System Emulator
331
+ * Data plane event loop
332
*
333
* Copyright (c) 2003-2008 Fabrice Bellard
334
+ * Copyright (c) 2009-2017 QEMU contributors
335
*
336
* Permission is hereby granted, free of charge, to any person obtaining a copy
337
* of this software and associated documentation files (the "Software"), to deal
338
diff --git a/iohandler.c b/util/iohandler.c
339
similarity index 100%
340
rename from iohandler.c
341
rename to util/iohandler.c
342
diff --git a/main-loop.c b/util/main-loop.c
343
similarity index 100%
344
rename from main-loop.c
345
rename to util/main-loop.c
346
diff --git a/qemu-timer.c b/util/qemu-timer.c
347
similarity index 100%
348
rename from qemu-timer.c
349
rename to util/qemu-timer.c
350
diff --git a/thread-pool.c b/util/thread-pool.c
351
similarity index 99%
352
rename from thread-pool.c
353
rename to util/thread-pool.c
354
index XXXXXXX..XXXXXXX 100644
355
--- a/thread-pool.c
356
+++ b/util/thread-pool.c
357
@@ -XXX,XX +XXX,XX @@
358
#include "qemu/queue.h"
359
#include "qemu/thread.h"
360
#include "qemu/coroutine.h"
361
-#include "trace-root.h"
362
+#include "trace.h"
363
#include "block/thread-pool.h"
364
#include "qemu/main-loop.h"
365
366
diff --git a/trace-events b/trace-events
367
index XXXXXXX..XXXXXXX 100644
368
--- a/trace-events
369
+++ b/trace-events
370
@@ -XXX,XX +XXX,XX @@
371
#
372
# The <format-string> should be a sprintf()-compatible format string.
373
374
-# aio-posix.c
375
-run_poll_handlers_begin(void *ctx, int64_t max_ns) "ctx %p max_ns %"PRId64
376
-run_poll_handlers_end(void *ctx, bool progress) "ctx %p progress %d"
377
-poll_shrink(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
378
-poll_grow(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
379
-
380
-# thread-pool.c
381
-thread_pool_submit(void *pool, void *req, void *opaque) "pool %p req %p opaque %p"
382
-thread_pool_complete(void *pool, void *req, void *opaque, int ret) "pool %p req %p opaque %p ret %d"
383
-thread_pool_cancel(void *req, void *opaque) "req %p opaque %p"
384
-
385
# ioport.c
386
cpu_in(unsigned int addr, char size, unsigned int val) "addr %#x(%c) value %u"
387
cpu_out(unsigned int addr, char size, unsigned int val) "addr %#x(%c) value %u"
388
diff --git a/util/trace-events b/util/trace-events
389
index XXXXXXX..XXXXXXX 100644
390
--- a/util/trace-events
391
+++ b/util/trace-events
392
@@ -XXX,XX +XXX,XX @@
393
# See docs/tracing.txt for syntax documentation.
394
395
+# util/aio-posix.c
396
+run_poll_handlers_begin(void *ctx, int64_t max_ns) "ctx %p max_ns %"PRId64
397
+run_poll_handlers_end(void *ctx, bool progress) "ctx %p progress %d"
398
+poll_shrink(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
399
+poll_grow(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
400
+
401
+# util/thread-pool.c
402
+thread_pool_submit(void *pool, void *req, void *opaque) "pool %p req %p opaque %p"
403
+thread_pool_complete(void *pool, void *req, void *opaque, int ret) "pool %p req %p opaque %p ret %d"
404
+thread_pool_cancel(void *req, void *opaque) "req %p opaque %p"
405
+
406
# util/buffer.c
407
buffer_resize(const char *buf, size_t olen, size_t len) "%s: old %zd, new %zd"
408
buffer_move_empty(const char *buf, size_t len, const char *from) "%s: %zd bytes from %s"
59
--
2.29.2

--
2.9.3
From: Paolo Bonzini <pbonzini@redhat.com>

aio_co_wake provides the infrastructure to start a coroutine on a "home"
AioContext. It will be used by CoMutex and CoQueue, so that coroutines
don't jump from one context to another when they go to sleep on a
mutex or waitqueue. However, it can also be used as a more efficient
alternative to one-shot bottom halves, and saves the effort of tracking
which AioContext a coroutine is running on.

aio_co_schedule is the part of aio_co_wake that starts a coroutine
on a remote AioContext, but it is also useful to implement e.g.
bdrv_set_aio_context callbacks.

The implementation of aio_co_schedule is based on a lock-free
multiple-producer, single-consumer queue. The multiple producers use
cmpxchg to add to a LIFO stack. The consumer (a per-AioContext bottom
half) grabs all items added so far, inverts the list to make it FIFO,
and goes through it one item at a time until it's empty. The data
structure was inspired by OSv, which uses it in the very code we'll
"port" to QEMU for the thread-safe CoMutex.

Most of the new code is really tests.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Message-id: 20170213135235.12274-3-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---

From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20201021145859.11201-13-vsementsov@virtuozzo.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
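For reference, the lock-free multiple-producer/single-consumer scheme that
Paolo's commit message above describes can be sketched as follows. This is
only a sketch: it uses C11 atomics instead of QEMU's
QSLIST_INSERT_HEAD_ATOMIC/QSLIST_MOVE_ATOMIC helpers, and the Item,
push_item and drain_items names are illustrative, not QEMU API.

/*
 * Sketch of the MPSC queue: producers push onto a LIFO stack with a
 * compare-and-swap loop; the single consumer takes the whole stack at
 * once and reverses it so items run in submission (FIFO) order.
 */
#include <stdatomic.h>
#include <stdio.h>

typedef struct Item {
    struct Item *next;
    int id;                          /* stands in for a Coroutine pointer */
} Item;

static _Atomic(Item *) stack_head;   /* LIFO stack shared by all producers */

/* Any thread: lock-free push onto the LIFO stack. */
static void push_item(Item *item)
{
    Item *old = atomic_load(&stack_head);
    do {
        item->next = old;
    } while (!atomic_compare_exchange_weak(&stack_head, &old, item));
}

/* Single consumer (the per-AioContext bottom half in the real code):
 * grab everything pushed so far, invert it, then process it. */
static void drain_items(void)
{
    Item *lifo = atomic_exchange(&stack_head, NULL);
    Item *fifo = NULL;

    while (lifo) {                   /* invert LIFO into FIFO */
        Item *i = lifo;
        lifo = i->next;
        i->next = fifo;
        fifo = i;
    }
    while (fifo) {                   /* enter items in submission order */
        Item *i = fifo;
        fifo = i->next;
        printf("entering %d\n", i->id);   /* ~ qemu_coroutine_enter(co) */
    }
}

int main(void)
{
    static Item items[3] = { { .id = 1 }, { .id = 2 }, { .id = 3 } };

    for (int n = 0; n < 3; n++) {
        push_item(&items[n]);        /* producers would do this concurrently */
    }
    drain_items();                   /* prints 1, 2, 3 in submission order */
    return 0;
}

The LIFO push is the cheapest structure the producers can maintain without
a lock, while the single consumer can afford one linear pass to restore
submission order.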
8
tests/qemu-iotests/298 | 186 +++++++++++++++++++++++++++++++++++++
29
tests/Makefile.include | 8 +-
9
tests/qemu-iotests/298.out | 5 +
30
include/block/aio.h | 32 +++++++
10
tests/qemu-iotests/group | 1 +
31
include/qemu/coroutine_int.h | 11 ++-
11
3 files changed, 192 insertions(+)
32
tests/iothread.h | 25 +++++
12
create mode 100644 tests/qemu-iotests/298
33
tests/iothread.c | 91 ++++++++++++++++++
13
create mode 100644 tests/qemu-iotests/298.out
34
tests/test-aio-multithread.c | 213 +++++++++++++++++++++++++++++++++++++++++++
14
35
util/async.c | 65 +++++++++++++
15
diff --git a/tests/qemu-iotests/298 b/tests/qemu-iotests/298
36
util/qemu-coroutine.c | 8 ++
37
util/trace-events | 4 +
38
9 files changed, 453 insertions(+), 4 deletions(-)
39
create mode 100644 tests/iothread.h
40
create mode 100644 tests/iothread.c
41
create mode 100644 tests/test-aio-multithread.c
42
43
diff --git a/tests/Makefile.include b/tests/Makefile.include
44
index XXXXXXX..XXXXXXX 100644
45
--- a/tests/Makefile.include
46
+++ b/tests/Makefile.include
47
@@ -XXX,XX +XXX,XX @@ check-unit-y += tests/test-aio$(EXESUF)
48
gcov-files-test-aio-y = util/async.c util/qemu-timer.o
49
gcov-files-test-aio-$(CONFIG_WIN32) += util/aio-win32.c
50
gcov-files-test-aio-$(CONFIG_POSIX) += util/aio-posix.c
51
+check-unit-y += tests/test-aio-multithread$(EXESUF)
52
+gcov-files-test-aio-multithread-y = $(gcov-files-test-aio-y)
53
+gcov-files-test-aio-multithread-y += util/qemu-coroutine.c tests/iothread.c
54
check-unit-y += tests/test-throttle$(EXESUF)
55
-gcov-files-test-aio-$(CONFIG_WIN32) = aio-win32.c
56
-gcov-files-test-aio-$(CONFIG_POSIX) = aio-posix.c
57
check-unit-y += tests/test-thread-pool$(EXESUF)
58
gcov-files-test-thread-pool-y = thread-pool.c
59
gcov-files-test-hbitmap-y = util/hbitmap.c
60
@@ -XXX,XX +XXX,XX @@ test-qapi-obj-y = tests/test-qapi-visit.o tests/test-qapi-types.o \
61
    $(test-qom-obj-y)
62
test-crypto-obj-y = $(crypto-obj-y) $(test-qom-obj-y)
63
test-io-obj-y = $(io-obj-y) $(test-crypto-obj-y)
64
-test-block-obj-y = $(block-obj-y) $(test-io-obj-y)
65
+test-block-obj-y = $(block-obj-y) $(test-io-obj-y) tests/iothread.o
66
67
tests/check-qint$(EXESUF): tests/check-qint.o $(test-util-obj-y)
68
tests/check-qstring$(EXESUF): tests/check-qstring.o $(test-util-obj-y)
69
@@ -XXX,XX +XXX,XX @@ tests/check-qom-proplist$(EXESUF): tests/check-qom-proplist.o $(test-qom-obj-y)
70
tests/test-char$(EXESUF): tests/test-char.o $(test-util-obj-y) $(qtest-obj-y) $(test-io-obj-y) $(chardev-obj-y)
71
tests/test-coroutine$(EXESUF): tests/test-coroutine.o $(test-block-obj-y)
72
tests/test-aio$(EXESUF): tests/test-aio.o $(test-block-obj-y)
73
+tests/test-aio-multithread$(EXESUF): tests/test-aio-multithread.o $(test-block-obj-y)
74
tests/test-throttle$(EXESUF): tests/test-throttle.o $(test-block-obj-y)
75
tests/test-blockjob$(EXESUF): tests/test-blockjob.o $(test-block-obj-y) $(test-util-obj-y)
76
tests/test-blockjob-txn$(EXESUF): tests/test-blockjob-txn.o $(test-block-obj-y) $(test-util-obj-y)
77
diff --git a/include/block/aio.h b/include/block/aio.h
78
index XXXXXXX..XXXXXXX 100644
79
--- a/include/block/aio.h
80
+++ b/include/block/aio.h
81
@@ -XXX,XX +XXX,XX @@ typedef void QEMUBHFunc(void *opaque);
82
typedef bool AioPollFn(void *opaque);
83
typedef void IOHandler(void *opaque);
84
85
+struct Coroutine;
86
struct ThreadPool;
87
struct LinuxAioState;
88
89
@@ -XXX,XX +XXX,XX @@ struct AioContext {
90
bool notified;
91
EventNotifier notifier;
92
93
+ QSLIST_HEAD(, Coroutine) scheduled_coroutines;
94
+ QEMUBH *co_schedule_bh;
95
+
96
/* Thread pool for performing work and receiving completion callbacks.
97
* Has its own locking.
98
*/
99
@@ -XXX,XX +XXX,XX @@ static inline bool aio_node_check(AioContext *ctx, bool is_external)
100
}
101
102
/**
103
+ * aio_co_schedule:
104
+ * @ctx: the aio context
105
+ * @co: the coroutine
106
+ *
107
+ * Start a coroutine on a remote AioContext.
108
+ *
109
+ * The coroutine must not be entered by anyone else while aio_co_schedule()
110
+ * is active. In addition the coroutine must have yielded unless ctx
111
+ * is the context in which the coroutine is running (i.e. the value of
112
+ * qemu_get_current_aio_context() from the coroutine itself).
113
+ */
114
+void aio_co_schedule(AioContext *ctx, struct Coroutine *co);
115
+
116
+/**
117
+ * aio_co_wake:
118
+ * @co: the coroutine
119
+ *
120
+ * Restart a coroutine on the AioContext where it was running last, thus
121
+ * preventing coroutines from jumping from one context to another when they
122
+ * go to sleep.
123
+ *
124
+ * aio_co_wake may be executed either in coroutine or non-coroutine
125
+ * context. The coroutine must not be entered by anyone else while
126
+ * aio_co_wake() is active.
127
+ */
128
+void aio_co_wake(struct Coroutine *co);
129
+
130
+/**
131
* Return the AioContext whose event loop runs in the current thread.
132
*
133
* If called from an IOThread this will be the IOThread's AioContext. If
134
diff --git a/include/qemu/coroutine_int.h b/include/qemu/coroutine_int.h
135
index XXXXXXX..XXXXXXX 100644
136
--- a/include/qemu/coroutine_int.h
137
+++ b/include/qemu/coroutine_int.h
138
@@ -XXX,XX +XXX,XX @@ struct Coroutine {
139
CoroutineEntry *entry;
140
void *entry_arg;
141
Coroutine *caller;
142
+
143
+ /* Only used when the coroutine has terminated. */
144
QSLIST_ENTRY(Coroutine) pool_next;
145
+
146
size_t locks_held;
147
148
- /* Coroutines that should be woken up when we yield or terminate */
149
+ /* Coroutines that should be woken up when we yield or terminate.
150
+ * Only used when the coroutine is running.
151
+ */
152
QSIMPLEQ_HEAD(, Coroutine) co_queue_wakeup;
153
+
154
+ /* Only used when the coroutine has yielded. */
155
+ AioContext *ctx;
156
QSIMPLEQ_ENTRY(Coroutine) co_queue_next;
157
+ QSLIST_ENTRY(Coroutine) co_scheduled_next;
158
};
159
160
Coroutine *qemu_coroutine_new(void);
161
diff --git a/tests/iothread.h b/tests/iothread.h
16
new file mode 100644
162
new file mode 100644
17
index XXXXXXX..XXXXXXX
163
index XXXXXXX..XXXXXXX
18
--- /dev/null
164
--- /dev/null
19
+++ b/tests/qemu-iotests/298
165
+++ b/tests/iothread.h
20
@@ -XXX,XX +XXX,XX @@
166
@@ -XXX,XX +XXX,XX @@
21
+#!/usr/bin/env python3
167
+/*
22
+#
168
+ * Event loop thread implementation for unit tests
23
+# Test for preallocate filter
169
+ *
24
+#
170
+ * Copyright Red Hat Inc., 2013, 2016
25
+# Copyright (c) 2020 Virtuozzo International GmbH.
171
+ *
26
+#
172
+ * Authors:
27
+# This program is free software; you can redistribute it and/or modify
173
+ * Stefan Hajnoczi <stefanha@redhat.com>
28
+# it under the terms of the GNU General Public License as published by
174
+ * Paolo Bonzini <pbonzini@redhat.com>
29
+# the Free Software Foundation; either version 2 of the License, or
175
+ *
30
+# (at your option) any later version.
176
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
31
+#
177
+ * See the COPYING file in the top-level directory.
32
+# This program is distributed in the hope that it will be useful,
178
+ */
33
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
179
+#ifndef TEST_IOTHREAD_H
34
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
180
+#define TEST_IOTHREAD_H
35
+# GNU General Public License for more details.
181
+
36
+#
182
+#include "block/aio.h"
37
+# You should have received a copy of the GNU General Public License
183
+#include "qemu/thread.h"
38
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
184
+
39
+#
185
+typedef struct IOThread IOThread;
40
+
186
+
41
+import os
187
+IOThread *iothread_new(void);
42
+import iotests
188
+void iothread_join(IOThread *iothread);
43
+
189
+AioContext *iothread_get_aio_context(IOThread *iothread);
44
+MiB = 1024 * 1024
190
+
45
+disk = os.path.join(iotests.test_dir, 'disk')
191
+#endif
46
+overlay = os.path.join(iotests.test_dir, 'overlay')
192
diff --git a/tests/iothread.c b/tests/iothread.c
47
+refdisk = os.path.join(iotests.test_dir, 'refdisk')
48
+drive_opts = f'node-name=disk,driver={iotests.imgfmt},' \
49
+ f'file.node-name=filter,file.driver=preallocate,' \
50
+ f'file.file.node-name=file,file.file.filename={disk}'
51
+
52
+
53
+class TestPreallocateBase(iotests.QMPTestCase):
54
+ def setUp(self):
55
+ iotests.qemu_img_create('-f', iotests.imgfmt, disk, str(10 * MiB))
56
+
57
+ def tearDown(self):
58
+ try:
59
+ self.check_small()
60
+ check = iotests.qemu_img_check(disk)
61
+ self.assertFalse('leaks' in check)
62
+ self.assertFalse('corruptions' in check)
63
+ self.assertEqual(check['check-errors'], 0)
64
+ finally:
65
+ os.remove(disk)
66
+
67
+ def check_big(self):
68
+ self.assertTrue(os.path.getsize(disk) > 100 * MiB)
69
+
70
+ def check_small(self):
71
+ self.assertTrue(os.path.getsize(disk) < 10 * MiB)
72
+
73
+
74
+class TestQemuImg(TestPreallocateBase):
75
+ def test_qemu_img(self):
76
+ p = iotests.QemuIoInteractive('--image-opts', drive_opts)
77
+
78
+ p.cmd('write 0 1M')
79
+ p.cmd('flush')
80
+
81
+ self.check_big()
82
+
83
+ p.close()
84
+
85
+
86
+class TestPreallocateFilter(TestPreallocateBase):
87
+ def setUp(self):
88
+ super().setUp()
89
+ self.vm = iotests.VM().add_drive(path=None, opts=drive_opts)
90
+ self.vm.launch()
91
+
92
+ def tearDown(self):
93
+ self.vm.shutdown()
94
+ super().tearDown()
95
+
96
+ def test_prealloc(self):
97
+ self.vm.hmp_qemu_io('drive0', 'write 0 1M')
98
+ self.check_big()
99
+
100
+ def test_external_snapshot(self):
101
+ self.test_prealloc()
102
+
103
+ result = self.vm.qmp('blockdev-snapshot-sync', node_name='disk',
104
+ snapshot_file=overlay,
105
+ snapshot_node_name='overlay')
106
+ self.assert_qmp(result, 'return', {})
107
+
108
+ # on reopen to r-o base preallocation should be dropped
109
+ self.check_small()
110
+
111
+ self.vm.hmp_qemu_io('drive0', 'write 1M 1M')
112
+
113
+ result = self.vm.qmp('block-commit', device='overlay')
114
+ self.assert_qmp(result, 'return', {})
115
+ self.complete_and_wait()
116
+
117
+ # commit of new megabyte should trigger preallocation
118
+ self.check_big()
119
+
120
+ def test_reopen_opts(self):
121
+ result = self.vm.qmp('x-blockdev-reopen', **{
122
+ 'node-name': 'disk',
123
+ 'driver': iotests.imgfmt,
124
+ 'file': {
125
+ 'node-name': 'filter',
126
+ 'driver': 'preallocate',
127
+ 'prealloc-size': 20 * MiB,
128
+ 'prealloc-align': 5 * MiB,
129
+ 'file': {
130
+ 'node-name': 'file',
131
+ 'driver': 'file',
132
+ 'filename': disk
133
+ }
134
+ }
135
+ })
136
+ self.assert_qmp(result, 'return', {})
137
+
138
+ self.vm.hmp_qemu_io('drive0', 'write 0 1M')
139
+ self.assertTrue(os.path.getsize(disk) == 25 * MiB)
140
+
141
+
142
+class TestTruncate(iotests.QMPTestCase):
143
+ def setUp(self):
144
+ iotests.qemu_img_create('-f', iotests.imgfmt, disk, str(10 * MiB))
145
+ iotests.qemu_img_create('-f', iotests.imgfmt, refdisk, str(10 * MiB))
146
+
147
+ def tearDown(self):
148
+ os.remove(disk)
149
+ os.remove(refdisk)
150
+
151
+ def do_test(self, prealloc_mode, new_size):
152
+ ret = iotests.qemu_io_silent('--image-opts', '-c', 'write 0 10M', '-c',
153
+ f'truncate -m {prealloc_mode} {new_size}',
154
+ drive_opts)
155
+ self.assertEqual(ret, 0)
156
+
157
+ ret = iotests.qemu_io_silent('-f', iotests.imgfmt, '-c', 'write 0 10M',
158
+ '-c',
159
+ f'truncate -m {prealloc_mode} {new_size}',
160
+ refdisk)
161
+ self.assertEqual(ret, 0)
162
+
163
+ stat = os.stat(disk)
164
+ refstat = os.stat(refdisk)
165
+
166
+ # Probably we'll want preallocate filter to keep align to cluster when
167
+ # shrink preallocation, so, ignore small differece
168
+ self.assertLess(abs(stat.st_size - refstat.st_size), 64 * 1024)
169
+
170
+ # Preallocate filter may leak some internal clusters (for example, if
171
+ # guest write far over EOF, skipping some clusters - they will remain
172
+ # fallocated, preallocate filter don't care about such leaks, it drops
173
+ # only trailing preallocation.
174
+ self.assertLess(abs(stat.st_blocks - refstat.st_blocks) * 512,
175
+ 1024 * 1024)
176
+
177
+ def test_real_shrink(self):
178
+ self.do_test('off', '5M')
179
+
180
+ def test_truncate_inside_preallocated_area__falloc(self):
181
+ self.do_test('falloc', '50M')
182
+
183
+ def test_truncate_inside_preallocated_area__metadata(self):
184
+ self.do_test('metadata', '50M')
185
+
186
+ def test_truncate_inside_preallocated_area__full(self):
187
+ self.do_test('full', '50M')
188
+
189
+ def test_truncate_inside_preallocated_area__off(self):
190
+ self.do_test('off', '50M')
191
+
192
+ def test_truncate_over_preallocated_area__falloc(self):
193
+ self.do_test('falloc', '150M')
194
+
195
+ def test_truncate_over_preallocated_area__metadata(self):
196
+ self.do_test('metadata', '150M')
197
+
198
+ def test_truncate_over_preallocated_area__full(self):
199
+ self.do_test('full', '150M')
200
+
201
+ def test_truncate_over_preallocated_area__off(self):
202
+ self.do_test('off', '150M')
203
+
204
+
205
+if __name__ == '__main__':
206
+ iotests.main(supported_fmts=['qcow2'], required_fmts=['preallocate'])
207
diff --git a/tests/qemu-iotests/298.out b/tests/qemu-iotests/298.out
208
new file mode 100644
193
new file mode 100644
209
index XXXXXXX..XXXXXXX
194
index XXXXXXX..XXXXXXX
210
--- /dev/null
195
--- /dev/null
211
+++ b/tests/qemu-iotests/298.out
196
+++ b/tests/iothread.c
212
@@ -XXX,XX +XXX,XX @@
197
@@ -XXX,XX +XXX,XX @@
213
+.............
198
+/*
214
+----------------------------------------------------------------------
199
+ * Event loop thread implementation for unit tests
215
+Ran 13 tests
200
+ *
216
+
201
+ * Copyright Red Hat Inc., 2013, 2016
217
+OK
202
+ *
218
diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group
203
+ * Authors:
204
+ * Stefan Hajnoczi <stefanha@redhat.com>
205
+ * Paolo Bonzini <pbonzini@redhat.com>
206
+ *
207
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
208
+ * See the COPYING file in the top-level directory.
209
+ *
210
+ */
211
+
212
+#include "qemu/osdep.h"
213
+#include "qapi/error.h"
214
+#include "block/aio.h"
215
+#include "qemu/main-loop.h"
216
+#include "qemu/rcu.h"
217
+#include "iothread.h"
218
+
219
+struct IOThread {
220
+ AioContext *ctx;
221
+
222
+ QemuThread thread;
223
+ QemuMutex init_done_lock;
224
+ QemuCond init_done_cond; /* is thread initialization done? */
225
+ bool stopping;
226
+};
227
+
228
+static __thread IOThread *my_iothread;
229
+
230
+AioContext *qemu_get_current_aio_context(void)
231
+{
232
+ return my_iothread ? my_iothread->ctx : qemu_get_aio_context();
233
+}
234
+
235
+static void *iothread_run(void *opaque)
236
+{
237
+ IOThread *iothread = opaque;
238
+
239
+ rcu_register_thread();
240
+
241
+ my_iothread = iothread;
242
+ qemu_mutex_lock(&iothread->init_done_lock);
243
+ iothread->ctx = aio_context_new(&error_abort);
244
+ qemu_cond_signal(&iothread->init_done_cond);
245
+ qemu_mutex_unlock(&iothread->init_done_lock);
246
+
247
+ while (!atomic_read(&iothread->stopping)) {
248
+ aio_poll(iothread->ctx, true);
249
+ }
250
+
251
+ rcu_unregister_thread();
252
+ return NULL;
253
+}
254
+
255
+void iothread_join(IOThread *iothread)
256
+{
257
+ iothread->stopping = true;
258
+ aio_notify(iothread->ctx);
259
+ qemu_thread_join(&iothread->thread);
260
+ qemu_cond_destroy(&iothread->init_done_cond);
261
+ qemu_mutex_destroy(&iothread->init_done_lock);
262
+ aio_context_unref(iothread->ctx);
263
+ g_free(iothread);
264
+}
265
+
266
+IOThread *iothread_new(void)
267
+{
268
+ IOThread *iothread = g_new0(IOThread, 1);
269
+
270
+ qemu_mutex_init(&iothread->init_done_lock);
271
+ qemu_cond_init(&iothread->init_done_cond);
272
+ qemu_thread_create(&iothread->thread, NULL, iothread_run,
273
+ iothread, QEMU_THREAD_JOINABLE);
274
+
275
+ /* Wait for initialization to complete */
276
+ qemu_mutex_lock(&iothread->init_done_lock);
277
+ while (iothread->ctx == NULL) {
278
+ qemu_cond_wait(&iothread->init_done_cond,
279
+ &iothread->init_done_lock);
280
+ }
281
+ qemu_mutex_unlock(&iothread->init_done_lock);
282
+ return iothread;
283
+}
284
+
285
+AioContext *iothread_get_aio_context(IOThread *iothread)
286
+{
287
+ return iothread->ctx;
288
+}
289
diff --git a/tests/test-aio-multithread.c b/tests/test-aio-multithread.c
290
new file mode 100644
291
index XXXXXXX..XXXXXXX
292
--- /dev/null
293
+++ b/tests/test-aio-multithread.c
294
@@ -XXX,XX +XXX,XX @@
295
+/*
296
+ * AioContext multithreading tests
297
+ *
298
+ * Copyright Red Hat, Inc. 2016
299
+ *
300
+ * Authors:
301
+ * Paolo Bonzini <pbonzini@redhat.com>
302
+ *
303
+ * This work is licensed under the terms of the GNU LGPL, version 2 or later.
304
+ * See the COPYING.LIB file in the top-level directory.
305
+ */
306
+
307
+#include "qemu/osdep.h"
308
+#include <glib.h>
309
+#include "block/aio.h"
310
+#include "qapi/error.h"
311
+#include "qemu/coroutine.h"
312
+#include "qemu/thread.h"
313
+#include "qemu/error-report.h"
314
+#include "iothread.h"
315
+
316
+/* AioContext management */
317
+
318
+#define NUM_CONTEXTS 5
319
+
320
+static IOThread *threads[NUM_CONTEXTS];
321
+static AioContext *ctx[NUM_CONTEXTS];
322
+static __thread int id = -1;
323
+
324
+static QemuEvent done_event;
325
+
326
+/* Run a function synchronously on a remote iothread. */
327
+
328
+typedef struct CtxRunData {
329
+ QEMUBHFunc *cb;
330
+ void *arg;
331
+} CtxRunData;
332
+
333
+static void ctx_run_bh_cb(void *opaque)
334
+{
335
+ CtxRunData *data = opaque;
336
+
337
+ data->cb(data->arg);
338
+ qemu_event_set(&done_event);
339
+}
340
+
341
+static void ctx_run(int i, QEMUBHFunc *cb, void *opaque)
342
+{
343
+ CtxRunData data = {
344
+ .cb = cb,
345
+ .arg = opaque
346
+ };
347
+
348
+ qemu_event_reset(&done_event);
349
+ aio_bh_schedule_oneshot(ctx[i], ctx_run_bh_cb, &data);
350
+ qemu_event_wait(&done_event);
351
+}
352
+
353
+/* Starting the iothreads. */
354
+
355
+static void set_id_cb(void *opaque)
356
+{
357
+ int *i = opaque;
358
+
359
+ id = *i;
360
+}
361
+
362
+static void create_aio_contexts(void)
363
+{
364
+ int i;
365
+
366
+ for (i = 0; i < NUM_CONTEXTS; i++) {
367
+ threads[i] = iothread_new();
368
+ ctx[i] = iothread_get_aio_context(threads[i]);
369
+ }
370
+
371
+ qemu_event_init(&done_event, false);
372
+ for (i = 0; i < NUM_CONTEXTS; i++) {
373
+ ctx_run(i, set_id_cb, &i);
374
+ }
375
+}
376
+
377
+/* Stopping the iothreads. */
378
+
379
+static void join_aio_contexts(void)
380
+{
381
+ int i;
382
+
383
+ for (i = 0; i < NUM_CONTEXTS; i++) {
384
+ aio_context_ref(ctx[i]);
385
+ }
386
+ for (i = 0; i < NUM_CONTEXTS; i++) {
387
+ iothread_join(threads[i]);
388
+ }
389
+ for (i = 0; i < NUM_CONTEXTS; i++) {
390
+ aio_context_unref(ctx[i]);
391
+ }
392
+ qemu_event_destroy(&done_event);
393
+}
394
+
395
+/* Basic test for the stuff above. */
396
+
397
+static void test_lifecycle(void)
398
+{
399
+ create_aio_contexts();
400
+ join_aio_contexts();
401
+}
402
+
403
+/* aio_co_schedule test. */
404
+
405
+static Coroutine *to_schedule[NUM_CONTEXTS];
406
+
407
+static bool now_stopping;
408
+
409
+static int count_retry;
410
+static int count_here;
411
+static int count_other;
412
+
413
+static bool schedule_next(int n)
414
+{
415
+ Coroutine *co;
416
+
417
+ co = atomic_xchg(&to_schedule[n], NULL);
418
+ if (!co) {
419
+ atomic_inc(&count_retry);
420
+ return false;
421
+ }
422
+
423
+ if (n == id) {
424
+ atomic_inc(&count_here);
425
+ } else {
426
+ atomic_inc(&count_other);
427
+ }
428
+
429
+ aio_co_schedule(ctx[n], co);
430
+ return true;
431
+}
432
+
433
+static void finish_cb(void *opaque)
434
+{
435
+ schedule_next(id);
436
+}
437
+
438
+static coroutine_fn void test_multi_co_schedule_entry(void *opaque)
439
+{
440
+ g_assert(to_schedule[id] == NULL);
441
+ atomic_mb_set(&to_schedule[id], qemu_coroutine_self());
442
+
443
+ while (!atomic_mb_read(&now_stopping)) {
444
+ int n;
445
+
446
+ n = g_test_rand_int_range(0, NUM_CONTEXTS);
447
+ schedule_next(n);
448
+ qemu_coroutine_yield();
449
+
450
+ g_assert(to_schedule[id] == NULL);
451
+ atomic_mb_set(&to_schedule[id], qemu_coroutine_self());
452
+ }
453
+}
454
+
455
+
456
+static void test_multi_co_schedule(int seconds)
457
+{
458
+ int i;
459
+
460
+ count_here = count_other = count_retry = 0;
461
+ now_stopping = false;
462
+
463
+ create_aio_contexts();
464
+ for (i = 0; i < NUM_CONTEXTS; i++) {
465
+ Coroutine *co1 = qemu_coroutine_create(test_multi_co_schedule_entry, NULL);
466
+ aio_co_schedule(ctx[i], co1);
467
+ }
468
+
469
+ g_usleep(seconds * 1000000);
470
+
471
+ atomic_mb_set(&now_stopping, true);
472
+ for (i = 0; i < NUM_CONTEXTS; i++) {
473
+ ctx_run(i, finish_cb, NULL);
474
+ to_schedule[i] = NULL;
475
+ }
476
+
477
+ join_aio_contexts();
478
+ g_test_message("scheduled %d, queued %d, retry %d, total %d\n",
479
+ count_other, count_here, count_retry,
480
+ count_here + count_other + count_retry);
481
+}
482
+
483
+static void test_multi_co_schedule_1(void)
484
+{
485
+ test_multi_co_schedule(1);
486
+}
487
+
488
+static void test_multi_co_schedule_10(void)
489
+{
490
+ test_multi_co_schedule(10);
491
+}
492
+
493
+/* End of tests. */
494
+
495
+int main(int argc, char **argv)
496
+{
497
+ init_clocks();
498
+
499
+ g_test_init(&argc, &argv, NULL);
500
+ g_test_add_func("/aio/multi/lifecycle", test_lifecycle);
501
+ if (g_test_quick()) {
502
+ g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_1);
503
+ } else {
504
+ g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_10);
505
+ }
506
+ return g_test_run();
507
+}
508
diff --git a/util/async.c b/util/async.c
219
index XXXXXXX..XXXXXXX 100644
509
index XXXXXXX..XXXXXXX 100644
220
--- a/tests/qemu-iotests/group
510
--- a/util/async.c
221
+++ b/tests/qemu-iotests/group
511
+++ b/util/async.c
222
@@ -XXX,XX +XXX,XX @@
512
@@ -XXX,XX +XXX,XX @@
223
295 rw
513
#include "qemu/main-loop.h"
224
296 rw
514
#include "qemu/atomic.h"
225
297 meta
515
#include "block/raw-aio.h"
226
+298
516
+#include "qemu/coroutine_int.h"
227
299 auto quick
517
+#include "trace.h"
228
300 migration
518
229
301 backing quick
519
/***********************************************************/
520
/* bottom halves (can be seen as timers which expire ASAP) */
521
@@ -XXX,XX +XXX,XX @@ aio_ctx_finalize(GSource *source)
522
}
523
#endif
524
525
+ assert(QSLIST_EMPTY(&ctx->scheduled_coroutines));
526
+ qemu_bh_delete(ctx->co_schedule_bh);
527
+
528
qemu_lockcnt_lock(&ctx->list_lock);
529
assert(!qemu_lockcnt_count(&ctx->list_lock));
530
while (ctx->first_bh) {
531
@@ -XXX,XX +XXX,XX @@ static bool event_notifier_poll(void *opaque)
532
return atomic_read(&ctx->notified);
533
}
534
535
+static void co_schedule_bh_cb(void *opaque)
536
+{
537
+ AioContext *ctx = opaque;
538
+ QSLIST_HEAD(, Coroutine) straight, reversed;
539
+
540
+ QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines);
541
+ QSLIST_INIT(&straight);
542
+
543
+ while (!QSLIST_EMPTY(&reversed)) {
544
+ Coroutine *co = QSLIST_FIRST(&reversed);
545
+ QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next);
546
+ QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next);
547
+ }
548
+
549
+ while (!QSLIST_EMPTY(&straight)) {
550
+ Coroutine *co = QSLIST_FIRST(&straight);
551
+ QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
552
+ trace_aio_co_schedule_bh_cb(ctx, co);
553
+ qemu_coroutine_enter(co);
554
+ }
555
+}
556
+
557
AioContext *aio_context_new(Error **errp)
558
{
559
int ret;
560
@@ -XXX,XX +XXX,XX @@ AioContext *aio_context_new(Error **errp)
561
}
562
g_source_set_can_recurse(&ctx->source, true);
563
qemu_lockcnt_init(&ctx->list_lock);
564
+
565
+ ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx);
566
+ QSLIST_INIT(&ctx->scheduled_coroutines);
567
+
568
aio_set_event_notifier(ctx, &ctx->notifier,
569
false,
570
(EventNotifierHandler *)
571
@@ -XXX,XX +XXX,XX @@ fail:
572
return NULL;
573
}
574
575
+void aio_co_schedule(AioContext *ctx, Coroutine *co)
576
+{
577
+ trace_aio_co_schedule(ctx, co);
578
+ QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
579
+ co, co_scheduled_next);
580
+ qemu_bh_schedule(ctx->co_schedule_bh);
581
+}
582
+
583
+void aio_co_wake(struct Coroutine *co)
584
+{
585
+ AioContext *ctx;
586
+
587
+ /* Read coroutine before co->ctx. Matches smp_wmb in
588
+ * qemu_coroutine_enter.
589
+ */
590
+ smp_read_barrier_depends();
591
+ ctx = atomic_read(&co->ctx);
592
+
593
+ if (ctx != qemu_get_current_aio_context()) {
594
+ aio_co_schedule(ctx, co);
595
+ return;
596
+ }
597
+
598
+ if (qemu_in_coroutine()) {
599
+ Coroutine *self = qemu_coroutine_self();
600
+ assert(self != co);
601
+ QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
602
+ } else {
603
+ aio_context_acquire(ctx);
604
+ qemu_coroutine_enter(co);
605
+ aio_context_release(ctx);
606
+ }
607
+}
608
+
609
void aio_context_ref(AioContext *ctx)
610
{
611
g_source_ref(&ctx->source);
612
diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c
613
index XXXXXXX..XXXXXXX 100644
614
--- a/util/qemu-coroutine.c
615
+++ b/util/qemu-coroutine.c
616
@@ -XXX,XX +XXX,XX @@
617
#include "qemu/atomic.h"
618
#include "qemu/coroutine.h"
619
#include "qemu/coroutine_int.h"
620
+#include "block/aio.h"
621
622
enum {
623
POOL_BATCH_SIZE = 64,
624
@@ -XXX,XX +XXX,XX @@ void qemu_coroutine_enter(Coroutine *co)
625
}
626
627
co->caller = self;
628
+ co->ctx = qemu_get_current_aio_context();
629
+
630
+ /* Store co->ctx before anything that stores co. Matches
631
+ * barrier in aio_co_wake.
632
+ */
633
+ smp_wmb();
634
+
635
ret = qemu_coroutine_switch(self, co, COROUTINE_ENTER);
636
637
qemu_co_queue_run_restart(co);
638
diff --git a/util/trace-events b/util/trace-events
639
index XXXXXXX..XXXXXXX 100644
640
--- a/util/trace-events
641
+++ b/util/trace-events
642
@@ -XXX,XX +XXX,XX @@ run_poll_handlers_end(void *ctx, bool progress) "ctx %p progress %d"
643
poll_shrink(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
644
poll_grow(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
645
646
+# util/async.c
647
+aio_co_schedule(void *ctx, void *co) "ctx %p co %p"
648
+aio_co_schedule_bh_cb(void *ctx, void *co) "ctx %p co %p"
649
+
650
# util/thread-pool.c
651
thread_pool_submit(void *pool, void *req, void *opaque) "pool %p req %p opaque %p"
652
thread_pool_complete(void *pool, void *req, void *opaque, int ret) "pool %p req %p opaque %p ret %d"
230
--
2.29.2

--
2.9.3
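To show how the aio_co_schedule()/aio_co_wake() API introduced by the patch
above is meant to be consumed, here is a minimal, hedged sketch: the
MyRequest structure and the my_request_* functions are made up for
illustration, while Coroutine, coroutine_fn, aio_co_wake(),
qemu_coroutine_self(), qemu_coroutine_yield() and aio_set_fd_handler() are
the real interfaces used in this series.

#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "block/aio.h"

/* Hypothetical per-request state, for illustration only. */
typedef struct MyRequest {
    Coroutine *co;          /* coroutine sleeping until the reply arrives */
    bool done;
} MyRequest;

/*
 * Completion handler, e.g. an fd read handler or a callback running in
 * another iothread.  Per the aio_co_schedule() contract it must only run
 * once the coroutine has actually yielded, which is the case when it is
 * dispatched by an event loop rather than called directly.
 */
static void my_request_complete(void *opaque)
{
    MyRequest *req = opaque;

    req->done = true;
    /* Re-enter the coroutine on the AioContext it last ran on; if that
     * context is remote, this falls back to aio_co_schedule(). */
    aio_co_wake(req->co);
}

/* Runs in coroutine context: record ourselves, then sleep until the
 * completion handler (armed elsewhere, e.g. via aio_set_fd_handler() or a
 * bottom half on another iothread) has run. */
static void coroutine_fn my_request_wait(MyRequest *req)
{
    req->co = qemu_coroutine_self();
    req->done = false;

    while (!req->done) {
        qemu_coroutine_yield();
    }
}

Because aio_co_wake() re-enters the coroutine on the AioContext it last ran
on, the completion side does not need to track which iothread owns the
request.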
With bash 5.1, the output of the following script changes:
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
a=("double space")
3
qcow2_create2 calls this. Do not run a nested event loop, as that
4
a=${a[@]:0:1}
4
breaks when aio_co_wake tries to queue the coroutine on the co_queue_wakeup
5
echo "$a"
5
list of the currently running one.
6
6
7
from "double space" to "double space", i.e. all white space is
7
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
8
preserved as-is. This is probably what we actually want here (judging
8
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
9
from the "...to accommodate pathnames with spaces" comment), but before
9
Reviewed-by: Fam Zheng <famz@redhat.com>
10
5.1, we would have to quote the ${} slice to get the same behavior.
10
Message-id: 20170213135235.12274-4-pbonzini@redhat.com
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
13
block/block-backend.c | 12 ++++++++----
14
1 file changed, 8 insertions(+), 4 deletions(-)
11
15
12
In any case, without quoting, the reference output of many iotests is
16
diff --git a/block/block-backend.c b/block/block-backend.c
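The body of this hunk is not reproduced in this flattened view. As a rough sketch of the dispatch pattern the commit message describes (call the request entry point directly when already in coroutine context instead of running a nested event loop, otherwise create a coroutine and poll its AioContext until it finishes), using illustrative names RWCo, NOT_DONE and prw_dispatch() rather than the exact block-backend.c code:

/* Illustrative only: simplified names, not the verbatim hunk. */
typedef struct RWCo {
    BlockBackend *blk;
    int64_t offset;
    QEMUIOVector *qiov;
    int ret;                 /* stays NOT_DONE until the coroutine finishes */
} RWCo;

#define NOT_DONE 0x7fffffff  /* assumption: sentinel for "still running" */

static int prw_dispatch(BlockBackend *blk, RWCo *rwco, CoroutineEntry *co_entry)
{
    if (qemu_in_coroutine()) {
        /* Already in coroutine context: no nested event loop, just call
         * the entry point; it may yield and later be resumed via
         * aio_co_wake(). */
        co_entry(rwco);
    } else {
        /* Outside coroutine context: run the entry point in a new
         * coroutine and poll its AioContext until it signals completion. */
        Coroutine *co = qemu_coroutine_create(co_entry, rwco);
        qemu_coroutine_enter(co);
        while (rwco->ret == NOT_DONE) {
            aio_poll(blk_get_aio_context(blk), true);
        }
    }
    return rwco->ret;
}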
With bash 5.1, the output of the following script changes:

  a=("double  space")
  a=${a[@]:0:1}
  echo "$a"

from "double space" to "double  space", i.e. all white space is
preserved as-is. This is probably what we actually want here (judging
from the "...to accommodate pathnames with spaces" comment), but before
5.1, we would have to quote the ${} slice to get the same behavior.

In any case, without quoting, the reference output of many iotests is
different between bash 5.1 and pre-5.1, which is not very good. The
output of 5.1 is what we want, so whatever we do to get pre-5.1 to the
same result, it means we have to fix the reference output of basically
all tests that invoke _send_qemu_cmd (except the ones that only use
single spaces in the commands they invoke).

Instead of quoting the ${} slice (cmd="${$@: 1:...}"), we can also just
not use array slicing and replace the whole thing with a simple "cmd=$1;
shift", which works because all callers quote the whole $cmd argument
anyway.

Signed-off-by: Max Reitz <mreitz@redhat.com>
Message-Id: <20201217153803.101231-3-mreitz@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
---
 tests/qemu-iotests/085.out     | 167 ++++++++++++++++++++++++++++-----
 tests/qemu-iotests/094.out     |  10 +-
 tests/qemu-iotests/095.out     |   4 +-
 tests/qemu-iotests/109.out     |  88 ++++++++++++-----
 tests/qemu-iotests/117.out     |  13 ++-
 tests/qemu-iotests/127.out     |  12 ++-
 tests/qemu-iotests/140.out     |  10 +-
 tests/qemu-iotests/141.out     | 128 +++++++++++++++++++------
 tests/qemu-iotests/143.out     |   4 +-
 tests/qemu-iotests/144.out     |  28 +++++-
 tests/qemu-iotests/153.out     |  18 ++--
 tests/qemu-iotests/156.out     |  39 ++++++--
 tests/qemu-iotests/161.out     |  18 +++-
 tests/qemu-iotests/173.out     |  25 ++++-
 tests/qemu-iotests/182.out     |  42 +++++++--
 tests/qemu-iotests/183.out     |  19 +++-
 tests/qemu-iotests/185.out     |  45 +++++++--
 tests/qemu-iotests/191.out     |  12 ++-
 tests/qemu-iotests/223.out     |  92 ++++++++++++------
 tests/qemu-iotests/229.out     |  13 ++-
 tests/qemu-iotests/249.out     |  16 +++-
 tests/qemu-iotests/308.out     | 103 +++++++++++++++++---
 tests/qemu-iotests/312.out     |  10 +-
 tests/qemu-iotests/common.qemu |  11 +--
 24 files changed, 728 insertions(+), 199 deletions(-)

diff --git a/tests/qemu-iotests/085.out b/tests/qemu-iotests/085.out
index XXXXXXX..XXXXXXX 100644
--- a/tests/qemu-iotests/085.out
+++ b/tests/qemu-iotests/085.out
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/t.IMGFMT.2', fmt=IMGFMT size=134217728
59
60
=== Create a single snapshot on virtio0 ===
61
62
-{ 'execute': 'blockdev-snapshot-sync', 'arguments': { 'device': 'virtio0', 'snapshot-file':'TEST_DIR/1-snapshot-v0.IMGFMT', 'format': 'IMGFMT' } }
63
+{ 'execute': 'blockdev-snapshot-sync',
64
+ 'arguments': { 'device': 'virtio0',
65
+ 'snapshot-file':'TEST_DIR/1-snapshot-v0.IMGFMT',
66
+ 'format': 'IMGFMT' } }
67
Formatting 'TEST_DIR/1-snapshot-v0.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/t.qcow2.1 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
68
{"return": {}}
69
70
=== Invalid command - missing device and nodename ===
71
72
-{ 'execute': 'blockdev-snapshot-sync', 'arguments': { 'snapshot-file':'TEST_DIR/1-snapshot-v0.IMGFMT', 'format': 'IMGFMT' } }
73
+{ 'execute': 'blockdev-snapshot-sync',
74
+ 'arguments': { 'snapshot-file':'TEST_DIR/1-snapshot-v0.IMGFMT',
75
+ 'format': 'IMGFMT' } }
76
{"error": {"class": "GenericError", "desc": "Cannot find device= nor node_name="}}
77
78
=== Invalid command - missing snapshot-file ===
79
80
-{ 'execute': 'blockdev-snapshot-sync', 'arguments': { 'device': 'virtio0', 'format': 'IMGFMT' } }
81
+{ 'execute': 'blockdev-snapshot-sync',
82
+ 'arguments': { 'device': 'virtio0',
83
+ 'format': 'IMGFMT' } }
84
{"error": {"class": "GenericError", "desc": "Parameter 'snapshot-file' is missing"}}
85
86
87
=== Create several transactional group snapshots ===
88
89
-{ 'execute': 'transaction', 'arguments': {'actions': [ { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio0', 'snapshot-file': 'TEST_DIR/2-snapshot-v0.IMGFMT' } }, { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio1', 'snapshot-file': 'TEST_DIR/2-snapshot-v1.IMGFMT' } } ] } }
90
+{ 'execute': 'transaction', 'arguments':
91
+ {'actions': [
92
+ { 'type': 'blockdev-snapshot-sync', 'data' :
93
+ { 'device': 'virtio0',
94
+ 'snapshot-file': 'TEST_DIR/2-snapshot-v0.IMGFMT' } },
95
+ { 'type': 'blockdev-snapshot-sync', 'data' :
96
+ { 'device': 'virtio1',
97
+ 'snapshot-file': 'TEST_DIR/2-snapshot-v1.IMGFMT' } } ]
98
+ } }
99
Formatting 'TEST_DIR/2-snapshot-v0.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/1-snapshot-v0.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
100
Formatting 'TEST_DIR/2-snapshot-v1.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/t.qcow2.2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
101
{"return": {}}
102
-{ 'execute': 'transaction', 'arguments': {'actions': [ { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio0', 'snapshot-file': 'TEST_DIR/3-snapshot-v0.IMGFMT' } }, { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio1', 'snapshot-file': 'TEST_DIR/3-snapshot-v1.IMGFMT' } } ] } }
103
+{ 'execute': 'transaction', 'arguments':
104
+ {'actions': [
105
+ { 'type': 'blockdev-snapshot-sync', 'data' :
106
+ { 'device': 'virtio0',
107
+ 'snapshot-file': 'TEST_DIR/3-snapshot-v0.IMGFMT' } },
108
+ { 'type': 'blockdev-snapshot-sync', 'data' :
109
+ { 'device': 'virtio1',
110
+ 'snapshot-file': 'TEST_DIR/3-snapshot-v1.IMGFMT' } } ]
111
+ } }
112
Formatting 'TEST_DIR/3-snapshot-v0.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/2-snapshot-v0.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
113
Formatting 'TEST_DIR/3-snapshot-v1.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/2-snapshot-v1.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
114
{"return": {}}
115
-{ 'execute': 'transaction', 'arguments': {'actions': [ { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio0', 'snapshot-file': 'TEST_DIR/4-snapshot-v0.IMGFMT' } }, { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio1', 'snapshot-file': 'TEST_DIR/4-snapshot-v1.IMGFMT' } } ] } }
116
+{ 'execute': 'transaction', 'arguments':
117
+ {'actions': [
118
+ { 'type': 'blockdev-snapshot-sync', 'data' :
119
+ { 'device': 'virtio0',
120
+ 'snapshot-file': 'TEST_DIR/4-snapshot-v0.IMGFMT' } },
121
+ { 'type': 'blockdev-snapshot-sync', 'data' :
122
+ { 'device': 'virtio1',
123
+ 'snapshot-file': 'TEST_DIR/4-snapshot-v1.IMGFMT' } } ]
124
+ } }
125
Formatting 'TEST_DIR/4-snapshot-v0.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/3-snapshot-v0.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
126
Formatting 'TEST_DIR/4-snapshot-v1.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/3-snapshot-v1.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
127
{"return": {}}
128
-{ 'execute': 'transaction', 'arguments': {'actions': [ { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio0', 'snapshot-file': 'TEST_DIR/5-snapshot-v0.IMGFMT' } }, { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio1', 'snapshot-file': 'TEST_DIR/5-snapshot-v1.IMGFMT' } } ] } }
129
+{ 'execute': 'transaction', 'arguments':
130
+ {'actions': [
131
+ { 'type': 'blockdev-snapshot-sync', 'data' :
132
+ { 'device': 'virtio0',
133
+ 'snapshot-file': 'TEST_DIR/5-snapshot-v0.IMGFMT' } },
134
+ { 'type': 'blockdev-snapshot-sync', 'data' :
135
+ { 'device': 'virtio1',
136
+ 'snapshot-file': 'TEST_DIR/5-snapshot-v1.IMGFMT' } } ]
137
+ } }
138
Formatting 'TEST_DIR/5-snapshot-v0.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/4-snapshot-v0.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
139
Formatting 'TEST_DIR/5-snapshot-v1.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/4-snapshot-v1.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
140
{"return": {}}
141
-{ 'execute': 'transaction', 'arguments': {'actions': [ { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio0', 'snapshot-file': 'TEST_DIR/6-snapshot-v0.IMGFMT' } }, { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio1', 'snapshot-file': 'TEST_DIR/6-snapshot-v1.IMGFMT' } } ] } }
142
+{ 'execute': 'transaction', 'arguments':
143
+ {'actions': [
144
+ { 'type': 'blockdev-snapshot-sync', 'data' :
145
+ { 'device': 'virtio0',
146
+ 'snapshot-file': 'TEST_DIR/6-snapshot-v0.IMGFMT' } },
147
+ { 'type': 'blockdev-snapshot-sync', 'data' :
148
+ { 'device': 'virtio1',
149
+ 'snapshot-file': 'TEST_DIR/6-snapshot-v1.IMGFMT' } } ]
150
+ } }
151
Formatting 'TEST_DIR/6-snapshot-v0.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/5-snapshot-v0.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
152
Formatting 'TEST_DIR/6-snapshot-v1.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/5-snapshot-v1.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
153
{"return": {}}
154
-{ 'execute': 'transaction', 'arguments': {'actions': [ { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio0', 'snapshot-file': 'TEST_DIR/7-snapshot-v0.IMGFMT' } }, { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio1', 'snapshot-file': 'TEST_DIR/7-snapshot-v1.IMGFMT' } } ] } }
155
+{ 'execute': 'transaction', 'arguments':
156
+ {'actions': [
157
+ { 'type': 'blockdev-snapshot-sync', 'data' :
158
+ { 'device': 'virtio0',
159
+ 'snapshot-file': 'TEST_DIR/7-snapshot-v0.IMGFMT' } },
160
+ { 'type': 'blockdev-snapshot-sync', 'data' :
161
+ { 'device': 'virtio1',
162
+ 'snapshot-file': 'TEST_DIR/7-snapshot-v1.IMGFMT' } } ]
163
+ } }
164
Formatting 'TEST_DIR/7-snapshot-v0.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/6-snapshot-v0.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
165
Formatting 'TEST_DIR/7-snapshot-v1.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/6-snapshot-v1.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
166
{"return": {}}
167
-{ 'execute': 'transaction', 'arguments': {'actions': [ { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio0', 'snapshot-file': 'TEST_DIR/8-snapshot-v0.IMGFMT' } }, { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio1', 'snapshot-file': 'TEST_DIR/8-snapshot-v1.IMGFMT' } } ] } }
168
+{ 'execute': 'transaction', 'arguments':
169
+ {'actions': [
170
+ { 'type': 'blockdev-snapshot-sync', 'data' :
171
+ { 'device': 'virtio0',
172
+ 'snapshot-file': 'TEST_DIR/8-snapshot-v0.IMGFMT' } },
173
+ { 'type': 'blockdev-snapshot-sync', 'data' :
174
+ { 'device': 'virtio1',
175
+ 'snapshot-file': 'TEST_DIR/8-snapshot-v1.IMGFMT' } } ]
176
+ } }
177
Formatting 'TEST_DIR/8-snapshot-v0.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/7-snapshot-v0.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
178
Formatting 'TEST_DIR/8-snapshot-v1.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/7-snapshot-v1.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
179
{"return": {}}
180
-{ 'execute': 'transaction', 'arguments': {'actions': [ { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio0', 'snapshot-file': 'TEST_DIR/9-snapshot-v0.IMGFMT' } }, { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio1', 'snapshot-file': 'TEST_DIR/9-snapshot-v1.IMGFMT' } } ] } }
181
+{ 'execute': 'transaction', 'arguments':
182
+ {'actions': [
183
+ { 'type': 'blockdev-snapshot-sync', 'data' :
184
+ { 'device': 'virtio0',
185
+ 'snapshot-file': 'TEST_DIR/9-snapshot-v0.IMGFMT' } },
186
+ { 'type': 'blockdev-snapshot-sync', 'data' :
187
+ { 'device': 'virtio1',
188
+ 'snapshot-file': 'TEST_DIR/9-snapshot-v1.IMGFMT' } } ]
189
+ } }
190
Formatting 'TEST_DIR/9-snapshot-v0.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/8-snapshot-v0.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
191
Formatting 'TEST_DIR/9-snapshot-v1.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/8-snapshot-v1.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
192
{"return": {}}
193
-{ 'execute': 'transaction', 'arguments': {'actions': [ { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio0', 'snapshot-file': 'TEST_DIR/10-snapshot-v0.IMGFMT' } }, { 'type': 'blockdev-snapshot-sync', 'data' : { 'device': 'virtio1', 'snapshot-file': 'TEST_DIR/10-snapshot-v1.IMGFMT' } } ] } }
194
+{ 'execute': 'transaction', 'arguments':
195
+ {'actions': [
196
+ { 'type': 'blockdev-snapshot-sync', 'data' :
197
+ { 'device': 'virtio0',
198
+ 'snapshot-file': 'TEST_DIR/10-snapshot-v0.IMGFMT' } },
199
+ { 'type': 'blockdev-snapshot-sync', 'data' :
200
+ { 'device': 'virtio1',
201
+ 'snapshot-file': 'TEST_DIR/10-snapshot-v1.IMGFMT' } } ]
202
+ } }
203
Formatting 'TEST_DIR/10-snapshot-v0.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/9-snapshot-v0.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
204
Formatting 'TEST_DIR/10-snapshot-v1.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=134217728 backing_file=TEST_DIR/9-snapshot-v1.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
205
{"return": {}}
206
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/10-snapshot-v1.qcow2', fmt=qcow2 cluster_size=65536 extende
207
=== Create a couple of snapshots using blockdev-snapshot ===
208
209
Formatting 'TEST_DIR/11-snapshot-v0.IMGFMT', fmt=IMGFMT size=134217728 backing_file=TEST_DIR/10-snapshot-v0.IMGFMT backing_fmt=IMGFMT
210
-{ 'execute': 'blockdev-add', 'arguments': { 'driver': 'IMGFMT', 'node-name': 'snap_11', 'backing': null, 'file': { 'driver': 'file', 'filename': 'TEST_DIR/11-snapshot-v0.IMGFMT', 'node-name': 'file_11' } } }
211
+{ 'execute': 'blockdev-add', 'arguments':
212
+ { 'driver': 'IMGFMT', 'node-name': 'snap_11', 'backing': null,
213
+ 'file':
214
+ { 'driver': 'file', 'filename': 'TEST_DIR/11-snapshot-v0.IMGFMT',
215
+ 'node-name': 'file_11' } } }
216
{"return": {}}
217
-{ 'execute': 'blockdev-snapshot', 'arguments': { 'node': 'virtio0', 'overlay':'snap_11' } }
218
+{ 'execute': 'blockdev-snapshot',
219
+ 'arguments': { 'node': 'virtio0',
220
+ 'overlay':'snap_11' } }
221
{"return": {}}
222
Formatting 'TEST_DIR/12-snapshot-v0.IMGFMT', fmt=IMGFMT size=134217728 backing_file=TEST_DIR/11-snapshot-v0.IMGFMT backing_fmt=IMGFMT
223
-{ 'execute': 'blockdev-add', 'arguments': { 'driver': 'IMGFMT', 'node-name': 'snap_12', 'backing': null, 'file': { 'driver': 'file', 'filename': 'TEST_DIR/12-snapshot-v0.IMGFMT', 'node-name': 'file_12' } } }
224
+{ 'execute': 'blockdev-add', 'arguments':
225
+ { 'driver': 'IMGFMT', 'node-name': 'snap_12', 'backing': null,
226
+ 'file':
227
+ { 'driver': 'file', 'filename': 'TEST_DIR/12-snapshot-v0.IMGFMT',
228
+ 'node-name': 'file_12' } } }
229
{"return": {}}
230
-{ 'execute': 'blockdev-snapshot', 'arguments': { 'node': 'virtio0', 'overlay':'snap_12' } }
231
+{ 'execute': 'blockdev-snapshot',
232
+ 'arguments': { 'node': 'virtio0',
233
+ 'overlay':'snap_12' } }
234
{"return": {}}
235
236
=== Invalid command - cannot create a snapshot using a file BDS ===
237
238
-{ 'execute': 'blockdev-snapshot', 'arguments': { 'node':'virtio0', 'overlay':'file_12' } }
239
+{ 'execute': 'blockdev-snapshot',
240
+ 'arguments': { 'node':'virtio0',
241
+ 'overlay':'file_12' }
242
+ }
243
{"error": {"class": "GenericError", "desc": "The overlay is already in use"}}
244
245
=== Invalid command - snapshot node used as active layer ===
246
247
-{ 'execute': 'blockdev-snapshot', 'arguments': { 'node': 'virtio0', 'overlay':'snap_12' } }
248
+{ 'execute': 'blockdev-snapshot',
249
+ 'arguments': { 'node': 'virtio0',
250
+ 'overlay':'snap_12' } }
251
{"error": {"class": "GenericError", "desc": "The overlay is already in use"}}
252
-{ 'execute': 'blockdev-snapshot', 'arguments': { 'node':'virtio0', 'overlay':'virtio0' } }
253
+{ 'execute': 'blockdev-snapshot',
254
+ 'arguments': { 'node':'virtio0',
255
+ 'overlay':'virtio0' }
256
+ }
257
{"error": {"class": "GenericError", "desc": "The overlay is already in use"}}
258
-{ 'execute': 'blockdev-snapshot', 'arguments': { 'node':'virtio0', 'overlay':'virtio1' } }
259
+{ 'execute': 'blockdev-snapshot',
260
+ 'arguments': { 'node':'virtio0',
261
+ 'overlay':'virtio1' }
262
+ }
263
{"error": {"class": "GenericError", "desc": "The overlay is already in use"}}
264
265
=== Invalid command - snapshot node used as backing hd ===
266
267
-{ 'execute': 'blockdev-snapshot', 'arguments': { 'node': 'virtio0', 'overlay':'snap_11' } }
268
+{ 'execute': 'blockdev-snapshot',
269
+ 'arguments': { 'node': 'virtio0',
270
+ 'overlay':'snap_11' } }
271
{"error": {"class": "GenericError", "desc": "The overlay is already in use"}}
272
273
=== Invalid command - snapshot node has a backing image ===
274
275
Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=134217728
276
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=IMGFMT
277
-{ 'execute': 'blockdev-add', 'arguments': { 'driver': 'IMGFMT', 'node-name': 'snap_13', 'file': { 'driver': 'file', 'filename': 'TEST_DIR/t.IMGFMT', 'node-name': 'file_13' } } }
278
-{"return": {}}
279
-{ 'execute': 'blockdev-snapshot', 'arguments': { 'node': 'virtio0', 'overlay':'snap_13' } }
280
+{ 'execute': 'blockdev-add', 'arguments':
281
+ { 'driver': 'IMGFMT', 'node-name': 'snap_13',
282
+ 'file':
283
+ { 'driver': 'file', 'filename': 'TEST_DIR/t.IMGFMT',
284
+ 'node-name': 'file_13' } } }
285
+{"return": {}}
286
+{ 'execute': 'blockdev-snapshot',
287
+ 'arguments': { 'node': 'virtio0',
288
+ 'overlay':'snap_13' } }
289
{"error": {"class": "GenericError", "desc": "The overlay already has a backing image"}}
290
291
=== Invalid command - The node does not exist ===
292
293
-{ 'execute': 'blockdev-snapshot', 'arguments': { 'node': 'virtio0', 'overlay':'snap_14' } }
294
+{ 'execute': 'blockdev-snapshot',
295
+ 'arguments': { 'node': 'virtio0',
296
+ 'overlay':'snap_14' } }
297
{"error": {"class": "GenericError", "desc": "Cannot find device=snap_14 nor node_name=snap_14"}}
298
-{ 'execute': 'blockdev-snapshot', 'arguments': { 'node':'nodevice', 'overlay':'snap_13' } }
299
+{ 'execute': 'blockdev-snapshot',
300
+ 'arguments': { 'node':'nodevice',
301
+ 'overlay':'snap_13' }
302
+ }
303
{"error": {"class": "GenericError", "desc": "Cannot find device=nodevice nor node_name=nodevice"}}
304
*** done
305
diff --git a/tests/qemu-iotests/094.out b/tests/qemu-iotests/094.out
306
index XXXXXXX..XXXXXXX 100644
307
--- a/tests/qemu-iotests/094.out
308
+++ b/tests/qemu-iotests/094.out
309
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
310
Formatting 'TEST_DIR/source.IMGFMT', fmt=IMGFMT size=67108864
311
{'execute': 'qmp_capabilities'}
312
{"return": {}}
313
-{'execute': 'drive-mirror', 'arguments': {'device': 'src', 'target': 'nbd+unix:///?socket=SOCK_DIR/nbd', 'format': 'nbd', 'sync':'full', 'mode':'existing'}}
314
+{'execute': 'drive-mirror',
315
+ 'arguments': {'device': 'src',
316
+ 'target': 'nbd+unix:///?socket=SOCK_DIR/nbd',
317
+ 'format': 'nbd',
318
+ 'sync':'full',
319
+ 'mode':'existing'}}
320
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
321
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
322
{"return": {}}
323
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}}
324
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 67108864, "offset": 67108864, "speed": 0, "type": "mirror"}}
325
-{'execute': 'block-job-complete', 'arguments': {'device': 'src'}}
326
+{'execute': 'block-job-complete',
327
+ 'arguments': {'device': 'src'}}
328
{"return": {}}
329
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}}
330
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}}
331
diff --git a/tests/qemu-iotests/095.out b/tests/qemu-iotests/095.out
332
index XXXXXXX..XXXXXXX 100644
333
--- a/tests/qemu-iotests/095.out
334
+++ b/tests/qemu-iotests/095.out
335
@@ -XXX,XX +XXX,XX @@ virtual size: 5 MiB (5242880 bytes)
336
337
{ 'execute': 'qmp_capabilities' }
338
{"return": {}}
339
-{ 'execute': 'block-commit', 'arguments': { 'device': 'test', 'top': 'TEST_DIR/t.IMGFMT.snp1' } }
340
+{ 'execute': 'block-commit',
341
+ 'arguments': { 'device': 'test',
342
+ 'top': 'TEST_DIR/t.IMGFMT.snp1' } }
343
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "test"}}
344
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "test"}}
345
{"return": {}}
346
diff --git a/tests/qemu-iotests/109.out b/tests/qemu-iotests/109.out
347
index XXXXXXX..XXXXXXX 100644
348
--- a/tests/qemu-iotests/109.out
349
+++ b/tests/qemu-iotests/109.out
350
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/t.raw.src', fmt=IMGFMT size=67108864
351
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=SIZE
352
{ 'execute': 'qmp_capabilities' }
353
{"return": {}}
354
-{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'mode': 'existing', 'sync': 'full'}}
355
+{'execute':'drive-mirror', 'arguments':{
356
+ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT',
357
+ 'mode': 'existing', 'sync': 'full'}}
358
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
359
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
360
Specify the 'raw' format explicitly to remove the restrictions.
361
@@ -XXX,XX +XXX,XX @@ read 512/512 bytes at offset 0
362
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
363
{ 'execute': 'qmp_capabilities' }
364
{"return": {}}
365
-{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT', 'mode': 'existing', 'sync': 'full'}}
366
+{'execute':'drive-mirror', 'arguments':{
367
+ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT',
368
+ 'mode': 'existing', 'sync': 'full'}}
369
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
370
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
371
{"return": {}}
372
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/t.raw.src', fmt=IMGFMT size=67108864
373
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=SIZE
374
{ 'execute': 'qmp_capabilities' }
375
{"return": {}}
376
-{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'mode': 'existing', 'sync': 'full'}}
377
+{'execute':'drive-mirror', 'arguments':{
378
+ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT',
379
+ 'mode': 'existing', 'sync': 'full'}}
380
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
381
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
382
Specify the 'raw' format explicitly to remove the restrictions.
383
@@ -XXX,XX +XXX,XX @@ read 512/512 bytes at offset 0
384
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
385
{ 'execute': 'qmp_capabilities' }
386
{"return": {}}
387
-{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT', 'mode': 'existing', 'sync': 'full'}}
388
+{'execute':'drive-mirror', 'arguments':{
389
+ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT',
390
+ 'mode': 'existing', 'sync': 'full'}}
391
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
392
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
393
{"return": {}}
394
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/t.raw.src', fmt=IMGFMT size=67108864
395
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=SIZE
396
{ 'execute': 'qmp_capabilities' }
397
{"return": {}}
398
-{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'mode': 'existing', 'sync': 'full'}}
399
+{'execute':'drive-mirror', 'arguments':{
400
+ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT',
401
+ 'mode': 'existing', 'sync': 'full'}}
402
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
403
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
404
Specify the 'raw' format explicitly to remove the restrictions.
405
@@ -XXX,XX +XXX,XX @@ read 512/512 bytes at offset 0
406
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
407
{ 'execute': 'qmp_capabilities' }
408
{"return": {}}
409
-{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT', 'mode': 'existing', 'sync': 'full'}}
410
+{'execute':'drive-mirror', 'arguments':{
411
+ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT',
412
+ 'mode': 'existing', 'sync': 'full'}}
413
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
414
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
415
{"return": {}}
416
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/t.raw.src', fmt=IMGFMT size=67108864
417
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=SIZE
418
{ 'execute': 'qmp_capabilities' }
419
{"return": {}}
420
-{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'mode': 'existing', 'sync': 'full'}}
421
+{'execute':'drive-mirror', 'arguments':{
422
+ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT',
423
+ 'mode': 'existing', 'sync': 'full'}}
424
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
425
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
426
Specify the 'raw' format explicitly to remove the restrictions.
427
@@ -XXX,XX +XXX,XX @@ read 512/512 bytes at offset 0
428
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
429
{ 'execute': 'qmp_capabilities' }
430
{"return": {}}
431
-{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT', 'mode': 'existing', 'sync': 'full'}}
432
+{'execute':'drive-mirror', 'arguments':{
433
+ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT',
434
+ 'mode': 'existing', 'sync': 'full'}}
435
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
436
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
437
{"return": {}}
438
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/t.raw.src', fmt=IMGFMT size=67108864
439
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=SIZE
440
{ 'execute': 'qmp_capabilities' }
441
{"return": {}}
442
-{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'mode': 'existing', 'sync': 'full'}}
443
+{'execute':'drive-mirror', 'arguments':{
444
+ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT',
445
+ 'mode': 'existing', 'sync': 'full'}}
446
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
447
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
448
Specify the 'raw' format explicitly to remove the restrictions.
449
@@ -XXX,XX +XXX,XX @@ read 512/512 bytes at offset 0
450
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
451
{ 'execute': 'qmp_capabilities' }
452
{"return": {}}
453
-{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT', 'mode': 'existing', 'sync': 'full'}}
454
+{'execute':'drive-mirror', 'arguments':{
455
+ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT',
456
+ 'mode': 'existing', 'sync': 'full'}}
457
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
458
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
459
{"return": {}}
460
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/t.raw.src', fmt=IMGFMT size=67108864
461
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=SIZE
462
{ 'execute': 'qmp_capabilities' }
463
{"return": {}}
464
-{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'mode': 'existing', 'sync': 'full'}}
465
+{'execute':'drive-mirror', 'arguments':{
466
+ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT',
467
+ 'mode': 'existing', 'sync': 'full'}}
468
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
469
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
470
Specify the 'raw' format explicitly to remove the restrictions.
471
@@ -XXX,XX +XXX,XX @@ read 512/512 bytes at offset 0
472
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
473
{ 'execute': 'qmp_capabilities' }
474
{"return": {}}
475
-{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT', 'mode': 'existing', 'sync': 'full'}}
476
+{'execute':'drive-mirror', 'arguments':{
477
+ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT',
478
+ 'mode': 'existing', 'sync': 'full'}}
479
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
480
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
481
{"return": {}}
482
@@ -XXX,XX +XXX,XX @@ Images are identical.
483
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=SIZE
484
{ 'execute': 'qmp_capabilities' }
485
{"return": {}}
486
-{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'mode': 'existing', 'sync': 'full'}}
487
+{'execute':'drive-mirror', 'arguments':{
488
+ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT',
489
+ 'mode': 'existing', 'sync': 'full'}}
490
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
491
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
492
Specify the 'raw' format explicitly to remove the restrictions.
493
@@ -XXX,XX +XXX,XX @@ read 512/512 bytes at offset 0
494
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
495
{ 'execute': 'qmp_capabilities' }
496
{"return": {}}
497
-{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT', 'mode': 'existing', 'sync': 'full'}}
498
+{'execute':'drive-mirror', 'arguments':{
499
+ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT',
500
+ 'mode': 'existing', 'sync': 'full'}}
501
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
502
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
503
{"return": {}}
504
@@ -XXX,XX +XXX,XX @@ Images are identical.
505
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=SIZE
506
{ 'execute': 'qmp_capabilities' }
507
{"return": {}}
508
-{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'mode': 'existing', 'sync': 'full'}}
509
+{'execute':'drive-mirror', 'arguments':{
510
+ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT',
511
+ 'mode': 'existing', 'sync': 'full'}}
512
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
513
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
514
Specify the 'raw' format explicitly to remove the restrictions.
515
@@ -XXX,XX +XXX,XX @@ read 512/512 bytes at offset 0
516
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
517
{ 'execute': 'qmp_capabilities' }
518
{"return": {}}
519
-{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT', 'mode': 'existing', 'sync': 'full'}}
520
+{'execute':'drive-mirror', 'arguments':{
521
+ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT',
522
+ 'mode': 'existing', 'sync': 'full'}}
523
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
524
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
525
{"return": {}}
526
@@ -XXX,XX +XXX,XX @@ Images are identical.
527
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=SIZE
528
{ 'execute': 'qmp_capabilities' }
529
{"return": {}}
530
-{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'mode': 'existing', 'sync': 'full'}}
531
+{'execute':'drive-mirror', 'arguments':{
532
+ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT',
533
+ 'mode': 'existing', 'sync': 'full'}}
534
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
535
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
536
Specify the 'raw' format explicitly to remove the restrictions.
537
@@ -XXX,XX +XXX,XX @@ read 512/512 bytes at offset 0
538
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
539
{ 'execute': 'qmp_capabilities' }
540
{"return": {}}
541
-{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT', 'mode': 'existing', 'sync': 'full'}}
542
+{'execute':'drive-mirror', 'arguments':{
543
+ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT',
544
+ 'mode': 'existing', 'sync': 'full'}}
545
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
546
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
547
{"return": {}}
548
@@ -XXX,XX +XXX,XX @@ Images are identical.
549
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=SIZE
550
{ 'execute': 'qmp_capabilities' }
551
{"return": {}}
552
-{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'mode': 'existing', 'sync': 'full'}}
553
+{'execute':'drive-mirror', 'arguments':{
554
+ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT',
555
+ 'mode': 'existing', 'sync': 'full'}}
556
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
557
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
558
Specify the 'raw' format explicitly to remove the restrictions.
559
@@ -XXX,XX +XXX,XX @@ read 512/512 bytes at offset 0
560
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
561
{ 'execute': 'qmp_capabilities' }
562
{"return": {}}
563
-{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT', 'mode': 'existing', 'sync': 'full'}}
564
+{'execute':'drive-mirror', 'arguments':{
565
+ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT',
566
+ 'mode': 'existing', 'sync': 'full'}}
567
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
568
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
569
{"return": {}}
570
@@ -XXX,XX +XXX,XX @@ Images are identical.
571
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=SIZE
572
{ 'execute': 'qmp_capabilities' }
573
{"return": {}}
574
-{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'mode': 'existing', 'sync': 'full'}}
575
+{'execute':'drive-mirror', 'arguments':{
576
+ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT',
577
+ 'mode': 'existing', 'sync': 'full'}}
578
WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw.
579
Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted.
580
Specify the 'raw' format explicitly to remove the restrictions.
581
@@ -XXX,XX +XXX,XX @@ WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed
582
Images are identical.
583
{ 'execute': 'qmp_capabilities' }
584
{"return": {}}
585
-{'execute':'drive-mirror', 'arguments':{ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT', 'mode': 'existing', 'sync': 'full'}}
586
+{'execute':'drive-mirror', 'arguments':{
587
+ 'device': 'src', 'target': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT',
588
+ 'mode': 'existing', 'sync': 'full'}}
589
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "src"}}
590
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "src"}}
591
{"return": {}}
592
diff --git a/tests/qemu-iotests/117.out b/tests/qemu-iotests/117.out
593
index XXXXXXX..XXXXXXX 100644
594
--- a/tests/qemu-iotests/117.out
595
+++ b/tests/qemu-iotests/117.out
596
@@ -XXX,XX +XXX,XX @@ QA output created by 117
597
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=65536
598
{ 'execute': 'qmp_capabilities' }
599
{"return": {}}
600
-{ 'execute': 'blockdev-add', 'arguments': { 'node-name': 'protocol', 'driver': 'file', 'filename': 'TEST_DIR/t.IMGFMT' } }
601
+{ 'execute': 'blockdev-add',
602
+ 'arguments': { 'node-name': 'protocol',
603
+ 'driver': 'file',
604
+ 'filename': 'TEST_DIR/t.IMGFMT' } }
605
{"return": {}}
606
-{ 'execute': 'blockdev-add', 'arguments': { 'node-name': 'format', 'driver': 'IMGFMT', 'file': 'protocol' } }
607
+{ 'execute': 'blockdev-add',
608
+ 'arguments': { 'node-name': 'format',
609
+ 'driver': 'IMGFMT',
610
+ 'file': 'protocol' } }
611
{"return": {}}
612
-{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io format "write -P 42 0 64k"' } }
613
+{ 'execute': 'human-monitor-command',
614
+ 'arguments': { 'command-line': 'qemu-io format "write -P 42 0 64k"' } }
615
wrote 65536/65536 bytes at offset 0
616
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
617
{"return": ""}
618
diff --git a/tests/qemu-iotests/127.out b/tests/qemu-iotests/127.out
619
index XXXXXXX..XXXXXXX 100644
620
--- a/tests/qemu-iotests/127.out
621
+++ b/tests/qemu-iotests/127.out
622
@@ -XXX,XX +XXX,XX @@ wrote 42/42 bytes at offset 0
623
42 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
624
{ 'execute': 'qmp_capabilities' }
625
{"return": {}}
626
-{ 'execute': 'drive-mirror', 'arguments': { 'job-id': 'mirror', 'device': 'source', 'target': 'TEST_DIR/t.IMGFMT.overlay1', 'mode': 'existing', 'sync': 'top' } }
627
+{ 'execute': 'drive-mirror',
628
+ 'arguments': {
629
+ 'job-id': 'mirror',
630
+ 'device': 'source',
631
+ 'target': 'TEST_DIR/t.IMGFMT.overlay1',
632
+ 'mode': 'existing',
633
+ 'sync': 'top'
634
+ } }
635
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "mirror"}}
636
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "mirror"}}
637
{"return": {}}
638
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "mirror"}}
639
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "mirror", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}}
640
-{ 'execute': 'block-job-complete', 'arguments': { 'device': 'mirror' } }
641
+{ 'execute': 'block-job-complete',
642
+ 'arguments': { 'device': 'mirror' } }
643
{"return": {}}
644
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "mirror"}}
645
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "mirror"}}
646
diff --git a/tests/qemu-iotests/140.out b/tests/qemu-iotests/140.out
647
index XXXXXXX..XXXXXXX 100644
648
--- a/tests/qemu-iotests/140.out
649
+++ b/tests/qemu-iotests/140.out
650
@@ -XXX,XX +XXX,XX @@ wrote 65536/65536 bytes at offset 0
651
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
652
{ 'execute': 'qmp_capabilities' }
653
{"return": {}}
654
-{ 'execute': 'nbd-server-start', 'arguments': { 'addr': { 'type': 'unix', 'data': { 'path': 'SOCK_DIR/nbd' }}}}
655
+{ 'execute': 'nbd-server-start',
656
+ 'arguments': { 'addr': { 'type': 'unix',
657
+ 'data': { 'path': 'SOCK_DIR/nbd' }}}}
658
{"return": {}}
659
-{ 'execute': 'nbd-server-add', 'arguments': { 'device': 'drv' }}
660
+{ 'execute': 'nbd-server-add',
661
+ 'arguments': { 'device': 'drv' }}
662
{"return": {}}
663
read 65536/65536 bytes at offset 0
664
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
665
-{ 'execute': 'eject', 'arguments': { 'device': 'drv' }}
666
+{ 'execute': 'eject',
667
+ 'arguments': { 'device': 'drv' }}
668
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "drv"}}
669
qemu-io: can't open device nbd+unix:///drv?socket=SOCK_DIR/nbd: Requested export not available
670
server reported: export 'drv' not present
671
diff --git a/tests/qemu-iotests/141.out b/tests/qemu-iotests/141.out
672
index XXXXXXX..XXXXXXX 100644
673
--- a/tests/qemu-iotests/141.out
674
+++ b/tests/qemu-iotests/141.out
675
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/m.
676
677
=== Testing drive-backup ===
678
679
-{'execute': 'blockdev-add', 'arguments': { 'node-name': 'drv0', 'driver': 'IMGFMT', 'file': { 'driver': 'file', 'filename': 'TEST_DIR/t.IMGFMT' }}}
680
-{"return": {}}
681
-{'execute': 'drive-backup', 'arguments': {'job-id': 'job0', 'device': 'drv0', 'target': 'TEST_DIR/o.IMGFMT', 'format': 'IMGFMT', 'sync': 'none'}}
682
+{'execute': 'blockdev-add',
683
+ 'arguments': {
684
+ 'node-name': 'drv0',
685
+ 'driver': 'IMGFMT',
686
+ 'file': {
687
+ 'driver': 'file',
688
+ 'filename': 'TEST_DIR/t.IMGFMT'
689
+ }}}
690
+{"return": {}}
691
+{'execute': 'drive-backup',
692
+'arguments': {'job-id': 'job0',
693
+'device': 'drv0',
694
+'target': 'TEST_DIR/o.IMGFMT',
695
+'format': 'IMGFMT',
696
+'sync': 'none'}}
697
Formatting 'TEST_DIR/o.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT backing_fmt=IMGFMT
698
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
699
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
700
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "paused", "id": "job0"}}
701
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
702
-{'execute': 'blockdev-del', 'arguments': {'node-name': 'drv0'}}
703
+{'execute': 'blockdev-del',
704
+ 'arguments': {'node-name': 'drv0'}}
705
{"error": {"class": "GenericError", "desc": "Node 'drv0' is busy: node is used as backing hd of 'NODE_NAME'"}}
706
-{'execute': 'block-job-cancel', 'arguments': {'device': 'job0'}}
707
+{'execute': 'block-job-cancel',
708
+ 'arguments': {'device': 'job0'}}
709
{"return": {}}
710
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "job0"}}
711
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "job0", "len": 1048576, "offset": 0, "speed": 0, "type": "backup"}}
712
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job0"}}
713
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job0"}}
714
-{'execute': 'blockdev-del', 'arguments': {'node-name': 'drv0'}}
715
+{'execute': 'blockdev-del',
716
+ 'arguments': {'node-name': 'drv0'}}
717
{"return": {}}
718
719
=== Testing drive-mirror ===
720
721
-{'execute': 'blockdev-add', 'arguments': { 'node-name': 'drv0', 'driver': 'IMGFMT', 'file': { 'driver': 'file', 'filename': 'TEST_DIR/t.IMGFMT' }}}
722
-{"return": {}}
723
-{'execute': 'drive-mirror', 'arguments': {'job-id': 'job0', 'device': 'drv0', 'target': 'TEST_DIR/o.IMGFMT', 'format': 'IMGFMT', 'sync': 'none'}}
724
+{'execute': 'blockdev-add',
725
+ 'arguments': {
726
+ 'node-name': 'drv0',
727
+ 'driver': 'IMGFMT',
728
+ 'file': {
729
+ 'driver': 'file',
730
+ 'filename': 'TEST_DIR/t.IMGFMT'
731
+ }}}
732
+{"return": {}}
733
+{'execute': 'drive-mirror',
734
+'arguments': {'job-id': 'job0',
735
+'device': 'drv0',
736
+'target': 'TEST_DIR/o.IMGFMT',
737
+'format': 'IMGFMT',
738
+'sync': 'none'}}
739
Formatting 'TEST_DIR/o.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT backing_fmt=IMGFMT
740
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
741
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
742
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job0"}}
743
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job0", "len": 0, "offset": 0, "speed": 0, "type": "mirror"}}
744
-{'execute': 'blockdev-del', 'arguments': {'node-name': 'drv0'}}
745
+{'execute': 'blockdev-del',
746
+ 'arguments': {'node-name': 'drv0'}}
747
{"error": {"class": "GenericError", "desc": "Node 'drv0' is busy: block device is in use by block job: mirror"}}
748
-{'execute': 'block-job-cancel', 'arguments': {'device': 'job0'}}
749
+{'execute': 'block-job-cancel',
750
+ 'arguments': {'device': 'job0'}}
751
{"return": {}}
752
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job0"}}
753
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job0"}}
754
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job0", "len": 0, "offset": 0, "speed": 0, "type": "mirror"}}
755
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job0"}}
756
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job0"}}
757
-{'execute': 'blockdev-del', 'arguments': {'node-name': 'drv0'}}
758
+{'execute': 'blockdev-del',
759
+ 'arguments': {'node-name': 'drv0'}}
760
{"return": {}}
761
762
=== Testing active block-commit ===
763
764
-{'execute': 'blockdev-add', 'arguments': { 'node-name': 'drv0', 'driver': 'IMGFMT', 'file': { 'driver': 'file', 'filename': 'TEST_DIR/t.IMGFMT' }}}
765
-{"return": {}}
766
-{'execute': 'block-commit', 'arguments': {'job-id': 'job0', 'device': 'drv0'}}
767
+{'execute': 'blockdev-add',
768
+ 'arguments': {
769
+ 'node-name': 'drv0',
770
+ 'driver': 'IMGFMT',
771
+ 'file': {
772
+ 'driver': 'file',
773
+ 'filename': 'TEST_DIR/t.IMGFMT'
774
+ }}}
775
+{"return": {}}
776
+{'execute': 'block-commit',
777
+'arguments': {'job-id': 'job0', 'device': 'drv0'}}
778
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
779
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
780
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job0"}}
781
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job0", "len": 0, "offset": 0, "speed": 0, "type": "commit"}}
782
-{'execute': 'blockdev-del', 'arguments': {'node-name': 'drv0'}}
783
+{'execute': 'blockdev-del',
784
+ 'arguments': {'node-name': 'drv0'}}
785
{"error": {"class": "GenericError", "desc": "Node 'drv0' is busy: block device is in use by block job: commit"}}
786
-{'execute': 'block-job-cancel', 'arguments': {'device': 'job0'}}
787
+{'execute': 'block-job-cancel',
788
+ 'arguments': {'device': 'job0'}}
789
{"return": {}}
790
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job0"}}
791
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job0"}}
792
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job0", "len": 0, "offset": 0, "speed": 0, "type": "commit"}}
793
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job0"}}
794
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job0"}}
795
-{'execute': 'blockdev-del', 'arguments': {'node-name': 'drv0'}}
796
+{'execute': 'blockdev-del',
797
+ 'arguments': {'node-name': 'drv0'}}
798
{"return": {}}
799
800
=== Testing non-active block-commit ===
801
802
wrote 1048576/1048576 bytes at offset 0
803
1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
804
-{'execute': 'blockdev-add', 'arguments': { 'node-name': 'drv0', 'driver': 'IMGFMT', 'file': { 'driver': 'file', 'filename': 'TEST_DIR/t.IMGFMT' }}}
805
-{"return": {}}
806
-{'execute': 'block-commit', 'arguments': {'job-id': 'job0', 'device': 'drv0', 'top': 'TEST_DIR/m.IMGFMT', 'speed': 1}}
807
+{'execute': 'blockdev-add',
808
+ 'arguments': {
809
+ 'node-name': 'drv0',
810
+ 'driver': 'IMGFMT',
811
+ 'file': {
812
+ 'driver': 'file',
813
+ 'filename': 'TEST_DIR/t.IMGFMT'
814
+ }}}
815
+{"return": {}}
816
+{'execute': 'block-commit',
817
+'arguments': {'job-id': 'job0',
818
+'device': 'drv0',
819
+'top': 'TEST_DIR/m.IMGFMT',
820
+'speed': 1}}
821
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
822
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
823
-{'execute': 'blockdev-del', 'arguments': {'node-name': 'drv0'}}
824
+{'execute': 'blockdev-del',
825
+ 'arguments': {'node-name': 'drv0'}}
826
{"error": {"class": "GenericError", "desc": "Node drv0 is in use"}}
827
-{'execute': 'block-job-cancel', 'arguments': {'device': 'job0'}}
828
+{'execute': 'block-job-cancel',
829
+ 'arguments': {'device': 'job0'}}
830
{"return": {}}
831
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "job0"}}
832
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "job0", "len": 1048576, "offset": 524288, "speed": 1, "type": "commit"}}
833
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job0"}}
834
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job0"}}
835
-{'execute': 'blockdev-del', 'arguments': {'node-name': 'drv0'}}
836
+{'execute': 'blockdev-del',
837
+ 'arguments': {'node-name': 'drv0'}}
838
{"return": {}}
839
840
=== Testing block-stream ===
841
842
wrote 1048576/1048576 bytes at offset 0
843
1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
844
-{'execute': 'blockdev-add', 'arguments': { 'node-name': 'drv0', 'driver': 'IMGFMT', 'file': { 'driver': 'file', 'filename': 'TEST_DIR/t.IMGFMT' }}}
845
-{"return": {}}
846
-{'execute': 'block-stream', 'arguments': {'job-id': 'job0', 'device': 'drv0', 'speed': 1}}
847
+{'execute': 'blockdev-add',
848
+ 'arguments': {
849
+ 'node-name': 'drv0',
850
+ 'driver': 'IMGFMT',
851
+ 'file': {
852
+ 'driver': 'file',
853
+ 'filename': 'TEST_DIR/t.IMGFMT'
854
+ }}}
855
+{"return": {}}
856
+{'execute': 'block-stream',
857
+'arguments': {'job-id': 'job0',
858
+'device': 'drv0',
859
+'speed': 1}}
860
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
861
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
862
-{'execute': 'blockdev-del', 'arguments': {'node-name': 'drv0'}}
863
+{'execute': 'blockdev-del',
864
+ 'arguments': {'node-name': 'drv0'}}
865
{"error": {"class": "GenericError", "desc": "Node drv0 is in use"}}
866
-{'execute': 'block-job-cancel', 'arguments': {'device': 'job0'}}
867
+{'execute': 'block-job-cancel',
868
+ 'arguments': {'device': 'job0'}}
869
{"return": {}}
870
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "job0"}}
871
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "job0", "len": 1048576, "offset": 524288, "speed": 1, "type": "stream"}}
872
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job0"}}
873
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job0"}}
874
-{'execute': 'blockdev-del', 'arguments': {'node-name': 'drv0'}}
875
+{'execute': 'blockdev-del',
876
+ 'arguments': {'node-name': 'drv0'}}
877
{"return": {}}
878
*** done
879
diff --git a/tests/qemu-iotests/143.out b/tests/qemu-iotests/143.out
880
index XXXXXXX..XXXXXXX 100644
881
--- a/tests/qemu-iotests/143.out
882
+++ b/tests/qemu-iotests/143.out
883
@@ -XXX,XX +XXX,XX @@
884
QA output created by 143
885
{ 'execute': 'qmp_capabilities' }
886
{"return": {}}
887
-{ 'execute': 'nbd-server-start', 'arguments': { 'addr': { 'type': 'unix', 'data': { 'path': 'SOCK_DIR/nbd' }}}}
888
+{ 'execute': 'nbd-server-start',
889
+ 'arguments': { 'addr': { 'type': 'unix',
890
+ 'data': { 'path': 'SOCK_DIR/nbd' }}}}
891
{"return": {}}
892
qemu-io: can't open device nbd+unix:///no_such_export?socket=SOCK_DIR/nbd: Requested export not available
893
server reported: export 'no_such_export' not present
894
diff --git a/tests/qemu-iotests/144.out b/tests/qemu-iotests/144.out
895
index XXXXXXX..XXXXXXX 100644
896
--- a/tests/qemu-iotests/144.out
897
+++ b/tests/qemu-iotests/144.out
898
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=536870912
899
900
{ 'execute': 'qmp_capabilities' }
901
{"return": {}}
902
-{ 'execute': 'blockdev-snapshot-sync', 'arguments': { 'device': 'virtio0', 'snapshot-file':'TEST_DIR/tmp.IMGFMT', 'format': 'IMGFMT' } }
903
+{ 'execute': 'blockdev-snapshot-sync',
904
+ 'arguments': {
905
+ 'device': 'virtio0',
906
+ 'snapshot-file':'TEST_DIR/tmp.IMGFMT',
907
+ 'format': 'IMGFMT'
908
+ }
909
+ }
910
Formatting 'TEST_DIR/tmp.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=536870912 backing_file=TEST_DIR/t.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
911
{"return": {}}
912
913
=== Performing block-commit on active layer ===
914
915
-{ 'execute': 'block-commit', 'arguments': { 'device': 'virtio0' } }
916
+{ 'execute': 'block-commit',
917
+ 'arguments': {
918
+ 'device': 'virtio0'
919
+ }
920
+ }
921
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "virtio0"}}
922
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "virtio0"}}
923
{"return": {}}
924
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "virtio0"}}
925
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "virtio0", "len": 0, "offset": 0, "speed": 0, "type": "commit"}}
926
-{ 'execute': 'block-job-complete', 'arguments': { 'device': 'virtio0' } }
927
+{ 'execute': 'block-job-complete',
928
+ 'arguments': {
929
+ 'device': 'virtio0'
930
+ }
931
+ }
932
{"return": {}}
933
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "virtio0"}}
934
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "virtio0"}}
935
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/tmp.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off co
936
937
=== Performing Live Snapshot 2 ===
938
939
-{ 'execute': 'blockdev-snapshot-sync', 'arguments': { 'device': 'virtio0', 'snapshot-file':'TEST_DIR/tmp2.IMGFMT', 'format': 'IMGFMT' } }
940
+{ 'execute': 'blockdev-snapshot-sync',
941
+ 'arguments': {
942
+ 'device': 'virtio0',
943
+ 'snapshot-file':'TEST_DIR/tmp2.IMGFMT',
944
+ 'format': 'IMGFMT'
945
+ }
946
+ }
947
Formatting 'TEST_DIR/tmp2.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=536870912 backing_file=TEST_DIR/t.qcow2 backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
948
{"return": {}}
949
*** done
950
diff --git a/tests/qemu-iotests/153.out b/tests/qemu-iotests/153.out
951
index XXXXXXX..XXXXXXX 100644
952
--- a/tests/qemu-iotests/153.out
953
+++ b/tests/qemu-iotests/153.out
954
@@ -XXX,XX +XXX,XX @@ _qemu_img_wrapper commit -b TEST_DIR/t.qcow2.b TEST_DIR/t.qcow2.c
955
{ 'execute': 'qmp_capabilities' }
956
{"return": {}}
957
Adding drive
958
-{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'drive_add 0 if=none,id=d0,file=TEST_DIR/t.IMGFMT' } }
959
+{ 'execute': 'human-monitor-command',
960
+ 'arguments': { 'command-line': 'drive_add 0 if=none,id=d0,file=TEST_DIR/t.IMGFMT' } }
961
{"return": "OK\r\n"}
962
963
_qemu_io_wrapper TEST_DIR/t.qcow2 -c write 0 512
964
@@ -XXX,XX +XXX,XX @@ Creating overlay with qemu-img when the guest is running should be allowed
965
966
_qemu_img_wrapper create -f qcow2 -b TEST_DIR/t.qcow2 -F qcow2 TEST_DIR/t.qcow2.overlay
967
== Closing an image should unlock it ==
968
-{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'drive_del d0' } }
969
+{ 'execute': 'human-monitor-command',
970
+ 'arguments': { 'command-line': 'drive_del d0' } }
971
{"return": ""}
972
973
_qemu_io_wrapper TEST_DIR/t.qcow2 -c write 0 512
974
Adding two and closing one
975
-{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'drive_add 0 if=none,id=d0,file=TEST_DIR/t.IMGFMT,readonly=on' } }
976
+{ 'execute': 'human-monitor-command',
977
+ 'arguments': { 'command-line': 'drive_add 0 if=none,id=d0,file=TEST_DIR/t.IMGFMT,readonly=on' } }
978
{"return": "OK\r\n"}
979
-{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'drive_add 0 if=none,id=d1,file=TEST_DIR/t.IMGFMT,readonly=on' } }
980
+{ 'execute': 'human-monitor-command',
981
+ 'arguments': { 'command-line': 'drive_add 0 if=none,id=d1,file=TEST_DIR/t.IMGFMT,readonly=on' } }
982
{"return": "OK\r\n"}
983
984
_qemu_img_wrapper info TEST_DIR/t.qcow2
985
-{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'drive_del d0' } }
986
+{ 'execute': 'human-monitor-command',
987
+ 'arguments': { 'command-line': 'drive_del d0' } }
988
{"return": ""}
989
990
_qemu_io_wrapper TEST_DIR/t.qcow2 -c write 0 512
991
qemu-io: can't open device TEST_DIR/t.qcow2: Failed to get "write" lock
992
Is another process using the image [TEST_DIR/t.qcow2]?
993
Closing the other
994
-{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'drive_del d1' } }
995
+{ 'execute': 'human-monitor-command',
996
+ 'arguments': { 'command-line': 'drive_del d1' } }
997
{"return": ""}
998
999
_qemu_io_wrapper TEST_DIR/t.qcow2 -c write 0 512
1000
diff --git a/tests/qemu-iotests/156.out b/tests/qemu-iotests/156.out
1001
index XXXXXXX..XXXXXXX 100644
1002
--- a/tests/qemu-iotests/156.out
1003
+++ b/tests/qemu-iotests/156.out
1004
@@ -XXX,XX +XXX,XX @@ wrote 196608/196608 bytes at offset 65536
1005
{ 'execute': 'qmp_capabilities' }
1006
{"return": {}}
1007
Formatting 'TEST_DIR/t.IMGFMT.overlay', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT backing_fmt=IMGFMT
1008
-{ 'execute': 'blockdev-snapshot-sync', 'arguments': { 'device': 'source', 'snapshot-file': 'TEST_DIR/t.IMGFMT.overlay', 'format': 'IMGFMT', 'mode': 'existing' } }
1009
+{ 'execute': 'blockdev-snapshot-sync',
1010
+ 'arguments': { 'device': 'source',
1011
+ 'snapshot-file': 'TEST_DIR/t.IMGFMT.overlay',
1012
+ 'format': 'IMGFMT',
1013
+ 'mode': 'existing' } }
1014
{"return": {}}
1015
-{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io source "write -P 3 128k 128k"' } }
1016
+{ 'execute': 'human-monitor-command',
1017
+ 'arguments': { 'command-line':
1018
+ 'qemu-io source "write -P 3 128k 128k"' } }
1019
wrote 131072/131072 bytes at offset 131072
1020
128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1021
{"return": ""}
1022
Formatting 'TEST_DIR/t.IMGFMT.target.overlay', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT.target backing_fmt=IMGFMT
1023
-{ 'execute': 'drive-mirror', 'arguments': { 'device': 'source', 'target': 'TEST_DIR/t.IMGFMT.target.overlay', 'mode': 'existing', 'sync': 'top' } }
1024
+{ 'execute': 'drive-mirror',
1025
+ 'arguments': { 'device': 'source',
1026
+ 'target': 'TEST_DIR/t.IMGFMT.target.overlay',
1027
+ 'mode': 'existing',
1028
+ 'sync': 'top' } }
1029
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "source"}}
1030
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "source"}}
1031
{"return": {}}
1032
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "source"}}
1033
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "source", "len": 131072, "offset": 131072, "speed": 0, "type": "mirror"}}
1034
-{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io source "write -P 4 192k 64k"' } }
1035
+{ 'execute': 'human-monitor-command',
1036
+ 'arguments': { 'command-line':
1037
+ 'qemu-io source "write -P 4 192k 64k"' } }
1038
wrote 65536/65536 bytes at offset 196608
1039
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1040
{"return": ""}
1041
-{ 'execute': 'block-job-complete', 'arguments': { 'device': 'source' } }
1042
+{ 'execute': 'block-job-complete',
1043
+ 'arguments': { 'device': 'source' } }
1044
{"return": {}}
1045
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "source"}}
1046
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "source"}}
1047
@@ -XXX,XX +XXX,XX @@ wrote 65536/65536 bytes at offset 196608
1048
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "source"}}
1049
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "source"}}
1050
1051
-{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io source "read -P 1 0k 64k"' } }
1052
+{ 'execute': 'human-monitor-command',
1053
+ 'arguments': { 'command-line':
1054
+ 'qemu-io source "read -P 1 0k 64k"' } }
1055
read 65536/65536 bytes at offset 0
1056
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1057
{"return": ""}
1058
-{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io source "read -P 2 64k 64k"' } }
1059
+{ 'execute': 'human-monitor-command',
1060
+ 'arguments': { 'command-line':
1061
+ 'qemu-io source "read -P 2 64k 64k"' } }
1062
read 65536/65536 bytes at offset 65536
1063
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1064
{"return": ""}
1065
-{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io source "read -P 3 128k 64k"' } }
1066
+{ 'execute': 'human-monitor-command',
1067
+ 'arguments': { 'command-line':
1068
+ 'qemu-io source "read -P 3 128k 64k"' } }
1069
read 65536/65536 bytes at offset 131072
1070
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1071
{"return": ""}
1072
-{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io source "read -P 4 192k 64k"' } }
1073
+{ 'execute': 'human-monitor-command',
1074
+ 'arguments': { 'command-line':
1075
+ 'qemu-io source "read -P 4 192k 64k"' } }
1076
read 65536/65536 bytes at offset 196608
1077
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1078
{"return": ""}
1079
diff --git a/tests/qemu-iotests/161.out b/tests/qemu-iotests/161.out
1080
index XXXXXXX..XXXXXXX 100644
1081
--- a/tests/qemu-iotests/161.out
1082
+++ b/tests/qemu-iotests/161.out
1083
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.
1084
1085
{ 'execute': 'qmp_capabilities' }
1086
{"return": {}}
1087
-{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io none0 "reopen -o backing.detect-zeroes=on"' } }
1088
+{ 'execute': 'human-monitor-command',
1089
+ 'arguments': { 'command-line':
1090
+ 'qemu-io none0 "reopen -o backing.detect-zeroes=on"' } }
1091
{"return": ""}
1092
1093
*** Stream and then change an option on the backing file
1094
1095
{ 'execute': 'qmp_capabilities' }
1096
{"return": {}}
1097
-{ 'execute': 'block-stream', 'arguments': { 'device': 'none0', 'base': 'TEST_DIR/t.IMGFMT.base' } }
1098
+{ 'execute': 'block-stream', 'arguments': { 'device': 'none0',
1099
+ 'base': 'TEST_DIR/t.IMGFMT.base' } }
1100
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "none0"}}
1101
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "none0"}}
1102
{"return": {}}
1103
-{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io none0 "reopen -o backing.detect-zeroes=on"' } }
1104
+{ 'execute': 'human-monitor-command',
1105
+ 'arguments': { 'command-line':
1106
+ 'qemu-io none0 "reopen -o backing.detect-zeroes=on"' } }
1107
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "none0"}}
1108
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "none0"}}
1109
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "none0", "len": 1048576, "offset": 1048576, "speed": 0, "type": "stream"}}
1110
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/t.IMGFMT.int', fmt=IMGFMT size=1048576 backing_file=TEST_DI
1111
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT.int backing_fmt=IMGFMT
1112
{ 'execute': 'qmp_capabilities' }
1113
{"return": {}}
1114
-{ 'execute': 'block-commit', 'arguments': { 'device': 'none0', 'top': 'TEST_DIR/t.IMGFMT.int' } }
1115
+{ 'execute': 'block-commit', 'arguments': { 'device': 'none0',
1116
+ 'top': 'TEST_DIR/t.IMGFMT.int' } }
1117
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "none0"}}
1118
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "none0"}}
1119
{"return": {}}
1120
-{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io none0 "reopen -o backing.detect-zeroes=on"' } }
1121
+{ 'execute': 'human-monitor-command',
1122
+ 'arguments': { 'command-line':
1123
+ 'qemu-io none0 "reopen -o backing.detect-zeroes=on"' } }
1124
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "none0"}}
1125
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "none0"}}
1126
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "none0", "len": 1048576, "offset": 1048576, "speed": 0, "type": "commit"}}
1127
diff --git a/tests/qemu-iotests/173.out b/tests/qemu-iotests/173.out
1128
index XXXXXXX..XXXXXXX 100644
1129
--- a/tests/qemu-iotests/173.out
1130
+++ b/tests/qemu-iotests/173.out
1131
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/image.snp1', fmt=IMGFMT size=104857600
1132
1133
{ 'execute': 'qmp_capabilities' }
1134
{"return": {}}
1135
-{ 'arguments': { 'device': 'disk2', 'format': 'IMGFMT', 'mode': 'existing', 'snapshot-file': 'TEST_DIR/image.snp1', 'snapshot-node-name': 'snp1' }, 'execute': 'blockdev-snapshot-sync' }
1136
+{ 'arguments': {
1137
+ 'device': 'disk2',
1138
+ 'format': 'IMGFMT',
1139
+ 'mode': 'existing',
1140
+ 'snapshot-file': 'TEST_DIR/image.snp1',
1141
+ 'snapshot-node-name': 'snp1'
1142
+ },
1143
+ 'execute': 'blockdev-snapshot-sync'
1144
+ }
1145
{"return": {}}
1146
-{ 'arguments': { 'backing-file': 'image.base', 'device': 'disk2', 'image-node-name': 'snp1' }, 'execute': 'change-backing-file' }
1147
+{ 'arguments': {
1148
+ 'backing-file': 'image.base',
1149
+ 'device': 'disk2',
1150
+ 'image-node-name': 'snp1'
1151
+ },
1152
+ 'execute': 'change-backing-file'
1153
+ }
1154
{"return": {}}
1155
-{ 'arguments': { 'base': 'TEST_DIR/image.base', 'device': 'disk2' }, 'execute': 'block-stream' }
1156
+{ 'arguments': {
1157
+ 'base': 'TEST_DIR/image.base',
1158
+ 'device': 'disk2'
1159
+ },
1160
+ 'execute': 'block-stream'
1161
+ }
1162
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk2"}}
1163
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk2"}}
1164
{"return": {}}
1165
diff --git a/tests/qemu-iotests/182.out b/tests/qemu-iotests/182.out
1166
index XXXXXXX..XXXXXXX 100644
1167
--- a/tests/qemu-iotests/182.out
1168
+++ b/tests/qemu-iotests/182.out
1169
@@ -XXX,XX +XXX,XX @@ Is another process using the image [TEST_DIR/t.qcow2]?
1170
1171
{'execute': 'qmp_capabilities'}
1172
{"return": {}}
1173
-{'execute': 'blockdev-add', 'arguments': { 'node-name': 'node0', 'driver': 'file', 'filename': 'TEST_DIR/t.IMGFMT', 'locking': 'on' } }
1174
-{"return": {}}
1175
-{'execute': 'blockdev-snapshot-sync', 'arguments': { 'node-name': 'node0', 'snapshot-file': 'TEST_DIR/t.IMGFMT.overlay', 'snapshot-node-name': 'node1' } }
1176
+{'execute': 'blockdev-add',
1177
+ 'arguments': {
1178
+ 'node-name': 'node0',
1179
+ 'driver': 'file',
1180
+ 'filename': 'TEST_DIR/t.IMGFMT',
1181
+ 'locking': 'on'
1182
+ } }
1183
+{"return": {}}
1184
+{'execute': 'blockdev-snapshot-sync',
1185
+ 'arguments': {
1186
+ 'node-name': 'node0',
1187
+ 'snapshot-file': 'TEST_DIR/t.IMGFMT.overlay',
1188
+ 'snapshot-node-name': 'node1'
1189
+ } }
1190
Formatting 'TEST_DIR/t.qcow2.overlay', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=197120 backing_file=TEST_DIR/t.qcow2 backing_fmt=file lazy_refcounts=off refcount_bits=16
1191
{"return": {}}
1192
-{'execute': 'blockdev-add', 'arguments': { 'node-name': 'node1', 'driver': 'file', 'filename': 'TEST_DIR/t.IMGFMT', 'locking': 'on' } }
1193
-{"return": {}}
1194
-{'execute': 'nbd-server-start', 'arguments': { 'addr': { 'type': 'unix', 'data': { 'path': 'SOCK_DIR/nbd.socket' } } } }
1195
-{"return": {}}
1196
-{'execute': 'nbd-server-add', 'arguments': { 'device': 'node1' } }
1197
+{'execute': 'blockdev-add',
1198
+ 'arguments': {
1199
+ 'node-name': 'node1',
1200
+ 'driver': 'file',
1201
+ 'filename': 'TEST_DIR/t.IMGFMT',
1202
+ 'locking': 'on'
1203
+ } }
1204
+{"return": {}}
1205
+{'execute': 'nbd-server-start',
1206
+ 'arguments': {
1207
+ 'addr': {
1208
+ 'type': 'unix',
1209
+ 'data': {
1210
+ 'path': 'SOCK_DIR/nbd.socket'
1211
+ } } } }
1212
+{"return": {}}
1213
+{'execute': 'nbd-server-add',
1214
+ 'arguments': {
1215
+ 'device': 'node1'
1216
+ } }
1217
{"return": {}}
1218
1219
=== Testing failure to loosen restrictions ===
1220
diff --git a/tests/qemu-iotests/183.out b/tests/qemu-iotests/183.out
1221
index XXXXXXX..XXXXXXX 100644
1222
--- a/tests/qemu-iotests/183.out
1223
+++ b/tests/qemu-iotests/183.out
1224
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/t.IMGFMT.dest', fmt=IMGFMT size=67108864
1225
1226
=== Write something on the source ===
1227
1228
-{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io disk "write -P 0x55 0 64k"' } }
1229
+{ 'execute': 'human-monitor-command',
1230
+ 'arguments': { 'command-line':
1231
+ 'qemu-io disk "write -P 0x55 0 64k"' } }
1232
wrote 65536/65536 bytes at offset 0
1233
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1234
{"return": ""}
1235
-{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io disk "read -P 0x55 0 64k"' } }
1236
+{ 'execute': 'human-monitor-command',
1237
+ 'arguments': { 'command-line':
1238
+ 'qemu-io disk "read -P 0x55 0 64k"' } }
1239
read 65536/65536 bytes at offset 0
1240
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1241
{"return": ""}
1242
1243
=== Do block migration to destination ===
1244
1245
-{ 'execute': 'migrate', 'arguments': { 'uri': 'unix:SOCK_DIR/migrate', 'blk': true } }
1246
+{ 'execute': 'migrate',
1247
+ 'arguments': { 'uri': 'unix:SOCK_DIR/migrate', 'blk': true } }
1248
{"return": {}}
1249
{ 'execute': 'query-status' }
1250
{"return": {"status": "postmigrate", "singlestep": false, "running": false}}
1251
@@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0
1252
{ 'execute': 'query-status' }
1253
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "RESUME"}
1254
{"return": {"status": "running", "singlestep": false, "running": true}}
1255
-{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io disk "read -P 0x55 0 64k"' } }
1256
+{ 'execute': 'human-monitor-command',
1257
+ 'arguments': { 'command-line':
1258
+ 'qemu-io disk "read -P 0x55 0 64k"' } }
1259
read 65536/65536 bytes at offset 0
1260
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1261
{"return": ""}
1262
-{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io disk "write -P 0x66 1M 64k"' } }
1263
+{ 'execute': 'human-monitor-command',
1264
+ 'arguments': { 'command-line':
1265
+ 'qemu-io disk "write -P 0x66 1M 64k"' } }
1266
wrote 65536/65536 bytes at offset 1048576
1267
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1268
{"return": ""}
1269
diff --git a/tests/qemu-iotests/185.out b/tests/qemu-iotests/185.out
1270
index XXXXXXX..XXXXXXX 100644
1271
--- a/tests/qemu-iotests/185.out
1272
+++ b/tests/qemu-iotests/185.out
1273
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=67108864
1274
1275
=== Creating backing chain ===
1276
1277
-{ 'execute': 'blockdev-snapshot-sync', 'arguments': { 'device': 'disk', 'snapshot-file': 'TEST_DIR/t.IMGFMT.mid', 'format': 'IMGFMT', 'mode': 'absolute-paths' } }
1278
+{ 'execute': 'blockdev-snapshot-sync',
1279
+ 'arguments': { 'device': 'disk',
1280
+ 'snapshot-file': 'TEST_DIR/t.IMGFMT.mid',
1281
+ 'format': 'IMGFMT',
1282
+ 'mode': 'absolute-paths' } }
1283
Formatting 'TEST_DIR/t.qcow2.mid', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=67108864 backing_file=TEST_DIR/t.qcow2.base backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
1284
{"return": {}}
1285
-{ 'execute': 'human-monitor-command', 'arguments': { 'command-line': 'qemu-io disk "write 0 4M"' } }
1286
+{ 'execute': 'human-monitor-command',
1287
+ 'arguments': { 'command-line':
1288
+ 'qemu-io disk "write 0 4M"' } }
1289
wrote 4194304/4194304 bytes at offset 0
1290
4 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1291
{"return": ""}
1292
-{ 'execute': 'blockdev-snapshot-sync', 'arguments': { 'device': 'disk', 'snapshot-file': 'TEST_DIR/t.IMGFMT', 'format': 'IMGFMT', 'mode': 'absolute-paths' } }
1293
+{ 'execute': 'blockdev-snapshot-sync',
1294
+ 'arguments': { 'device': 'disk',
1295
+ 'snapshot-file': 'TEST_DIR/t.IMGFMT',
1296
+ 'format': 'IMGFMT',
1297
+ 'mode': 'absolute-paths' } }
1298
Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=67108864 backing_file=TEST_DIR/t.qcow2.mid backing_fmt=qcow2 lazy_refcounts=off refcount_bits=16
1299
{"return": {}}
1300
1301
=== Start commit job and exit qemu ===
1302
1303
-{ 'execute': 'block-commit', 'arguments': { 'device': 'disk', 'base':'TEST_DIR/t.IMGFMT.base', 'top': 'TEST_DIR/t.IMGFMT.mid', 'speed': 65536 } }
1304
+{ 'execute': 'block-commit',
1305
+ 'arguments': { 'device': 'disk',
1306
+ 'base':'TEST_DIR/t.IMGFMT.base',
1307
+ 'top': 'TEST_DIR/t.IMGFMT.mid',
1308
+ 'speed': 65536 } }
1309
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}}
1310
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
1311
{"return": {}}
1312
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off comp
1313
1314
{ 'execute': 'qmp_capabilities' }
1315
{"return": {}}
1316
-{ 'execute': 'block-commit', 'arguments': { 'device': 'disk', 'base':'TEST_DIR/t.IMGFMT.base', 'speed': 65536 } }
1317
+{ 'execute': 'block-commit',
1318
+ 'arguments': { 'device': 'disk',
1319
+ 'base':'TEST_DIR/t.IMGFMT.base',
1320
+ 'speed': 65536 } }
1321
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}}
1322
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
1323
{"return": {}}
1324
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off comp
1325
1326
{ 'execute': 'qmp_capabilities' }
1327
{"return": {}}
1328
-{ 'execute': 'drive-mirror', 'arguments': { 'device': 'disk', 'target': 'TEST_DIR/t.IMGFMT.copy', 'format': 'IMGFMT', 'sync': 'full', 'speed': 65536 } }
1329
+{ 'execute': 'drive-mirror',
1330
+ 'arguments': { 'device': 'disk',
1331
+ 'target': 'TEST_DIR/t.IMGFMT.copy',
1332
+ 'format': 'IMGFMT',
1333
+ 'sync': 'full',
1334
+ 'speed': 65536 } }
1335
Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=67108864 lazy_refcounts=off refcount_bits=16
1336
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}}
1337
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
1338
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 cluster_size=65536 extended_l2=off
1339
1340
{ 'execute': 'qmp_capabilities' }
1341
{"return": {}}
1342
-{ 'execute': 'drive-backup', 'arguments': { 'device': 'disk', 'target': 'TEST_DIR/t.IMGFMT.copy', 'format': 'IMGFMT', 'sync': 'full', 'speed': 65536 } }
1343
+{ 'execute': 'drive-backup',
1344
+ 'arguments': { 'device': 'disk',
1345
+ 'target': 'TEST_DIR/t.IMGFMT.copy',
1346
+ 'format': 'IMGFMT',
1347
+ 'sync': 'full',
1348
+ 'speed': 65536 } }
1349
Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=67108864 lazy_refcounts=off refcount_bits=16
1350
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}}
1351
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
1352
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 cluster_size=65536 extended_l2=off
1353
1354
{ 'execute': 'qmp_capabilities' }
1355
{"return": {}}
1356
-{ 'execute': 'block-stream', 'arguments': { 'device': 'disk', 'speed': 65536 } }
1357
+{ 'execute': 'block-stream',
1358
+ 'arguments': { 'device': 'disk',
1359
+ 'speed': 65536 } }
1360
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}}
1361
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}}
1362
{"return": {}}
1363
diff --git a/tests/qemu-iotests/191.out b/tests/qemu-iotests/191.out
1364
index XXXXXXX..XXXXXXX 100644
1365
--- a/tests/qemu-iotests/191.out
1366
+++ b/tests/qemu-iotests/191.out
1367
@@ -XXX,XX +XXX,XX @@ wrote 65536/65536 bytes at offset 1048576
1368
1369
=== Perform commit job ===
1370
1371
-{ 'execute': 'block-commit', 'arguments': { 'job-id': 'commit0', 'device': 'top', 'base':'TEST_DIR/t.IMGFMT.base', 'top': 'TEST_DIR/t.IMGFMT.mid' } }
1372
+{ 'execute': 'block-commit',
1373
+ 'arguments': { 'job-id': 'commit0',
1374
+ 'device': 'top',
1375
+ 'base':'TEST_DIR/t.IMGFMT.base',
1376
+ 'top': 'TEST_DIR/t.IMGFMT.mid' } }
1377
{
21
{
1378
"timestamp": {
22
QEMUIOVector qiov;
1379
"seconds": TIMESTAMP,
23
struct iovec iov;
1380
@@ -XXX,XX +XXX,XX @@ wrote 65536/65536 bytes at offset 1048576
24
- Coroutine *co;
1381
25
BlkRwCo rwco;
1382
=== Perform commit job ===
26
1383
27
iov = (struct iovec) {
1384
-{ 'execute': 'block-commit', 'arguments': { 'job-id': 'commit0', 'device': 'top', 'base':'TEST_DIR/t.IMGFMT.base', 'top': 'TEST_DIR/t.IMGFMT.mid' } }
28
@@ -XXX,XX +XXX,XX @@ static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
1385
+{ 'execute': 'block-commit',
29
.ret = NOT_DONE,
1386
+ 'arguments': { 'job-id': 'commit0',
30
};
1387
+ 'device': 'top',
31
1388
+ 'base':'TEST_DIR/t.IMGFMT.base',
32
- co = qemu_coroutine_create(co_entry, &rwco);
1389
+ 'top': 'TEST_DIR/t.IMGFMT.mid' } }
33
- qemu_coroutine_enter(co);
1390
{
34
- BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE);
1391
"timestamp": {
35
+ if (qemu_in_coroutine()) {
1392
"seconds": TIMESTAMP,
36
+ /* Fast-path if already in coroutine context */
1393
diff --git a/tests/qemu-iotests/223.out b/tests/qemu-iotests/223.out
37
+ co_entry(&rwco);
1394
index XXXXXXX..XXXXXXX 100644
38
+ } else {
1395
--- a/tests/qemu-iotests/223.out
39
+ Coroutine *co = qemu_coroutine_create(co_entry, &rwco);
1396
+++ b/tests/qemu-iotests/223.out
40
+ qemu_coroutine_enter(co);
1397
@@ -XXX,XX +XXX,XX @@ wrote 2097152/2097152 bytes at offset 2097152
41
+ BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE);
1398
42
+ }
1399
{"execute":"qmp_capabilities"}
43
1400
{"return": {}}
44
return rwco.ret;
1401
-{"execute":"blockdev-add", "arguments":{"driver":"IMGFMT", "node-name":"n", "file":{"driver":"file", "filename":"TEST_DIR/t.IMGFMT"}}}
45
}
1402
+{"execute":"blockdev-add",
1403
+ "arguments":{"driver":"IMGFMT", "node-name":"n",
1404
+ "file":{"driver":"file", "filename":"TEST_DIR/t.IMGFMT"}}}
1405
{"return": {}}
1406
-{"execute":"block-dirty-bitmap-disable", "arguments":{"node":"n", "name":"b"}}
1407
+{"execute":"block-dirty-bitmap-disable",
1408
+ "arguments":{"node":"n", "name":"b"}}
1409
{"return": {}}
1410
1411
=== Set up NBD with normal access ===
1412
1413
-{"execute":"nbd-server-add", "arguments":{"device":"n"}}
1414
+{"execute":"nbd-server-add",
1415
+ "arguments":{"device":"n"}}
1416
{"error": {"class": "GenericError", "desc": "NBD server not running"}}
1417
-{"execute":"nbd-server-start", "arguments":{"addr":{"type":"unix", "data":{"path":"SOCK_DIR/nbd"}}}}
1418
+{"execute":"nbd-server-start",
1419
+ "arguments":{"addr":{"type":"unix",
1420
+ "data":{"path":"SOCK_DIR/nbd"}}}}
1421
{"return": {}}
1422
-{"execute":"nbd-server-start", "arguments":{"addr":{"type":"unix", "data":{"path":"SOCK_DIR/nbd1"}}}}
1423
+{"execute":"nbd-server-start",
1424
+ "arguments":{"addr":{"type":"unix",
1425
+ "data":{"path":"SOCK_DIR/nbd1"}}}}
1426
{"error": {"class": "GenericError", "desc": "NBD server already running"}}
1427
exports available: 0
1428
-{"execute":"nbd-server-add", "arguments":{"device":"n", "bitmap":"b"}}
1429
+{"execute":"nbd-server-add",
1430
+ "arguments":{"device":"n", "bitmap":"b"}}
1431
{"return": {}}
1432
-{"execute":"nbd-server-add", "arguments":{"device":"nosuch"}}
1433
+{"execute":"nbd-server-add",
1434
+ "arguments":{"device":"nosuch"}}
1435
{"error": {"class": "GenericError", "desc": "Cannot find device=nosuch nor node_name=nosuch"}}
1436
-{"execute":"nbd-server-add", "arguments":{"device":"n"}}
1437
+{"execute":"nbd-server-add",
1438
+ "arguments":{"device":"n"}}
1439
{"error": {"class": "GenericError", "desc": "Block export id 'n' is already in use"}}
1440
-{"execute":"nbd-server-add", "arguments":{"device":"n", "name":"n2", "bitmap":"b2"}}
1441
+{"execute":"nbd-server-add",
1442
+ "arguments":{"device":"n", "name":"n2",
1443
+ "bitmap":"b2"}}
1444
{"error": {"class": "GenericError", "desc": "Enabled bitmap 'b2' incompatible with readonly export"}}
1445
-{"execute":"nbd-server-add", "arguments":{"device":"n", "name":"n2", "bitmap":"b3"}}
1446
+{"execute":"nbd-server-add",
1447
+ "arguments":{"device":"n", "name":"n2",
1448
+ "bitmap":"b3"}}
1449
{"error": {"class": "GenericError", "desc": "Bitmap 'b3' is not found"}}
1450
-{"execute":"nbd-server-add", "arguments":{"device":"n", "name":"n2", "writable":true, "description":"some text", "bitmap":"b2"}}
1451
+{"execute":"nbd-server-add",
1452
+ "arguments":{"device":"n", "name":"n2", "writable":true,
1453
+ "description":"some text", "bitmap":"b2"}}
1454
{"return": {}}
1455
exports available: 2
1456
export: 'n'
1457
@@ -XXX,XX +XXX,XX @@ read 2097152/2097152 bytes at offset 2097152
1458
1459
=== End qemu NBD server ===
1460
1461
-{"execute":"nbd-server-remove", "arguments":{"name":"n"}}
1462
+{"execute":"nbd-server-remove",
1463
+ "arguments":{"name":"n"}}
1464
{"return": {}}
1465
-{"execute":"nbd-server-remove", "arguments":{"name":"n2"}}
1466
+{"execute":"nbd-server-remove",
1467
+ "arguments":{"name":"n2"}}
1468
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "n"}}
1469
{"return": {}}
1470
-{"execute":"nbd-server-remove", "arguments":{"name":"n2"}}
1471
+{"execute":"nbd-server-remove",
1472
+ "arguments":{"name":"n2"}}
1473
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "n2"}}
1474
{"error": {"class": "GenericError", "desc": "Export 'n2' is not found"}}
1475
{"execute":"nbd-server-stop"}
1476
@@ -XXX,XX +XXX,XX @@ read 2097152/2097152 bytes at offset 2097152
1477
1478
=== Set up NBD with iothread access ===
1479
1480
-{"execute":"x-blockdev-set-iothread", "arguments":{"node-name":"n", "iothread":"io0"}}
1481
+{"execute":"x-blockdev-set-iothread",
1482
+ "arguments":{"node-name":"n", "iothread":"io0"}}
1483
{"return": {}}
1484
-{"execute":"nbd-server-add", "arguments":{"device":"n"}}
1485
+{"execute":"nbd-server-add",
1486
+ "arguments":{"device":"n"}}
1487
{"error": {"class": "GenericError", "desc": "NBD server not running"}}
1488
-{"execute":"nbd-server-start", "arguments":{"addr":{"type":"unix", "data":{"path":"SOCK_DIR/nbd"}}}}
1489
+{"execute":"nbd-server-start",
1490
+ "arguments":{"addr":{"type":"unix",
1491
+ "data":{"path":"SOCK_DIR/nbd"}}}}
1492
{"return": {}}
1493
-{"execute":"nbd-server-start", "arguments":{"addr":{"type":"unix", "data":{"path":"SOCK_DIR/nbd1"}}}}
1494
+{"execute":"nbd-server-start",
1495
+ "arguments":{"addr":{"type":"unix",
1496
+ "data":{"path":"SOCK_DIR/nbd1"}}}}
1497
{"error": {"class": "GenericError", "desc": "NBD server already running"}}
1498
exports available: 0
1499
-{"execute":"nbd-server-add", "arguments":{"device":"n", "bitmap":"b"}}
1500
+{"execute":"nbd-server-add",
1501
+ "arguments":{"device":"n", "bitmap":"b"}}
1502
{"return": {}}
1503
-{"execute":"nbd-server-add", "arguments":{"device":"nosuch"}}
1504
+{"execute":"nbd-server-add",
1505
+ "arguments":{"device":"nosuch"}}
1506
{"error": {"class": "GenericError", "desc": "Cannot find device=nosuch nor node_name=nosuch"}}
1507
-{"execute":"nbd-server-add", "arguments":{"device":"n"}}
1508
+{"execute":"nbd-server-add",
1509
+ "arguments":{"device":"n"}}
1510
{"error": {"class": "GenericError", "desc": "Block export id 'n' is already in use"}}
1511
-{"execute":"nbd-server-add", "arguments":{"device":"n", "name":"n2", "bitmap":"b2"}}
1512
+{"execute":"nbd-server-add",
1513
+ "arguments":{"device":"n", "name":"n2",
1514
+ "bitmap":"b2"}}
1515
{"error": {"class": "GenericError", "desc": "Enabled bitmap 'b2' incompatible with readonly export"}}
1516
-{"execute":"nbd-server-add", "arguments":{"device":"n", "name":"n2", "bitmap":"b3"}}
1517
+{"execute":"nbd-server-add",
1518
+ "arguments":{"device":"n", "name":"n2",
1519
+ "bitmap":"b3"}}
1520
{"error": {"class": "GenericError", "desc": "Bitmap 'b3' is not found"}}
1521
-{"execute":"nbd-server-add", "arguments":{"device":"n", "name":"n2", "writable":true, "description":"some text", "bitmap":"b2"}}
1522
+{"execute":"nbd-server-add",
1523
+ "arguments":{"device":"n", "name":"n2", "writable":true,
1524
+ "description":"some text", "bitmap":"b2"}}
1525
{"return": {}}
1526
exports available: 2
1527
export: 'n'
1528
@@ -XXX,XX +XXX,XX @@ read 2097152/2097152 bytes at offset 2097152
1529
1530
=== End qemu NBD server ===
1531
1532
-{"execute":"nbd-server-remove", "arguments":{"name":"n"}}
1533
+{"execute":"nbd-server-remove",
1534
+ "arguments":{"name":"n"}}
1535
{"return": {}}
1536
-{"execute":"nbd-server-remove", "arguments":{"name":"n2"}}
1537
+{"execute":"nbd-server-remove",
1538
+ "arguments":{"name":"n2"}}
1539
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "n"}}
1540
{"return": {}}
1541
-{"execute":"nbd-server-remove", "arguments":{"name":"n2"}}
1542
+{"execute":"nbd-server-remove",
1543
+ "arguments":{"name":"n2"}}
1544
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "n2"}}
1545
{"error": {"class": "GenericError", "desc": "Export 'n2' is not found"}}
1546
{"execute":"nbd-server-stop"}
1547
diff --git a/tests/qemu-iotests/229.out b/tests/qemu-iotests/229.out
1548
index XXXXXXX..XXXXXXX 100644
1549
--- a/tests/qemu-iotests/229.out
1550
+++ b/tests/qemu-iotests/229.out
1551
@@ -XXX,XX +XXX,XX @@ wrote 2097152/2097152 bytes at offset 0
1552
1553
=== Starting drive-mirror, causing error & stop ===
1554
1555
-{'execute': 'drive-mirror', 'arguments': {'device': 'testdisk', 'format': 'IMGFMT', 'target': 'blkdebug:TEST_DIR/blkdebug.conf:TEST_DIR/t.IMGFMT.dest', 'sync': 'full', 'mode': 'existing', 'on-source-error': 'stop', 'on-target-error': 'stop' }}
1556
+{'execute': 'drive-mirror',
1557
+ 'arguments': {'device': 'testdisk',
1558
+ 'format': 'IMGFMT',
1559
+ 'target': 'blkdebug:TEST_DIR/blkdebug.conf:TEST_DIR/t.IMGFMT.dest',
1560
+ 'sync': 'full',
1561
+ 'mode': 'existing',
1562
+ 'on-source-error': 'stop',
1563
+ 'on-target-error': 'stop' }}
1564
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "testdisk"}}
1565
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "testdisk"}}
1566
{"return": {}}
1567
@@ -XXX,XX +XXX,XX @@ wrote 2097152/2097152 bytes at offset 0
1568
1569
=== Force cancel job paused in error state ===
1570
1571
-{'execute': 'block-job-cancel', 'arguments': { 'device': 'testdisk', 'force': true}}
1572
+{'execute': 'block-job-cancel',
1573
+ 'arguments': { 'device': 'testdisk',
1574
+ 'force': true}}
1575
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "testdisk"}}
1576
{"return": {}}
1577
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "testdisk"}}
1578
diff --git a/tests/qemu-iotests/249.out b/tests/qemu-iotests/249.out
1579
index XXXXXXX..XXXXXXX 100644
1580
--- a/tests/qemu-iotests/249.out
1581
+++ b/tests/qemu-iotests/249.out
1582
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.
1583
1584
=== Send a write command to a drive opened in read-only mode (1)
1585
1586
-{ 'execute': 'human-monitor-command', 'arguments': {'command-line': 'qemu-io none0 "aio_write 0 2k"'}}
1587
+{ 'execute': 'human-monitor-command',
1588
+ 'arguments': {'command-line': 'qemu-io none0 "aio_write 0 2k"'}}
1589
{"return": "Block node is read-only\r\n"}
1590
1591
=== Run block-commit on base using an invalid filter node name
1592
1593
-{ 'execute': 'block-commit', 'arguments': {'job-id': 'job0', 'device': 'none1', 'top-node': 'int', 'filter-node-name': '1234'}}
1594
+{ 'execute': 'block-commit',
1595
+ 'arguments': {'job-id': 'job0', 'device': 'none1', 'top-node': 'int',
1596
+ 'filter-node-name': '1234'}}
1597
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
1598
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job0"}}
1599
{"error": {"class": "GenericError", "desc": "Invalid node name"}}
1600
1601
=== Send a write command to a drive opened in read-only mode (2)
1602
1603
-{ 'execute': 'human-monitor-command', 'arguments': {'command-line': 'qemu-io none0 "aio_write 0 2k"'}}
1604
+{ 'execute': 'human-monitor-command',
1605
+ 'arguments': {'command-line': 'qemu-io none0 "aio_write 0 2k"'}}
1606
{"return": "Block node is read-only\r\n"}
1607
1608
=== Run block-commit on base using the default filter node name
1609
1610
-{ 'execute': 'block-commit', 'arguments': {'job-id': 'job0', 'device': 'none1', 'top-node': 'int'}}
1611
+{ 'execute': 'block-commit',
1612
+ 'arguments': {'job-id': 'job0', 'device': 'none1', 'top-node': 'int'}}
1613
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}}
1614
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}}
1615
{"return": {}}
1616
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.
1617
1618
=== Send a write command to a drive opened in read-only mode (3)
1619
1620
-{ 'execute': 'human-monitor-command', 'arguments': {'command-line': 'qemu-io none0 "aio_write 0 2k"'}}
1621
+{ 'execute': 'human-monitor-command',
1622
+ 'arguments': {'command-line': 'qemu-io none0 "aio_write 0 2k"'}}
1623
{"return": "Block node is read-only\r\n"}
1624
*** done
1625
diff --git a/tests/qemu-iotests/308.out b/tests/qemu-iotests/308.out
1626
index XXXXXXX..XXXXXXX 100644
1627
--- a/tests/qemu-iotests/308.out
1628
+++ b/tests/qemu-iotests/308.out
1629
@@ -XXX,XX +XXX,XX @@ wrote 67108864/67108864 bytes at offset 0
1630
64 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1631
{'execute': 'qmp_capabilities'}
1632
{"return": {}}
1633
-{'execute': 'blockdev-add', 'arguments': { 'driver': 'file', 'node-name': 'node-protocol', 'filename': 'TEST_DIR/t.IMGFMT' } }
1634
+{'execute': 'blockdev-add',
1635
+ 'arguments': {
1636
+ 'driver': 'file',
1637
+ 'node-name': 'node-protocol',
1638
+ 'filename': 'TEST_DIR/t.IMGFMT'
1639
+ } }
1640
{"return": {}}
1641
-{'execute': 'blockdev-add', 'arguments': { 'driver': 'IMGFMT', 'node-name': 'node-format', 'file': 'node-protocol' } }
1642
+{'execute': 'blockdev-add',
1643
+ 'arguments': {
1644
+ 'driver': 'IMGFMT',
1645
+ 'node-name': 'node-format',
1646
+ 'file': 'node-protocol'
1647
+ } }
1648
{"return": {}}
1649
1650
=== Mountpoint not present ===
1651
-{'execute': 'block-export-add', 'arguments': { 'type': 'fuse', 'id': 'export-err', 'node-name': 'node-format', 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse' } }
1652
+{'execute': 'block-export-add',
1653
+ 'arguments': {
1654
+ 'type': 'fuse',
1655
+ 'id': 'export-err',
1656
+ 'node-name': 'node-format',
1657
+ 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse'
1658
+ } }
1659
{"error": {"class": "GenericError", "desc": "Failed to stat 'TEST_DIR/t.IMGFMT.fuse': No such file or directory"}}
1660
1661
=== Mountpoint is a directory ===
1662
-{'execute': 'block-export-add', 'arguments': { 'type': 'fuse', 'id': 'export-err', 'node-name': 'node-format', 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse' } }
1663
+{'execute': 'block-export-add',
1664
+ 'arguments': {
1665
+ 'type': 'fuse',
1666
+ 'id': 'export-err',
1667
+ 'node-name': 'node-format',
1668
+ 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse'
1669
+ } }
1670
{"error": {"class": "GenericError", "desc": "'TEST_DIR/t.IMGFMT.fuse' is not a regular file"}}
1671
1672
=== Mountpoint is a regular file ===
1673
-{'execute': 'block-export-add', 'arguments': { 'type': 'fuse', 'id': 'export-mp', 'node-name': 'node-format', 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse' } }
1674
+{'execute': 'block-export-add',
1675
+ 'arguments': {
1676
+ 'type': 'fuse',
1677
+ 'id': 'export-mp',
1678
+ 'node-name': 'node-format',
1679
+ 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse'
1680
+ } }
1681
{"return": {}}
1682
Images are identical.
1683
1684
=== Mount over existing file ===
1685
-{'execute': 'block-export-add', 'arguments': { 'type': 'fuse', 'id': 'export-img', 'node-name': 'node-format', 'mountpoint': 'TEST_DIR/t.IMGFMT' } }
1686
+{'execute': 'block-export-add',
1687
+ 'arguments': {
1688
+ 'type': 'fuse',
1689
+ 'id': 'export-img',
1690
+ 'node-name': 'node-format',
1691
+ 'mountpoint': 'TEST_DIR/t.IMGFMT'
1692
+ } }
1693
{"return": {}}
1694
Images are identical.
1695
1696
=== Double export ===
1697
-{'execute': 'block-export-add', 'arguments': { 'type': 'fuse', 'id': 'export-err', 'node-name': 'node-format', 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse' } }
1698
+{'execute': 'block-export-add',
1699
+ 'arguments': {
1700
+ 'type': 'fuse',
1701
+ 'id': 'export-err',
1702
+ 'node-name': 'node-format',
1703
+ 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse'
1704
+ } }
1705
{"error": {"class": "GenericError", "desc": "There already is a FUSE export on 'TEST_DIR/t.IMGFMT.fuse'"}}
1706
1707
=== Remove export ===
1708
virtual size: 64 MiB (67108864 bytes)
1709
-{'execute': 'block-export-del', 'arguments': { 'id': 'export-mp' } }
1710
+{'execute': 'block-export-del',
1711
+ 'arguments': {
1712
+ 'id': 'export-mp'
1713
+ } }
1714
{"return": {}}
1715
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "export-mp"}}
1716
virtual size: 0 B (0 bytes)
1717
1718
=== Writable export ===
1719
-{'execute': 'block-export-add', 'arguments': { 'type': 'fuse', 'id': 'export-mp', 'node-name': 'node-format', 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse', 'writable': true } }
1720
+{'execute': 'block-export-add',
1721
+ 'arguments': {
1722
+ 'type': 'fuse',
1723
+ 'id': 'export-mp',
1724
+ 'node-name': 'node-format',
1725
+ 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse', 'writable': true
1726
+ } }
1727
{"return": {}}
1728
write failed: Permission denied
1729
wrote 65536/65536 bytes at offset 1048576
1730
@@ -XXX,XX +XXX,XX @@ wrote 65536/65536 bytes at offset 1048576
1731
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1732
1733
=== Resizing exports ===
1734
-{'execute': 'block-export-del', 'arguments': { 'id': 'export-mp' } }
1735
+{'execute': 'block-export-del',
1736
+ 'arguments': {
1737
+ 'id': 'export-mp'
1738
+ } }
1739
{"return": {}}
1740
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "export-mp"}}
1741
-{'execute': 'block-export-del', 'arguments': { 'id': 'export-img' } }
1742
+{'execute': 'block-export-del',
1743
+ 'arguments': {
1744
+ 'id': 'export-img'
1745
+ } }
1746
{"return": {}}
1747
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "export-img"}}
1748
-{'execute': 'blockdev-del', 'arguments': { 'node-name': 'node-format' } }
1749
+{'execute': 'blockdev-del',
1750
+ 'arguments': {
1751
+ 'node-name': 'node-format'
1752
+ } }
1753
{"return": {}}
1754
-{'execute': 'block-export-add', 'arguments': { 'type': 'fuse', 'id': 'export-mp', 'node-name': 'node-protocol', 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse', 'writable': true } }
1755
+{'execute': 'block-export-add',
1756
+ 'arguments': {
1757
+ 'type': 'fuse',
1758
+ 'id': 'export-mp',
1759
+ 'node-name': 'node-protocol',
1760
+ 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse', 'writable': true
1761
+ } }
1762
{"return": {}}
1763
1764
--- Try growing non-growable export ---
1765
@@ -XXX,XX +XXX,XX @@ OK: Post-truncate image size is as expected
1766
OK: Disk usage grew with fallocate
1767
1768
--- Try growing growable export ---
1769
-{'execute': 'block-export-del', 'arguments': { 'id': 'export-mp' } }
1770
+{'execute': 'block-export-del',
1771
+ 'arguments': {
1772
+ 'id': 'export-mp'
1773
+ } }
1774
{"return": {}}
1775
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "export-mp"}}
1776
-{'execute': 'block-export-add', 'arguments': { 'type': 'fuse', 'id': 'export-mp', 'node-name': 'node-protocol', 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse', 'writable': true, 'growable': true } }
1777
+{'execute': 'block-export-add',
1778
+ 'arguments': {
1779
+ 'type': 'fuse',
1780
+ 'id': 'export-mp',
1781
+ 'node-name': 'node-protocol',
1782
+ 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse', 'writable': true, 'growable': true
1783
+ } }
1784
{"return": {}}
1785
65536+0 records in
1786
65536+0 records out
1787
diff --git a/tests/qemu-iotests/312.out b/tests/qemu-iotests/312.out
1788
index XXXXXXX..XXXXXXX 100644
1789
--- a/tests/qemu-iotests/312.out
1790
+++ b/tests/qemu-iotests/312.out
1791
@@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 2424832
1792
1793
{ 'execute': 'qmp_capabilities' }
1794
{"return": {}}
1795
-{'execute': 'drive-mirror', 'arguments': {'device': 'virtio0', 'format': 'IMGFMT', 'target': 'TEST_DIR/t.IMGFMT.3', 'sync': 'full', 'mode': 'existing' }}
1796
+{'execute': 'drive-mirror',
1797
+ 'arguments': {'device': 'virtio0',
1798
+ 'format': 'IMGFMT',
1799
+ 'target': 'TEST_DIR/t.IMGFMT.3',
1800
+ 'sync': 'full',
1801
+ 'mode': 'existing' }}
1802
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "virtio0"}}
1803
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "virtio0"}}
1804
{"return": {}}
1805
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "virtio0"}}
1806
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "virtio0", "len": 10485760, "offset": 10485760, "speed": 0, "type": "mirror"}}
1807
-{ 'execute': 'block-job-complete', 'arguments': { 'device': 'virtio0' } }
1808
+{ 'execute': 'block-job-complete',
1809
+ 'arguments': { 'device': 'virtio0' } }
1810
{"return": {}}
1811
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "virtio0"}}
1812
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "virtio0"}}
1813
diff --git a/tests/qemu-iotests/common.qemu b/tests/qemu-iotests/common.qemu
1814
index XXXXXXX..XXXXXXX 100644
1815
--- a/tests/qemu-iotests/common.qemu
1816
+++ b/tests/qemu-iotests/common.qemu
1817
@@ -XXX,XX +XXX,XX @@ _send_qemu_cmd()
1818
count=${qemu_cmd_repeat}
1819
use_error="no"
1820
fi
1821
- # This array element extraction is done to accommodate pathnames with spaces
1822
- if [ -z "${success_or_failure}" ]; then
1823
- cmd=${@: 1:${#@}-1}
1824
- shift $(($# - 1))
1825
- else
1826
- cmd=${@: 1:${#@}-2}
1827
- shift $(($# - 2))
1828
- fi
1829
+
1830
+ cmd=$1
1831
+ shift
1832
1833
# Display QMP being sent, but not HMP (since HMP already echoes its
1834
# input back to output); decide based on leading '{'
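For context, a rough sketch (illustrative only, not part of the patch; the helper name is invented) of the argument handling that the simplified "cmd=$1; shift" style above relies on: the handle, the command and the expected output are plain positional parameters, so a multi-word command has to be passed as one quoted argument by the caller:

    # Illustrative only: mirrors the new extraction style used above.
    demo_send_cmd()
    {
        local handle=$1; shift    # $QEMU_HANDLE
        local cmd=$1; shift       # command to send (one quoted argument)
        echo "handle=$handle cmd=$cmd expecting=$*"
    }
    demo_send_cmd 0 'qemu-io drv0 "read 0 64k"' '(qemu)'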
1835
--
46
--
1836
2.29.2
47
2.9.3
1837
48
1838
49
1
The first parameter passed to _send_qemu_cmd is supposed to be the
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
$QEMU_HANDLE. 102 does not do so here, fix it.
3
2
4
As a result, the output changes: Now we see the prompt this command is
3
Once the thread pool starts using aio_co_wake, it will also need
5
supposedly waiting for before the resize message - as it should be.
4
qemu_get_current_aio_context(). Make test-thread-pool create
5
an AioContext with qemu_init_main_loop, so that stubs/iothread.c
6
and tests/iothread.c can provide the rest.
6
7
7
Signed-off-by: Max Reitz <mreitz@redhat.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
8
Message-Id: <20201217153803.101231-2-mreitz@redhat.com>
9
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
9
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
10
Reviewed-by: Fam Zheng <famz@redhat.com>
11
Message-id: 20170213135235.12274-5-pbonzini@redhat.com
12
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
10
---
13
---
11
tests/qemu-iotests/102 | 2 +-
14
tests/test-thread-pool.c | 12 +++---------
12
tests/qemu-iotests/102.out | 2 +-
15
1 file changed, 3 insertions(+), 9 deletions(-)
13
2 files changed, 2 insertions(+), 2 deletions(-)
14
16
15
diff --git a/tests/qemu-iotests/102 b/tests/qemu-iotests/102
17
diff --git a/tests/test-thread-pool.c b/tests/test-thread-pool.c
16
index XXXXXXX..XXXXXXX 100755
17
--- a/tests/qemu-iotests/102
18
+++ b/tests/qemu-iotests/102
19
@@ -XXX,XX +XXX,XX @@ $QEMU_IO -c 'write 0 64k' "$TEST_IMG" | _filter_qemu_io
20
qemu_comm_method=monitor _launch_qemu -drive if=none,file="$TEST_IMG",id=drv0
21
22
# Wait for a prompt to appear (so we know qemu has opened the image)
23
-_send_qemu_cmd '' '(qemu)'
24
+_send_qemu_cmd $QEMU_HANDLE '' '(qemu)'
25
26
$QEMU_IMG resize --shrink --image-opts \
27
"driver=raw,file.driver=file,file.filename=$TEST_IMG,file.locking=off" \
28
diff --git a/tests/qemu-iotests/102.out b/tests/qemu-iotests/102.out
29
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
30
--- a/tests/qemu-iotests/102.out
19
--- a/tests/test-thread-pool.c
31
+++ b/tests/qemu-iotests/102.out
20
+++ b/tests/test-thread-pool.c
32
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=65536
21
@@ -XXX,XX +XXX,XX @@
33
wrote 65536/65536 bytes at offset 0
22
#include "qapi/error.h"
34
64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
23
#include "qemu/timer.h"
35
QEMU X.Y.Z monitor - type 'help' for more information
24
#include "qemu/error-report.h"
36
-Image resized.
25
+#include "qemu/main-loop.h"
37
(qemu)
26
38
+Image resized.
27
static AioContext *ctx;
39
(qemu) qemu-io drv0 map
28
static ThreadPool *pool;
40
64 KiB (0x10000) bytes allocated at offset 0 bytes (0x0)
29
@@ -XXX,XX +XXX,XX @@ static void test_cancel_async(void)
41
*** done
30
int main(int argc, char **argv)
31
{
32
int ret;
33
- Error *local_error = NULL;
34
35
- init_clocks();
36
-
37
- ctx = aio_context_new(&local_error);
38
- if (!ctx) {
39
- error_reportf_err(local_error, "Failed to create AIO Context: ");
40
- exit(1);
41
- }
42
+ qemu_init_main_loop(&error_abort);
43
+ ctx = qemu_get_current_aio_context();
44
pool = aio_get_thread_pool(ctx);
45
46
g_test_init(&argc, &argv, NULL);
47
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
48
49
ret = g_test_run();
50
51
- aio_context_unref(ctx);
52
return ret;
53
}
42
--
54
--
43
2.29.2
55
2.9.3
44
56
45
57
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
This will be used in a further test.
3
This is in preparation for making qio_channel_yield work on
4
4
AioContexts other than the main one.
5
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
5
6
Reviewed-by: Max Reitz <mreitz@redhat.com>
6
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
7
Message-Id: <20201021145859.11201-10-vsementsov@virtuozzo.com>
7
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
8
Signed-off-by: Max Reitz <mreitz@redhat.com>
8
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
9
Reviewed-by: Fam Zheng <famz@redhat.com>
10
Message-id: 20170213135235.12274-6-pbonzini@redhat.com
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
9
---
12
---
10
qemu-io-cmds.c | 46 ++++++++++++++++++++++++++++++++--------------
13
include/io/channel.h | 25 +++++++++++++++++++++++++
11
1 file changed, 32 insertions(+), 14 deletions(-)
14
io/channel-command.c | 13 +++++++++++++
12
15
io/channel-file.c | 11 +++++++++++
13
diff --git a/qemu-io-cmds.c b/qemu-io-cmds.c
16
io/channel-socket.c | 16 +++++++++++-----
14
index XXXXXXX..XXXXXXX 100644
17
io/channel-tls.c | 12 ++++++++++++
15
--- a/qemu-io-cmds.c
18
io/channel-watch.c | 6 ++++++
16
+++ b/qemu-io-cmds.c
19
io/channel.c | 11 +++++++++++
17
@@ -XXX,XX +XXX,XX @@ static const cmdinfo_t flush_cmd = {
20
7 files changed, 89 insertions(+), 5 deletions(-)
18
.oneline = "flush all in-core file state to disk",
21
22
diff --git a/include/io/channel.h b/include/io/channel.h
23
index XXXXXXX..XXXXXXX 100644
24
--- a/include/io/channel.h
25
+++ b/include/io/channel.h
26
@@ -XXX,XX +XXX,XX @@
27
28
#include "qemu-common.h"
29
#include "qom/object.h"
30
+#include "block/aio.h"
31
32
#define TYPE_QIO_CHANNEL "qio-channel"
33
#define QIO_CHANNEL(obj) \
34
@@ -XXX,XX +XXX,XX @@ struct QIOChannelClass {
35
off_t offset,
36
int whence,
37
Error **errp);
38
+ void (*io_set_aio_fd_handler)(QIOChannel *ioc,
39
+ AioContext *ctx,
40
+ IOHandler *io_read,
41
+ IOHandler *io_write,
42
+ void *opaque);
19
};
43
};
20
44
21
+static int truncate_f(BlockBackend *blk, int argc, char **argv);
45
/* General I/O handling functions */
22
+static const cmdinfo_t truncate_cmd = {
46
@@ -XXX,XX +XXX,XX @@ void qio_channel_yield(QIOChannel *ioc,
23
+ .name = "truncate",
47
void qio_channel_wait(QIOChannel *ioc,
24
+ .altname = "t",
48
GIOCondition condition);
25
+ .cfunc = truncate_f,
49
26
+ .perm = BLK_PERM_WRITE | BLK_PERM_RESIZE,
50
+/**
27
+ .argmin = 1,
51
+ * qio_channel_set_aio_fd_handler:
28
+ .argmax = 3,
52
+ * @ioc: the channel object
29
+ .args = "[-m prealloc_mode] off",
53
+ * @ctx: the AioContext to set the handlers on
30
+ .oneline = "truncates the current file at the given offset",
54
+ * @io_read: the read handler
31
+};
55
+ * @io_write: the write handler
32
+
56
+ * @opaque: the opaque value passed to the handler
33
static int truncate_f(BlockBackend *blk, int argc, char **argv)
57
+ *
34
{
58
+ * This is used internally by qio_channel_yield(). It can
35
Error *local_err = NULL;
59
+ * be used by channel implementations to forward the handlers
36
int64_t offset;
60
+ * to another channel (e.g. from #QIOChannelTLS to the
37
- int ret;
61
+ * underlying socket).
38
+ int c, ret;
62
+ */
39
+ PreallocMode prealloc = PREALLOC_MODE_OFF;
63
+void qio_channel_set_aio_fd_handler(QIOChannel *ioc,
40
64
+ AioContext *ctx,
41
- offset = cvtnum(argv[1]);
65
+ IOHandler *io_read,
42
+ while ((c = getopt(argc, argv, "m:")) != -1) {
66
+ IOHandler *io_write,
43
+ switch (c) {
67
+ void *opaque);
44
+ case 'm':
68
+
45
+ prealloc = qapi_enum_parse(&PreallocMode_lookup, optarg,
69
#endif /* QIO_CHANNEL_H */
46
+ PREALLOC_MODE__MAX, NULL);
70
diff --git a/io/channel-command.c b/io/channel-command.c
47
+ if (prealloc == PREALLOC_MODE__MAX) {
71
index XXXXXXX..XXXXXXX 100644
48
+ error_report("Invalid preallocation mode '%s'", optarg);
72
--- a/io/channel-command.c
49
+ return -EINVAL;
73
+++ b/io/channel-command.c
50
+ }
74
@@ -XXX,XX +XXX,XX @@ static int qio_channel_command_close(QIOChannel *ioc,
51
+ break;
75
}
52
+ default:
76
53
+ qemuio_command_usage(&truncate_cmd);
77
54
+ return -EINVAL;
78
+static void qio_channel_command_set_aio_fd_handler(QIOChannel *ioc,
55
+ }
79
+ AioContext *ctx,
56
+ }
80
+ IOHandler *io_read,
57
+
81
+ IOHandler *io_write,
58
+ offset = cvtnum(argv[optind]);
82
+ void *opaque)
59
if (offset < 0) {
83
+{
60
print_cvtnum_err(offset, argv[1]);
84
+ QIOChannelCommand *cioc = QIO_CHANNEL_COMMAND(ioc);
61
return offset;
85
+ aio_set_fd_handler(ctx, cioc->readfd, false, io_read, NULL, NULL, opaque);
62
@@ -XXX,XX +XXX,XX @@ static int truncate_f(BlockBackend *blk, int argc, char **argv)
86
+ aio_set_fd_handler(ctx, cioc->writefd, false, NULL, io_write, NULL, opaque);
63
* exact=true. It is better to err on the "emit more errors" side
87
+}
64
* than to be overly permissive.
88
+
65
*/
89
+
66
- ret = blk_truncate(blk, offset, false, PREALLOC_MODE_OFF, 0, &local_err);
90
static GSource *qio_channel_command_create_watch(QIOChannel *ioc,
67
+ ret = blk_truncate(blk, offset, false, prealloc, 0, &local_err);
91
GIOCondition condition)
68
if (ret < 0) {
92
{
69
error_report_err(local_err);
93
@@ -XXX,XX +XXX,XX @@ static void qio_channel_command_class_init(ObjectClass *klass,
70
return ret;
94
ioc_klass->io_set_blocking = qio_channel_command_set_blocking;
71
@@ -XXX,XX +XXX,XX @@ static int truncate_f(BlockBackend *blk, int argc, char **argv)
95
ioc_klass->io_close = qio_channel_command_close;
96
ioc_klass->io_create_watch = qio_channel_command_create_watch;
97
+ ioc_klass->io_set_aio_fd_handler = qio_channel_command_set_aio_fd_handler;
98
}
99
100
static const TypeInfo qio_channel_command_info = {
101
diff --git a/io/channel-file.c b/io/channel-file.c
102
index XXXXXXX..XXXXXXX 100644
103
--- a/io/channel-file.c
104
+++ b/io/channel-file.c
105
@@ -XXX,XX +XXX,XX @@ static int qio_channel_file_close(QIOChannel *ioc,
106
}
107
108
109
+static void qio_channel_file_set_aio_fd_handler(QIOChannel *ioc,
110
+ AioContext *ctx,
111
+ IOHandler *io_read,
112
+ IOHandler *io_write,
113
+ void *opaque)
114
+{
115
+ QIOChannelFile *fioc = QIO_CHANNEL_FILE(ioc);
116
+ aio_set_fd_handler(ctx, fioc->fd, false, io_read, io_write, NULL, opaque);
117
+}
118
+
119
static GSource *qio_channel_file_create_watch(QIOChannel *ioc,
120
GIOCondition condition)
121
{
122
@@ -XXX,XX +XXX,XX @@ static void qio_channel_file_class_init(ObjectClass *klass,
123
ioc_klass->io_seek = qio_channel_file_seek;
124
ioc_klass->io_close = qio_channel_file_close;
125
ioc_klass->io_create_watch = qio_channel_file_create_watch;
126
+ ioc_klass->io_set_aio_fd_handler = qio_channel_file_set_aio_fd_handler;
127
}
128
129
static const TypeInfo qio_channel_file_info = {
130
diff --git a/io/channel-socket.c b/io/channel-socket.c
131
index XXXXXXX..XXXXXXX 100644
132
--- a/io/channel-socket.c
133
+++ b/io/channel-socket.c
134
@@ -XXX,XX +XXX,XX @@ qio_channel_socket_set_blocking(QIOChannel *ioc,
135
qemu_set_block(sioc->fd);
136
} else {
137
qemu_set_nonblock(sioc->fd);
138
-#ifdef WIN32
139
- WSAEventSelect(sioc->fd, ioc->event,
140
- FD_READ | FD_ACCEPT | FD_CLOSE |
141
- FD_CONNECT | FD_WRITE | FD_OOB);
142
-#endif
143
}
72
return 0;
144
return 0;
73
}
145
}
74
146
@@ -XXX,XX +XXX,XX @@ qio_channel_socket_shutdown(QIOChannel *ioc,
75
-static const cmdinfo_t truncate_cmd = {
147
return 0;
76
- .name = "truncate",
148
}
77
- .altname = "t",
149
78
- .cfunc = truncate_f,
150
+static void qio_channel_socket_set_aio_fd_handler(QIOChannel *ioc,
79
- .perm = BLK_PERM_WRITE | BLK_PERM_RESIZE,
151
+ AioContext *ctx,
80
- .argmin = 1,
152
+ IOHandler *io_read,
81
- .argmax = 1,
153
+ IOHandler *io_write,
82
- .args = "off",
154
+ void *opaque)
83
- .oneline = "truncates the current file at the given offset",
155
+{
84
-};
156
+ QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
85
-
157
+ aio_set_fd_handler(ctx, sioc->fd, false, io_read, io_write, NULL, opaque);
86
static int length_f(BlockBackend *blk, int argc, char **argv)
158
+}
87
{
159
+
88
int64_t size;
160
static GSource *qio_channel_socket_create_watch(QIOChannel *ioc,
161
GIOCondition condition)
162
{
163
@@ -XXX,XX +XXX,XX @@ static void qio_channel_socket_class_init(ObjectClass *klass,
164
ioc_klass->io_set_cork = qio_channel_socket_set_cork;
165
ioc_klass->io_set_delay = qio_channel_socket_set_delay;
166
ioc_klass->io_create_watch = qio_channel_socket_create_watch;
167
+ ioc_klass->io_set_aio_fd_handler = qio_channel_socket_set_aio_fd_handler;
168
}
169
170
static const TypeInfo qio_channel_socket_info = {
171
diff --git a/io/channel-tls.c b/io/channel-tls.c
172
index XXXXXXX..XXXXXXX 100644
173
--- a/io/channel-tls.c
174
+++ b/io/channel-tls.c
175
@@ -XXX,XX +XXX,XX @@ static int qio_channel_tls_close(QIOChannel *ioc,
176
return qio_channel_close(tioc->master, errp);
177
}
178
179
+static void qio_channel_tls_set_aio_fd_handler(QIOChannel *ioc,
180
+ AioContext *ctx,
181
+ IOHandler *io_read,
182
+ IOHandler *io_write,
183
+ void *opaque)
184
+{
185
+ QIOChannelTLS *tioc = QIO_CHANNEL_TLS(ioc);
186
+
187
+ qio_channel_set_aio_fd_handler(tioc->master, ctx, io_read, io_write, opaque);
188
+}
189
+
190
static GSource *qio_channel_tls_create_watch(QIOChannel *ioc,
191
GIOCondition condition)
192
{
193
@@ -XXX,XX +XXX,XX @@ static void qio_channel_tls_class_init(ObjectClass *klass,
194
ioc_klass->io_close = qio_channel_tls_close;
195
ioc_klass->io_shutdown = qio_channel_tls_shutdown;
196
ioc_klass->io_create_watch = qio_channel_tls_create_watch;
197
+ ioc_klass->io_set_aio_fd_handler = qio_channel_tls_set_aio_fd_handler;
198
}
199
200
static const TypeInfo qio_channel_tls_info = {
201
diff --git a/io/channel-watch.c b/io/channel-watch.c
202
index XXXXXXX..XXXXXXX 100644
203
--- a/io/channel-watch.c
204
+++ b/io/channel-watch.c
205
@@ -XXX,XX +XXX,XX @@ GSource *qio_channel_create_socket_watch(QIOChannel *ioc,
206
GSource *source;
207
QIOChannelSocketSource *ssource;
208
209
+#ifdef WIN32
210
+ WSAEventSelect(socket, ioc->event,
211
+ FD_READ | FD_ACCEPT | FD_CLOSE |
212
+ FD_CONNECT | FD_WRITE | FD_OOB);
213
+#endif
214
+
215
source = g_source_new(&qio_channel_socket_source_funcs,
216
sizeof(QIOChannelSocketSource));
217
ssource = (QIOChannelSocketSource *)source;
218
diff --git a/io/channel.c b/io/channel.c
219
index XXXXXXX..XXXXXXX 100644
220
--- a/io/channel.c
221
+++ b/io/channel.c
222
@@ -XXX,XX +XXX,XX @@ GSource *qio_channel_create_watch(QIOChannel *ioc,
223
}
224
225
226
+void qio_channel_set_aio_fd_handler(QIOChannel *ioc,
227
+ AioContext *ctx,
228
+ IOHandler *io_read,
229
+ IOHandler *io_write,
230
+ void *opaque)
231
+{
232
+ QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
233
+
234
+ klass->io_set_aio_fd_handler(ioc, ctx, io_read, io_write, opaque);
235
+}
236
+
237
guint qio_channel_add_watch(QIOChannel *ioc,
238
GIOCondition condition,
239
QIOChannelFunc func,
89
--
240
--
90
2.29.2
241
2.9.3
91
242
92
243
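To make the new QIOChannelClass hook above a little more concrete, here is a rough
sketch of a caller parking a channel's read/write callbacks on a chosen AioContext
through qio_channel_set_aio_fd_handler(). The my_* and park_* names are invented for
illustration, and the code assumes QEMU's internal headers, so it is a sketch rather
than a drop-in implementation.

/* Sketch only: the my_* names are invented; qio_channel_set_aio_fd_handler()
 * and aio_set_fd_handler() are the APIs shown in the patch above. */
#include "qemu/osdep.h"
#include "io/channel.h"
#include "block/aio.h"

static void my_read_ready(void *opaque)
{
    QIOChannel *ioc = opaque;
    /* a real handler would call qio_channel_readv() and friends here */
    (void)ioc;
}

static void my_write_ready(void *opaque)
{
    QIOChannel *ioc = opaque;
    (void)ioc;
}

/* Register the callbacks on @ctx; each channel implementation forwards this
 * to aio_set_fd_handler() on its file descriptor(s), or to the underlying
 * channel in the QIOChannelTLS case. */
static void park_channel_on_context(QIOChannel *ioc, AioContext *ctx)
{
    qio_channel_set_aio_fd_handler(ioc, ctx,
                                   my_read_ready, my_write_ready, ioc);
}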
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
3
Support separate coroutines for reading and writing, and place the
4
Message-Id: <20201021145859.11201-14-vsementsov@virtuozzo.com>
4
read/write handlers on the AioContext that the QIOChannel is registered
5
Reviewed-by: Max Reitz <mreitz@redhat.com>
5
with.
6
Signed-off-by: Max Reitz <mreitz@redhat.com>
6
7
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
9
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
10
Reviewed-by: Fam Zheng <famz@redhat.com>
11
Message-id: 20170213135235.12274-7-pbonzini@redhat.com
12
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
7
---
13
---
8
scripts/simplebench/simplebench.py | 12 ++++++------
14
include/io/channel.h | 47 ++++++++++++++++++++++++++--
9
1 file changed, 6 insertions(+), 6 deletions(-)
15
io/channel.c | 86 +++++++++++++++++++++++++++++++++++++++-------------
10
16
2 files changed, 109 insertions(+), 24 deletions(-)
11
diff --git a/scripts/simplebench/simplebench.py b/scripts/simplebench/simplebench.py
17
18
diff --git a/include/io/channel.h b/include/io/channel.h
12
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
13
--- a/scripts/simplebench/simplebench.py
20
--- a/include/io/channel.h
14
+++ b/scripts/simplebench/simplebench.py
21
+++ b/include/io/channel.h
15
@@ -XXX,XX +XXX,XX @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
22
@@ -XXX,XX +XXX,XX @@
16
23
17
result = {'runs': runs}
24
#include "qemu-common.h"
18
25
#include "qom/object.h"
19
- successed = [r for r in runs if ('seconds' in r)]
26
+#include "qemu/coroutine.h"
20
- if successed:
27
#include "block/aio.h"
21
- avg = sum(r['seconds'] for r in successed) / len(successed)
28
22
+ succeeded = [r for r in runs if ('seconds' in r)]
29
#define TYPE_QIO_CHANNEL "qio-channel"
23
+ if succeeded:
30
@@ -XXX,XX +XXX,XX @@ struct QIOChannel {
24
+ avg = sum(r['seconds'] for r in succeeded) / len(succeeded)
31
Object parent;
25
result['average'] = avg
32
unsigned int features; /* bitmask of QIOChannelFeatures */
26
- result['delta'] = max(abs(r['seconds'] - avg) for r in successed)
33
char *name;
27
+ result['delta'] = max(abs(r['seconds'] - avg) for r in succeeded)
34
+ AioContext *ctx;
28
35
+ Coroutine *read_coroutine;
29
- if len(successed) < count:
36
+ Coroutine *write_coroutine;
30
- result['n-failed'] = count - len(successed)
37
#ifdef _WIN32
31
+ if len(succeeded) < count:
38
HANDLE event; /* For use with GSource on Win32 */
32
+ result['n-failed'] = count - len(succeeded)
39
#endif
33
40
@@ -XXX,XX +XXX,XX @@ guint qio_channel_add_watch(QIOChannel *ioc,
34
return result
41
42
43
/**
44
+ * qio_channel_attach_aio_context:
45
+ * @ioc: the channel object
46
+ * @ctx: the #AioContext to set the handlers on
47
+ *
48
+ * Request that qio_channel_yield() sets I/O handlers on
49
+ * the given #AioContext. If @ctx is %NULL, qio_channel_yield()
50
+ * uses QEMU's main thread event loop.
51
+ *
52
+ * You can move a #QIOChannel from one #AioContext to another even if
53
+ * I/O handlers are set for a coroutine. However, #QIOChannel provides
54
+ * no synchronization between the calls to qio_channel_yield() and
55
+ * qio_channel_attach_aio_context().
56
+ *
57
+ * Therefore you should first call qio_channel_detach_aio_context()
58
+ * to ensure that the coroutine is not entered concurrently. Then,
59
+ * while the coroutine has yielded, call qio_channel_attach_aio_context(),
60
+ * and then aio_co_schedule() to place the coroutine on the new
61
+ * #AioContext. The calls to qio_channel_detach_aio_context()
62
+ * and qio_channel_attach_aio_context() should be protected with
63
+ * aio_context_acquire() and aio_context_release().
64
+ */
65
+void qio_channel_attach_aio_context(QIOChannel *ioc,
66
+ AioContext *ctx);
67
+
68
+/**
69
+ * qio_channel_detach_aio_context:
70
+ * @ioc: the channel object
71
+ *
72
+ * Disable any I/O handlers set by qio_channel_yield(). With the
73
+ * help of aio_co_schedule(), this allows moving a coroutine that was
74
+ * paused by qio_channel_yield() to another context.
75
+ */
76
+void qio_channel_detach_aio_context(QIOChannel *ioc);
77
+
78
+/**
79
* qio_channel_yield:
80
* @ioc: the channel object
81
* @condition: the I/O condition to wait for
82
*
83
- * Yields execution from the current coroutine until
84
- * the condition indicated by @condition becomes
85
- * available.
86
+ * Yields execution from the current coroutine until the condition
87
+ * indicated by @condition becomes available. @condition must
88
+ * be either %G_IO_IN or %G_IO_OUT; it cannot contain both. In
89
+ * addition, no two coroutine can be waiting on the same condition
90
+ * and channel at the same time.
91
*
92
* This must only be called from coroutine context
93
*/
94
diff --git a/io/channel.c b/io/channel.c
95
index XXXXXXX..XXXXXXX 100644
96
--- a/io/channel.c
97
+++ b/io/channel.c
98
@@ -XXX,XX +XXX,XX @@
99
#include "qemu/osdep.h"
100
#include "io/channel.h"
101
#include "qapi/error.h"
102
-#include "qemu/coroutine.h"
103
+#include "qemu/main-loop.h"
104
105
bool qio_channel_has_feature(QIOChannel *ioc,
106
QIOChannelFeature feature)
107
@@ -XXX,XX +XXX,XX @@ off_t qio_channel_io_seek(QIOChannel *ioc,
108
}
109
110
111
-typedef struct QIOChannelYieldData QIOChannelYieldData;
112
-struct QIOChannelYieldData {
113
- QIOChannel *ioc;
114
- Coroutine *co;
115
-};
116
+static void qio_channel_set_aio_fd_handlers(QIOChannel *ioc);
117
118
+static void qio_channel_restart_read(void *opaque)
119
+{
120
+ QIOChannel *ioc = opaque;
121
+ Coroutine *co = ioc->read_coroutine;
122
+
123
+ ioc->read_coroutine = NULL;
124
+ qio_channel_set_aio_fd_handlers(ioc);
125
+ aio_co_wake(co);
126
+}
127
128
-static gboolean qio_channel_yield_enter(QIOChannel *ioc,
129
- GIOCondition condition,
130
- gpointer opaque)
131
+static void qio_channel_restart_write(void *opaque)
132
{
133
- QIOChannelYieldData *data = opaque;
134
- qemu_coroutine_enter(data->co);
135
- return FALSE;
136
+ QIOChannel *ioc = opaque;
137
+ Coroutine *co = ioc->write_coroutine;
138
+
139
+ ioc->write_coroutine = NULL;
140
+ qio_channel_set_aio_fd_handlers(ioc);
141
+ aio_co_wake(co);
142
}
143
144
+static void qio_channel_set_aio_fd_handlers(QIOChannel *ioc)
145
+{
146
+ IOHandler *rd_handler = NULL, *wr_handler = NULL;
147
+ AioContext *ctx;
148
+
149
+ if (ioc->read_coroutine) {
150
+ rd_handler = qio_channel_restart_read;
151
+ }
152
+ if (ioc->write_coroutine) {
153
+ wr_handler = qio_channel_restart_write;
154
+ }
155
+
156
+ ctx = ioc->ctx ? ioc->ctx : iohandler_get_aio_context();
157
+ qio_channel_set_aio_fd_handler(ioc, ctx, rd_handler, wr_handler, ioc);
158
+}
159
+
160
+void qio_channel_attach_aio_context(QIOChannel *ioc,
161
+ AioContext *ctx)
162
+{
163
+ AioContext *old_ctx;
164
+ if (ioc->ctx == ctx) {
165
+ return;
166
+ }
167
+
168
+ old_ctx = ioc->ctx ? ioc->ctx : iohandler_get_aio_context();
169
+ qio_channel_set_aio_fd_handler(ioc, old_ctx, NULL, NULL, NULL);
170
+ ioc->ctx = ctx;
171
+ qio_channel_set_aio_fd_handlers(ioc);
172
+}
173
+
174
+void qio_channel_detach_aio_context(QIOChannel *ioc)
175
+{
176
+ ioc->read_coroutine = NULL;
177
+ ioc->write_coroutine = NULL;
178
+ qio_channel_set_aio_fd_handlers(ioc);
179
+ ioc->ctx = NULL;
180
+}
181
182
void coroutine_fn qio_channel_yield(QIOChannel *ioc,
183
GIOCondition condition)
184
{
185
- QIOChannelYieldData data;
186
-
187
assert(qemu_in_coroutine());
188
- data.ioc = ioc;
189
- data.co = qemu_coroutine_self();
190
- qio_channel_add_watch(ioc,
191
- condition,
192
- qio_channel_yield_enter,
193
- &data,
194
- NULL);
195
+ if (condition == G_IO_IN) {
196
+ assert(!ioc->read_coroutine);
197
+ ioc->read_coroutine = qemu_coroutine_self();
198
+ } else if (condition == G_IO_OUT) {
199
+ assert(!ioc->write_coroutine);
200
+ ioc->write_coroutine = qemu_coroutine_self();
201
+ } else {
202
+ abort();
203
+ }
204
+ qio_channel_set_aio_fd_handlers(ioc);
205
qemu_coroutine_yield();
206
}
35
207
36
--
208
--
37
2.29.2
209
2.9.3
38
210
39
211
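The comment added to include/io/channel.h above describes a hand-over protocol for
moving a channel between AioContexts while a coroutine is parked in
qio_channel_yield(). Below is a hedged sketch of that protocol; reader_co() and
move_channel() are invented names, and the code assumes the QEMU source tree.

/* Sketch of the documented attach/detach/reschedule sequence; only the
 * qio_channel_*, aio_* and qemu_coroutine_* calls are real QEMU APIs. */
#include "qemu/osdep.h"
#include "io/channel.h"
#include "qemu/coroutine.h"
#include "block/aio.h"

static coroutine_fn void reader_co(void *opaque)
{
    QIOChannel *ioc = opaque;

    for (;;) {
        /* Parks this coroutine until the channel is readable on whatever
         * AioContext it is currently attached to. */
        qio_channel_yield(ioc, G_IO_IN);
        /* ... read and process data ... */
    }
}

/* Move @ioc, and the coroutine parked in qio_channel_yield(), from
 * @old_ctx to @new_ctx following the documented protocol. */
static void move_channel(QIOChannel *ioc, Coroutine *co,
                         AioContext *old_ctx, AioContext *new_ctx)
{
    aio_context_acquire(old_ctx);
    qio_channel_detach_aio_context(ioc);    /* no more re-entry from old_ctx */
    aio_context_release(old_ctx);

    aio_context_acquire(new_ctx);
    qio_channel_attach_aio_context(ioc, new_ctx);
    aio_co_schedule(new_ctx, co);           /* resume the reader over there */
    aio_context_release(new_ctx);
}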
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
The only users of tighten_restrictions are:
3
In the client, read the reply headers from a coroutine, switching the
4
1. bdrv_child_try_set_perm, to ignore failures when loosening restrictions
4
read side between the "read header" coroutine and the I/O coroutine that
5
2. assertion in bdrv_replace_child
5
reads the body of the reply.
6
3. assertion in bdrv_inactivate_recurse
7
6
8
Assertions are not enough reason for overcomplicating the permission
7
In the server, if the server can read more requests it will create a new
9
update system. So, look at bdrv_child_try_set_perm.
8
"read request" coroutine as soon as a request has been read. Otherwise,
9
the new coroutine is created in nbd_request_put.
10
10
11
We are interested in tighten_restrictions only on failure. But on
11
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
12
failure this field is not reliable: we may fail in the middle of
12
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
13
permission update, some nodes are not touched and we don't know whether
13
Reviewed-by: Fam Zheng <famz@redhat.com>
14
their permissions should be tightened or not. So, we rely on the fact that if we
14
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
15
loosen restrictions on some node (or BdrvChild), we'll not tighten
15
Message-id: 20170213135235.12274-8-pbonzini@redhat.com
16
restrictions in the whole subtree as part of this update (assertions 2
16
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
17
and 3 rely on this fact as well). And, if we rely on this fact anyway,
17
---
18
we can just check it at the top level, and not pass an additional pointer through
18
block/nbd-client.h | 2 +-
19
the whole recursive infrastructure.
19
block/nbd-client.c | 117 ++++++++++++++++++++++++-----------------------------
20
nbd/client.c | 2 +-
21
nbd/common.c | 9 +----
22
nbd/server.c | 94 +++++++++++++-----------------------------
23
5 files changed, 83 insertions(+), 141 deletions(-)
20
24
21
Note also that further patches will fix real bugs in the permission update
25
diff --git a/block/nbd-client.h b/block/nbd-client.h
22
system, so now is a good time to simplify it, as a help for further
23
refactorings.
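For readers who want the "tighten" condition above spelled out: a permission update
tightens restrictions when it asks for a permission bit that was not previously held,
or stops sharing a bit that was previously shared. The following self-contained sketch
uses invented PERM_* constants rather than QEMU's BLK_PERM_* values, but the bit test
mirrors the one being removed from bdrv_check_perm().

/* Self-contained illustration; the PERM_* values are invented. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { PERM_READ = 1u << 0, PERM_WRITE = 1u << 1, PERM_RESIZE = 1u << 2 };

static bool tightens_restrictions(uint64_t old_perm, uint64_t old_shared,
                                  uint64_t new_perm, uint64_t new_shared)
{
    uint64_t added_perms = new_perm & ~old_perm;         /* newly taken */
    uint64_t removed_shared = old_shared & ~new_shared;  /* newly unshared */

    return added_perms || removed_shared;
}

int main(void)
{
    /* taking WRITE that was not held before: tightens */
    printf("%d\n", tightens_restrictions(PERM_READ, PERM_WRITE,
                                         PERM_READ | PERM_WRITE, PERM_WRITE));
    /* dropping a permission while sharing the same bits: does not tighten */
    printf("%d\n", tightens_restrictions(PERM_READ | PERM_WRITE, PERM_WRITE,
                                         PERM_READ, PERM_WRITE));
    return 0;
}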
24
25
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
26
Message-Id: <20201106124241.16950-8-vsementsov@virtuozzo.com>
27
[mreitz: Fixed rebase conflict]
28
Signed-off-by: Max Reitz <mreitz@redhat.com>
29
---
30
block.c | 89 +++++++++++----------------------------------------------
31
1 file changed, 17 insertions(+), 72 deletions(-)
32
33
diff --git a/block.c b/block.c
34
index XXXXXXX..XXXXXXX 100644
26
index XXXXXXX..XXXXXXX 100644
35
--- a/block.c
27
--- a/block/nbd-client.h
36
+++ b/block.c
28
+++ b/block/nbd-client.h
37
@@ -XXX,XX +XXX,XX @@ static int bdrv_fill_options(QDict **options, const char *filename,
29
@@ -XXX,XX +XXX,XX @@ typedef struct NBDClientSession {
38
30
39
static int bdrv_child_check_perm(BdrvChild *c, BlockReopenQueue *q,
31
CoMutex send_mutex;
40
uint64_t perm, uint64_t shared,
32
CoQueue free_sema;
41
- GSList *ignore_children,
33
- Coroutine *send_coroutine;
42
- bool *tighten_restrictions, Error **errp);
34
+ Coroutine *read_reply_co;
43
+ GSList *ignore_children, Error **errp);
35
int in_flight;
44
static void bdrv_child_abort_perm_update(BdrvChild *c);
36
45
static void bdrv_child_set_perm(BdrvChild *c);
37
Coroutine *recv_coroutine[MAX_NBD_REQUESTS];
46
38
diff --git a/block/nbd-client.c b/block/nbd-client.c
47
@@ -XXX,XX +XXX,XX @@ static void bdrv_child_perm(BlockDriverState *bs, BlockDriverState *child_bs,
39
index XXXXXXX..XXXXXXX 100644
48
* permissions of all its parents. This involves checking whether all necessary
40
--- a/block/nbd-client.c
49
* permission changes to child nodes can be performed.
41
+++ b/block/nbd-client.c
50
*
42
@@ -XXX,XX +XXX,XX @@
51
- * Will set *tighten_restrictions to true if and only if new permissions have to
43
#define HANDLE_TO_INDEX(bs, handle) ((handle) ^ ((uint64_t)(intptr_t)bs))
52
- * be taken or currently shared permissions are to be unshared. Otherwise,
44
#define INDEX_TO_HANDLE(bs, index) ((index) ^ ((uint64_t)(intptr_t)bs))
53
- * errors are not fatal as long as the caller accepts that the restrictions
45
54
- * remain tighter than they need to be. The caller still has to abort the
46
-static void nbd_recv_coroutines_enter_all(NBDClientSession *s)
55
- * transaction.
47
+static void nbd_recv_coroutines_enter_all(BlockDriverState *bs)
56
- * @tighten_restrictions cannot be used together with @q: When reopening, we may
48
{
57
- * encounter fatal errors even though no restrictions are to be tightened. For
49
+ NBDClientSession *s = nbd_get_client_session(bs);
58
- * example, changing a node from RW to RO will fail if the WRITE permission is
50
int i;
59
- * to be kept.
51
60
- *
52
for (i = 0; i < MAX_NBD_REQUESTS; i++) {
61
* A call to this function must always be followed by a call to bdrv_set_perm()
53
@@ -XXX,XX +XXX,XX @@ static void nbd_recv_coroutines_enter_all(NBDClientSession *s)
62
* or bdrv_abort_perm_update().
54
qemu_coroutine_enter(s->recv_coroutine[i]);
63
*/
55
}
64
static int bdrv_check_perm(BlockDriverState *bs, BlockReopenQueue *q,
56
}
65
uint64_t cumulative_perms,
57
+ BDRV_POLL_WHILE(bs, s->read_reply_co);
66
uint64_t cumulative_shared_perms,
58
}
67
- GSList *ignore_children,
59
68
- bool *tighten_restrictions, Error **errp)
60
static void nbd_teardown_connection(BlockDriverState *bs)
69
+ GSList *ignore_children, Error **errp)
61
@@ -XXX,XX +XXX,XX @@ static void nbd_teardown_connection(BlockDriverState *bs)
70
{
62
qio_channel_shutdown(client->ioc,
71
BlockDriver *drv = bs->drv;
63
QIO_CHANNEL_SHUTDOWN_BOTH,
72
BdrvChild *c;
64
NULL);
65
- nbd_recv_coroutines_enter_all(client);
66
+ nbd_recv_coroutines_enter_all(bs);
67
68
nbd_client_detach_aio_context(bs);
69
object_unref(OBJECT(client->sioc));
70
@@ -XXX,XX +XXX,XX @@ static void nbd_teardown_connection(BlockDriverState *bs)
71
client->ioc = NULL;
72
}
73
74
-static void nbd_reply_ready(void *opaque)
75
+static coroutine_fn void nbd_read_reply_entry(void *opaque)
76
{
77
- BlockDriverState *bs = opaque;
78
- NBDClientSession *s = nbd_get_client_session(bs);
79
+ NBDClientSession *s = opaque;
80
uint64_t i;
73
int ret;
81
int ret;
74
82
75
- assert(!q || !tighten_restrictions);
83
- if (!s->ioc) { /* Already closed */
76
-
84
- return;
77
- if (tighten_restrictions) {
78
- uint64_t current_perms, current_shared;
79
- uint64_t added_perms, removed_shared_perms;
80
-
81
- bdrv_get_cumulative_perm(bs, &current_perms, &current_shared);
82
-
83
- added_perms = cumulative_perms & ~current_perms;
84
- removed_shared_perms = current_shared & ~cumulative_shared_perms;
85
-
86
- *tighten_restrictions = added_perms || removed_shared_perms;
87
- }
85
- }
88
-
86
-
89
/* Write permissions never work with read-only images */
87
- if (s->reply.handle == 0) {
90
if ((cumulative_perms & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) &&
88
- /* No reply already in flight. Fetch a header. It is possible
91
!bdrv_is_writable_after_reopen(bs, q))
89
- * that another thread has done the same thing in parallel, so
92
@@ -XXX,XX +XXX,XX @@ static int bdrv_check_perm(BlockDriverState *bs, BlockReopenQueue *q,
90
- * the socket is not readable anymore.
93
/* Check all children */
91
- */
94
QLIST_FOREACH(c, &bs->children, next) {
92
+ for (;;) {
95
uint64_t cur_perm, cur_shared;
93
+ assert(s->reply.handle == 0);
96
- bool child_tighten_restr;
94
ret = nbd_receive_reply(s->ioc, &s->reply);
97
95
- if (ret == -EAGAIN) {
98
bdrv_child_perm(bs, c->bs, c, c->role, q,
96
- return;
99
cumulative_perms, cumulative_shared_perms,
100
&cur_perm, &cur_shared);
101
ret = bdrv_child_check_perm(c, q, cur_perm, cur_shared, ignore_children,
102
- tighten_restrictions ? &child_tighten_restr
103
- : NULL,
104
errp);
105
- if (tighten_restrictions) {
106
- *tighten_restrictions |= child_tighten_restr;
107
- }
97
- }
108
if (ret < 0) {
98
if (ret < 0) {
109
return ret;
99
- s->reply.handle = 0;
100
- goto fail;
101
+ break;
110
}
102
}
111
@@ -XXX,XX +XXX,XX @@ char *bdrv_perm_names(uint64_t perm)
103
- }
112
* set, the BdrvChild objects in this list are ignored in the calculations;
104
113
* this allows checking permission updates for an existing reference.
105
- /* There's no need for a mutex on the receive side, because the
114
*
106
- * handler acts as a synchronization point and ensures that only
115
- * See bdrv_check_perm() for the semantics of @tighten_restrictions.
107
- * one coroutine is called until the reply finishes. */
116
- *
108
- i = HANDLE_TO_INDEX(s, s->reply.handle);
117
* Needs to be followed by a call to either bdrv_set_perm() or
109
- if (i >= MAX_NBD_REQUESTS) {
118
* bdrv_abort_perm_update(). */
110
- goto fail;
119
static int bdrv_check_update_perm(BlockDriverState *bs, BlockReopenQueue *q,
111
- }
120
uint64_t new_used_perm,
112
+ /* There's no need for a mutex on the receive side, because the
121
uint64_t new_shared_perm,
113
+ * handler acts as a synchronization point and ensures that only
122
GSList *ignore_children,
114
+ * one coroutine is called until the reply finishes.
123
- bool *tighten_restrictions,
115
+ */
124
Error **errp)
116
+ i = HANDLE_TO_INDEX(s, s->reply.handle);
125
{
117
+ if (i >= MAX_NBD_REQUESTS || !s->recv_coroutine[i]) {
126
BdrvChild *c;
118
+ break;
127
uint64_t cumulative_perms = new_used_perm;
119
+ }
128
uint64_t cumulative_shared_perms = new_shared_perm;
120
129
121
- if (s->recv_coroutine[i]) {
130
- assert(!q || !tighten_restrictions);
122
- qemu_coroutine_enter(s->recv_coroutine[i]);
131
123
- return;
132
/* There is no reason why anyone couldn't tolerate write_unchanged */
124
+ /* We're woken up by the recv_coroutine itself. Note that there
133
assert(new_shared_perm & BLK_PERM_WRITE_UNCHANGED);
125
+ * is no race between yielding and reentering read_reply_co. This
134
@@ -XXX,XX +XXX,XX @@ static int bdrv_check_update_perm(BlockDriverState *bs, BlockReopenQueue *q,
126
+ * is because:
135
char *user = bdrv_child_user_desc(c);
127
+ *
136
char *perm_names = bdrv_perm_names(new_used_perm & ~c->shared_perm);
128
+ * - if recv_coroutine[i] runs on the same AioContext, it is only
137
129
+ * entered after we yield
138
- if (tighten_restrictions) {
130
+ *
139
- *tighten_restrictions = true;
131
+ * - if recv_coroutine[i] runs on a different AioContext, reentering
140
- }
132
+ * read_reply_co happens through a bottom half, which can only
141
-
133
+ * run after we yield.
142
error_setg(errp, "Conflicts with use by %s as '%s', which does not "
134
+ */
143
"allow '%s' on %s",
135
+ aio_co_wake(s->recv_coroutine[i]);
144
user, c->name, perm_names, bdrv_get_node_name(c->bs));
136
+ qemu_coroutine_yield();
145
@@ -XXX,XX +XXX,XX @@ static int bdrv_check_update_perm(BlockDriverState *bs, BlockReopenQueue *q,
137
}
146
char *user = bdrv_child_user_desc(c);
138
-
147
char *perm_names = bdrv_perm_names(c->perm & ~new_shared_perm);
139
-fail:
148
140
- nbd_teardown_connection(bs);
149
- if (tighten_restrictions) {
141
-}
150
- *tighten_restrictions = true;
142
-
151
- }
143
-static void nbd_restart_write(void *opaque)
152
-
144
-{
153
error_setg(errp, "Conflicts with use by %s as '%s', which uses "
145
- BlockDriverState *bs = opaque;
154
"'%s' on %s",
146
-
155
user, c->name, perm_names, bdrv_get_node_name(c->bs));
147
- qemu_coroutine_enter(nbd_get_client_session(bs)->send_coroutine);
156
@@ -XXX,XX +XXX,XX @@ static int bdrv_check_update_perm(BlockDriverState *bs, BlockReopenQueue *q,
148
+ s->read_reply_co = NULL;
157
}
149
}
158
150
159
return bdrv_check_perm(bs, q, cumulative_perms, cumulative_shared_perms,
151
static int nbd_co_send_request(BlockDriverState *bs,
160
- ignore_children, tighten_restrictions, errp);
152
@@ -XXX,XX +XXX,XX @@ static int nbd_co_send_request(BlockDriverState *bs,
161
+ ignore_children, errp);
153
QEMUIOVector *qiov)
162
}
154
{
163
155
NBDClientSession *s = nbd_get_client_session(bs);
164
/* Needs to be followed by a call to either bdrv_child_set_perm() or
156
- AioContext *aio_context;
165
* bdrv_child_abort_perm_update(). */
157
int rc, ret, i;
166
static int bdrv_child_check_perm(BdrvChild *c, BlockReopenQueue *q,
158
167
uint64_t perm, uint64_t shared,
159
qemu_co_mutex_lock(&s->send_mutex);
168
- GSList *ignore_children,
160
@@ -XXX,XX +XXX,XX @@ static int nbd_co_send_request(BlockDriverState *bs,
169
- bool *tighten_restrictions, Error **errp)
161
return -EPIPE;
170
+ GSList *ignore_children, Error **errp)
162
}
163
164
- s->send_coroutine = qemu_coroutine_self();
165
- aio_context = bdrv_get_aio_context(bs);
166
-
167
- aio_set_fd_handler(aio_context, s->sioc->fd, false,
168
- nbd_reply_ready, nbd_restart_write, NULL, bs);
169
if (qiov) {
170
qio_channel_set_cork(s->ioc, true);
171
rc = nbd_send_request(s->ioc, request);
172
@@ -XXX,XX +XXX,XX @@ static int nbd_co_send_request(BlockDriverState *bs,
173
} else {
174
rc = nbd_send_request(s->ioc, request);
175
}
176
- aio_set_fd_handler(aio_context, s->sioc->fd, false,
177
- nbd_reply_ready, NULL, NULL, bs);
178
- s->send_coroutine = NULL;
179
qemu_co_mutex_unlock(&s->send_mutex);
180
return rc;
181
}
182
@@ -XXX,XX +XXX,XX @@ static void nbd_co_receive_reply(NBDClientSession *s,
171
{
183
{
172
int ret;
184
int ret;
173
185
174
ignore_children = g_slist_prepend(g_slist_copy(ignore_children), c);
186
- /* Wait until we're woken up by the read handler. TODO: perhaps
175
- ret = bdrv_check_update_perm(c->bs, q, perm, shared, ignore_children,
187
- * peek at the next reply and avoid yielding if it's ours? */
176
- tighten_restrictions, errp);
188
+ /* Wait until we're woken up by nbd_read_reply_entry. */
177
+ ret = bdrv_check_update_perm(c->bs, q, perm, shared, ignore_children, errp);
189
qemu_coroutine_yield();
178
g_slist_free(ignore_children);
190
*reply = s->reply;
179
191
if (reply->handle != request->handle ||
180
if (ret < 0) {
192
@@ -XXX,XX +XXX,XX @@ static void nbd_coroutine_start(NBDClientSession *s,
181
@@ -XXX,XX +XXX,XX @@ static void bdrv_child_abort_perm_update(BdrvChild *c)
193
/* s->recv_coroutine[i] is set as soon as we get the send_lock. */
182
bdrv_abort_perm_update(c->bs);
194
}
183
}
195
184
196
-static void nbd_coroutine_end(NBDClientSession *s,
185
-static int bdrv_refresh_perms(BlockDriverState *bs, bool *tighten_restrictions,
197
+static void nbd_coroutine_end(BlockDriverState *bs,
186
- Error **errp)
198
NBDRequest *request)
187
+static int bdrv_refresh_perms(BlockDriverState *bs, Error **errp)
199
{
188
{
200
+ NBDClientSession *s = nbd_get_client_session(bs);
189
int ret;
201
int i = HANDLE_TO_INDEX(s, request->handle);
190
uint64_t perm, shared_perm;
202
+
191
203
s->recv_coroutine[i] = NULL;
192
bdrv_get_cumulative_perm(bs, &perm, &shared_perm);
204
- if (s->in_flight-- == MAX_NBD_REQUESTS) {
193
- ret = bdrv_check_perm(bs, NULL, perm, shared_perm, NULL,
205
- qemu_co_queue_next(&s->free_sema);
194
- tighten_restrictions, errp);
206
+ s->in_flight--;
195
+ ret = bdrv_check_perm(bs, NULL, perm, shared_perm, NULL, errp);
207
+ qemu_co_queue_next(&s->free_sema);
196
if (ret < 0) {
208
+
197
bdrv_abort_perm_update(bs);
209
+ /* Kick the read_reply_co to get the next reply. */
210
+ if (s->read_reply_co) {
211
+ aio_co_wake(s->read_reply_co);
212
}
213
}
214
215
@@ -XXX,XX +XXX,XX @@ int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset,
216
} else {
217
nbd_co_receive_reply(client, &request, &reply, qiov);
218
}
219
- nbd_coroutine_end(client, &request);
220
+ nbd_coroutine_end(bs, &request);
221
return -reply.error;
222
}
223
224
@@ -XXX,XX +XXX,XX @@ int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset,
225
} else {
226
nbd_co_receive_reply(client, &request, &reply, NULL);
227
}
228
- nbd_coroutine_end(client, &request);
229
+ nbd_coroutine_end(bs, &request);
230
return -reply.error;
231
}
232
233
@@ -XXX,XX +XXX,XX @@ int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
234
} else {
235
nbd_co_receive_reply(client, &request, &reply, NULL);
236
}
237
- nbd_coroutine_end(client, &request);
238
+ nbd_coroutine_end(bs, &request);
239
return -reply.error;
240
}
241
242
@@ -XXX,XX +XXX,XX @@ int nbd_client_co_flush(BlockDriverState *bs)
243
} else {
244
nbd_co_receive_reply(client, &request, &reply, NULL);
245
}
246
- nbd_coroutine_end(client, &request);
247
+ nbd_coroutine_end(bs, &request);
248
return -reply.error;
249
}
250
251
@@ -XXX,XX +XXX,XX @@ int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int count)
252
} else {
253
nbd_co_receive_reply(client, &request, &reply, NULL);
254
}
255
- nbd_coroutine_end(client, &request);
256
+ nbd_coroutine_end(bs, &request);
257
return -reply.error;
258
259
}
260
261
void nbd_client_detach_aio_context(BlockDriverState *bs)
262
{
263
- aio_set_fd_handler(bdrv_get_aio_context(bs),
264
- nbd_get_client_session(bs)->sioc->fd,
265
- false, NULL, NULL, NULL, NULL);
266
+ NBDClientSession *client = nbd_get_client_session(bs);
267
+ qio_channel_detach_aio_context(QIO_CHANNEL(client->sioc));
268
}
269
270
void nbd_client_attach_aio_context(BlockDriverState *bs,
271
AioContext *new_context)
272
{
273
- aio_set_fd_handler(new_context, nbd_get_client_session(bs)->sioc->fd,
274
- false, nbd_reply_ready, NULL, NULL, bs);
275
+ NBDClientSession *client = nbd_get_client_session(bs);
276
+ qio_channel_attach_aio_context(QIO_CHANNEL(client->sioc), new_context);
277
+ aio_co_schedule(new_context, client->read_reply_co);
278
}
279
280
void nbd_client_close(BlockDriverState *bs)
281
@@ -XXX,XX +XXX,XX @@ int nbd_client_init(BlockDriverState *bs,
282
/* Now that we're connected, set the socket to be non-blocking and
283
* kick the reply mechanism. */
284
qio_channel_set_blocking(QIO_CHANNEL(sioc), false, NULL);
285
-
286
+ client->read_reply_co = qemu_coroutine_create(nbd_read_reply_entry, client);
287
nbd_client_attach_aio_context(bs, bdrv_get_aio_context(bs));
288
289
logout("Established connection with NBD server\n");
290
diff --git a/nbd/client.c b/nbd/client.c
291
index XXXXXXX..XXXXXXX 100644
292
--- a/nbd/client.c
293
+++ b/nbd/client.c
294
@@ -XXX,XX +XXX,XX @@ ssize_t nbd_receive_reply(QIOChannel *ioc, NBDReply *reply)
295
ssize_t ret;
296
297
ret = read_sync(ioc, buf, sizeof(buf));
298
- if (ret < 0) {
299
+ if (ret <= 0) {
198
return ret;
300
return ret;
199
@@ -XXX,XX +XXX,XX @@ int bdrv_child_try_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared,
301
}
200
{
302
201
Error *local_err = NULL;
303
diff --git a/nbd/common.c b/nbd/common.c
202
int ret;
304
index XXXXXXX..XXXXXXX 100644
203
- bool tighten_restrictions;
305
--- a/nbd/common.c
204
306
+++ b/nbd/common.c
205
- ret = bdrv_child_check_perm(c, NULL, perm, shared, NULL,
307
@@ -XXX,XX +XXX,XX @@ ssize_t nbd_wr_syncv(QIOChannel *ioc,
206
- &tighten_restrictions, &local_err);
308
}
207
+ ret = bdrv_child_check_perm(c, NULL, perm, shared, NULL, &local_err);
309
if (len == QIO_CHANNEL_ERR_BLOCK) {
208
if (ret < 0) {
310
if (qemu_in_coroutine()) {
209
bdrv_child_abort_perm_update(c);
311
- /* XXX figure out if we can create a variant on
210
- if (tighten_restrictions) {
312
- * qio_channel_yield() that works with AIO contexts
211
+ if ((perm & ~c->perm) || (c->shared_perm & ~shared)) {
313
- * and consider using that in this branch */
212
+ /* tighten permissions */
314
- qemu_coroutine_yield();
213
error_propagate(errp, local_err);
315
- } else if (done) {
214
} else {
316
- /* XXX this is needed by nbd_reply_ready. */
215
/*
317
- qio_channel_wait(ioc,
216
@@ -XXX,XX +XXX,XX @@ static void bdrv_replace_child(BdrvChild *child, BlockDriverState *new_bs)
318
- do_read ? G_IO_IN : G_IO_OUT);
217
}
319
+ qio_channel_yield(ioc, do_read ? G_IO_IN : G_IO_OUT);
218
320
} else {
219
if (old_bs) {
321
return -EAGAIN;
220
- bool tighten_restrictions;
322
}
221
-
323
diff --git a/nbd/server.c b/nbd/server.c
222
/*
324
index XXXXXXX..XXXXXXX 100644
223
* Update permissions for old node. We're just taking a parent away, so
325
--- a/nbd/server.c
224
* we're loosening restrictions. Errors of permission update are not
326
+++ b/nbd/server.c
225
* fatal in this case, ignore them.
327
@@ -XXX,XX +XXX,XX @@ struct NBDClient {
328
CoMutex send_lock;
329
Coroutine *send_coroutine;
330
331
- bool can_read;
332
-
333
QTAILQ_ENTRY(NBDClient) next;
334
int nb_requests;
335
bool closing;
336
@@ -XXX,XX +XXX,XX @@ struct NBDClient {
337
338
/* That's all folks */
339
340
-static void nbd_set_handlers(NBDClient *client);
341
-static void nbd_unset_handlers(NBDClient *client);
342
-static void nbd_update_can_read(NBDClient *client);
343
+static void nbd_client_receive_next_request(NBDClient *client);
344
345
static gboolean nbd_negotiate_continue(QIOChannel *ioc,
346
GIOCondition condition,
347
@@ -XXX,XX +XXX,XX @@ void nbd_client_put(NBDClient *client)
226
*/
348
*/
227
- bdrv_refresh_perms(old_bs, &tighten_restrictions, NULL);
349
assert(client->closing);
228
- assert(tighten_restrictions == false);
350
229
+ bdrv_refresh_perms(old_bs, NULL);
351
- nbd_unset_handlers(client);
230
352
+ qio_channel_detach_aio_context(client->ioc);
231
/* When the parent requiring a non-default AioContext is removed, the
353
object_unref(OBJECT(client->sioc));
232
* node moves back to the main AioContext */
354
object_unref(OBJECT(client->ioc));
233
@@ -XXX,XX +XXX,XX @@ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
355
if (client->tlscreds) {
234
Error *local_err = NULL;
356
@@ -XXX,XX +XXX,XX @@ static NBDRequestData *nbd_request_get(NBDClient *client)
235
int ret;
357
236
358
assert(client->nb_requests <= MAX_NBD_REQUESTS - 1);
237
- ret = bdrv_check_update_perm(child_bs, NULL, perm, shared_perm, NULL, NULL,
359
client->nb_requests++;
238
- errp);
360
- nbd_update_can_read(client);
239
+ ret = bdrv_check_update_perm(child_bs, NULL, perm, shared_perm, NULL, errp);
361
240
if (ret < 0) {
362
req = g_new0(NBDRequestData, 1);
241
bdrv_abort_perm_update(child_bs);
363
nbd_client_get(client);
242
bdrv_unref(child_bs);
364
@@ -XXX,XX +XXX,XX @@ static void nbd_request_put(NBDRequestData *req)
243
@@ -XXX,XX +XXX,XX @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
365
g_free(req);
244
QTAILQ_FOREACH(bs_entry, bs_queue, entry) {
366
245
BDRVReopenState *state = &bs_entry->state;
367
client->nb_requests--;
246
ret = bdrv_check_perm(state->bs, bs_queue, state->perm,
368
- nbd_update_can_read(client);
247
- state->shared_perm, NULL, NULL, errp);
369
+ nbd_client_receive_next_request(client);
248
+ state->shared_perm, NULL, errp);
370
+
249
if (ret < 0) {
371
nbd_client_put(client);
250
goto cleanup_perm;
372
}
251
}
373
252
@@ -XXX,XX +XXX,XX @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
374
@@ -XXX,XX +XXX,XX @@ static void blk_aio_attached(AioContext *ctx, void *opaque)
253
bs_queue, state->perm, state->shared_perm,
375
exp->ctx = ctx;
254
&nperm, &nshared);
376
255
ret = bdrv_check_update_perm(state->new_backing_bs, NULL,
377
QTAILQ_FOREACH(client, &exp->clients, next) {
256
- nperm, nshared, NULL, NULL, errp);
378
- nbd_set_handlers(client);
257
+ nperm, nshared, NULL, errp);
379
+ qio_channel_attach_aio_context(client->ioc, ctx);
258
if (ret < 0) {
380
+ if (client->recv_coroutine) {
259
goto cleanup_perm;
381
+ aio_co_schedule(ctx, client->recv_coroutine);
260
}
382
+ }
261
@@ -XXX,XX +XXX,XX @@ static void bdrv_replace_node_common(BlockDriverState *from,
383
+ if (client->send_coroutine) {
262
384
+ aio_co_schedule(ctx, client->send_coroutine);
263
/* Check whether the required permissions can be granted on @to, ignoring
385
+ }
264
* all BdrvChild in @list so that they can't block themselves. */
386
}
265
- ret = bdrv_check_update_perm(to, NULL, perm, shared, list, NULL, errp);
387
}
266
+ ret = bdrv_check_update_perm(to, NULL, perm, shared, list, errp);
388
267
if (ret < 0) {
389
@@ -XXX,XX +XXX,XX @@ static void blk_aio_detach(void *opaque)
268
bdrv_abort_perm_update(to);
390
TRACE("Export %s: Detaching clients from AIO context %p\n", exp->name, exp->ctx);
391
392
QTAILQ_FOREACH(client, &exp->clients, next) {
393
- nbd_unset_handlers(client);
394
+ qio_channel_detach_aio_context(client->ioc);
395
}
396
397
exp->ctx = NULL;
398
@@ -XXX,XX +XXX,XX @@ static ssize_t nbd_co_send_reply(NBDRequestData *req, NBDReply *reply,
399
g_assert(qemu_in_coroutine());
400
qemu_co_mutex_lock(&client->send_lock);
401
client->send_coroutine = qemu_coroutine_self();
402
- nbd_set_handlers(client);
403
404
if (!len) {
405
rc = nbd_send_reply(client->ioc, reply);
406
@@ -XXX,XX +XXX,XX @@ static ssize_t nbd_co_send_reply(NBDRequestData *req, NBDReply *reply,
407
}
408
409
client->send_coroutine = NULL;
410
- nbd_set_handlers(client);
411
qemu_co_mutex_unlock(&client->send_lock);
412
return rc;
413
}
414
@@ -XXX,XX +XXX,XX @@ static ssize_t nbd_co_receive_request(NBDRequestData *req,
415
ssize_t rc;
416
417
g_assert(qemu_in_coroutine());
418
- client->recv_coroutine = qemu_coroutine_self();
419
- nbd_update_can_read(client);
420
-
421
+ assert(client->recv_coroutine == qemu_coroutine_self());
422
rc = nbd_receive_request(client->ioc, request);
423
if (rc < 0) {
424
if (rc != -EAGAIN) {
425
@@ -XXX,XX +XXX,XX @@ static ssize_t nbd_co_receive_request(NBDRequestData *req,
426
427
out:
428
client->recv_coroutine = NULL;
429
- nbd_update_can_read(client);
430
+ nbd_client_receive_next_request(client);
431
432
return rc;
433
}
434
435
-static void nbd_trip(void *opaque)
436
+/* Owns a reference to the NBDClient passed as opaque. */
437
+static coroutine_fn void nbd_trip(void *opaque)
438
{
439
NBDClient *client = opaque;
440
NBDExport *exp = client->exp;
441
NBDRequestData *req;
442
- NBDRequest request;
443
+ NBDRequest request = { 0 }; /* GCC thinks it can be used uninitialized */
444
NBDReply reply;
445
ssize_t ret;
446
int flags;
447
448
TRACE("Reading request.");
449
if (client->closing) {
450
+ nbd_client_put(client);
451
return;
452
}
453
454
@@ -XXX,XX +XXX,XX @@ static void nbd_trip(void *opaque)
455
456
done:
457
nbd_request_put(req);
458
+ nbd_client_put(client);
459
return;
460
461
out:
462
nbd_request_put(req);
463
client_close(client);
464
+ nbd_client_put(client);
465
}
466
467
-static void nbd_read(void *opaque)
468
+static void nbd_client_receive_next_request(NBDClient *client)
469
{
470
- NBDClient *client = opaque;
471
-
472
- if (client->recv_coroutine) {
473
- qemu_coroutine_enter(client->recv_coroutine);
474
- } else {
475
- qemu_coroutine_enter(qemu_coroutine_create(nbd_trip, client));
476
- }
477
-}
478
-
479
-static void nbd_restart_write(void *opaque)
480
-{
481
- NBDClient *client = opaque;
482
-
483
- qemu_coroutine_enter(client->send_coroutine);
484
-}
485
-
486
-static void nbd_set_handlers(NBDClient *client)
487
-{
488
- if (client->exp && client->exp->ctx) {
489
- aio_set_fd_handler(client->exp->ctx, client->sioc->fd, true,
490
- client->can_read ? nbd_read : NULL,
491
- client->send_coroutine ? nbd_restart_write : NULL,
492
- NULL, client);
493
- }
494
-}
495
-
496
-static void nbd_unset_handlers(NBDClient *client)
497
-{
498
- if (client->exp && client->exp->ctx) {
499
- aio_set_fd_handler(client->exp->ctx, client->sioc->fd, true, NULL,
500
- NULL, NULL, NULL);
501
- }
502
-}
503
-
504
-static void nbd_update_can_read(NBDClient *client)
505
-{
506
- bool can_read = client->recv_coroutine ||
507
- client->nb_requests < MAX_NBD_REQUESTS;
508
-
509
- if (can_read != client->can_read) {
510
- client->can_read = can_read;
511
- nbd_set_handlers(client);
512
-
513
- /* There is no need to invoke aio_notify(), since aio_set_fd_handler()
514
- * in nbd_set_handlers() will have taken care of that */
515
+ if (!client->recv_coroutine && client->nb_requests < MAX_NBD_REQUESTS) {
516
+ nbd_client_get(client);
517
+ client->recv_coroutine = qemu_coroutine_create(nbd_trip, client);
518
+ aio_co_schedule(client->exp->ctx, client->recv_coroutine);
519
}
520
}
521
522
@@ -XXX,XX +XXX,XX @@ static coroutine_fn void nbd_co_client_start(void *opaque)
269
goto out;
523
goto out;
270
@@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp)
524
}
271
*/
525
qemu_co_mutex_init(&client->send_lock);
272
if (bs->open_flags & BDRV_O_INACTIVE) {
526
- nbd_set_handlers(client);
273
bs->open_flags &= ~BDRV_O_INACTIVE;
527
274
- ret = bdrv_refresh_perms(bs, NULL, errp);
528
if (exp) {
275
+ ret = bdrv_refresh_perms(bs, errp);
529
QTAILQ_INSERT_TAIL(&exp->clients, client, next);
276
if (ret < 0) {
530
}
277
bs->open_flags |= BDRV_O_INACTIVE;
531
+
278
return ret;
532
+ nbd_client_receive_next_request(client);
279
@@ -XXX,XX +XXX,XX @@ static bool bdrv_has_bds_parent(BlockDriverState *bs, bool only_active)
533
+
280
static int bdrv_inactivate_recurse(BlockDriverState *bs)
534
out:
281
{
535
g_free(data);
282
BdrvChild *child, *parent;
536
}
283
- bool tighten_restrictions;
537
@@ -XXX,XX +XXX,XX @@ void nbd_client_new(NBDExport *exp,
284
int ret;
538
object_ref(OBJECT(client->sioc));
285
539
client->ioc = QIO_CHANNEL(sioc);
286
if (!bs->drv) {
540
object_ref(OBJECT(client->ioc));
287
@@ -XXX,XX +XXX,XX @@ static int bdrv_inactivate_recurse(BlockDriverState *bs)
541
- client->can_read = true;
288
* We only tried to loosen restrictions, so errors are not fatal, ignore
542
client->close = close_fn;
289
* them.
543
290
*/
544
data->client = client;
291
- bdrv_refresh_perms(bs, &tighten_restrictions, NULL);
292
- assert(tighten_restrictions == false);
293
+ bdrv_refresh_perms(bs, NULL);
294
295
/* Recursively inactivate children */
296
QLIST_FOREACH(child, &bs->children, next) {
297
--
545
--
298
2.29.2
546
2.9.3
299
547
300
548
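The shape of the NBD client after the patch above can be reduced to a small schematic:
one long-lived coroutine reads reply headers, wakes the coroutine that owns the
matching request with aio_co_wake(), and sleeps until that coroutine kicks it again.
Everything below except the qemu_coroutine_* and aio_co_* calls is an invented,
stripped-down stand-in that assumes the QEMU tree; it is a reading aid, not the
driver's actual code.

/* Schematic of the reader/requester hand-off used by nbd_read_reply_entry(). */
#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "block/aio.h"

#define MAX_REQS 16

typedef struct ReplyDemux {
    Coroutine *read_reply_co;        /* the long-lived header reader */
    Coroutine *recv_co[MAX_REQS];    /* per-request waiters */
} ReplyDemux;

/* Invented helper: real code would park in qio_channel_yield() and parse
 * one reply header, returning the slot of the request it belongs to. */
static coroutine_fn int read_one_header(ReplyDemux *d)
{
    (void)d;
    return -1;
}

static coroutine_fn void read_reply_entry(void *opaque)
{
    ReplyDemux *d = opaque;
    int i;

    for (;;) {
        i = read_one_header(d);
        if (i < 0 || i >= MAX_REQS || !d->recv_co[i]) {
            break;
        }
        /* Wake the requester in its own AioContext, then sleep until it
         * has consumed the reply and kicks us for the next header. */
        aio_co_wake(d->recv_co[i]);
        qemu_coroutine_yield();
    }
    d->read_reply_co = NULL;
}

static coroutine_fn void wait_for_reply(ReplyDemux *d, int slot)
{
    d->recv_co[slot] = qemu_coroutine_self();
    qemu_coroutine_yield();              /* woken by read_reply_entry() */
    d->recv_co[slot] = NULL;
    if (d->read_reply_co) {
        aio_co_wake(d->read_reply_co);   /* let it fetch the next header */
    }
}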
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
Add a benchmark for the new preallocate filter.
3
As a small step towards the introduction of multiqueue, we want
4
coroutines to remain on the same AioContext that started them,
5
unless they are moved explicitly with e.g. aio_co_schedule. This patch
6
keeps coroutines from switching AioContext when they use a CoMutex.
7
For now it does not make much of a difference, because the CoMutex
8
is not thread-safe and the AioContext itself is used to protect the
9
CoMutex from concurrent access. However, this is going to change.
4
10
5
Example usage:
11
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
6
./bench_prealloc.py ../../build/qemu-img \
12
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
7
ssd-ext4:/path/to/mount/point \
13
Reviewed-by: Fam Zheng <famz@redhat.com>
8
ssd-xfs:/path2 hdd-ext4:/path3 hdd-xfs:/path4
14
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
15
Message-id: 20170213135235.12274-9-pbonzini@redhat.com
16
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
17
---
18
util/qemu-coroutine-lock.c | 5 ++---
19
util/trace-events | 1 -
20
2 files changed, 2 insertions(+), 4 deletions(-)
9
21
10
The benchmark shows the performance improvement (or degradation) when using the
22
diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
11
new preallocate filter with a qcow2 image.
23
index XXXXXXX..XXXXXXX 100644
12
24
--- a/util/qemu-coroutine-lock.c
13
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
25
+++ b/util/qemu-coroutine-lock.c
14
Message-Id: <20201021145859.11201-22-vsementsov@virtuozzo.com>
15
Reviewed-by: Max Reitz <mreitz@redhat.com>
16
Signed-off-by: Max Reitz <mreitz@redhat.com>
17
---
18
scripts/simplebench/bench_prealloc.py | 132 ++++++++++++++++++++++++++
19
1 file changed, 132 insertions(+)
20
create mode 100755 scripts/simplebench/bench_prealloc.py
21
22
diff --git a/scripts/simplebench/bench_prealloc.py b/scripts/simplebench/bench_prealloc.py
23
new file mode 100755
24
index XXXXXXX..XXXXXXX
25
--- /dev/null
26
+++ b/scripts/simplebench/bench_prealloc.py
27
@@ -XXX,XX +XXX,XX @@
26
@@ -XXX,XX +XXX,XX @@
28
+#!/usr/bin/env python3
27
#include "qemu/coroutine.h"
29
+#
28
#include "qemu/coroutine_int.h"
30
+# Benchmark preallocate filter
29
#include "qemu/queue.h"
31
+#
30
+#include "block/aio.h"
32
+# Copyright (c) 2020 Virtuozzo International GmbH.
31
#include "trace.h"
33
+#
32
34
+# This program is free software; you can redistribute it and/or modify
33
void qemu_co_queue_init(CoQueue *queue)
35
+# it under the terms of the GNU General Public License as published by
34
@@ -XXX,XX +XXX,XX @@ void qemu_co_queue_run_restart(Coroutine *co)
36
+# the Free Software Foundation; either version 2 of the License, or
35
37
+# (at your option) any later version.
36
static bool qemu_co_queue_do_restart(CoQueue *queue, bool single)
38
+#
37
{
39
+# This program is distributed in the hope that it will be useful,
38
- Coroutine *self = qemu_coroutine_self();
40
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
39
Coroutine *next;
41
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
40
42
+# GNU General Public License for more details.
41
if (QSIMPLEQ_EMPTY(&queue->entries)) {
43
+#
42
@@ -XXX,XX +XXX,XX @@ static bool qemu_co_queue_do_restart(CoQueue *queue, bool single)
44
+# You should have received a copy of the GNU General Public License
43
45
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
44
while ((next = QSIMPLEQ_FIRST(&queue->entries)) != NULL) {
46
+#
45
QSIMPLEQ_REMOVE_HEAD(&queue->entries, co_queue_next);
47
+
46
- QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, next, co_queue_next);
48
+
47
- trace_qemu_co_queue_next(next);
49
+import sys
48
+ aio_co_wake(next);
50
+import os
49
if (single) {
51
+import subprocess
50
break;
52
+import re
51
}
53
+import json
52
diff --git a/util/trace-events b/util/trace-events
54
+
53
index XXXXXXX..XXXXXXX 100644
55
+import simplebench
54
--- a/util/trace-events
56
+from results_to_text import results_to_text
55
+++ b/util/trace-events
57
+
56
@@ -XXX,XX +XXX,XX @@ qemu_coroutine_terminate(void *co) "self %p"
58
+
57
59
+def qemu_img_bench(args):
58
# util/qemu-coroutine-lock.c
60
+ p = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
59
qemu_co_queue_run_restart(void *co) "co %p"
61
+ universal_newlines=True)
60
-qemu_co_queue_next(void *nxt) "next %p"
62
+
61
qemu_co_mutex_lock_entry(void *mutex, void *self) "mutex %p self %p"
63
+ if p.returncode == 0:
62
qemu_co_mutex_lock_return(void *mutex, void *self) "mutex %p self %p"
64
+ try:
63
qemu_co_mutex_unlock_entry(void *mutex, void *self) "mutex %p self %p"
65
+ m = re.search(r'Run completed in (\d+.\d+) seconds.', p.stdout)
66
+ return {'seconds': float(m.group(1))}
67
+ except Exception:
68
+ return {'error': f'failed to parse qemu-img output: {p.stdout}'}
69
+ else:
70
+ return {'error': f'qemu-img failed: {p.returncode}: {p.stdout}'}
71
+
72
+
73
+def bench_func(env, case):
74
+ fname = f"{case['dir']}/prealloc-test.qcow2"
75
+ try:
76
+ os.remove(fname)
77
+ except OSError:
78
+ pass
79
+
80
+ subprocess.run([env['qemu-img-binary'], 'create', '-f', 'qcow2', fname,
81
+ '16G'], stdout=subprocess.DEVNULL,
82
+ stderr=subprocess.DEVNULL, check=True)
83
+
84
+ args = [env['qemu-img-binary'], 'bench', '-c', str(case['count']),
85
+ '-d', '64', '-s', case['block-size'], '-t', 'none', '-n', '-w']
86
+ if env['prealloc']:
87
+ args += ['--image-opts',
88
+ 'driver=qcow2,file.driver=preallocate,file.file.driver=file,'
89
+ f'file.file.filename={fname}']
90
+ else:
91
+ args += ['-f', 'qcow2', fname]
92
+
93
+ return qemu_img_bench(args)
94
+
95
+
96
+def auto_count_bench_func(env, case):
97
+ case['count'] = 100
98
+ while True:
99
+ res = bench_func(env, case)
100
+ if 'error' in res:
101
+ return res
102
+
103
+ if res['seconds'] >= 1:
104
+ break
105
+
106
+ case['count'] *= 10
107
+
108
+ if res['seconds'] < 5:
109
+ case['count'] = round(case['count'] * 5 / res['seconds'])
110
+ res = bench_func(env, case)
111
+ if 'error' in res:
112
+ return res
113
+
114
+ res['iops'] = case['count'] / res['seconds']
115
+ return res
116
+
117
+
118
+if __name__ == '__main__':
119
+ if len(sys.argv) < 2:
120
+ print(f'USAGE: {sys.argv[0]} <qemu-img binary> '
121
+ 'DISK_NAME:DIR_PATH ...')
122
+ exit(1)
123
+
124
+ qemu_img = sys.argv[1]
125
+
126
+ envs = [
127
+ {
128
+ 'id': 'no-prealloc',
129
+ 'qemu-img-binary': qemu_img,
130
+ 'prealloc': False
131
+ },
132
+ {
133
+ 'id': 'prealloc',
134
+ 'qemu-img-binary': qemu_img,
135
+ 'prealloc': True
136
+ }
137
+ ]
138
+
139
+ aligned_cases = []
140
+ unaligned_cases = []
141
+
142
+ for disk in sys.argv[2:]:
143
+ name, path = disk.split(':')
144
+ aligned_cases.append({
145
+ 'id': f'{name}, aligned sequential 16k',
146
+ 'block-size': '16k',
147
+ 'dir': path
148
+ })
149
+ unaligned_cases.append({
150
+ 'id': f'{name}, unaligned sequential 64k',
151
+ 'block-size': '16k',
152
+ 'dir': path
153
+ })
154
+
155
+ result = simplebench.bench(auto_count_bench_func, envs,
156
+ aligned_cases + unaligned_cases, count=5)
157
+ print(results_to_text(result))
158
+ with open('results.json', 'w') as f:
159
+ json.dump(result, f, indent=4)
160
--
64
--
161
2.29.2
65
2.9.3
162
66
163
67
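A short aside on the API used by the coroutine-lock change above: aio_co_wake()
resumes a coroutine in the AioContext it was last running in, going through a bottom
half when that context belongs to another thread, whereas the old code appended the
waiter to the current coroutine's wake-up list and could therefore drag it onto a
different context. A minimal, hedged sketch of the idiom follows; the Waiter type and
function names are invented and the code assumes the QEMU tree.

/* Sketch: signalling a parked coroutine without pulling it onto the
 * caller's AioContext; only aio_co_wake() and qemu_coroutine_*() are real. */
#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "block/aio.h"

typedef struct Waiter {
    Coroutine *co;      /* parked with qemu_coroutine_yield() */
    bool ready;
} Waiter;

static void signal_waiter(Waiter *w)
{
    w->ready = true;
    if (w->co) {
        /* Resumes w->co on the AioContext it was running in; safe even
         * when called from a different thread or context. */
        aio_co_wake(w->co);
    }
}

static coroutine_fn void wait_for_signal(Waiter *w)
{
    w->co = qemu_coroutine_self();
    while (!w->ready) {
        qemu_coroutine_yield();
    }
    w->co = NULL;
}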
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
Add a parameter to skip the test if some needed additional formats are not
3
Keep the coroutine on the same AioContext. Without this change,
4
supported (for example filter drivers).
4
there would be a race between yielding the coroutine and reentering it.
5
While the race cannot happen now, because the code only runs from a single
6
AioContext, this will change with multiqueue support in the block layer.
5
7
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
8
While doing the change, replace the custom bottom half with aio_co_schedule.
7
Message-Id: <20201021145859.11201-12-vsementsov@virtuozzo.com>
9
8
Reviewed-by: Max Reitz <mreitz@redhat.com>
10
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
9
Signed-off-by: Max Reitz <mreitz@redhat.com>
11
Reviewed-by: Fam Zheng <famz@redhat.com>
12
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
13
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
14
Message-id: 20170213135235.12274-10-pbonzini@redhat.com
15
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
10
---
16
---
11
tests/qemu-iotests/iotests.py | 9 ++++++++-
17
block/blkdebug.c | 9 +--------
12
1 file changed, 8 insertions(+), 1 deletion(-)
18
1 file changed, 1 insertion(+), 8 deletions(-)
13
19
14
diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
20
diff --git a/block/blkdebug.c b/block/blkdebug.c
15
index XXXXXXX..XXXXXXX 100644
21
index XXXXXXX..XXXXXXX 100644
16
--- a/tests/qemu-iotests/iotests.py
22
--- a/block/blkdebug.c
17
+++ b/tests/qemu-iotests/iotests.py
23
+++ b/block/blkdebug.c
18
@@ -XXX,XX +XXX,XX @@ def _verify_aio_mode(supported_aio_modes: Sequence[str] = ()) -> None:
24
@@ -XXX,XX +XXX,XX @@ out:
19
if supported_aio_modes and (aiomode not in supported_aio_modes):
25
return ret;
20
notrun('not suitable for this aio mode: %s' % aiomode)
26
}
21
27
22
+def _verify_formats(required_formats: Sequence[str] = ()) -> None:
28
-static void error_callback_bh(void *opaque)
23
+ usf_list = list(set(required_formats) - set(supported_formats()))
29
-{
24
+ if usf_list:
30
- Coroutine *co = opaque;
25
+ notrun(f'formats {usf_list} are not whitelisted')
31
- qemu_coroutine_enter(co);
26
+
32
-}
27
def supports_quorum():
33
-
28
return 'quorum' in qemu_img_pipe('--help')
34
static int inject_error(BlockDriverState *bs, BlkdebugRule *rule)
29
35
{
30
@@ -XXX,XX +XXX,XX @@ def execute_setup_common(supported_fmts: Sequence[str] = (),
36
BDRVBlkdebugState *s = bs->opaque;
31
supported_aio_modes: Sequence[str] = (),
37
@@ -XXX,XX +XXX,XX @@ static int inject_error(BlockDriverState *bs, BlkdebugRule *rule)
32
unsupported_fmts: Sequence[str] = (),
38
}
33
supported_protocols: Sequence[str] = (),
39
34
- unsupported_protocols: Sequence[str] = ()) -> bool:
40
if (!immediately) {
35
+ unsupported_protocols: Sequence[str] = (),
41
- aio_bh_schedule_oneshot(bdrv_get_aio_context(bs), error_callback_bh,
36
+ required_fmts: Sequence[str] = ()) -> bool:
42
- qemu_coroutine_self());
37
"""
43
+ aio_co_schedule(qemu_get_current_aio_context(), qemu_coroutine_self());
38
Perform necessary setup for either script-style or unittest-style tests.
44
qemu_coroutine_yield();
39
45
}
40
@@ -XXX,XX +XXX,XX @@ def execute_setup_common(supported_fmts: Sequence[str] = (),
41
_verify_platform(supported=supported_platforms)
42
_verify_cache_mode(supported_cache_modes)
43
_verify_aio_mode(supported_aio_modes)
44
+ _verify_formats(required_fmts)
45
46
return debug
47
46
48
--
47
--
49
2.29.2
48
2.9.3
50
49
51
50
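The one-line replacement for the bottom half in the blkdebug patch above is a reusable
idiom: a coroutine can requeue itself at the tail of the AioContext it is already
running in and yield, instead of arming a one-shot BH whose only job is to re-enter
it. A minimal sketch, with only the wrapper name invented and assuming the QEMU tree:

/* Sketch of the "defer myself to the current AioContext" idiom. */
#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "block/aio.h"

static coroutine_fn void yield_to_event_loop_once(void)
{
    aio_co_schedule(qemu_get_current_aio_context(), qemu_coroutine_self());
    qemu_coroutine_yield();    /* resumed when the scheduled entry runs */
}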
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
We should never set permissions other than the cumulative permissions of
3
qed_aio_start_io and qed_aio_next_io will not have to acquire/release
4
parents. During bdrv_reopen_multiple() we _check_ for synthetic
4
the AioContext, while qed_aio_next_io_cb will. Split the functionality
5
permissions but when we do _set_ the graph is already updated.
5
and gain a little type-safety in the process.
6
Add an assertion to bdrv_reopen_multiple(); the other cases are more
7
obvious.
8
6
9
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
10
Message-Id: <20201106124241.16950-6-vsementsov@virtuozzo.com>
8
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
11
Reviewed-by: Max Reitz <mreitz@redhat.com>
9
Reviewed-by: Fam Zheng <famz@redhat.com>
12
Signed-off-by: Max Reitz <mreitz@redhat.com>
10
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
11
Message-id: 20170213135235.12274-11-pbonzini@redhat.com
12
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
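
For reference, the wrapper split described above, condensed from the qed hunks
below (QEDAIOCB and qed_aio_next_io() come from block/qed.c):

    /* Typed entry point for callers that already hold a QEDAIOCB. */
    static void qed_aio_start_io(QEDAIOCB *acb)
    {
        qed_aio_next_io(acb, 0);
    }

    /* void *opaque callback wrapper, so the cast is confined to one place. */
    static void qed_aio_next_io_cb(void *opaque, int ret)
    {
        QEDAIOCB *acb = opaque;

        qed_aio_next_io(acb, ret);
    }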
13
---
13
---
14
block.c | 29 +++++++++++++++--------------
14
block/qed.c | 39 +++++++++++++++++++++++++--------------
15
1 file changed, 15 insertions(+), 14 deletions(-)
15
1 file changed, 25 insertions(+), 14 deletions(-)
16
16
17
diff --git a/block.c b/block.c
17
diff --git a/block/qed.c b/block/qed.c
18
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
19
--- a/block.c
19
--- a/block/qed.c
20
+++ b/block.c
20
+++ b/block/qed.c
21
@@ -XXX,XX +XXX,XX @@ static void bdrv_abort_perm_update(BlockDriverState *bs)
21
@@ -XXX,XX +XXX,XX @@ static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
22
return l2_table;
23
}
24
25
-static void qed_aio_next_io(void *opaque, int ret);
26
+static void qed_aio_next_io(QEDAIOCB *acb, int ret);
27
+
28
+static void qed_aio_start_io(QEDAIOCB *acb)
29
+{
30
+ qed_aio_next_io(acb, 0);
31
+}
32
+
33
+static void qed_aio_next_io_cb(void *opaque, int ret)
34
+{
35
+ QEDAIOCB *acb = opaque;
36
+
37
+ qed_aio_next_io(acb, ret);
38
+}
39
40
static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
41
{
42
@@ -XXX,XX +XXX,XX @@ static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
43
44
acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
45
if (acb) {
46
- qed_aio_next_io(acb, 0);
47
+ qed_aio_start_io(acb);
22
}
48
}
23
}
49
}
24
50
25
-static void bdrv_set_perm(BlockDriverState *bs, uint64_t cumulative_perms,
51
@@ -XXX,XX +XXX,XX @@ static void qed_aio_complete(QEDAIOCB *acb, int ret)
26
- uint64_t cumulative_shared_perms)
52
QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
27
+static void bdrv_set_perm(BlockDriverState *bs)
53
acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
28
{
54
if (acb) {
29
+ uint64_t cumulative_perms, cumulative_shared_perms;
55
- qed_aio_next_io(acb, 0);
30
BlockDriver *drv = bs->drv;
56
+ qed_aio_start_io(acb);
31
BdrvChild *c;
57
} else if (s->header.features & QED_F_NEED_CHECK) {
32
58
qed_start_need_check_timer(s);
33
@@ -XXX,XX +XXX,XX @@ static void bdrv_set_perm(BlockDriverState *bs, uint64_t cumulative_perms,
59
}
60
@@ -XXX,XX +XXX,XX @@ static void qed_commit_l2_update(void *opaque, int ret)
61
acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
62
assert(acb->request.l2_table != NULL);
63
64
- qed_aio_next_io(opaque, ret);
65
+ qed_aio_next_io(acb, ret);
66
}
67
68
/**
69
@@ -XXX,XX +XXX,XX @@ static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
70
if (need_alloc) {
71
/* Write out the whole new L2 table */
72
qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
73
- qed_aio_write_l1_update, acb);
74
+ qed_aio_write_l1_update, acb);
75
} else {
76
/* Write out only the updated part of the L2 table */
77
qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
78
- qed_aio_next_io, acb);
79
+ qed_aio_next_io_cb, acb);
80
}
81
return;
82
83
@@ -XXX,XX +XXX,XX @@ static void qed_aio_write_main(void *opaque, int ret)
84
}
85
86
if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
87
- next_fn = qed_aio_next_io;
88
+ next_fn = qed_aio_next_io_cb;
89
} else {
90
if (s->bs->backing) {
91
next_fn = qed_aio_write_flush_before_l2_update;
92
@@ -XXX,XX +XXX,XX @@ static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
93
if (acb->flags & QED_AIOCB_ZERO) {
94
/* Skip ahead if the clusters are already zero */
95
if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
96
- qed_aio_next_io(acb, 0);
97
+ qed_aio_start_io(acb);
98
return;
99
}
100
101
@@ -XXX,XX +XXX,XX @@ static void qed_aio_read_data(void *opaque, int ret,
102
/* Handle zero cluster and backing file reads */
103
if (ret == QED_CLUSTER_ZERO) {
104
qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
105
- qed_aio_next_io(acb, 0);
106
+ qed_aio_start_io(acb);
107
return;
108
} else if (ret != QED_CLUSTER_FOUND) {
109
qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
110
- &acb->backing_qiov, qed_aio_next_io, acb);
111
+ &acb->backing_qiov, qed_aio_next_io_cb, acb);
34
return;
112
return;
35
}
113
}
36
114
37
+ bdrv_get_cumulative_perm(bs, &cumulative_perms, &cumulative_shared_perms);
115
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
38
+
116
bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE,
39
/* Update this node */
117
&acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
40
if (drv->bdrv_set_perm) {
118
- qed_aio_next_io, acb);
41
drv->bdrv_set_perm(bs, cumulative_perms, cumulative_shared_perms);
119
+ qed_aio_next_io_cb, acb);
42
@@ -XXX,XX +XXX,XX @@ static int bdrv_child_check_perm(BdrvChild *c, BlockReopenQueue *q,
120
return;
43
121
44
static void bdrv_child_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared)
122
err:
123
@@ -XXX,XX +XXX,XX @@ err:
124
/**
125
* Begin next I/O or complete the request
126
*/
127
-static void qed_aio_next_io(void *opaque, int ret)
128
+static void qed_aio_next_io(QEDAIOCB *acb, int ret)
45
{
129
{
46
- uint64_t cumulative_perms, cumulative_shared_perms;
130
- QEDAIOCB *acb = opaque;
47
-
131
BDRVQEDState *s = acb_to_s(acb);
48
c->has_backup_perm = false;
132
QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ?
49
133
qed_aio_write_data : qed_aio_read_data;
50
c->perm = perm;
134
@@ -XXX,XX +XXX,XX @@ static BlockAIOCB *qed_aio_setup(BlockDriverState *bs,
51
c->shared_perm = shared;
135
qemu_iovec_init(&acb->cur_qiov, qiov->niov);
52
136
53
- bdrv_get_cumulative_perm(c->bs, &cumulative_perms,
137
/* Start request */
54
- &cumulative_shared_perms);
138
- qed_aio_next_io(acb, 0);
55
- bdrv_set_perm(c->bs, cumulative_perms, cumulative_shared_perms);
139
+ qed_aio_start_io(acb);
56
+ bdrv_set_perm(c->bs);
140
return &acb->common;
57
}
141
}
58
142
59
static void bdrv_child_abort_perm_update(BdrvChild *c)
60
@@ -XXX,XX +XXX,XX @@ static int bdrv_refresh_perms(BlockDriverState *bs, bool *tighten_restrictions,
61
bdrv_abort_perm_update(bs);
62
return ret;
63
}
64
- bdrv_set_perm(bs, perm, shared_perm);
65
+ bdrv_set_perm(bs);
66
67
return 0;
68
}
69
@@ -XXX,XX +XXX,XX @@ static void bdrv_replace_child_noperm(BdrvChild *child,
70
static void bdrv_replace_child(BdrvChild *child, BlockDriverState *new_bs)
71
{
72
BlockDriverState *old_bs = child->bs;
73
- uint64_t perm, shared_perm;
74
75
/* Asserts that child->frozen == false */
76
bdrv_replace_child_noperm(child, new_bs);
77
@@ -XXX,XX +XXX,XX @@ static void bdrv_replace_child(BdrvChild *child, BlockDriverState *new_bs)
78
* restrictions.
79
*/
80
if (new_bs) {
81
- bdrv_get_cumulative_perm(new_bs, &perm, &shared_perm);
82
- bdrv_set_perm(new_bs, perm, shared_perm);
83
+ bdrv_set_perm(new_bs);
84
}
85
86
if (old_bs) {
87
@@ -XXX,XX +XXX,XX @@ cleanup_perm:
88
}
89
90
if (ret == 0) {
91
- bdrv_set_perm(state->bs, state->perm, state->shared_perm);
92
+ uint64_t perm, shared;
93
+
94
+ bdrv_get_cumulative_perm(state->bs, &perm, &shared);
95
+ assert(perm == state->perm);
96
+ assert(shared == state->shared_perm);
97
+
98
+ bdrv_set_perm(state->bs);
99
} else {
100
bdrv_abort_perm_update(state->bs);
101
if (state->replace_backing_bs && state->new_backing_bs) {
102
@@ -XXX,XX +XXX,XX @@ static void bdrv_replace_node_common(BlockDriverState *from,
103
bdrv_unref(from);
104
}
105
106
- bdrv_get_cumulative_perm(to, &perm, &shared);
107
- bdrv_set_perm(to, perm, shared);
108
+ bdrv_set_perm(to);
109
110
out:
111
g_slist_free(list);
112
--
143
--
113
2.29.2
144
2.9.3
114
145
115
146
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
Do generic processing even for drivers which define .bdrv_check_perm
3
The AioContext data structures are now protected by list_lock and/or
4
handler. This is needed for the upcoming preallocate filter: it will need to do
4
they are walked with FOREACH_RCU primitives. There is no need anymore
5
an additional action on bdrv_check_perm, but we don't want to reimplement
5
to acquire the AioContext for the entire duration of aio_dispatch.
6
the generic logic.
6
Instead, just acquire it before and after invoking the callbacks.
7
The next step is then to push it further down.
7
8
8
The patch doesn't change existing behaviour: the only driver that
9
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
9
implements bdrv_check_perm is file-posix, but it never has any
10
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
10
children.
11
Reviewed-by: Fam Zheng <famz@redhat.com>
12
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
13
Message-id: 20170213135235.12274-12-pbonzini@redhat.com
14
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
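
As a sketch of the new locking discipline (IOHandler is QEMU's handler typedef;
the helper name is only illustrative):

    /* Illustrative helper: the AioContext lock is taken just around each
     * callback invocation instead of being held for all of aio_dispatch().
     */
    static void dispatch_one(AioContext *ctx, IOHandler *cb, void *opaque)
    {
        aio_context_acquire(ctx);
        cb(opaque);
        aio_context_release(ctx);
    }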
15
---
16
util/aio-posix.c | 25 +++++++++++--------------
17
util/aio-win32.c | 15 +++++++--------
18
util/async.c | 2 ++
19
3 files changed, 20 insertions(+), 22 deletions(-)
11
20
12
Also, bdrv_set_perm() no longer stops processing if the driver has a
21
diff --git a/util/aio-posix.c b/util/aio-posix.c
13
.bdrv_set_perm handler as well.
14
15
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
16
Message-Id: <20201021145859.11201-8-vsementsov@virtuozzo.com>
17
Reviewed-by: Max Reitz <mreitz@redhat.com>
18
Signed-off-by: Max Reitz <mreitz@redhat.com>
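
Condensed from the block.c hunk below, the resulting control flow in
bdrv_check_perm(): the driver hook no longer short-circuits the generic
per-child processing, only its return value is checked:

    if (drv->bdrv_check_perm) {
        ret = drv->bdrv_check_perm(bs, cumulative_perms,
                                   cumulative_shared_perms, errp);
        if (ret < 0) {
            return ret;
        }
    }
    /* ...fall through to the generic child-permission handling... */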
19
---
20
block.c | 7 +++++--
21
1 file changed, 5 insertions(+), 2 deletions(-)
22
23
diff --git a/block.c b/block.c
24
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
25
--- a/block.c
23
--- a/util/aio-posix.c
26
+++ b/block.c
24
+++ b/util/aio-posix.c
27
@@ -XXX,XX +XXX,XX @@ static int bdrv_check_perm(BlockDriverState *bs, BlockReopenQueue *q,
25
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx)
26
(revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
27
aio_node_check(ctx, node->is_external) &&
28
node->io_read) {
29
+ aio_context_acquire(ctx);
30
node->io_read(node->opaque);
31
+ aio_context_release(ctx);
32
33
/* aio_notify() does not count as progress */
34
if (node->opaque != &ctx->notifier) {
35
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx)
36
(revents & (G_IO_OUT | G_IO_ERR)) &&
37
aio_node_check(ctx, node->is_external) &&
38
node->io_write) {
39
+ aio_context_acquire(ctx);
40
node->io_write(node->opaque);
41
+ aio_context_release(ctx);
42
progress = true;
43
}
44
45
@@ -XXX,XX +XXX,XX @@ bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
28
}
46
}
29
47
30
if (drv->bdrv_check_perm) {
48
/* Run our timers */
31
- return drv->bdrv_check_perm(bs, cumulative_perms,
49
+ aio_context_acquire(ctx);
32
- cumulative_shared_perms, errp);
50
progress |= timerlistgroup_run_timers(&ctx->tlg);
33
+ ret = drv->bdrv_check_perm(bs, cumulative_perms,
51
+ aio_context_release(ctx);
34
+ cumulative_shared_perms, errp);
52
35
+ if (ret < 0) {
53
return progress;
36
+ return ret;
54
}
37
+ }
55
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
56
int64_t timeout;
57
int64_t start = 0;
58
59
- aio_context_acquire(ctx);
60
- progress = false;
61
-
62
/* aio_notify can avoid the expensive event_notifier_set if
63
* everything (file descriptors, bottom halves, timers) will
64
* be re-evaluated before the next blocking poll(). This is
65
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
66
start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
38
}
67
}
39
68
40
/* Drivers that never have children can omit .bdrv_child_perm() */
69
- if (try_poll_mode(ctx, blocking)) {
70
- progress = true;
71
- } else {
72
+ aio_context_acquire(ctx);
73
+ progress = try_poll_mode(ctx, blocking);
74
+ aio_context_release(ctx);
75
+
76
+ if (!progress) {
77
assert(npfd == 0);
78
79
/* fill pollfds */
80
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
81
timeout = blocking ? aio_compute_timeout(ctx) : 0;
82
83
/* wait until next event */
84
- if (timeout) {
85
- aio_context_release(ctx);
86
- }
87
if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
88
AioHandler epoll_handler;
89
90
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
91
} else {
92
ret = qemu_poll_ns(pollfds, npfd, timeout);
93
}
94
- if (timeout) {
95
- aio_context_acquire(ctx);
96
- }
97
}
98
99
if (blocking) {
100
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
101
progress = true;
102
}
103
104
- aio_context_release(ctx);
105
-
106
return progress;
107
}
108
109
diff --git a/util/aio-win32.c b/util/aio-win32.c
110
index XXXXXXX..XXXXXXX 100644
111
--- a/util/aio-win32.c
112
+++ b/util/aio-win32.c
113
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
114
(revents || event_notifier_get_handle(node->e) == event) &&
115
node->io_notify) {
116
node->pfd.revents = 0;
117
+ aio_context_acquire(ctx);
118
node->io_notify(node->e);
119
+ aio_context_release(ctx);
120
121
/* aio_notify() does not count as progress */
122
if (node->e != &ctx->notifier) {
123
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
124
(node->io_read || node->io_write)) {
125
node->pfd.revents = 0;
126
if ((revents & G_IO_IN) && node->io_read) {
127
+ aio_context_acquire(ctx);
128
node->io_read(node->opaque);
129
+ aio_context_release(ctx);
130
progress = true;
131
}
132
if ((revents & G_IO_OUT) && node->io_write) {
133
+ aio_context_acquire(ctx);
134
node->io_write(node->opaque);
135
+ aio_context_release(ctx);
136
progress = true;
137
}
138
139
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
140
int count;
141
int timeout;
142
143
- aio_context_acquire(ctx);
144
progress = false;
145
146
/* aio_notify can avoid the expensive event_notifier_set if
147
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
148
149
timeout = blocking && !have_select_revents
150
? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
151
- if (timeout) {
152
- aio_context_release(ctx);
153
- }
154
ret = WaitForMultipleObjects(count, events, FALSE, timeout);
155
if (blocking) {
156
assert(first);
157
atomic_sub(&ctx->notify_me, 2);
158
}
159
- if (timeout) {
160
- aio_context_acquire(ctx);
161
- }
162
163
if (first) {
164
aio_notify_accept(ctx);
165
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
166
progress |= aio_dispatch_handlers(ctx, event);
167
} while (count > 0);
168
169
+ aio_context_acquire(ctx);
170
progress |= timerlistgroup_run_timers(&ctx->tlg);
171
-
172
aio_context_release(ctx);
173
return progress;
174
}
175
diff --git a/util/async.c b/util/async.c
176
index XXXXXXX..XXXXXXX 100644
177
--- a/util/async.c
178
+++ b/util/async.c
179
@@ -XXX,XX +XXX,XX @@ int aio_bh_poll(AioContext *ctx)
180
ret = 1;
181
}
182
bh->idle = 0;
183
+ aio_context_acquire(ctx);
184
aio_bh_call(bh);
185
+ aio_context_release(ctx);
186
}
187
if (bh->deleted) {
188
deleted = true;
41
--
189
--
42
2.29.2
190
2.9.3
43
191
44
192
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
Add a flag to make a serialising request not wait: if there are conflicting
3
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
4
requests, just return an error immediately. It will be used in the upcoming
4
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
5
preallocate filter.
5
Reviewed-by: Fam Zheng <famz@redhat.com>
6
6
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
7
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Message-id: 20170213135235.12274-13-pbonzini@redhat.com
8
Reviewed-by: Max Reitz <mreitz@redhat.com>
8
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
9
Message-Id: <20201021145859.11201-7-vsementsov@virtuozzo.com>
10
Signed-off-by: Max Reitz <mreitz@redhat.com>
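
As an illustration of the intended use (a hypothetical caller, not part of
this series; bdrv_co_pwritev() as used elsewhere in this series):

    /* Hypothetical caller: a serialising write that fails fast with -EBUSY
     * instead of waiting when it conflicts with requests already in flight.
     */
    static int coroutine_fn try_exclusive_write(BdrvChild *child,
                                                int64_t offset,
                                                unsigned int bytes,
                                                QEMUIOVector *qiov)
    {
        return bdrv_co_pwritev(child, offset, bytes, qiov,
                               BDRV_REQ_SERIALISING | BDRV_REQ_NO_WAIT);
    }

A caller can then treat -EBUSY as "a conflicting request is in flight" and
retry or give up instead of blocking.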
11
---
9
---
12
include/block/block.h | 9 ++++++++-
10
block/qed.h | 3 +++
13
block/io.c | 11 ++++++++++-
11
block/curl.c | 2 ++
14
2 files changed, 18 insertions(+), 2 deletions(-)
12
block/io.c | 5 +++++
15
13
block/iscsi.c | 8 ++++++--
16
diff --git a/include/block/block.h b/include/block/block.h
14
block/null.c | 4 ++++
17
index XXXXXXX..XXXXXXX 100644
15
block/qed.c | 12 ++++++++++++
18
--- a/include/block/block.h
16
block/throttle-groups.c | 2 ++
19
+++ b/include/block/block.h
17
util/aio-posix.c | 2 --
20
@@ -XXX,XX +XXX,XX @@ typedef enum {
18
util/aio-win32.c | 2 --
21
* written to qiov parameter which may be NULL.
19
util/qemu-coroutine-sleep.c | 2 +-
22
*/
20
10 files changed, 35 insertions(+), 7 deletions(-)
23
BDRV_REQ_PREFETCH = 0x200,
21
24
+
22
diff --git a/block/qed.h b/block/qed.h
25
+ /*
23
index XXXXXXX..XXXXXXX 100644
26
+ * If we need to wait for other requests, just fail immediately. Used
24
--- a/block/qed.h
27
+ * only together with BDRV_REQ_SERIALISING.
25
+++ b/block/qed.h
28
+ */
26
@@ -XXX,XX +XXX,XX @@ enum {
29
+ BDRV_REQ_NO_WAIT = 0x400,
27
*/
30
+
28
typedef void QEDFindClusterFunc(void *opaque, int ret, uint64_t offset, size_t len);
31
/* Mask of valid flags */
29
32
- BDRV_REQ_MASK = 0x3ff,
30
+void qed_acquire(BDRVQEDState *s);
33
+ BDRV_REQ_MASK = 0x7ff,
31
+void qed_release(BDRVQEDState *s);
34
} BdrvRequestFlags;
32
+
35
33
/**
36
typedef struct BlockSizes {
34
* Generic callback for chaining async callbacks
35
*/
36
diff --git a/block/curl.c b/block/curl.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/block/curl.c
39
+++ b/block/curl.c
40
@@ -XXX,XX +XXX,XX @@ static void curl_multi_timeout_do(void *arg)
41
return;
42
}
43
44
+ aio_context_acquire(s->aio_context);
45
curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
46
47
curl_multi_check_completion(s);
48
+ aio_context_release(s->aio_context);
49
#else
50
abort();
51
#endif
37
diff --git a/block/io.c b/block/io.c
52
diff --git a/block/io.c b/block/io.c
38
index XXXXXXX..XXXXXXX 100644
53
index XXXXXXX..XXXXXXX 100644
39
--- a/block/io.c
54
--- a/block/io.c
40
+++ b/block/io.c
55
+++ b/block/io.c
41
@@ -XXX,XX +XXX,XX @@ bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes,
56
@@ -XXX,XX +XXX,XX @@ void bdrv_aio_cancel(BlockAIOCB *acb)
42
assert(!(bs->open_flags & BDRV_O_INACTIVE));
57
if (acb->aiocb_info->get_aio_context) {
43
assert((bs->open_flags & BDRV_O_NO_IO) == 0);
58
aio_poll(acb->aiocb_info->get_aio_context(acb), true);
44
assert(!(flags & ~BDRV_REQ_MASK));
59
} else if (acb->bs) {
45
+ assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING)));
60
+ /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
46
61
+ * assert that we're not using an I/O thread. Thread-safe
47
if (flags & BDRV_REQ_SERIALISING) {
62
+ * code should use bdrv_aio_cancel_async exclusively.
48
- bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
63
+ */
49
+ QEMU_LOCK_GUARD(&bs->reqs_lock);
64
+ assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
50
+
65
aio_poll(bdrv_get_aio_context(acb->bs), true);
51
+ tracked_request_set_serialising(req, bdrv_get_cluster_size(bs));
66
} else {
52
+
67
abort();
53
+ if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) {
68
diff --git a/block/iscsi.c b/block/iscsi.c
54
+ return -EBUSY;
69
index XXXXXXX..XXXXXXX 100644
55
+ }
70
--- a/block/iscsi.c
56
+
71
+++ b/block/iscsi.c
57
+ bdrv_wait_serialising_requests_locked(req);
72
@@ -XXX,XX +XXX,XX @@ static void iscsi_retry_timer_expired(void *opaque)
58
} else {
73
struct IscsiTask *iTask = opaque;
59
bdrv_wait_serialising_requests(req);
74
iTask->complete = 1;
60
}
75
if (iTask->co) {
76
- qemu_coroutine_enter(iTask->co);
77
+ aio_co_wake(iTask->co);
78
}
79
}
80
81
@@ -XXX,XX +XXX,XX @@ static void iscsi_nop_timed_event(void *opaque)
82
{
83
IscsiLun *iscsilun = opaque;
84
85
+ aio_context_acquire(iscsilun->aio_context);
86
if (iscsi_get_nops_in_flight(iscsilun->iscsi) >= MAX_NOP_FAILURES) {
87
error_report("iSCSI: NOP timeout. Reconnecting...");
88
iscsilun->request_timed_out = true;
89
} else if (iscsi_nop_out_async(iscsilun->iscsi, NULL, NULL, 0, NULL) != 0) {
90
error_report("iSCSI: failed to sent NOP-Out. Disabling NOP messages.");
91
- return;
92
+ goto out;
93
}
94
95
timer_mod(iscsilun->nop_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
96
iscsi_set_events(iscsilun);
97
+
98
+out:
99
+ aio_context_release(iscsilun->aio_context);
100
}
101
102
static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)
103
diff --git a/block/null.c b/block/null.c
104
index XXXXXXX..XXXXXXX 100644
105
--- a/block/null.c
106
+++ b/block/null.c
107
@@ -XXX,XX +XXX,XX @@ static void null_bh_cb(void *opaque)
108
static void null_timer_cb(void *opaque)
109
{
110
NullAIOCB *acb = opaque;
111
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
112
+
113
+ aio_context_acquire(ctx);
114
acb->common.cb(acb->common.opaque, 0);
115
+ aio_context_release(ctx);
116
timer_deinit(&acb->timer);
117
qemu_aio_unref(acb);
118
}
119
diff --git a/block/qed.c b/block/qed.c
120
index XXXXXXX..XXXXXXX 100644
121
--- a/block/qed.c
122
+++ b/block/qed.c
123
@@ -XXX,XX +XXX,XX @@ static void qed_need_check_timer_cb(void *opaque)
124
125
trace_qed_need_check_timer_cb(s);
126
127
+ qed_acquire(s);
128
qed_plug_allocating_write_reqs(s);
129
130
/* Ensure writes are on disk before clearing flag */
131
bdrv_aio_flush(s->bs->file->bs, qed_clear_need_check, s);
132
+ qed_release(s);
133
+}
134
+
135
+void qed_acquire(BDRVQEDState *s)
136
+{
137
+ aio_context_acquire(bdrv_get_aio_context(s->bs));
138
+}
139
+
140
+void qed_release(BDRVQEDState *s)
141
+{
142
+ aio_context_release(bdrv_get_aio_context(s->bs));
143
}
144
145
static void qed_start_need_check_timer(BDRVQEDState *s)
146
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
147
index XXXXXXX..XXXXXXX 100644
148
--- a/block/throttle-groups.c
149
+++ b/block/throttle-groups.c
150
@@ -XXX,XX +XXX,XX @@ static void timer_cb(BlockBackend *blk, bool is_write)
151
qemu_mutex_unlock(&tg->lock);
152
153
/* Run the request that was waiting for this timer */
154
+ aio_context_acquire(blk_get_aio_context(blk));
155
empty_queue = !qemu_co_enter_next(&blkp->throttled_reqs[is_write]);
156
+ aio_context_release(blk_get_aio_context(blk));
157
158
/* If the request queue was empty then we have to take care of
159
* scheduling the next one */
160
diff --git a/util/aio-posix.c b/util/aio-posix.c
161
index XXXXXXX..XXXXXXX 100644
162
--- a/util/aio-posix.c
163
+++ b/util/aio-posix.c
164
@@ -XXX,XX +XXX,XX @@ bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
165
}
166
167
/* Run our timers */
168
- aio_context_acquire(ctx);
169
progress |= timerlistgroup_run_timers(&ctx->tlg);
170
- aio_context_release(ctx);
171
172
return progress;
173
}
174
diff --git a/util/aio-win32.c b/util/aio-win32.c
175
index XXXXXXX..XXXXXXX 100644
176
--- a/util/aio-win32.c
177
+++ b/util/aio-win32.c
178
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
179
progress |= aio_dispatch_handlers(ctx, event);
180
} while (count > 0);
181
182
- aio_context_acquire(ctx);
183
progress |= timerlistgroup_run_timers(&ctx->tlg);
184
- aio_context_release(ctx);
185
return progress;
186
}
187
188
diff --git a/util/qemu-coroutine-sleep.c b/util/qemu-coroutine-sleep.c
189
index XXXXXXX..XXXXXXX 100644
190
--- a/util/qemu-coroutine-sleep.c
191
+++ b/util/qemu-coroutine-sleep.c
192
@@ -XXX,XX +XXX,XX @@ static void co_sleep_cb(void *opaque)
193
{
194
CoSleepCB *sleep_cb = opaque;
195
196
- qemu_coroutine_enter(sleep_cb->co);
197
+ aio_co_wake(sleep_cb->co);
198
}
199
200
void coroutine_fn co_aio_sleep_ns(AioContext *ctx, QEMUClockType type,
61
--
201
--
62
2.29.2
202
2.9.3
63
203
64
204
1
From: Alberto Garcia <berto@igalia.com>
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
This simply calls bdrv_co_pwrite_zeroes() in all children.
3
This covers both file descriptor callbacks and polling callbacks,
4
since they execute related code.
4
5
5
bs->supported_zero_flags is also set to the flags that are supported
6
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
6
by all children.
7
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
8
Reviewed-by: Fam Zheng <famz@redhat.com>
9
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
10
Message-id: 20170213135235.12274-14-pbonzini@redhat.com
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
13
block/curl.c | 16 +++++++++++++---
14
block/iscsi.c | 4 ++++
15
block/linux-aio.c | 4 ++++
16
block/nfs.c | 6 ++++++
17
block/sheepdog.c | 29 +++++++++++++++--------------
18
block/ssh.c | 29 +++++++++--------------------
19
block/win32-aio.c | 10 ++++++----
20
hw/block/virtio-blk.c | 5 ++++-
21
hw/scsi/virtio-scsi.c | 7 +++++++
22
util/aio-posix.c | 7 -------
23
util/aio-win32.c | 6 ------
24
11 files changed, 68 insertions(+), 55 deletions(-)
7
25
8
Signed-off-by: Alberto Garcia <berto@igalia.com>
26
diff --git a/block/curl.c b/block/curl.c
9
Message-Id: <2f09c842781fe336b4c2e40036bba577b7430190.1605286097.git.berto@igalia.com>
27
index XXXXXXX..XXXXXXX 100644
10
Reviewed-by: Max Reitz <mreitz@redhat.com>
28
--- a/block/curl.c
11
Signed-off-by: Max Reitz <mreitz@redhat.com>
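
Condensed from the quorum hunk below: the advertised zero-write flags are the
intersection of what every child supports, plus WRITE_UNCHANGED:

    static void quorum_refresh_flags(BlockDriverState *bs)
    {
        BDRVQuorumState *s = bs->opaque;
        int i;

        bs->supported_zero_flags =
            BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK;

        /* Keep only the flags that all children can honour. */
        for (i = 0; i < s->num_children; i++) {
            bs->supported_zero_flags &= s->children[i]->bs->supported_zero_flags;
        }

        bs->supported_zero_flags |= BDRV_REQ_WRITE_UNCHANGED;
    }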
29
+++ b/block/curl.c
12
---
30
@@ -XXX,XX +XXX,XX @@ static void curl_multi_check_completion(BDRVCURLState *s)
13
block/quorum.c | 36 ++++++++++++++++++++++++++++++++++--
31
}
14
tests/qemu-iotests/312 | 11 +++++++++++
32
}
15
tests/qemu-iotests/312.out | 8 ++++++++
33
16
3 files changed, 53 insertions(+), 2 deletions(-)
34
-static void curl_multi_do(void *arg)
17
35
+static void curl_multi_do_locked(CURLState *s)
18
diff --git a/block/quorum.c b/block/quorum.c
36
{
19
index XXXXXXX..XXXXXXX 100644
37
- CURLState *s = (CURLState *)arg;
20
--- a/block/quorum.c
38
CURLSocket *socket, *next_socket;
21
+++ b/block/quorum.c
39
int running;
22
@@ -XXX,XX +XXX,XX @@ static void write_quorum_entry(void *opaque)
40
int r;
23
QuorumChildRequest *sacb = &acb->qcrs[i];
41
@@ -XXX,XX +XXX,XX @@ static void curl_multi_do(void *arg)
24
42
}
25
sacb->bs = s->children[i]->bs;
43
}
26
- sacb->ret = bdrv_co_pwritev(s->children[i], acb->offset, acb->bytes,
44
27
- acb->qiov, acb->flags);
45
+static void curl_multi_do(void *arg)
28
+ if (acb->flags & BDRV_REQ_ZERO_WRITE) {
46
+{
29
+ sacb->ret = bdrv_co_pwrite_zeroes(s->children[i], acb->offset,
47
+ CURLState *s = (CURLState *)arg;
30
+ acb->bytes, acb->flags);
48
+
31
+ } else {
49
+ aio_context_acquire(s->s->aio_context);
32
+ sacb->ret = bdrv_co_pwritev(s->children[i], acb->offset, acb->bytes,
50
+ curl_multi_do_locked(s);
33
+ acb->qiov, acb->flags);
51
+ aio_context_release(s->s->aio_context);
34
+ }
52
+}
35
if (sacb->ret == 0) {
53
+
36
acb->success_count++;
54
static void curl_multi_read(void *arg)
37
} else {
55
{
38
@@ -XXX,XX +XXX,XX @@ static int quorum_co_pwritev(BlockDriverState *bs, uint64_t offset,
56
CURLState *s = (CURLState *)arg;
57
58
- curl_multi_do(arg);
59
+ aio_context_acquire(s->s->aio_context);
60
+ curl_multi_do_locked(s);
61
curl_multi_check_completion(s->s);
62
+ aio_context_release(s->s->aio_context);
63
}
64
65
static void curl_multi_timeout_do(void *arg)
66
diff --git a/block/iscsi.c b/block/iscsi.c
67
index XXXXXXX..XXXXXXX 100644
68
--- a/block/iscsi.c
69
+++ b/block/iscsi.c
70
@@ -XXX,XX +XXX,XX @@ iscsi_process_read(void *arg)
71
IscsiLun *iscsilun = arg;
72
struct iscsi_context *iscsi = iscsilun->iscsi;
73
74
+ aio_context_acquire(iscsilun->aio_context);
75
iscsi_service(iscsi, POLLIN);
76
iscsi_set_events(iscsilun);
77
+ aio_context_release(iscsilun->aio_context);
78
}
79
80
static void
81
@@ -XXX,XX +XXX,XX @@ iscsi_process_write(void *arg)
82
IscsiLun *iscsilun = arg;
83
struct iscsi_context *iscsi = iscsilun->iscsi;
84
85
+ aio_context_acquire(iscsilun->aio_context);
86
iscsi_service(iscsi, POLLOUT);
87
iscsi_set_events(iscsilun);
88
+ aio_context_release(iscsilun->aio_context);
89
}
90
91
static int64_t sector_lun2qemu(int64_t sector, IscsiLun *iscsilun)
92
diff --git a/block/linux-aio.c b/block/linux-aio.c
93
index XXXXXXX..XXXXXXX 100644
94
--- a/block/linux-aio.c
95
+++ b/block/linux-aio.c
96
@@ -XXX,XX +XXX,XX @@ static void qemu_laio_completion_cb(EventNotifier *e)
97
LinuxAioState *s = container_of(e, LinuxAioState, e);
98
99
if (event_notifier_test_and_clear(&s->e)) {
100
+ aio_context_acquire(s->aio_context);
101
qemu_laio_process_completions_and_submit(s);
102
+ aio_context_release(s->aio_context);
103
}
104
}
105
106
@@ -XXX,XX +XXX,XX @@ static bool qemu_laio_poll_cb(void *opaque)
107
return false;
108
}
109
110
+ aio_context_acquire(s->aio_context);
111
qemu_laio_process_completions_and_submit(s);
112
+ aio_context_release(s->aio_context);
113
return true;
114
}
115
116
diff --git a/block/nfs.c b/block/nfs.c
117
index XXXXXXX..XXXXXXX 100644
118
--- a/block/nfs.c
119
+++ b/block/nfs.c
120
@@ -XXX,XX +XXX,XX @@ static void nfs_set_events(NFSClient *client)
121
static void nfs_process_read(void *arg)
122
{
123
NFSClient *client = arg;
124
+
125
+ aio_context_acquire(client->aio_context);
126
nfs_service(client->context, POLLIN);
127
nfs_set_events(client);
128
+ aio_context_release(client->aio_context);
129
}
130
131
static void nfs_process_write(void *arg)
132
{
133
NFSClient *client = arg;
134
+
135
+ aio_context_acquire(client->aio_context);
136
nfs_service(client->context, POLLOUT);
137
nfs_set_events(client);
138
+ aio_context_release(client->aio_context);
139
}
140
141
static void nfs_co_init_task(BlockDriverState *bs, NFSRPC *task)
142
diff --git a/block/sheepdog.c b/block/sheepdog.c
143
index XXXXXXX..XXXXXXX 100644
144
--- a/block/sheepdog.c
145
+++ b/block/sheepdog.c
146
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int send_co_req(int sockfd, SheepdogReq *hdr, void *data,
39
return ret;
147
return ret;
40
}
148
}
41
149
42
+static int quorum_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
150
-static void restart_co_req(void *opaque)
43
+ int bytes, BdrvRequestFlags flags)
151
-{
44
+
152
- Coroutine *co = opaque;
153
-
154
- qemu_coroutine_enter(co);
155
-}
156
-
157
typedef struct SheepdogReqCo {
158
int sockfd;
159
BlockDriverState *bs;
160
@@ -XXX,XX +XXX,XX @@ typedef struct SheepdogReqCo {
161
unsigned int *rlen;
162
int ret;
163
bool finished;
164
+ Coroutine *co;
165
} SheepdogReqCo;
166
167
+static void restart_co_req(void *opaque)
45
+{
168
+{
46
+ return quorum_co_pwritev(bs, offset, bytes, NULL,
169
+ SheepdogReqCo *srco = opaque;
47
+ flags | BDRV_REQ_ZERO_WRITE);
170
+
171
+ aio_co_wake(srco->co);
48
+}
172
+}
49
+
173
+
50
static int64_t quorum_getlength(BlockDriverState *bs)
174
static coroutine_fn void do_co_req(void *opaque)
51
{
175
{
52
BDRVQuorumState *s = bs->opaque;
176
int ret;
53
@@ -XXX,XX +XXX,XX @@ static QemuOptsList quorum_runtime_opts = {
177
- Coroutine *co;
54
},
178
SheepdogReqCo *srco = opaque;
179
int sockfd = srco->sockfd;
180
SheepdogReq *hdr = srco->hdr;
181
@@ -XXX,XX +XXX,XX @@ static coroutine_fn void do_co_req(void *opaque)
182
unsigned int *wlen = srco->wlen;
183
unsigned int *rlen = srco->rlen;
184
185
- co = qemu_coroutine_self();
186
+ srco->co = qemu_coroutine_self();
187
aio_set_fd_handler(srco->aio_context, sockfd, false,
188
- NULL, restart_co_req, NULL, co);
189
+ NULL, restart_co_req, NULL, srco);
190
191
ret = send_co_req(sockfd, hdr, data, wlen);
192
if (ret < 0) {
193
@@ -XXX,XX +XXX,XX @@ static coroutine_fn void do_co_req(void *opaque)
194
}
195
196
aio_set_fd_handler(srco->aio_context, sockfd, false,
197
- restart_co_req, NULL, NULL, co);
198
+ restart_co_req, NULL, NULL, srco);
199
200
ret = qemu_co_recv(sockfd, hdr, sizeof(*hdr));
201
if (ret != sizeof(*hdr)) {
202
@@ -XXX,XX +XXX,XX @@ out:
203
aio_set_fd_handler(srco->aio_context, sockfd, false,
204
NULL, NULL, NULL, NULL);
205
206
+ srco->co = NULL;
207
srco->ret = ret;
208
srco->finished = true;
209
if (srco->bs) {
210
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn aio_read_response(void *opaque)
211
* We've finished all requests which belong to the AIOCB, so
212
* we can switch back to sd_co_readv/writev now.
213
*/
214
- qemu_coroutine_enter(acb->coroutine);
215
+ aio_co_wake(acb->coroutine);
216
}
217
218
return;
219
@@ -XXX,XX +XXX,XX @@ static void co_read_response(void *opaque)
220
s->co_recv = qemu_coroutine_create(aio_read_response, opaque);
221
}
222
223
- qemu_coroutine_enter(s->co_recv);
224
+ aio_co_wake(s->co_recv);
225
}
226
227
static void co_write_request(void *opaque)
228
{
229
BDRVSheepdogState *s = opaque;
230
231
- qemu_coroutine_enter(s->co_send);
232
+ aio_co_wake(s->co_send);
233
}
234
235
/*
236
diff --git a/block/ssh.c b/block/ssh.c
237
index XXXXXXX..XXXXXXX 100644
238
--- a/block/ssh.c
239
+++ b/block/ssh.c
240
@@ -XXX,XX +XXX,XX @@ static void restart_coroutine(void *opaque)
241
242
DPRINTF("co=%p", co);
243
244
- qemu_coroutine_enter(co);
245
+ aio_co_wake(co);
246
}
247
248
-static coroutine_fn void set_fd_handler(BDRVSSHState *s, BlockDriverState *bs)
249
+/* A non-blocking call returned EAGAIN, so yield, ensuring the
250
+ * handlers are set up so that we'll be rescheduled when there is an
251
+ * interesting event on the socket.
252
+ */
253
+static coroutine_fn void co_yield(BDRVSSHState *s, BlockDriverState *bs)
254
{
255
int r;
256
IOHandler *rd_handler = NULL, *wr_handler = NULL;
257
@@ -XXX,XX +XXX,XX @@ static coroutine_fn void set_fd_handler(BDRVSSHState *s, BlockDriverState *bs)
258
259
aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock,
260
false, rd_handler, wr_handler, NULL, co);
261
-}
262
-
263
-static coroutine_fn void clear_fd_handler(BDRVSSHState *s,
264
- BlockDriverState *bs)
265
-{
266
- DPRINTF("s->sock=%d", s->sock);
267
- aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock,
268
- false, NULL, NULL, NULL, NULL);
269
-}
270
-
271
-/* A non-blocking call returned EAGAIN, so yield, ensuring the
272
- * handlers are set up so that we'll be rescheduled when there is an
273
- * interesting event on the socket.
274
- */
275
-static coroutine_fn void co_yield(BDRVSSHState *s, BlockDriverState *bs)
276
-{
277
- set_fd_handler(s, bs);
278
qemu_coroutine_yield();
279
- clear_fd_handler(s, bs);
280
+ DPRINTF("s->sock=%d - back", s->sock);
281
+ aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock, false,
282
+ NULL, NULL, NULL, NULL);
283
}
284
285
/* SFTP has a function `libssh2_sftp_seek64' which seeks to a position
286
diff --git a/block/win32-aio.c b/block/win32-aio.c
287
index XXXXXXX..XXXXXXX 100644
288
--- a/block/win32-aio.c
289
+++ b/block/win32-aio.c
290
@@ -XXX,XX +XXX,XX @@ struct QEMUWin32AIOState {
291
HANDLE hIOCP;
292
EventNotifier e;
293
int count;
294
- bool is_aio_context_attached;
295
+ AioContext *aio_ctx;
55
};
296
};
56
297
57
+static void quorum_refresh_flags(BlockDriverState *bs)
298
typedef struct QEMUWin32AIOCB {
58
+{
299
@@ -XXX,XX +XXX,XX @@ static void win32_aio_process_completion(QEMUWin32AIOState *s,
59
+ BDRVQuorumState *s = bs->opaque;
300
}
60
+ int i;
301
61
+
302
62
+ bs->supported_zero_flags =
303
+ aio_context_acquire(s->aio_ctx);
63
+ BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK;
304
waiocb->common.cb(waiocb->common.opaque, ret);
64
+
305
+ aio_context_release(s->aio_ctx);
65
+ for (i = 0; i < s->num_children; i++) {
306
qemu_aio_unref(waiocb);
66
+ bs->supported_zero_flags &= s->children[i]->bs->supported_zero_flags;
307
}
67
+ }
308
68
+
309
@@ -XXX,XX +XXX,XX @@ void win32_aio_detach_aio_context(QEMUWin32AIOState *aio,
69
+ bs->supported_zero_flags |= BDRV_REQ_WRITE_UNCHANGED;
310
AioContext *old_context)
70
+}
311
{
71
+
312
aio_set_event_notifier(old_context, &aio->e, false, NULL, NULL);
72
static int quorum_open(BlockDriverState *bs, QDict *options, int flags,
313
- aio->is_aio_context_attached = false;
73
Error **errp)
314
+ aio->aio_ctx = NULL;
74
{
315
}
75
@@ -XXX,XX +XXX,XX @@ static int quorum_open(BlockDriverState *bs, QDict *options, int flags,
316
76
s->next_child_index = s->num_children;
317
void win32_aio_attach_aio_context(QEMUWin32AIOState *aio,
77
318
AioContext *new_context)
78
bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
319
{
79
+ quorum_refresh_flags(bs);
320
- aio->is_aio_context_attached = true;
80
321
+ aio->aio_ctx = new_context;
81
g_free(opened);
322
aio_set_event_notifier(new_context, &aio->e, false,
82
goto exit;
323
win32_aio_completion_cb, NULL);
83
@@ -XXX,XX +XXX,XX @@ static void quorum_add_child(BlockDriverState *bs, BlockDriverState *child_bs,
324
}
84
}
325
@@ -XXX,XX +XXX,XX @@ out_free_state:
85
s->children = g_renew(BdrvChild *, s->children, s->num_children + 1);
326
86
s->children[s->num_children++] = child;
327
void win32_aio_cleanup(QEMUWin32AIOState *aio)
87
+ quorum_refresh_flags(bs);
328
{
88
329
- assert(!aio->is_aio_context_attached);
89
out:
330
+ assert(!aio->aio_ctx);
90
bdrv_drained_end(bs);
331
CloseHandle(aio->hIOCP);
91
@@ -XXX,XX +XXX,XX @@ static void quorum_del_child(BlockDriverState *bs, BdrvChild *child,
332
event_notifier_cleanup(&aio->e);
92
s->children = g_renew(BdrvChild *, s->children, --s->num_children);
333
g_free(aio);
93
bdrv_unref_child(bs, child);
334
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
94
335
index XXXXXXX..XXXXXXX 100644
95
+ quorum_refresh_flags(bs);
336
--- a/hw/block/virtio-blk.c
96
bdrv_drained_end(bs);
337
+++ b/hw/block/virtio-blk.c
97
}
338
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_ioctl_complete(void *opaque, int status)
98
339
{
99
@@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_quorum = {
340
VirtIOBlockIoctlReq *ioctl_req = opaque;
100
341
VirtIOBlockReq *req = ioctl_req->req;
101
.bdrv_co_preadv = quorum_co_preadv,
342
- VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
102
.bdrv_co_pwritev = quorum_co_pwritev,
343
+ VirtIOBlock *s = req->dev;
103
+ .bdrv_co_pwrite_zeroes = quorum_co_pwrite_zeroes,
344
+ VirtIODevice *vdev = VIRTIO_DEVICE(s);
104
345
struct virtio_scsi_inhdr *scsi;
105
.bdrv_add_child = quorum_add_child,
346
struct sg_io_hdr *hdr;
106
.bdrv_del_child = quorum_del_child,
347
107
diff --git a/tests/qemu-iotests/312 b/tests/qemu-iotests/312
348
@@ -XXX,XX +XXX,XX @@ bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
108
index XXXXXXX..XXXXXXX 100755
349
MultiReqBuffer mrb = {};
109
--- a/tests/qemu-iotests/312
350
bool progress = false;
110
+++ b/tests/qemu-iotests/312
351
111
@@ -XXX,XX +XXX,XX @@ $QEMU_IO -c "write -P 0 $((0x200000)) $((0x10000))" "$TEST_IMG.0" | _filter_qemu
352
+ aio_context_acquire(blk_get_aio_context(s->blk));
112
$QEMU_IO -c "write -z $((0x200000)) $((0x30000))" "$TEST_IMG.1" | _filter_qemu_io
353
blk_io_plug(s->blk);
113
$QEMU_IO -c "write -P 0 $((0x200000)) $((0x20000))" "$TEST_IMG.2" | _filter_qemu_io
354
114
355
do {
115
+# Test 5: write data to a region and then zeroize it, doing it
356
@@ -XXX,XX +XXX,XX @@ bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
116
+# directly on the quorum device instead of the individual images.
357
}
117
+# This has no effect on the end result but proves that the quorum driver
358
118
+# supports 'write -z'.
359
blk_io_unplug(s->blk);
119
+$QEMU_IO -c "open -o $quorum" -c "write -P 1 $((0x250000)) $((0x10000))" | _filter_qemu_io
360
+ aio_context_release(blk_get_aio_context(s->blk));
120
+# Verify the data that we just wrote
361
return progress;
121
+$QEMU_IO -c "open -o $quorum" -c "read -P 1 $((0x250000)) $((0x10000))" | _filter_qemu_io
362
}
122
+$QEMU_IO -c "open -o $quorum" -c "write -z $((0x250000)) $((0x10000))" | _filter_qemu_io
363
123
+# Now it should read back as zeroes
364
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
124
+$QEMU_IO -c "open -o $quorum" -c "read -P 0 $((0x250000)) $((0x10000))" | _filter_qemu_io
365
index XXXXXXX..XXXXXXX 100644
125
+
366
--- a/hw/scsi/virtio-scsi.c
126
echo
367
+++ b/hw/scsi/virtio-scsi.c
127
echo '### Launch the drive-mirror job'
368
@@ -XXX,XX +XXX,XX @@ bool virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
128
echo
369
VirtIOSCSIReq *req;
129
diff --git a/tests/qemu-iotests/312.out b/tests/qemu-iotests/312.out
370
bool progress = false;
130
index XXXXXXX..XXXXXXX 100644
371
131
--- a/tests/qemu-iotests/312.out
372
+ virtio_scsi_acquire(s);
132
+++ b/tests/qemu-iotests/312.out
373
while ((req = virtio_scsi_pop_req(s, vq))) {
133
@@ -XXX,XX +XXX,XX @@ wrote 196608/196608 bytes at offset 2097152
374
progress = true;
134
192 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
375
virtio_scsi_handle_ctrl_req(s, req);
135
wrote 131072/131072 bytes at offset 2097152
376
}
136
128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
377
+ virtio_scsi_release(s);
137
+wrote 65536/65536 bytes at offset 2424832
378
return progress;
138
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
379
}
139
+read 65536/65536 bytes at offset 2424832
380
140
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
381
@@ -XXX,XX +XXX,XX @@ bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
141
+wrote 65536/65536 bytes at offset 2424832
382
142
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
383
QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
143
+read 65536/65536 bytes at offset 2424832
384
144
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
385
+ virtio_scsi_acquire(s);
145
386
do {
146
### Launch the drive-mirror job
387
virtio_queue_set_notification(vq, 0);
388
389
@@ -XXX,XX +XXX,XX @@ bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
390
QTAILQ_FOREACH_SAFE(req, &reqs, next, next) {
391
virtio_scsi_handle_cmd_req_submit(s, req);
392
}
393
+ virtio_scsi_release(s);
394
return progress;
395
}
396
397
@@ -XXX,XX +XXX,XX @@ out:
398
399
bool virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
400
{
401
+ virtio_scsi_acquire(s);
402
if (s->events_dropped) {
403
virtio_scsi_push_event(s, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
404
+ virtio_scsi_release(s);
405
return true;
406
}
407
+ virtio_scsi_release(s);
408
return false;
409
}
410
411
diff --git a/util/aio-posix.c b/util/aio-posix.c
412
index XXXXXXX..XXXXXXX 100644
413
--- a/util/aio-posix.c
414
+++ b/util/aio-posix.c
415
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx)
416
(revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
417
aio_node_check(ctx, node->is_external) &&
418
node->io_read) {
419
- aio_context_acquire(ctx);
420
node->io_read(node->opaque);
421
- aio_context_release(ctx);
422
423
/* aio_notify() does not count as progress */
424
if (node->opaque != &ctx->notifier) {
425
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx)
426
(revents & (G_IO_OUT | G_IO_ERR)) &&
427
aio_node_check(ctx, node->is_external) &&
428
node->io_write) {
429
- aio_context_acquire(ctx);
430
node->io_write(node->opaque);
431
- aio_context_release(ctx);
432
progress = true;
433
}
434
435
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
436
start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
437
}
438
439
- aio_context_acquire(ctx);
440
progress = try_poll_mode(ctx, blocking);
441
- aio_context_release(ctx);
442
-
443
if (!progress) {
444
assert(npfd == 0);
445
446
diff --git a/util/aio-win32.c b/util/aio-win32.c
447
index XXXXXXX..XXXXXXX 100644
448
--- a/util/aio-win32.c
449
+++ b/util/aio-win32.c
450
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
451
(revents || event_notifier_get_handle(node->e) == event) &&
452
node->io_notify) {
453
node->pfd.revents = 0;
454
- aio_context_acquire(ctx);
455
node->io_notify(node->e);
456
- aio_context_release(ctx);
457
458
/* aio_notify() does not count as progress */
459
if (node->e != &ctx->notifier) {
460
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
461
(node->io_read || node->io_write)) {
462
node->pfd.revents = 0;
463
if ((revents & G_IO_IN) && node->io_read) {
464
- aio_context_acquire(ctx);
465
node->io_read(node->opaque);
466
- aio_context_release(ctx);
467
progress = true;
468
}
469
if ((revents & G_IO_OUT) && node->io_write) {
470
- aio_context_acquire(ctx);
471
node->io_write(node->opaque);
472
- aio_context_release(ctx);
473
progress = true;
474
}
147
475
148
--
476
--
149
2.29.2
477
2.9.3
150
478
151
479
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
To be reused separately.
3
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
4
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
5
Reviewed-by: Fam Zheng <famz@redhat.com>
6
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
7
Message-id: 20170213135235.12274-15-pbonzini@redhat.com
8
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
9
---
10
block/archipelago.c | 3 +++
11
block/blkreplay.c | 2 +-
12
block/block-backend.c | 6 ++++++
13
block/curl.c | 26 ++++++++++++++++++--------
14
block/gluster.c | 9 +--------
15
block/io.c | 6 +++++-
16
block/iscsi.c | 6 +++++-
17
block/linux-aio.c | 15 +++++++++------
18
block/nfs.c | 3 ++-
19
block/null.c | 4 ++++
20
block/qed.c | 3 +++
21
block/rbd.c | 4 ++++
22
dma-helpers.c | 2 ++
23
hw/block/virtio-blk.c | 2 ++
24
hw/scsi/scsi-bus.c | 2 ++
25
util/async.c | 4 ++--
26
util/thread-pool.c | 2 ++
27
17 files changed, 71 insertions(+), 28 deletions(-)
4
28
5
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
29
diff --git a/block/archipelago.c b/block/archipelago.c
6
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
30
index XXXXXXX..XXXXXXX 100644
7
Message-Id: <20201021145859.11201-4-vsementsov@virtuozzo.com>
31
--- a/block/archipelago.c
8
Signed-off-by: Max Reitz <mreitz@redhat.com>
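
Condensed from the block/io.c hunk below, the wait loop that the new helper
enables (bs, self and waited come from the surrounding
bdrv_wait_serialising_requests_locked()):

    /* Keep waiting as long as some overlapping serialising request exists. */
    while ((req = bdrv_find_conflicting_request(self))) {
        self->waiting_for = req;
        qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
        self->waiting_for = NULL;
        waited = true;
    }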
32
+++ b/block/archipelago.c
9
---
33
@@ -XXX,XX +XXX,XX @@ static void qemu_archipelago_complete_aio(void *opaque)
10
block/io.c | 71 +++++++++++++++++++++++++++++++-----------------------
34
{
11
1 file changed, 41 insertions(+), 30 deletions(-)
35
AIORequestData *reqdata = (AIORequestData *) opaque;
12
36
ArchipelagoAIOCB *aio_cb = (ArchipelagoAIOCB *) reqdata->aio_cb;
37
+ AioContext *ctx = bdrv_get_aio_context(aio_cb->common.bs);
38
39
+ aio_context_acquire(ctx);
40
aio_cb->common.cb(aio_cb->common.opaque, aio_cb->ret);
41
+ aio_context_release(ctx);
42
aio_cb->status = 0;
43
44
qemu_aio_unref(aio_cb);
45
diff --git a/block/blkreplay.c b/block/blkreplay.c
46
index XXXXXXX..XXXXXXX 100755
47
--- a/block/blkreplay.c
48
+++ b/block/blkreplay.c
49
@@ -XXX,XX +XXX,XX @@ static int64_t blkreplay_getlength(BlockDriverState *bs)
50
static void blkreplay_bh_cb(void *opaque)
51
{
52
Request *req = opaque;
53
- qemu_coroutine_enter(req->co);
54
+ aio_co_wake(req->co);
55
qemu_bh_delete(req->bh);
56
g_free(req);
57
}
58
diff --git a/block/block-backend.c b/block/block-backend.c
59
index XXXXXXX..XXXXXXX 100644
60
--- a/block/block-backend.c
61
+++ b/block/block-backend.c
62
@@ -XXX,XX +XXX,XX @@ int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
63
static void error_callback_bh(void *opaque)
64
{
65
struct BlockBackendAIOCB *acb = opaque;
66
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
67
68
bdrv_dec_in_flight(acb->common.bs);
69
+ aio_context_acquire(ctx);
70
acb->common.cb(acb->common.opaque, acb->ret);
71
+ aio_context_release(ctx);
72
qemu_aio_unref(acb);
73
}
74
75
@@ -XXX,XX +XXX,XX @@ static void blk_aio_complete(BlkAioEmAIOCB *acb)
76
static void blk_aio_complete_bh(void *opaque)
77
{
78
BlkAioEmAIOCB *acb = opaque;
79
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
80
81
assert(acb->has_returned);
82
+ aio_context_acquire(ctx);
83
blk_aio_complete(acb);
84
+ aio_context_release(ctx);
85
}
86
87
static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
88
diff --git a/block/curl.c b/block/curl.c
89
index XXXXXXX..XXXXXXX 100644
90
--- a/block/curl.c
91
+++ b/block/curl.c
92
@@ -XXX,XX +XXX,XX @@ static void curl_readv_bh_cb(void *p)
93
{
94
CURLState *state;
95
int running;
96
+ int ret = -EINPROGRESS;
97
98
CURLAIOCB *acb = p;
99
- BDRVCURLState *s = acb->common.bs->opaque;
100
+ BlockDriverState *bs = acb->common.bs;
101
+ BDRVCURLState *s = bs->opaque;
102
+ AioContext *ctx = bdrv_get_aio_context(bs);
103
104
size_t start = acb->sector_num * BDRV_SECTOR_SIZE;
105
size_t end;
106
107
+ aio_context_acquire(ctx);
108
+
109
// In case we have the requested data already (e.g. read-ahead),
110
// we can just call the callback and be done.
111
switch (curl_find_buf(s, start, acb->nb_sectors * BDRV_SECTOR_SIZE, acb)) {
112
@@ -XXX,XX +XXX,XX @@ static void curl_readv_bh_cb(void *p)
113
qemu_aio_unref(acb);
114
// fall through
115
case FIND_RET_WAIT:
116
- return;
117
+ goto out;
118
default:
119
break;
120
}
121
@@ -XXX,XX +XXX,XX @@ static void curl_readv_bh_cb(void *p)
122
// No cache found, so let's start a new request
123
state = curl_init_state(acb->common.bs, s);
124
if (!state) {
125
- acb->common.cb(acb->common.opaque, -EIO);
126
- qemu_aio_unref(acb);
127
- return;
128
+ ret = -EIO;
129
+ goto out;
130
}
131
132
acb->start = 0;
133
@@ -XXX,XX +XXX,XX @@ static void curl_readv_bh_cb(void *p)
134
state->orig_buf = g_try_malloc(state->buf_len);
135
if (state->buf_len && state->orig_buf == NULL) {
136
curl_clean_state(state);
137
- acb->common.cb(acb->common.opaque, -ENOMEM);
138
- qemu_aio_unref(acb);
139
- return;
140
+ ret = -ENOMEM;
141
+ goto out;
142
}
143
state->acb[0] = acb;
144
145
@@ -XXX,XX +XXX,XX @@ static void curl_readv_bh_cb(void *p)
146
147
/* Tell curl it needs to kick things off */
148
curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
149
+
150
+out:
151
+ if (ret != -EINPROGRESS) {
152
+ acb->common.cb(acb->common.opaque, ret);
153
+ qemu_aio_unref(acb);
154
+ }
155
+ aio_context_release(ctx);
156
}
157
158
static BlockAIOCB *curl_aio_readv(BlockDriverState *bs,
159
diff --git a/block/gluster.c b/block/gluster.c
160
index XXXXXXX..XXXXXXX 100644
161
--- a/block/gluster.c
162
+++ b/block/gluster.c
163
@@ -XXX,XX +XXX,XX @@ static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf,
164
return qemu_gluster_glfs_init(gconf, errp);
165
}
166
167
-static void qemu_gluster_complete_aio(void *opaque)
168
-{
169
- GlusterAIOCB *acb = (GlusterAIOCB *)opaque;
170
-
171
- qemu_coroutine_enter(acb->coroutine);
172
-}
173
-
174
/*
175
* AIO callback routine called from GlusterFS thread.
176
*/
177
@@ -XXX,XX +XXX,XX @@ static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
178
acb->ret = -EIO; /* Partial read/write - fail it */
179
}
180
181
- aio_bh_schedule_oneshot(acb->aio_context, qemu_gluster_complete_aio, acb);
182
+ aio_co_schedule(acb->aio_context, acb->coroutine);
183
}
184
185
static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
13
diff --git a/block/io.c b/block/io.c
186
diff --git a/block/io.c b/block/io.c
14
index XXXXXXX..XXXXXXX 100644
187
index XXXXXXX..XXXXXXX 100644
15
--- a/block/io.c
188
--- a/block/io.c
16
+++ b/block/io.c
189
+++ b/block/io.c
17
@@ -XXX,XX +XXX,XX @@ static bool tracked_request_overlaps(BdrvTrackedRequest *req,
190
@@ -XXX,XX +XXX,XX @@ static void bdrv_co_drain_bh_cb(void *opaque)
191
bdrv_dec_in_flight(bs);
192
bdrv_drained_begin(bs);
193
data->done = true;
194
- qemu_coroutine_enter(co);
195
+ aio_co_wake(co);
196
}
197
198
static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
199
@@ -XXX,XX +XXX,XX @@ static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
200
static void bdrv_co_em_bh(void *opaque)
201
{
202
BlockAIOCBCoroutine *acb = opaque;
203
+ BlockDriverState *bs = acb->common.bs;
204
+ AioContext *ctx = bdrv_get_aio_context(bs);
205
206
assert(!acb->need_bh);
207
+ aio_context_acquire(ctx);
208
bdrv_co_complete(acb);
209
+ aio_context_release(ctx);
210
}
211
212
static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
213
diff --git a/block/iscsi.c b/block/iscsi.c
214
index XXXXXXX..XXXXXXX 100644
215
--- a/block/iscsi.c
216
+++ b/block/iscsi.c
217
@@ -XXX,XX +XXX,XX @@ static void
218
iscsi_bh_cb(void *p)
219
{
220
IscsiAIOCB *acb = p;
221
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
222
223
qemu_bh_delete(acb->bh);
224
225
g_free(acb->buf);
226
acb->buf = NULL;
227
228
+ aio_context_acquire(ctx);
229
acb->common.cb(acb->common.opaque, acb->status);
230
+ aio_context_release(ctx);
231
232
if (acb->task != NULL) {
233
scsi_free_scsi_task(acb->task);
234
@@ -XXX,XX +XXX,XX @@ iscsi_schedule_bh(IscsiAIOCB *acb)
235
static void iscsi_co_generic_bh_cb(void *opaque)
236
{
237
struct IscsiTask *iTask = opaque;
238
+
239
iTask->complete = 1;
240
- qemu_coroutine_enter(iTask->co);
241
+ aio_co_wake(iTask->co);
242
}
243
244
static void iscsi_retry_timer_expired(void *opaque)
245
diff --git a/block/linux-aio.c b/block/linux-aio.c
246
index XXXXXXX..XXXXXXX 100644
247
--- a/block/linux-aio.c
248
+++ b/block/linux-aio.c
249
@@ -XXX,XX +XXX,XX @@ struct LinuxAioState {
250
io_context_t ctx;
251
EventNotifier e;
252
253
- /* io queue for submit at batch */
254
+ /* io queue for submit at batch. Protected by AioContext lock. */
255
LaioQueue io_q;
256
257
- /* I/O completion processing */
258
+ /* I/O completion processing. Only runs in I/O thread. */
259
QEMUBH *completion_bh;
260
int event_idx;
261
int event_max;
262
@@ -XXX,XX +XXX,XX @@ static inline ssize_t io_event_ret(struct io_event *ev)
263
*/
264
static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
265
{
266
+ LinuxAioState *s = laiocb->ctx;
267
int ret;
268
269
ret = laiocb->ret;
270
@@ -XXX,XX +XXX,XX @@ static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
271
}
272
273
laiocb->ret = ret;
274
+ aio_context_acquire(s->aio_context);
275
if (laiocb->co) {
276
/* If the coroutine is already entered it must be in ioq_submit() and
277
* will notice laio->ret has been filled in when it eventually runs
278
@@ -XXX,XX +XXX,XX @@ static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
279
laiocb->common.cb(laiocb->common.opaque, ret);
280
qemu_aio_unref(laiocb);
281
}
282
+ aio_context_release(s->aio_context);
283
}
284
285
/**
286
@@ -XXX,XX +XXX,XX @@ static void qemu_laio_process_completions(LinuxAioState *s)
287
static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
288
{
289
qemu_laio_process_completions(s);
290
+
291
+ aio_context_acquire(s->aio_context);
292
if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
293
ioq_submit(s);
294
}
295
+ aio_context_release(s->aio_context);
296
}
297
298
static void qemu_laio_completion_bh(void *opaque)
299
@@ -XXX,XX +XXX,XX @@ static void qemu_laio_completion_cb(EventNotifier *e)
300
LinuxAioState *s = container_of(e, LinuxAioState, e);
301
302
if (event_notifier_test_and_clear(&s->e)) {
303
- aio_context_acquire(s->aio_context);
304
qemu_laio_process_completions_and_submit(s);
305
- aio_context_release(s->aio_context);
306
}
307
}
308
309
@@ -XXX,XX +XXX,XX @@ static bool qemu_laio_poll_cb(void *opaque)
310
return false;
311
}
312
313
- aio_context_acquire(s->aio_context);
314
qemu_laio_process_completions_and_submit(s);
315
- aio_context_release(s->aio_context);
18
return true;
316
return true;
19
}
317
}
20
318
21
+/* Called with self->bs->reqs_lock held */
319
@@ -XXX,XX +XXX,XX @@ void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
22
+static BdrvTrackedRequest *
320
{
23
+bdrv_find_conflicting_request(BdrvTrackedRequest *self)
321
aio_set_event_notifier(old_context, &s->e, false, NULL, NULL);
24
+{
322
qemu_bh_delete(s->completion_bh);
25
+ BdrvTrackedRequest *req;
323
+ s->aio_context = NULL;
26
+
324
}
27
+ QLIST_FOREACH(req, &self->bs->tracked_requests, list) {
325
28
+ if (req == self || (!req->serialising && !self->serialising)) {
326
void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
29
+ continue;
327
diff --git a/block/nfs.c b/block/nfs.c
30
+ }
328
index XXXXXXX..XXXXXXX 100644
31
+ if (tracked_request_overlaps(req, self->overlap_offset,
329
--- a/block/nfs.c
32
+ self->overlap_bytes))
330
+++ b/block/nfs.c
33
+ {
331
@@ -XXX,XX +XXX,XX @@ static void nfs_co_init_task(BlockDriverState *bs, NFSRPC *task)
34
+ /*
332
static void nfs_co_generic_bh_cb(void *opaque)
35
+ * Hitting this means there was a reentrant request, for
333
{
36
+ * example, a block driver issuing nested requests. This must
334
NFSRPC *task = opaque;
37
+ * never happen since it means deadlock.
335
+
38
+ */
336
task->complete = 1;
39
+ assert(qemu_coroutine_self() != req->co);
337
- qemu_coroutine_enter(task->co);
40
+
338
+ aio_co_wake(task->co);
41
+ /*
339
}
42
+ * If the request is already (indirectly) waiting for us, or
340
43
+ * will wait for us as soon as it wakes up, then just go on
341
static void
44
+ * (instead of producing a deadlock in the former case).
342
diff --git a/block/null.c b/block/null.c
45
+ */
343
index XXXXXXX..XXXXXXX 100644
46
+ if (!req->waiting_for) {
344
--- a/block/null.c
47
+ return req;
345
+++ b/block/null.c
48
+ }
346
@@ -XXX,XX +XXX,XX @@ static const AIOCBInfo null_aiocb_info = {
49
+ }
347
static void null_bh_cb(void *opaque)
50
+ }
348
{
51
+
349
NullAIOCB *acb = opaque;
52
+ return NULL;
350
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
53
+}
351
+
54
+
352
+ aio_context_acquire(ctx);
55
static bool coroutine_fn
353
acb->common.cb(acb->common.opaque, 0);
56
bdrv_wait_serialising_requests_locked(BlockDriverState *bs,
354
+ aio_context_release(ctx);
57
BdrvTrackedRequest *self)
355
qemu_aio_unref(acb);
58
{
356
}
59
BdrvTrackedRequest *req;
357
60
- bool retry;
358
diff --git a/block/qed.c b/block/qed.c
61
bool waited = false;
359
index XXXXXXX..XXXXXXX 100644
62
360
--- a/block/qed.c
63
- do {
361
+++ b/block/qed.c
64
- retry = false;
362
@@ -XXX,XX +XXX,XX @@ static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
65
- QLIST_FOREACH(req, &bs->tracked_requests, list) {
363
static void qed_aio_complete_bh(void *opaque)
66
- if (req == self || (!req->serialising && !self->serialising)) {
364
{
67
- continue;
365
QEDAIOCB *acb = opaque;
68
- }
366
+ BDRVQEDState *s = acb_to_s(acb);
69
- if (tracked_request_overlaps(req, self->overlap_offset,
367
BlockCompletionFunc *cb = acb->common.cb;
70
- self->overlap_bytes))
368
void *user_opaque = acb->common.opaque;
71
- {
369
int ret = acb->bh_ret;
72
- /* Hitting this means there was a reentrant request, for
370
@@ -XXX,XX +XXX,XX @@ static void qed_aio_complete_bh(void *opaque)
73
- * example, a block driver issuing nested requests. This must
371
qemu_aio_unref(acb);
74
- * never happen since it means deadlock.
372
75
- */
373
/* Invoke callback */
76
- assert(qemu_coroutine_self() != req->co);
374
+ qed_acquire(s);
77
-
375
cb(user_opaque, ret);
78
- /* If the request is already (indirectly) waiting for us, or
376
+ qed_release(s);
79
- * will wait for us as soon as it wakes up, then just go on
377
}
80
- * (instead of producing a deadlock in the former case). */
378
81
- if (!req->waiting_for) {
379
static void qed_aio_complete(QEDAIOCB *acb, int ret)
82
- self->waiting_for = req;
380
diff --git a/block/rbd.c b/block/rbd.c
83
- qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
381
index XXXXXXX..XXXXXXX 100644
84
- self->waiting_for = NULL;
382
--- a/block/rbd.c
85
- retry = true;
383
+++ b/block/rbd.c
86
- waited = true;
384
@@ -XXX,XX +XXX,XX @@ shutdown:
87
- break;
385
static void qemu_rbd_complete_aio(RADOSCB *rcb)
88
- }
386
{
89
- }
387
RBDAIOCB *acb = rcb->acb;
90
- }
388
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
91
- } while (retry);
389
int64_t r;
92
+ while ((req = bdrv_find_conflicting_request(self))) {
390
93
+ self->waiting_for = req;
391
r = rcb->ret;
94
+ qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
392
@@ -XXX,XX +XXX,XX @@ static void qemu_rbd_complete_aio(RADOSCB *rcb)
95
+ self->waiting_for = NULL;
393
qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
96
+ waited = true;
394
}
97
+ }
395
qemu_vfree(acb->bounce);
98
+
396
+
99
return waited;
397
+ aio_context_acquire(ctx);
100
}
398
acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret));
101
399
+ aio_context_release(ctx);
400
401
qemu_aio_unref(acb);
402
}
403
diff --git a/dma-helpers.c b/dma-helpers.c
404
index XXXXXXX..XXXXXXX 100644
405
--- a/dma-helpers.c
406
+++ b/dma-helpers.c
407
@@ -XXX,XX +XXX,XX @@ static void dma_blk_cb(void *opaque, int ret)
408
QEMU_ALIGN_DOWN(dbs->iov.size, dbs->align));
409
}
410
411
+ aio_context_acquire(dbs->ctx);
412
dbs->acb = dbs->io_func(dbs->offset, &dbs->iov,
413
dma_blk_cb, dbs, dbs->io_func_opaque);
414
+ aio_context_release(dbs->ctx);
415
assert(dbs->acb);
416
}
417
418
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
419
index XXXXXXX..XXXXXXX 100644
420
--- a/hw/block/virtio-blk.c
421
+++ b/hw/block/virtio-blk.c
422
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_dma_restart_bh(void *opaque)
423
424
s->rq = NULL;
425
426
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
427
while (req) {
428
VirtIOBlockReq *next = req->next;
429
if (virtio_blk_handle_request(req, &mrb)) {
430
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_dma_restart_bh(void *opaque)
431
if (mrb.num_reqs) {
432
virtio_blk_submit_multireq(s->blk, &mrb);
433
}
434
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
435
}
436
437
static void virtio_blk_dma_restart_cb(void *opaque, int running,
438
diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
439
index XXXXXXX..XXXXXXX 100644
440
--- a/hw/scsi/scsi-bus.c
441
+++ b/hw/scsi/scsi-bus.c
442
@@ -XXX,XX +XXX,XX @@ static void scsi_dma_restart_bh(void *opaque)
443
qemu_bh_delete(s->bh);
444
s->bh = NULL;
445
446
+ aio_context_acquire(blk_get_aio_context(s->conf.blk));
447
QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
448
scsi_req_ref(req);
449
if (req->retry) {
450
@@ -XXX,XX +XXX,XX @@ static void scsi_dma_restart_bh(void *opaque)
451
}
452
scsi_req_unref(req);
453
}
454
+ aio_context_release(blk_get_aio_context(s->conf.blk));
455
}
456
457
void scsi_req_retry(SCSIRequest *req)
458
diff --git a/util/async.c b/util/async.c
459
index XXXXXXX..XXXXXXX 100644
460
--- a/util/async.c
461
+++ b/util/async.c
462
@@ -XXX,XX +XXX,XX @@ int aio_bh_poll(AioContext *ctx)
463
ret = 1;
464
}
465
bh->idle = 0;
466
- aio_context_acquire(ctx);
467
aio_bh_call(bh);
468
- aio_context_release(ctx);
469
}
470
if (bh->deleted) {
471
deleted = true;
472
@@ -XXX,XX +XXX,XX @@ static void co_schedule_bh_cb(void *opaque)
473
Coroutine *co = QSLIST_FIRST(&straight);
474
QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
475
trace_aio_co_schedule_bh_cb(ctx, co);
476
+ aio_context_acquire(ctx);
477
qemu_coroutine_enter(co);
478
+ aio_context_release(ctx);
479
}
480
}
481
482
diff --git a/util/thread-pool.c b/util/thread-pool.c
483
index XXXXXXX..XXXXXXX 100644
484
--- a/util/thread-pool.c
485
+++ b/util/thread-pool.c
486
@@ -XXX,XX +XXX,XX @@ static void thread_pool_completion_bh(void *opaque)
487
ThreadPool *pool = opaque;
488
ThreadPoolElement *elem, *next;
489
490
+ aio_context_acquire(pool->ctx);
491
restart:
492
QLIST_FOREACH_SAFE(elem, &pool->head, all, next) {
493
if (elem->state != THREAD_DONE) {
494
@@ -XXX,XX +XXX,XX @@ restart:
495
qemu_aio_unref(elem);
496
}
497
}
498
+ aio_context_release(pool->ctx);
499
}
500
501
static void thread_pool_cancel(BlockAIOCB *acb)
102
--
502
--
103
2.29.2
503
2.9.3
104
504
105
505
diff view generated by jsdifflib
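One recurring change in this series, visible in the block/nfs.c hunk above, is converting a completion handler from qemu_coroutine_enter() to aio_co_wake(). A minimal sketch of that shape follows; the MyRequest structure and my_completion_cb() are hypothetical names used only for illustration, not code from the series:

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"
    #include "block/aio.h"

    typedef struct MyRequest {
        Coroutine *co;   /* coroutine waiting for this request to finish */
        int ret;
        bool complete;
    } MyRequest;

    static void my_completion_cb(void *opaque, int ret)
    {
        MyRequest *req = opaque;

        req->ret = ret;
        req->complete = true;
        /* aio_co_wake() resumes the coroutine in the AioContext it was
         * running in, which stays correct even if this callback fires in
         * another thread; a bare qemu_coroutine_enter() would not be
         * safe there. */
        aio_co_wake(req->co);
    }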
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
The comment states that on a misaligned request we should have already
3
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
4
been waiting. But for bdrv_padding_rmw_read, we called
4
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
5
bdrv_mark_request_serialising with align = request_alignment, and now
5
Reviewed-by: Fam Zheng <famz@redhat.com>
6
we serialise with align = cluster_size. So we may have to wait again
6
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
7
with larger alignment.
7
Message-id: 20170213135235.12274-16-pbonzini@redhat.com
8
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
9
---
10
block/archipelago.c | 3 ---
11
block/block-backend.c | 7 -------
12
block/curl.c | 2 +-
13
block/io.c | 6 +-----
14
block/iscsi.c | 3 ---
15
block/linux-aio.c | 5 +----
16
block/mirror.c | 12 +++++++++---
17
block/null.c | 8 --------
18
block/qed-cluster.c | 2 ++
19
block/qed-table.c | 12 ++++++++++--
20
block/qed.c | 4 ++--
21
block/rbd.c | 4 ----
22
block/win32-aio.c | 3 ---
23
hw/block/virtio-blk.c | 12 +++++++++++-
24
hw/scsi/scsi-disk.c | 15 +++++++++++++++
25
hw/scsi/scsi-generic.c | 20 +++++++++++++++++---
26
util/thread-pool.c | 4 +++-
27
17 files changed, 72 insertions(+), 50 deletions(-)
8
28
9
Note that the only user of BDRV_REQ_SERIALISING is backup, which issues
29
diff --git a/block/archipelago.c b/block/archipelago.c
10
cluster-aligned requests, so it seems the assertion should not fire for
30
index XXXXXXX..XXXXXXX 100644
11
now. But it's wrong anyway.
31
--- a/block/archipelago.c
12
32
+++ b/block/archipelago.c
13
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
33
@@ -XXX,XX +XXX,XX @@ static void qemu_archipelago_complete_aio(void *opaque)
14
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
34
{
15
Message-Id: <20201021145859.11201-3-vsementsov@virtuozzo.com>
35
AIORequestData *reqdata = (AIORequestData *) opaque;
16
Signed-off-by: Max Reitz <mreitz@redhat.com>
36
ArchipelagoAIOCB *aio_cb = (ArchipelagoAIOCB *) reqdata->aio_cb;
17
---
37
- AioContext *ctx = bdrv_get_aio_context(aio_cb->common.bs);
18
block/io.c | 11 +----------
38
19
1 file changed, 1 insertion(+), 10 deletions(-)
39
- aio_context_acquire(ctx);
20
40
aio_cb->common.cb(aio_cb->common.opaque, aio_cb->ret);
41
- aio_context_release(ctx);
42
aio_cb->status = 0;
43
44
qemu_aio_unref(aio_cb);
45
diff --git a/block/block-backend.c b/block/block-backend.c
46
index XXXXXXX..XXXXXXX 100644
47
--- a/block/block-backend.c
48
+++ b/block/block-backend.c
49
@@ -XXX,XX +XXX,XX @@ int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
50
static void error_callback_bh(void *opaque)
51
{
52
struct BlockBackendAIOCB *acb = opaque;
53
- AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
54
55
bdrv_dec_in_flight(acb->common.bs);
56
- aio_context_acquire(ctx);
57
acb->common.cb(acb->common.opaque, acb->ret);
58
- aio_context_release(ctx);
59
qemu_aio_unref(acb);
60
}
61
62
@@ -XXX,XX +XXX,XX @@ static void blk_aio_complete(BlkAioEmAIOCB *acb)
63
static void blk_aio_complete_bh(void *opaque)
64
{
65
BlkAioEmAIOCB *acb = opaque;
66
- AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
67
-
68
assert(acb->has_returned);
69
- aio_context_acquire(ctx);
70
blk_aio_complete(acb);
71
- aio_context_release(ctx);
72
}
73
74
static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
75
diff --git a/block/curl.c b/block/curl.c
76
index XXXXXXX..XXXXXXX 100644
77
--- a/block/curl.c
78
+++ b/block/curl.c
79
@@ -XXX,XX +XXX,XX @@ static void curl_readv_bh_cb(void *p)
80
curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
81
82
out:
83
+ aio_context_release(ctx);
84
if (ret != -EINPROGRESS) {
85
acb->common.cb(acb->common.opaque, ret);
86
qemu_aio_unref(acb);
87
}
88
- aio_context_release(ctx);
89
}
90
91
static BlockAIOCB *curl_aio_readv(BlockDriverState *bs,
21
diff --git a/block/io.c b/block/io.c
92
diff --git a/block/io.c b/block/io.c
22
index XXXXXXX..XXXXXXX 100644
93
index XXXXXXX..XXXXXXX 100644
23
--- a/block/io.c
94
--- a/block/io.c
24
+++ b/block/io.c
95
+++ b/block/io.c
25
@@ -XXX,XX +XXX,XX @@ bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes,
96
@@ -XXX,XX +XXX,XX @@ static void bdrv_co_io_em_complete(void *opaque, int ret)
26
BdrvTrackedRequest *req, int flags)
97
CoroutineIOCompletion *co = opaque;
27
{
98
28
BlockDriverState *bs = child->bs;
99
co->ret = ret;
29
- bool waited;
100
- qemu_coroutine_enter(co->coroutine);
30
int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
101
+ aio_co_wake(co->coroutine);
31
102
}
32
if (bs->read_only) {
103
33
@@ -XXX,XX +XXX,XX @@ bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes,
104
static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
34
assert(!(flags & ~BDRV_REQ_MASK));
105
@@ -XXX,XX +XXX,XX @@ static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
35
106
static void bdrv_co_em_bh(void *opaque)
36
if (flags & BDRV_REQ_SERIALISING) {
107
{
37
- waited = bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
108
BlockAIOCBCoroutine *acb = opaque;
38
- /*
109
- BlockDriverState *bs = acb->common.bs;
39
- * For a misaligned request we should have already waited earlier,
110
- AioContext *ctx = bdrv_get_aio_context(bs);
40
- * because we come after bdrv_padding_rmw_read which must be called
111
41
- * with the request already marked as serialising.
112
assert(!acb->need_bh);
42
- */
113
- aio_context_acquire(ctx);
43
- assert(!waited ||
114
bdrv_co_complete(acb);
44
- (req->offset == req->overlap_offset &&
115
- aio_context_release(ctx);
45
- req->bytes == req->overlap_bytes));
116
}
46
+ bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
117
118
static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
119
diff --git a/block/iscsi.c b/block/iscsi.c
120
index XXXXXXX..XXXXXXX 100644
121
--- a/block/iscsi.c
122
+++ b/block/iscsi.c
123
@@ -XXX,XX +XXX,XX @@ static void
124
iscsi_bh_cb(void *p)
125
{
126
IscsiAIOCB *acb = p;
127
- AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
128
129
qemu_bh_delete(acb->bh);
130
131
g_free(acb->buf);
132
acb->buf = NULL;
133
134
- aio_context_acquire(ctx);
135
acb->common.cb(acb->common.opaque, acb->status);
136
- aio_context_release(ctx);
137
138
if (acb->task != NULL) {
139
scsi_free_scsi_task(acb->task);
140
diff --git a/block/linux-aio.c b/block/linux-aio.c
141
index XXXXXXX..XXXXXXX 100644
142
--- a/block/linux-aio.c
143
+++ b/block/linux-aio.c
144
@@ -XXX,XX +XXX,XX @@ static inline ssize_t io_event_ret(struct io_event *ev)
145
*/
146
static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
147
{
148
- LinuxAioState *s = laiocb->ctx;
149
int ret;
150
151
ret = laiocb->ret;
152
@@ -XXX,XX +XXX,XX @@ static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
153
}
154
155
laiocb->ret = ret;
156
- aio_context_acquire(s->aio_context);
157
if (laiocb->co) {
158
/* If the coroutine is already entered it must be in ioq_submit() and
159
* will notice laio->ret has been filled in when it eventually runs
160
@@ -XXX,XX +XXX,XX @@ static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
161
* that!
162
*/
163
if (!qemu_coroutine_entered(laiocb->co)) {
164
- qemu_coroutine_enter(laiocb->co);
165
+ aio_co_wake(laiocb->co);
166
}
47
} else {
167
} else {
48
bdrv_wait_serialising_requests(req);
168
laiocb->common.cb(laiocb->common.opaque, ret);
49
}
169
qemu_aio_unref(laiocb);
170
}
171
- aio_context_release(s->aio_context);
172
}
173
174
/**
175
diff --git a/block/mirror.c b/block/mirror.c
176
index XXXXXXX..XXXXXXX 100644
177
--- a/block/mirror.c
178
+++ b/block/mirror.c
179
@@ -XXX,XX +XXX,XX @@ static void mirror_write_complete(void *opaque, int ret)
180
{
181
MirrorOp *op = opaque;
182
MirrorBlockJob *s = op->s;
183
+
184
+ aio_context_acquire(blk_get_aio_context(s->common.blk));
185
if (ret < 0) {
186
BlockErrorAction action;
187
188
@@ -XXX,XX +XXX,XX @@ static void mirror_write_complete(void *opaque, int ret)
189
}
190
}
191
mirror_iteration_done(op, ret);
192
+ aio_context_release(blk_get_aio_context(s->common.blk));
193
}
194
195
static void mirror_read_complete(void *opaque, int ret)
196
{
197
MirrorOp *op = opaque;
198
MirrorBlockJob *s = op->s;
199
+
200
+ aio_context_acquire(blk_get_aio_context(s->common.blk));
201
if (ret < 0) {
202
BlockErrorAction action;
203
204
@@ -XXX,XX +XXX,XX @@ static void mirror_read_complete(void *opaque, int ret)
205
}
206
207
mirror_iteration_done(op, ret);
208
- return;
209
+ } else {
210
+ blk_aio_pwritev(s->target, op->sector_num * BDRV_SECTOR_SIZE, &op->qiov,
211
+ 0, mirror_write_complete, op);
212
}
213
- blk_aio_pwritev(s->target, op->sector_num * BDRV_SECTOR_SIZE, &op->qiov,
214
- 0, mirror_write_complete, op);
215
+ aio_context_release(blk_get_aio_context(s->common.blk));
216
}
217
218
static inline void mirror_clip_sectors(MirrorBlockJob *s,
219
diff --git a/block/null.c b/block/null.c
220
index XXXXXXX..XXXXXXX 100644
221
--- a/block/null.c
222
+++ b/block/null.c
223
@@ -XXX,XX +XXX,XX @@ static const AIOCBInfo null_aiocb_info = {
224
static void null_bh_cb(void *opaque)
225
{
226
NullAIOCB *acb = opaque;
227
- AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
228
-
229
- aio_context_acquire(ctx);
230
acb->common.cb(acb->common.opaque, 0);
231
- aio_context_release(ctx);
232
qemu_aio_unref(acb);
233
}
234
235
static void null_timer_cb(void *opaque)
236
{
237
NullAIOCB *acb = opaque;
238
- AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
239
-
240
- aio_context_acquire(ctx);
241
acb->common.cb(acb->common.opaque, 0);
242
- aio_context_release(ctx);
243
timer_deinit(&acb->timer);
244
qemu_aio_unref(acb);
245
}
246
diff --git a/block/qed-cluster.c b/block/qed-cluster.c
247
index XXXXXXX..XXXXXXX 100644
248
--- a/block/qed-cluster.c
249
+++ b/block/qed-cluster.c
250
@@ -XXX,XX +XXX,XX @@ static void qed_find_cluster_cb(void *opaque, int ret)
251
unsigned int index;
252
unsigned int n;
253
254
+ qed_acquire(s);
255
if (ret) {
256
goto out;
257
}
258
@@ -XXX,XX +XXX,XX @@ static void qed_find_cluster_cb(void *opaque, int ret)
259
260
out:
261
find_cluster_cb->cb(find_cluster_cb->opaque, ret, offset, len);
262
+ qed_release(s);
263
g_free(find_cluster_cb);
264
}
265
266
diff --git a/block/qed-table.c b/block/qed-table.c
267
index XXXXXXX..XXXXXXX 100644
268
--- a/block/qed-table.c
269
+++ b/block/qed-table.c
270
@@ -XXX,XX +XXX,XX @@ static void qed_read_table_cb(void *opaque, int ret)
271
{
272
QEDReadTableCB *read_table_cb = opaque;
273
QEDTable *table = read_table_cb->table;
274
+ BDRVQEDState *s = read_table_cb->s;
275
int noffsets = read_table_cb->qiov.size / sizeof(uint64_t);
276
int i;
277
278
@@ -XXX,XX +XXX,XX @@ static void qed_read_table_cb(void *opaque, int ret)
279
}
280
281
/* Byteswap offsets */
282
+ qed_acquire(s);
283
for (i = 0; i < noffsets; i++) {
284
table->offsets[i] = le64_to_cpu(table->offsets[i]);
285
}
286
+ qed_release(s);
287
288
out:
289
/* Completion */
290
- trace_qed_read_table_cb(read_table_cb->s, read_table_cb->table, ret);
291
+ trace_qed_read_table_cb(s, read_table_cb->table, ret);
292
gencb_complete(&read_table_cb->gencb, ret);
293
}
294
295
@@ -XXX,XX +XXX,XX @@ typedef struct {
296
static void qed_write_table_cb(void *opaque, int ret)
297
{
298
QEDWriteTableCB *write_table_cb = opaque;
299
+ BDRVQEDState *s = write_table_cb->s;
300
301
- trace_qed_write_table_cb(write_table_cb->s,
302
+ trace_qed_write_table_cb(s,
303
write_table_cb->orig_table,
304
write_table_cb->flush,
305
ret);
306
@@ -XXX,XX +XXX,XX @@ static void qed_write_table_cb(void *opaque, int ret)
307
if (write_table_cb->flush) {
308
/* We still need to flush first */
309
write_table_cb->flush = false;
310
+ qed_acquire(s);
311
bdrv_aio_flush(write_table_cb->s->bs, qed_write_table_cb,
312
write_table_cb);
313
+ qed_release(s);
314
return;
315
}
316
317
@@ -XXX,XX +XXX,XX @@ static void qed_read_l2_table_cb(void *opaque, int ret)
318
CachedL2Table *l2_table = request->l2_table;
319
uint64_t l2_offset = read_l2_table_cb->l2_offset;
320
321
+ qed_acquire(s);
322
if (ret) {
323
/* can't trust loaded L2 table anymore */
324
qed_unref_l2_cache_entry(l2_table);
325
@@ -XXX,XX +XXX,XX @@ static void qed_read_l2_table_cb(void *opaque, int ret)
326
request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
327
assert(request->l2_table != NULL);
328
}
329
+ qed_release(s);
330
331
gencb_complete(&read_l2_table_cb->gencb, ret);
332
}
333
diff --git a/block/qed.c b/block/qed.c
334
index XXXXXXX..XXXXXXX 100644
335
--- a/block/qed.c
336
+++ b/block/qed.c
337
@@ -XXX,XX +XXX,XX @@ static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t l
338
}
339
340
if (cb->co) {
341
- qemu_coroutine_enter(cb->co);
342
+ aio_co_wake(cb->co);
343
}
344
}
345
346
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn qed_co_pwrite_zeroes_cb(void *opaque, int ret)
347
cb->done = true;
348
cb->ret = ret;
349
if (cb->co) {
350
- qemu_coroutine_enter(cb->co);
351
+ aio_co_wake(cb->co);
352
}
353
}
354
355
diff --git a/block/rbd.c b/block/rbd.c
356
index XXXXXXX..XXXXXXX 100644
357
--- a/block/rbd.c
358
+++ b/block/rbd.c
359
@@ -XXX,XX +XXX,XX @@ shutdown:
360
static void qemu_rbd_complete_aio(RADOSCB *rcb)
361
{
362
RBDAIOCB *acb = rcb->acb;
363
- AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
364
int64_t r;
365
366
r = rcb->ret;
367
@@ -XXX,XX +XXX,XX @@ static void qemu_rbd_complete_aio(RADOSCB *rcb)
368
qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
369
}
370
qemu_vfree(acb->bounce);
371
-
372
- aio_context_acquire(ctx);
373
acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret));
374
- aio_context_release(ctx);
375
376
qemu_aio_unref(acb);
377
}
378
diff --git a/block/win32-aio.c b/block/win32-aio.c
379
index XXXXXXX..XXXXXXX 100644
380
--- a/block/win32-aio.c
381
+++ b/block/win32-aio.c
382
@@ -XXX,XX +XXX,XX @@ static void win32_aio_process_completion(QEMUWin32AIOState *s,
383
qemu_vfree(waiocb->buf);
384
}
385
386
-
387
- aio_context_acquire(s->aio_ctx);
388
waiocb->common.cb(waiocb->common.opaque, ret);
389
- aio_context_release(s->aio_ctx);
390
qemu_aio_unref(waiocb);
391
}
392
393
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
394
index XXXXXXX..XXXXXXX 100644
395
--- a/hw/block/virtio-blk.c
396
+++ b/hw/block/virtio-blk.c
397
@@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
398
static void virtio_blk_rw_complete(void *opaque, int ret)
399
{
400
VirtIOBlockReq *next = opaque;
401
+ VirtIOBlock *s = next->dev;
402
403
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
404
while (next) {
405
VirtIOBlockReq *req = next;
406
next = req->mr_next;
407
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_rw_complete(void *opaque, int ret)
408
block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
409
virtio_blk_free_request(req);
410
}
411
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
412
}
413
414
static void virtio_blk_flush_complete(void *opaque, int ret)
415
{
416
VirtIOBlockReq *req = opaque;
417
+ VirtIOBlock *s = req->dev;
418
419
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
420
if (ret) {
421
if (virtio_blk_handle_rw_error(req, -ret, 0)) {
422
- return;
423
+ goto out;
424
}
425
}
426
427
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
428
block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
429
virtio_blk_free_request(req);
430
+
431
+out:
432
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
433
}
434
435
#ifdef __linux__
436
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_ioctl_complete(void *opaque, int status)
437
virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len);
438
439
out:
440
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
441
virtio_blk_req_complete(req, status);
442
virtio_blk_free_request(req);
443
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
444
g_free(ioctl_req);
445
}
446
447
diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
448
index XXXXXXX..XXXXXXX 100644
449
--- a/hw/scsi/scsi-disk.c
450
+++ b/hw/scsi/scsi-disk.c
451
@@ -XXX,XX +XXX,XX @@ static void scsi_aio_complete(void *opaque, int ret)
452
453
assert(r->req.aiocb != NULL);
454
r->req.aiocb = NULL;
455
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
456
if (scsi_disk_req_check_error(r, ret, true)) {
457
goto done;
458
}
459
@@ -XXX,XX +XXX,XX @@ static void scsi_aio_complete(void *opaque, int ret)
460
scsi_req_complete(&r->req, GOOD);
461
462
done:
463
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
464
scsi_req_unref(&r->req);
465
}
466
467
@@ -XXX,XX +XXX,XX @@ static void scsi_dma_complete(void *opaque, int ret)
468
assert(r->req.aiocb != NULL);
469
r->req.aiocb = NULL;
470
471
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
472
if (ret < 0) {
473
block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
474
} else {
475
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
476
}
477
scsi_dma_complete_noio(r, ret);
478
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
479
}
480
481
static void scsi_read_complete(void * opaque, int ret)
482
@@ -XXX,XX +XXX,XX @@ static void scsi_read_complete(void * opaque, int ret)
483
484
assert(r->req.aiocb != NULL);
485
r->req.aiocb = NULL;
486
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
487
if (scsi_disk_req_check_error(r, ret, true)) {
488
goto done;
489
}
490
@@ -XXX,XX +XXX,XX @@ static void scsi_read_complete(void * opaque, int ret)
491
492
done:
493
scsi_req_unref(&r->req);
494
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
495
}
496
497
/* Actually issue a read to the block device. */
498
@@ -XXX,XX +XXX,XX @@ static void scsi_do_read_cb(void *opaque, int ret)
499
assert (r->req.aiocb != NULL);
500
r->req.aiocb = NULL;
501
502
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
503
if (ret < 0) {
504
block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
505
} else {
506
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
507
}
508
scsi_do_read(opaque, ret);
509
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
510
}
511
512
/* Read more data from scsi device into buffer. */
513
@@ -XXX,XX +XXX,XX @@ static void scsi_write_complete(void * opaque, int ret)
514
assert (r->req.aiocb != NULL);
515
r->req.aiocb = NULL;
516
517
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
518
if (ret < 0) {
519
block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
520
} else {
521
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
522
}
523
scsi_write_complete_noio(r, ret);
524
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
525
}
526
527
static void scsi_write_data(SCSIRequest *req)
528
@@ -XXX,XX +XXX,XX @@ static void scsi_unmap_complete(void *opaque, int ret)
529
{
530
UnmapCBData *data = opaque;
531
SCSIDiskReq *r = data->r;
532
+ SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
533
534
assert(r->req.aiocb != NULL);
535
r->req.aiocb = NULL;
536
537
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
538
scsi_unmap_complete_noio(data, ret);
539
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
540
}
541
542
static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
543
@@ -XXX,XX +XXX,XX @@ static void scsi_write_same_complete(void *opaque, int ret)
544
545
assert(r->req.aiocb != NULL);
546
r->req.aiocb = NULL;
547
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
548
if (scsi_disk_req_check_error(r, ret, true)) {
549
goto done;
550
}
551
@@ -XXX,XX +XXX,XX @@ done:
552
scsi_req_unref(&r->req);
553
qemu_vfree(data->iov.iov_base);
554
g_free(data);
555
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
556
}
557
558
static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
559
diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c
560
index XXXXXXX..XXXXXXX 100644
561
--- a/hw/scsi/scsi-generic.c
562
+++ b/hw/scsi/scsi-generic.c
563
@@ -XXX,XX +XXX,XX @@ done:
564
static void scsi_command_complete(void *opaque, int ret)
565
{
566
SCSIGenericReq *r = (SCSIGenericReq *)opaque;
567
+ SCSIDevice *s = r->req.dev;
568
569
assert(r->req.aiocb != NULL);
570
r->req.aiocb = NULL;
571
+
572
+ aio_context_acquire(blk_get_aio_context(s->conf.blk));
573
scsi_command_complete_noio(r, ret);
574
+ aio_context_release(blk_get_aio_context(s->conf.blk));
575
}
576
577
static int execute_command(BlockBackend *blk,
578
@@ -XXX,XX +XXX,XX @@ static void scsi_read_complete(void * opaque, int ret)
579
assert(r->req.aiocb != NULL);
580
r->req.aiocb = NULL;
581
582
+ aio_context_acquire(blk_get_aio_context(s->conf.blk));
583
+
584
if (ret || r->req.io_canceled) {
585
scsi_command_complete_noio(r, ret);
586
- return;
587
+ goto done;
588
}
589
590
len = r->io_header.dxfer_len - r->io_header.resid;
591
@@ -XXX,XX +XXX,XX @@ static void scsi_read_complete(void * opaque, int ret)
592
r->len = -1;
593
if (len == 0) {
594
scsi_command_complete_noio(r, 0);
595
- return;
596
+ goto done;
597
}
598
599
/* Snoop READ CAPACITY output to set the blocksize. */
600
@@ -XXX,XX +XXX,XX @@ static void scsi_read_complete(void * opaque, int ret)
601
}
602
scsi_req_data(&r->req, len);
603
scsi_req_unref(&r->req);
604
+
605
+done:
606
+ aio_context_release(blk_get_aio_context(s->conf.blk));
607
}
608
609
/* Read more data from scsi device into buffer. */
610
@@ -XXX,XX +XXX,XX @@ static void scsi_write_complete(void * opaque, int ret)
611
assert(r->req.aiocb != NULL);
612
r->req.aiocb = NULL;
613
614
+ aio_context_acquire(blk_get_aio_context(s->conf.blk));
615
+
616
if (ret || r->req.io_canceled) {
617
scsi_command_complete_noio(r, ret);
618
- return;
619
+ goto done;
620
}
621
622
if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
623
@@ -XXX,XX +XXX,XX @@ static void scsi_write_complete(void * opaque, int ret)
624
}
625
626
scsi_command_complete_noio(r, ret);
627
+
628
+done:
629
+ aio_context_release(blk_get_aio_context(s->conf.blk));
630
}
631
632
/* Write data to a scsi device. Returns nonzero on failure.
633
diff --git a/util/thread-pool.c b/util/thread-pool.c
634
index XXXXXXX..XXXXXXX 100644
635
--- a/util/thread-pool.c
636
+++ b/util/thread-pool.c
637
@@ -XXX,XX +XXX,XX @@ restart:
638
*/
639
qemu_bh_schedule(pool->completion_bh);
640
641
+ aio_context_release(pool->ctx);
642
elem->common.cb(elem->common.opaque, elem->ret);
643
+ aio_context_acquire(pool->ctx);
644
qemu_aio_unref(elem);
645
goto restart;
646
} else {
647
@@ -XXX,XX +XXX,XX @@ static void thread_pool_co_cb(void *opaque, int ret)
648
ThreadPoolCo *co = opaque;
649
650
co->ret = ret;
651
- qemu_coroutine_enter(co->co);
652
+ aio_co_wake(co->co);
653
}
654
655
int coroutine_fn thread_pool_submit_co(ThreadPool *pool, ThreadPoolFunc *func,
50
--
656
--
51
2.29.2
657
2.9.3
52
658
53
659
diff view generated by jsdifflib
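The driver and device callbacks touched above all gain the same bracketing: take the BlockBackend's AioContext around the code that completes the request and release it afterwards. A minimal sketch of that bracketing, with hypothetical names (MyReq, my_aio_cb, my_req_complete) standing in for the real structures in the series:

    #include "qemu/osdep.h"
    #include "block/aio.h"
    #include "sysemu/block-backend.h"

    typedef struct MyReq {
        BlockBackend *blk;
        /* ... request state ... */
    } MyReq;

    static void my_req_complete(MyReq *req, int ret);   /* hypothetical */

    static void my_aio_cb(void *opaque, int ret)
    {
        MyReq *req = opaque;
        AioContext *ctx = blk_get_aio_context(req->blk);

        aio_context_acquire(ctx);
        /* completion touches state protected by the AioContext lock */
        my_req_complete(req, ret);
        aio_context_release(ctx);
    }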
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
1. BDRV_REQ_NO_SERIALISING doesn't exist anymore, so don't mention it.
3
This patch prepares for the removal of unnecessary lockcnt inc/dec pairs.
4
Extract the dispatching loop for file descriptor handlers into a new
5
function aio_dispatch_handlers, and then inline aio_dispatch into
6
aio_poll.
4
7
5
2. We are going to add one more user of BDRV_REQ_SERIALISING, so
8
aio_dispatch can now become void.
6
the comment about backup becomes a bit confusing here. The use case in
7
backup is documented in block/backup.c, so let's just drop
8
the duplication here.
9
9
10
3. The fact that BDRV_REQ_SERIALISING is only for write requests is
10
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
11
omitted. Add a note.
11
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
12
Reviewed-by: Fam Zheng <famz@redhat.com>
13
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
14
Message-id: 20170213135235.12274-17-pbonzini@redhat.com
15
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
16
---
17
include/block/aio.h | 6 +-----
18
util/aio-posix.c | 44 ++++++++++++++------------------------------
19
util/aio-win32.c | 13 ++++---------
20
util/async.c | 2 +-
21
4 files changed, 20 insertions(+), 45 deletions(-)
12
22
13
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
23
diff --git a/include/block/aio.h b/include/block/aio.h
14
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
15
Reviewed-by: Alberto Garcia <berto@igalia.com>
16
Message-Id: <20201021145859.11201-2-vsementsov@virtuozzo.com>
17
Signed-off-by: Max Reitz <mreitz@redhat.com>
18
---
19
include/block/block.h | 11 +----------
20
1 file changed, 1 insertion(+), 10 deletions(-)
21
22
diff --git a/include/block/block.h b/include/block/block.h
23
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
24
--- a/include/block/block.h
25
--- a/include/block/aio.h
25
+++ b/include/block/block.h
26
+++ b/include/block/aio.h
26
@@ -XXX,XX +XXX,XX @@ typedef enum {
27
@@ -XXX,XX +XXX,XX @@ bool aio_pending(AioContext *ctx);
27
* content. */
28
/* Dispatch any pending callbacks from the GSource attached to the AioContext.
28
BDRV_REQ_WRITE_UNCHANGED = 0x40,
29
*
30
* This is used internally in the implementation of the GSource.
31
- *
32
- * @dispatch_fds: true to process fds, false to skip them
33
- * (can be used as an optimization by callers that know there
34
- * are no fds ready)
35
*/
36
-bool aio_dispatch(AioContext *ctx, bool dispatch_fds);
37
+void aio_dispatch(AioContext *ctx);
38
39
/* Progress in completing AIO work to occur. This can issue new pending
40
* aio as a result of executing I/O completion or bh callbacks.
41
diff --git a/util/aio-posix.c b/util/aio-posix.c
42
index XXXXXXX..XXXXXXX 100644
43
--- a/util/aio-posix.c
44
+++ b/util/aio-posix.c
45
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx)
46
AioHandler *node, *tmp;
47
bool progress = false;
29
48
30
- /*
49
- /*
31
- * BDRV_REQ_SERIALISING forces request serialisation for writes.
50
- * We have to walk very carefully in case aio_set_fd_handler is
32
- * It is used to ensure that writes to the backing file of a backup process
51
- * called while we're walking.
33
- * target cannot race with a read of the backup target that defers to the
34
- * backing file.
35
- *
36
- * Note, that BDRV_REQ_SERIALISING is _not_ opposite in meaning to
37
- * BDRV_REQ_NO_SERIALISING. A more descriptive name for the latter might be
38
- * _DO_NOT_WAIT_FOR_SERIALISING, except that is too long.
39
- */
52
- */
40
+ /* Forces request serialisation. Use only with write requests. */
53
- qemu_lockcnt_inc(&ctx->list_lock);
41
BDRV_REQ_SERIALISING = 0x80,
54
-
42
55
QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
43
/* Execute the request only if the operation can be offloaded or otherwise
56
int revents;
57
58
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx)
59
}
60
}
61
62
- qemu_lockcnt_dec(&ctx->list_lock);
63
return progress;
64
}
65
66
-/*
67
- * Note that dispatch_fds == false has the side-effect of post-poning the
68
- * freeing of deleted handlers.
69
- */
70
-bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
71
+void aio_dispatch(AioContext *ctx)
72
{
73
- bool progress;
74
+ aio_bh_poll(ctx);
75
76
- /*
77
- * If there are callbacks left that have been queued, we need to call them.
78
- * Do not call select in this case, because it is possible that the caller
79
- * does not need a complete flush (as is the case for aio_poll loops).
80
- */
81
- progress = aio_bh_poll(ctx);
82
+ qemu_lockcnt_inc(&ctx->list_lock);
83
+ aio_dispatch_handlers(ctx);
84
+ qemu_lockcnt_dec(&ctx->list_lock);
85
86
- if (dispatch_fds) {
87
- progress |= aio_dispatch_handlers(ctx);
88
- }
89
-
90
- /* Run our timers */
91
- progress |= timerlistgroup_run_timers(&ctx->tlg);
92
-
93
- return progress;
94
+ timerlistgroup_run_timers(&ctx->tlg);
95
}
96
97
/* These thread-local variables are used only in a small part of aio_poll
98
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
99
npfd = 0;
100
qemu_lockcnt_dec(&ctx->list_lock);
101
102
- /* Run dispatch even if there were no readable fds to run timers */
103
- if (aio_dispatch(ctx, ret > 0)) {
104
- progress = true;
105
+ progress |= aio_bh_poll(ctx);
106
+
107
+ if (ret > 0) {
108
+ qemu_lockcnt_inc(&ctx->list_lock);
109
+ progress |= aio_dispatch_handlers(ctx);
110
+ qemu_lockcnt_dec(&ctx->list_lock);
111
}
112
113
+ progress |= timerlistgroup_run_timers(&ctx->tlg);
114
+
115
return progress;
116
}
117
118
diff --git a/util/aio-win32.c b/util/aio-win32.c
119
index XXXXXXX..XXXXXXX 100644
120
--- a/util/aio-win32.c
121
+++ b/util/aio-win32.c
122
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
123
return progress;
124
}
125
126
-bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
127
+void aio_dispatch(AioContext *ctx)
128
{
129
- bool progress;
130
-
131
- progress = aio_bh_poll(ctx);
132
- if (dispatch_fds) {
133
- progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
134
- }
135
- progress |= timerlistgroup_run_timers(&ctx->tlg);
136
- return progress;
137
+ aio_bh_poll(ctx);
138
+ aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
139
+ timerlistgroup_run_timers(&ctx->tlg);
140
}
141
142
bool aio_poll(AioContext *ctx, bool blocking)
143
diff --git a/util/async.c b/util/async.c
144
index XXXXXXX..XXXXXXX 100644
145
--- a/util/async.c
146
+++ b/util/async.c
147
@@ -XXX,XX +XXX,XX @@ aio_ctx_dispatch(GSource *source,
148
AioContext *ctx = (AioContext *) source;
149
150
assert(callback == NULL);
151
- aio_dispatch(ctx, true);
152
+ aio_dispatch(ctx);
153
return true;
154
}
155
44
--
156
--
45
2.29.2
157
2.9.3
46
158
47
159
diff view generated by jsdifflib
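With aio_dispatch() now returning void, a caller that wants to know whether any work was done polls the context instead. One common drain-style idiom, sketched here with a hypothetical ctx obtained from some BlockDriverState bs:

    AioContext *ctx = bdrv_get_aio_context(bs);

    /* run bottom halves, fd handlers and timers until no more progress */
    while (aio_poll(ctx, false)) {
        /* aio_poll() already did the work; nothing else to do here */
    }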
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
We must set the permission used for _check_. Assert that we have
3
Pull the increment/decrement pair out of aio_bh_poll and into the
4
backup and drop extra arguments.
4
callers.
5
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
6
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
7
Message-Id: <20201106124241.16950-7-vsementsov@virtuozzo.com>
7
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
8
Reviewed-by: Max Reitz <mreitz@redhat.com>
8
Reviewed-by: Fam Zheng <famz@redhat.com>
9
Signed-off-by: Max Reitz <mreitz@redhat.com>
9
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
10
Message-id: 20170213135235.12274-18-pbonzini@redhat.com
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
10
---
12
---
11
block.c | 15 ++++-----------
13
util/aio-posix.c | 8 +++-----
12
1 file changed, 4 insertions(+), 11 deletions(-)
14
util/aio-win32.c | 8 ++++----
15
util/async.c | 12 ++++++------
16
3 files changed, 13 insertions(+), 15 deletions(-)
13
17
14
diff --git a/block.c b/block.c
18
diff --git a/util/aio-posix.c b/util/aio-posix.c
15
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
16
--- a/block.c
20
--- a/util/aio-posix.c
17
+++ b/block.c
21
+++ b/util/aio-posix.c
18
@@ -XXX,XX +XXX,XX @@ static int bdrv_child_check_perm(BdrvChild *c, BlockReopenQueue *q,
22
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx)
19
GSList *ignore_children,
23
20
bool *tighten_restrictions, Error **errp);
24
void aio_dispatch(AioContext *ctx)
21
static void bdrv_child_abort_perm_update(BdrvChild *c);
25
{
22
-static void bdrv_child_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared);
26
+ qemu_lockcnt_inc(&ctx->list_lock);
23
+static void bdrv_child_set_perm(BdrvChild *c);
27
aio_bh_poll(ctx);
24
28
-
25
typedef struct BlockReopenQueueEntry {
29
- qemu_lockcnt_inc(&ctx->list_lock);
26
bool prepared;
30
aio_dispatch_handlers(ctx);
27
@@ -XXX,XX +XXX,XX @@ static void bdrv_set_perm(BlockDriverState *bs)
31
qemu_lockcnt_dec(&ctx->list_lock);
28
32
29
/* Update all children */
33
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
30
QLIST_FOREACH(c, &bs->children, next) {
31
- uint64_t cur_perm, cur_shared;
32
- bdrv_child_perm(bs, c->bs, c, c->role, NULL,
33
- cumulative_perms, cumulative_shared_perms,
34
- &cur_perm, &cur_shared);
35
- bdrv_child_set_perm(c, cur_perm, cur_shared);
36
+ bdrv_child_set_perm(c);
37
}
34
}
35
36
npfd = 0;
37
- qemu_lockcnt_dec(&ctx->list_lock);
38
39
progress |= aio_bh_poll(ctx);
40
41
if (ret > 0) {
42
- qemu_lockcnt_inc(&ctx->list_lock);
43
progress |= aio_dispatch_handlers(ctx);
44
- qemu_lockcnt_dec(&ctx->list_lock);
45
}
46
47
+ qemu_lockcnt_dec(&ctx->list_lock);
48
+
49
progress |= timerlistgroup_run_timers(&ctx->tlg);
50
51
return progress;
52
diff --git a/util/aio-win32.c b/util/aio-win32.c
53
index XXXXXXX..XXXXXXX 100644
54
--- a/util/aio-win32.c
55
+++ b/util/aio-win32.c
56
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
57
bool progress = false;
58
AioHandler *tmp;
59
60
- qemu_lockcnt_inc(&ctx->list_lock);
61
-
62
/*
63
* We have to walk very carefully in case aio_set_fd_handler is
64
* called while we're walking.
65
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
66
}
67
}
68
69
- qemu_lockcnt_dec(&ctx->list_lock);
70
return progress;
38
}
71
}
39
72
40
@@ -XXX,XX +XXX,XX @@ static int bdrv_child_check_perm(BdrvChild *c, BlockReopenQueue *q,
73
void aio_dispatch(AioContext *ctx)
41
return 0;
74
{
75
+ qemu_lockcnt_inc(&ctx->list_lock);
76
aio_bh_poll(ctx);
77
aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
78
+ qemu_lockcnt_dec(&ctx->list_lock);
79
timerlistgroup_run_timers(&ctx->tlg);
42
}
80
}
43
81
44
-static void bdrv_child_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared)
82
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
45
+static void bdrv_child_set_perm(BdrvChild *c)
83
}
84
}
85
86
- qemu_lockcnt_dec(&ctx->list_lock);
87
first = true;
88
89
/* ctx->notifier is always registered. */
90
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
91
progress |= aio_dispatch_handlers(ctx, event);
92
} while (count > 0);
93
94
+ qemu_lockcnt_dec(&ctx->list_lock);
95
+
96
progress |= timerlistgroup_run_timers(&ctx->tlg);
97
return progress;
98
}
99
diff --git a/util/async.c b/util/async.c
100
index XXXXXXX..XXXXXXX 100644
101
--- a/util/async.c
102
+++ b/util/async.c
103
@@ -XXX,XX +XXX,XX @@ void aio_bh_call(QEMUBH *bh)
104
bh->cb(bh->opaque);
105
}
106
107
-/* Multiple occurrences of aio_bh_poll cannot be called concurrently */
108
+/* Multiple occurrences of aio_bh_poll cannot be called concurrently.
109
+ * The count in ctx->list_lock is incremented before the call, and is
110
+ * not affected by the call.
111
+ */
112
int aio_bh_poll(AioContext *ctx)
46
{
113
{
47
c->has_backup_perm = false;
114
QEMUBH *bh, **bhp, *next;
48
115
int ret;
49
- c->perm = perm;
116
bool deleted = false;
50
- c->shared_perm = shared;
117
118
- qemu_lockcnt_inc(&ctx->list_lock);
51
-
119
-
52
bdrv_set_perm(c->bs);
120
ret = 0;
53
}
121
for (bh = atomic_rcu_read(&ctx->first_bh); bh; bh = next) {
54
122
next = atomic_rcu_read(&bh->next);
55
@@ -XXX,XX +XXX,XX @@ int bdrv_child_try_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared,
123
@@ -XXX,XX +XXX,XX @@ int aio_bh_poll(AioContext *ctx)
124
125
/* remove deleted bhs */
126
if (!deleted) {
127
- qemu_lockcnt_dec(&ctx->list_lock);
56
return ret;
128
return ret;
57
}
129
}
58
130
59
- bdrv_child_set_perm(c, perm, shared);
131
- if (qemu_lockcnt_dec_and_lock(&ctx->list_lock)) {
60
+ bdrv_child_set_perm(c);
132
+ if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
61
133
bhp = &ctx->first_bh;
62
return 0;
134
while (*bhp) {
135
bh = *bhp;
136
@@ -XXX,XX +XXX,XX @@ int aio_bh_poll(AioContext *ctx)
137
bhp = &bh->next;
138
}
139
}
140
- qemu_lockcnt_unlock(&ctx->list_lock);
141
+ qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
142
}
143
return ret;
63
}
144
}
64
--
145
--
65
2.29.2
146
2.9.3
66
147
67
148
diff view generated by jsdifflib
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
Make results_to_text a tool to dump results saved in a JSON file.
3
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
4
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
5
Reviewed-by: Fam Zheng <famz@redhat.com>
6
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
7
Message-id: 20170213135235.12274-19-pbonzini@redhat.com
8
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
9
---
10
include/block/block_int.h | 64 +++++++++++++++++++++++++-----------------
11
include/sysemu/block-backend.h | 14 ++++++---
12
2 files changed, 49 insertions(+), 29 deletions(-)
4
13
5
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
14
diff --git a/include/block/block_int.h b/include/block/block_int.h
6
Message-Id: <20201021145859.11201-21-vsementsov@virtuozzo.com>
15
index XXXXXXX..XXXXXXX 100644
7
Reviewed-by: Max Reitz <mreitz@redhat.com>
16
--- a/include/block/block_int.h
8
Signed-off-by: Max Reitz <mreitz@redhat.com>
17
+++ b/include/block/block_int.h
9
---
18
@@ -XXX,XX +XXX,XX @@ struct BdrvChild {
10
scripts/simplebench/results_to_text.py | 14 ++++++++++++++
19
* copied as well.
11
1 file changed, 14 insertions(+)
20
*/
12
mode change 100644 => 100755 scripts/simplebench/results_to_text.py
21
struct BlockDriverState {
13
22
- int64_t total_sectors; /* if we are reading a disk image, give its
14
diff --git a/scripts/simplebench/results_to_text.py b/scripts/simplebench/results_to_text.py
23
- size in sectors */
15
old mode 100644
24
+ /* Protected by big QEMU lock or read-only after opening. No special
16
new mode 100755
25
+ * locking needed during I/O...
17
index XXXXXXX..XXXXXXX
26
+ */
18
--- a/scripts/simplebench/results_to_text.py
27
int open_flags; /* flags used to open the file, re-used for re-open */
19
+++ b/scripts/simplebench/results_to_text.py
28
bool read_only; /* if true, the media is read only */
20
@@ -XXX,XX +XXX,XX @@
29
bool encrypted; /* if true, the media is encrypted */
21
+#!/usr/bin/env python3
30
@@ -XXX,XX +XXX,XX @@ struct BlockDriverState {
22
+#
31
bool sg; /* if true, the device is a /dev/sg* */
23
# Simple benchmarking framework
32
bool probed; /* if true, format was probed rather than specified */
24
#
33
25
# Copyright (c) 2019 Virtuozzo International GmbH.
34
- int copy_on_read; /* if nonzero, copy read backing sectors into image.
26
@@ -XXX,XX +XXX,XX @@ def results_to_text(results):
35
- note this is a reference count */
27
tab.append(row)
36
-
28
37
- CoQueue flush_queue; /* Serializing flush queue */
29
return f'All results are in {dim}\n\n' + tabulate.tabulate(tab)
38
- bool active_flush_req; /* Flush request in flight? */
39
- unsigned int write_gen; /* Current data generation */
40
- unsigned int flushed_gen; /* Flushed write generation */
41
-
42
BlockDriver *drv; /* NULL means no media */
43
void *opaque;
44
45
@@ -XXX,XX +XXX,XX @@ struct BlockDriverState {
46
BdrvChild *backing;
47
BdrvChild *file;
48
49
- /* Callback before write request is processed */
50
- NotifierWithReturnList before_write_notifiers;
51
-
52
- /* number of in-flight requests; overall and serialising */
53
- unsigned int in_flight;
54
- unsigned int serialising_in_flight;
55
-
56
- bool wakeup;
57
-
58
- /* Offset after the highest byte written to */
59
- uint64_t wr_highest_offset;
60
-
61
/* I/O Limits */
62
BlockLimits bl;
63
64
@@ -XXX,XX +XXX,XX @@ struct BlockDriverState {
65
QTAILQ_ENTRY(BlockDriverState) bs_list;
66
/* element of the list of monitor-owned BDS */
67
QTAILQ_ENTRY(BlockDriverState) monitor_list;
68
- QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps;
69
int refcnt;
70
71
- QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;
72
-
73
/* operation blockers */
74
QLIST_HEAD(, BdrvOpBlocker) op_blockers[BLOCK_OP_TYPE_MAX];
75
76
@@ -XXX,XX +XXX,XX @@ struct BlockDriverState {
77
/* The error object in use for blocking operations on backing_hd */
78
Error *backing_blocker;
79
80
+ /* Protected by AioContext lock */
30
+
81
+
82
+ /* If true, copy read backing sectors into image. Can be >1 if more
83
+ * than one client has requested copy-on-read.
84
+ */
85
+ int copy_on_read;
31
+
86
+
32
+if __name__ == '__main__':
87
+ /* If we are reading a disk image, give its size in sectors.
33
+ import sys
88
+ * Generally read-only; it is written to by load_vmstate and save_vmstate,
34
+ import json
89
+ * but the block layer is quiescent during those.
90
+ */
91
+ int64_t total_sectors;
35
+
92
+
36
+ if len(sys.argv) < 2:
93
+ /* Callback before write request is processed */
37
+ print(f'USAGE: {sys.argv[0]} results.json')
94
+ NotifierWithReturnList before_write_notifiers;
38
+ exit(1)
39
+
95
+
40
+ with open(sys.argv[1]) as f:
96
+ /* number of in-flight requests; overall and serialising */
41
+ print(results_to_text(json.load(f)))
97
+ unsigned int in_flight;
98
+ unsigned int serialising_in_flight;
99
+
100
+ bool wakeup;
101
+
102
+ /* Offset after the highest byte written to */
103
+ uint64_t wr_highest_offset;
104
+
105
/* threshold limit for writes, in bytes. "High water mark". */
106
uint64_t write_threshold_offset;
107
NotifierWithReturn write_threshold_notifier;
108
@@ -XXX,XX +XXX,XX @@ struct BlockDriverState {
109
/* counter for nested bdrv_io_plug */
110
unsigned io_plugged;
111
112
+ QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;
113
+ CoQueue flush_queue; /* Serializing flush queue */
114
+ bool active_flush_req; /* Flush request in flight? */
115
+ unsigned int write_gen; /* Current data generation */
116
+ unsigned int flushed_gen; /* Flushed write generation */
117
+
118
+ QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps;
119
+
120
+ /* do we need to tell the quest if we have a volatile write cache? */
121
+ int enable_write_cache;
122
+
123
int quiesce_counter;
124
};
125
126
diff --git a/include/sysemu/block-backend.h b/include/sysemu/block-backend.h
127
index XXXXXXX..XXXXXXX 100644
128
--- a/include/sysemu/block-backend.h
129
+++ b/include/sysemu/block-backend.h
130
@@ -XXX,XX +XXX,XX @@ typedef struct BlockDevOps {
131
* fields that must be public. This is in particular for QLIST_ENTRY() and
132
* friends so that BlockBackends can be kept in lists outside block-backend.c */
133
typedef struct BlockBackendPublic {
134
- /* I/O throttling.
135
- * throttle_state tells us if this BlockBackend has I/O limits configured.
136
- * io_limits_disabled tells us if they are currently being enforced */
137
+ /* I/O throttling has its own locking, but also some fields are
138
+ * protected by the AioContext lock.
139
+ */
140
+
141
+ /* Protected by AioContext lock. */
142
CoQueue throttled_reqs[2];
143
+
144
+ /* Nonzero if the I/O limits are currently being ignored; generally
145
+ * it is zero. */
146
unsigned int io_limits_disabled;
147
148
/* The following fields are protected by the ThrottleGroup lock.
149
- * See the ThrottleGroup documentation for details. */
150
+ * See the ThrottleGroup documentation for details.
151
+ * throttle_state tells us if I/O limits are configured. */
152
ThrottleState *throttle_state;
153
ThrottleTimers throttle_timers;
154
unsigned pending_reqs[2];
42
--
155
--
43
2.29.2
156
2.9.3
44
157
45
158
diff view generated by jsdifflib
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
It's intended to be inserted between format and protocol nodes to
3
This uses the lock-free mutex described in the paper '"Blocking without
4
preallocate additional space (expanding the protocol file) on writes
4
Locking", or LFTHREADS: A lock-free thread library' by Gidenstam and
5
crossing EOF. It improves performance for file-systems with slow
5
Papatriantafilou. The same technique is used in OSv, and in fact
6
allocation.
6
the code is essentially a conversion to C of OSv's code.
7
7
8
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
8
[Added missing coroutine_fn in tests/test-aio-multithread.c.
9
Message-Id: <20201021145859.11201-9-vsementsov@virtuozzo.com>
9
--Stefan]
10
Reviewed-by: Max Reitz <mreitz@redhat.com>
10
11
[mreitz: Two comment fixes, and bumped the version from 5.2 to 6.0]
11
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
12
Signed-off-by: Max Reitz <mreitz@redhat.com>
12
Reviewed-by: Fam Zheng <famz@redhat.com>
13
Message-id: 20170213181244.16297-2-pbonzini@redhat.com
14
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
13
---
15
---
14
docs/system/qemu-block-drivers.rst.inc | 26 ++
16
include/qemu/coroutine.h | 17 ++++-
15
qapi/block-core.json | 20 +-
17
tests/test-aio-multithread.c | 86 ++++++++++++++++++++++++
16
block/preallocate.c | 559 +++++++++++++++++++++++++
18
util/qemu-coroutine-lock.c | 155 ++++++++++++++++++++++++++++++++++++++++---
17
block/meson.build | 1 +
19
util/trace-events | 1 +
18
4 files changed, 605 insertions(+), 1 deletion(-)
20
4 files changed, 246 insertions(+), 13 deletions(-)
19
create mode 100644 block/preallocate.c
21
20
22
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
21
diff --git a/docs/system/qemu-block-drivers.rst.inc b/docs/system/qemu-block-drivers.rst.inc
22
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
23
--- a/docs/system/qemu-block-drivers.rst.inc
24
--- a/include/qemu/coroutine.h
24
+++ b/docs/system/qemu-block-drivers.rst.inc
25
+++ b/include/qemu/coroutine.h
25
@@ -XXX,XX +XXX,XX @@ on host and see if there are locks held by the QEMU process on the image file.
26
@@ -XXX,XX +XXX,XX @@ bool qemu_co_queue_empty(CoQueue *queue);
26
More than one byte could be locked by the QEMU instance, each byte of which
27
/**
27
reflects a particular permission that is acquired or protected by the running
28
* Provides a mutex that can be used to synchronise coroutines
28
block driver.
29
*/
29
+
30
+struct CoWaitRecord;
30
+Filter drivers
31
typedef struct CoMutex {
31
+~~~~~~~~~~~~~~
32
- bool locked;
32
+
33
+ /* Count of pending lockers; 0 for a free mutex, 1 for an
33
+QEMU supports several filter drivers, which don't store any data, but perform
34
+ * uncontended mutex.
34
+some additional tasks, hooking io requests.
35
+ */
35
+
36
+ unsigned locked;
36
+.. program:: filter-drivers
37
+
37
+.. option:: preallocate
38
+ /* A queue of waiters. Elements are added atomically in front of
38
+
39
+ * from_push. to_pop is only populated, and popped from, by whoever
39
+ The preallocate filter driver is intended to be inserted between format
40
+ * is in charge of the next wakeup. This can be an unlocker or,
40
+ and protocol nodes and preallocates some additional space
41
+ * through the handoff protocol, a locker that is about to go to sleep.
41
+ (expanding the protocol file) when writing past the file’s end. This can be
42
+ */
42
+ useful for file-systems with slow allocation.
43
+ QSLIST_HEAD(, CoWaitRecord) from_push, to_pop;
43
+
44
+
44
+ Supported options:
45
+ unsigned handoff, sequence;
45
+
46
+
46
+ .. program:: preallocate
47
Coroutine *holder;
47
+ .. option:: prealloc-align
48
- CoQueue queue;
48
+
49
} CoMutex;
49
+ On preallocation, align the file length to this value (in bytes), default 1M.
50
50
+
51
/**
51
+ .. program:: preallocate
52
diff --git a/tests/test-aio-multithread.c b/tests/test-aio-multithread.c
52
+ .. option:: prealloc-size
53
+
54
+ How much to preallocate (in bytes), default 128M.
55
diff --git a/qapi/block-core.json b/qapi/block-core.json
56
index XXXXXXX..XXXXXXX 100644
53
index XXXXXXX..XXXXXXX 100644
57
--- a/qapi/block-core.json
54
--- a/tests/test-aio-multithread.c
58
+++ b/qapi/block-core.json
55
+++ b/tests/test-aio-multithread.c
56
@@ -XXX,XX +XXX,XX @@ static void test_multi_co_schedule_10(void)
57
test_multi_co_schedule(10);
58
}
59
60
+/* CoMutex thread-safety. */
61
+
62
+static uint32_t atomic_counter;
63
+static uint32_t running;
64
+static uint32_t counter;
65
+static CoMutex comutex;
66
+
67
+static void coroutine_fn test_multi_co_mutex_entry(void *opaque)
68
+{
69
+ while (!atomic_mb_read(&now_stopping)) {
70
+ qemu_co_mutex_lock(&comutex);
71
+ counter++;
72
+ qemu_co_mutex_unlock(&comutex);
73
+
74
+ /* Increase atomic_counter *after* releasing the mutex. Otherwise
75
+ * there is a chance (it happens about 1 in 3 runs) that the iothread
76
+ * exits before the coroutine is woken up, causing a spurious
77
+ * assertion failure.
78
+ */
79
+ atomic_inc(&atomic_counter);
80
+ }
81
+ atomic_dec(&running);
82
+}
83
+
84
+static void test_multi_co_mutex(int threads, int seconds)
85
+{
86
+ int i;
87
+
88
+ qemu_co_mutex_init(&comutex);
89
+ counter = 0;
90
+ atomic_counter = 0;
91
+ now_stopping = false;
92
+
93
+ create_aio_contexts();
94
+ assert(threads <= NUM_CONTEXTS);
95
+ running = threads;
96
+ for (i = 0; i < threads; i++) {
97
+ Coroutine *co1 = qemu_coroutine_create(test_multi_co_mutex_entry, NULL);
98
+ aio_co_schedule(ctx[i], co1);
99
+ }
100
+
101
+ g_usleep(seconds * 1000000);
102
+
103
+ atomic_mb_set(&now_stopping, true);
104
+ while (running > 0) {
105
+ g_usleep(100000);
106
+ }
107
+
108
+ join_aio_contexts();
109
+ g_test_message("%d iterations/second\n", counter / seconds);
110
+ g_assert_cmpint(counter, ==, atomic_counter);
111
+}
112
+
113
+/* Testing with NUM_CONTEXTS threads focuses on the queue. The mutex however
114
+ * is too contended (and the threads spend too much time in aio_poll)
115
+ * to actually stress the handoff protocol.
116
+ */
117
+static void test_multi_co_mutex_1(void)
118
+{
119
+ test_multi_co_mutex(NUM_CONTEXTS, 1);
120
+}
121
+
122
+static void test_multi_co_mutex_10(void)
123
+{
124
+ test_multi_co_mutex(NUM_CONTEXTS, 10);
125
+}
126
+
127
+/* Testing with fewer threads stresses the handoff protocol too. Still, the
128
+ * case where the locker _can_ pick up a handoff is very rare, happening
129
+ * about 10 times in 1 million, so increase the runtime a bit compared to
130
+ * other "quick" testcases that only run for 1 second.
131
+ */
132
+static void test_multi_co_mutex_2_3(void)
133
+{
134
+ test_multi_co_mutex(2, 3);
135
+}
136
+
137
+static void test_multi_co_mutex_2_30(void)
138
+{
139
+ test_multi_co_mutex(2, 30);
140
+}
141
+
142
/* End of tests. */
143
144
int main(int argc, char **argv)
145
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
146
g_test_add_func("/aio/multi/lifecycle", test_lifecycle);
147
if (g_test_quick()) {
148
g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_1);
149
+ g_test_add_func("/aio/multi/mutex/contended", test_multi_co_mutex_1);
150
+ g_test_add_func("/aio/multi/mutex/handoff", test_multi_co_mutex_2_3);
151
} else {
152
g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_10);
153
+ g_test_add_func("/aio/multi/mutex/contended", test_multi_co_mutex_10);
154
+ g_test_add_func("/aio/multi/mutex/handoff", test_multi_co_mutex_2_30);
155
}
156
return g_test_run();
157
}
158
diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
159
index XXXXXXX..XXXXXXX 100644
160
--- a/util/qemu-coroutine-lock.c
161
+++ b/util/qemu-coroutine-lock.c
59
@@ -XXX,XX +XXX,XX @@
162
@@ -XXX,XX +XXX,XX @@
60
'cloop', 'compress', 'copy-on-read', 'dmg', 'file', 'ftp', 'ftps',
163
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
61
'gluster', 'host_cdrom', 'host_device', 'http', 'https', 'iscsi',
164
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
62
'luks', 'nbd', 'nfs', 'null-aio', 'null-co', 'nvme', 'parallels',
165
* THE SOFTWARE.
63
- 'qcow', 'qcow2', 'qed', 'quorum', 'raw', 'rbd',
64
+ 'preallocate', 'qcow', 'qcow2', 'qed', 'quorum', 'raw', 'rbd',
65
{ 'name': 'replication', 'if': 'defined(CONFIG_REPLICATION)' },
66
'sheepdog',
67
'ssh', 'throttle', 'vdi', 'vhdx', 'vmdk', 'vpc', 'vvfat' ] }
68
@@ -XXX,XX +XXX,XX @@
69
'data': { 'aes': 'QCryptoBlockOptionsQCow',
70
'luks': 'QCryptoBlockOptionsLUKS'} }
71
72
+##
73
+# @BlockdevOptionsPreallocate:
74
+#
75
+# Filter driver intended to be inserted between format and protocol node
76
+# and to do preallocation in the protocol node on write.
77
+#
78
+# @prealloc-align: on preallocation, align file length to this number,
79
+# default 1048576 (1M)
80
+#
81
+# @prealloc-size: how much to preallocate, default 134217728 (128M)
82
+#
83
+# Since: 6.0
84
+##
85
+{ 'struct': 'BlockdevOptionsPreallocate',
86
+ 'base': 'BlockdevOptionsGenericFormat',
87
+ 'data': { '*prealloc-align': 'int', '*prealloc-size': 'int' } }
88
+
89
##
90
# @BlockdevOptionsQcow2:
91
#
92
@@ -XXX,XX +XXX,XX @@
93
'null-co': 'BlockdevOptionsNull',
94
'nvme': 'BlockdevOptionsNVMe',
95
'parallels': 'BlockdevOptionsGenericFormat',
96
+ 'preallocate':'BlockdevOptionsPreallocate',
97
'qcow2': 'BlockdevOptionsQcow2',
98
'qcow': 'BlockdevOptionsQcow',
99
'qed': 'BlockdevOptionsGenericCOWFormat',
100
diff --git a/block/preallocate.c b/block/preallocate.c
101
new file mode 100644
102
index XXXXXXX..XXXXXXX
103
--- /dev/null
104
+++ b/block/preallocate.c
105
@@ -XXX,XX +XXX,XX @@
106
+/*
107
+ * preallocate filter driver
108
+ *
166
+ *
109
+ * The driver performs the preallocate operation: it is injected above
167
+ * The lock-free mutex implementation is based on OSv
110
+ * some node, and before each write over EOF it issues an additional preallocating
168
+ * (core/lfmutex.cc, include/lockfree/mutex.hh).
111
+ * write-zeroes request.
169
+ * Copyright (C) 2013 Cloudius Systems, Ltd.
170
*/
171
172
#include "qemu/osdep.h"
173
@@ -XXX,XX +XXX,XX @@ bool qemu_co_queue_empty(CoQueue *queue)
174
return QSIMPLEQ_FIRST(&queue->entries) == NULL;
175
}
176
177
+/* The wait records are handled with a multiple-producer, single-consumer
178
+ * lock-free queue. There cannot be two concurrent pop_waiter() calls
179
+ * because pop_waiter() can only be called while mutex->handoff is zero.
180
+ * This can happen in three cases:
181
+ * - in qemu_co_mutex_unlock, before the hand-off protocol has started.
182
+ * In this case, qemu_co_mutex_lock will see mutex->handoff == 0 and
183
+ * not take part in the handoff.
184
+ * - in qemu_co_mutex_lock, if it steals the hand-off responsibility from
185
+ * qemu_co_mutex_unlock. In this case, qemu_co_mutex_unlock will fail
186
+ * the cmpxchg (it will see either 0 or the next sequence value) and
187
+ * exit. The next hand-off cannot begin until qemu_co_mutex_lock has
188
+ * woken up someone.
189
+ * - in qemu_co_mutex_unlock, if it takes the hand-off token itself.
190
+ * In this case another iteration starts with mutex->handoff == 0;
191
+ * a concurrent qemu_co_mutex_lock will fail the cmpxchg, and
192
+ * qemu_co_mutex_unlock will go back to case (1).
112
+ *
193
+ *
113
+ * Copyright (c) 2020 Virtuozzo International GmbH.
194
+ * The following functions manage this queue.
114
+ *
115
+ * Author:
116
+ * Sementsov-Ogievskiy Vladimir <vsementsov@virtuozzo.com>
117
+ *
118
+ * This program is free software; you can redistribute it and/or modify
119
+ * it under the terms of the GNU General Public License as published by
120
+ * the Free Software Foundation; either version 2 of the License, or
121
+ * (at your option) any later version.
122
+ *
123
+ * This program is distributed in the hope that it will be useful,
124
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
125
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
126
+ * GNU General Public License for more details.
127
+ *
128
+ * You should have received a copy of the GNU General Public License
129
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
130
+ */
195
+ */
131
+
196
+typedef struct CoWaitRecord {
132
+#include "qemu/osdep.h"
197
+ Coroutine *co;
133
+
198
+ QSLIST_ENTRY(CoWaitRecord) next;
134
+#include "qapi/error.h"
199
+} CoWaitRecord;
135
+#include "qemu/module.h"
200
+
136
+#include "qemu/option.h"
201
+static void push_waiter(CoMutex *mutex, CoWaitRecord *w)
137
+#include "qemu/units.h"
202
+{
138
+#include "block/block_int.h"
203
+ w->co = qemu_coroutine_self();
139
+
204
+ QSLIST_INSERT_HEAD_ATOMIC(&mutex->from_push, w, next);
140
+
205
+}
141
+typedef struct PreallocateOpts {
206
+
142
+ int64_t prealloc_size;
207
+static void move_waiters(CoMutex *mutex)
143
+ int64_t prealloc_align;
208
+{
144
+} PreallocateOpts;
209
+ QSLIST_HEAD(, CoWaitRecord) reversed;
145
+
210
+ QSLIST_MOVE_ATOMIC(&reversed, &mutex->from_push);
146
+typedef struct BDRVPreallocateState {
211
+ while (!QSLIST_EMPTY(&reversed)) {
147
+ PreallocateOpts opts;
212
+ CoWaitRecord *w = QSLIST_FIRST(&reversed);
148
+
213
+ QSLIST_REMOVE_HEAD(&reversed, next);
149
+ /*
214
+ QSLIST_INSERT_HEAD(&mutex->to_pop, w, next);
150
+ * Track real data end, to crop preallocation on close. If < 0 the status is
215
+ }
151
+ * unknown.
216
+}
152
+ *
217
+
153
+ * @data_end is the maximum of the file size on open (or when we get write/resize
218
+static CoWaitRecord *pop_waiter(CoMutex *mutex)
154
+ * permissions) and the end offsets of all write requests since then. So it's safe to
219
+{
155
+ * truncate to data_end if it is valid.
220
+ CoWaitRecord *w;
221
+
222
+ if (QSLIST_EMPTY(&mutex->to_pop)) {
223
+ move_waiters(mutex);
224
+ if (QSLIST_EMPTY(&mutex->to_pop)) {
225
+ return NULL;
226
+ }
227
+ }
228
+ w = QSLIST_FIRST(&mutex->to_pop);
229
+ QSLIST_REMOVE_HEAD(&mutex->to_pop, next);
230
+ return w;
231
+}
232
+
233
+static bool has_waiters(CoMutex *mutex)
234
+{
235
+ return QSLIST_EMPTY(&mutex->to_pop) || QSLIST_EMPTY(&mutex->from_push);
236
+}
237
+
238
void qemu_co_mutex_init(CoMutex *mutex)
239
{
240
memset(mutex, 0, sizeof(*mutex));
241
- qemu_co_queue_init(&mutex->queue);
242
}
243
244
-void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
245
+static void coroutine_fn qemu_co_mutex_lock_slowpath(CoMutex *mutex)
246
{
247
Coroutine *self = qemu_coroutine_self();
248
+ CoWaitRecord w;
249
+ unsigned old_handoff;
250
251
trace_qemu_co_mutex_lock_entry(mutex, self);
252
+ w.co = self;
253
+ push_waiter(mutex, &w);
254
255
- while (mutex->locked) {
256
- qemu_co_queue_wait(&mutex->queue);
257
+ /* This is the "Responsibility Hand-Off" protocol; a lock() picks from
258
+ * a concurrent unlock() the responsibility of waking somebody up.
156
+ */
259
+ */
157
+ int64_t data_end;
260
+ old_handoff = atomic_mb_read(&mutex->handoff);
158
+
261
+ if (old_handoff &&
159
+ /*
262
+ has_waiters(mutex) &&
160
+ * Start of trailing preallocated area which reads as zero. May be smaller
263
+ atomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) {
161
+ * than data_end, if user does over-EOF write zero operation. If < 0 the
264
+ /* There can be no concurrent pops, because there can be only
162
+ * status is unknown.
265
+ * one active handoff at a time.
163
+ *
266
+ */
164
+ * If both @zero_start and @file_end are valid, the region
267
+ CoWaitRecord *to_wake = pop_waiter(mutex);
165
+ * [@zero_start, @file_end) is known to be preallocated zeroes. If @file_end
268
+ Coroutine *co = to_wake->co;
166
+ * is not valid, @zero_start doesn't make much sense.
269
+ if (co == self) {
167
+ */
270
+ /* We got the lock ourselves! */
168
+ int64_t zero_start;
271
+ assert(to_wake == &w);
169
+
272
+ return;
170
+ /*
273
+ }
171
+ * Real end of file. Actually the cache for bdrv_getlength(bs->file->bs),
274
+
172
+ * to avoid extra lseek() calls on each write operation. If < 0 the status
275
+ aio_co_wake(co);
173
+ * is unknown.
276
}
174
+ */
277
175
+ int64_t file_end;
278
- mutex->locked = true;
176
+
279
- mutex->holder = self;
177
+ /*
280
- self->locks_held++;
178
+ * All three states @data_end, @zero_start and @file_end are guaranteed to
281
-
179
+ * be invalid (< 0) when we don't have both exclusive BLK_PERM_RESIZE and
282
+ qemu_coroutine_yield();
180
+ * BLK_PERM_WRITE permissions on the file child.
283
trace_qemu_co_mutex_lock_return(mutex, self);
181
+ */
284
}
182
+} BDRVPreallocateState;
285
183
+
286
+void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
184
+#define PREALLOCATE_OPT_PREALLOC_ALIGN "prealloc-align"
287
+{
185
+#define PREALLOCATE_OPT_PREALLOC_SIZE "prealloc-size"
288
+ Coroutine *self = qemu_coroutine_self();
186
+static QemuOptsList runtime_opts = {
289
+
187
+ .name = "preallocate",
290
+ if (atomic_fetch_inc(&mutex->locked) == 0) {
188
+ .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
291
+ /* Uncontended. */
189
+ .desc = {
292
+ trace_qemu_co_mutex_lock_uncontended(mutex, self);
190
+ {
293
+ } else {
191
+ .name = PREALLOCATE_OPT_PREALLOC_ALIGN,
294
+ qemu_co_mutex_lock_slowpath(mutex);
192
+ .type = QEMU_OPT_SIZE,
295
+ }
193
+ .help = "on preallocation, align file length to this number, "
296
+ mutex->holder = self;
194
+ "default 1M",
297
+ self->locks_held++;
195
+ },
298
+}
196
+ {
299
+
197
+ .name = PREALLOCATE_OPT_PREALLOC_SIZE,
300
void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
198
+ .type = QEMU_OPT_SIZE,
301
{
199
+ .help = "how much to preallocate, default 128M",
302
Coroutine *self = qemu_coroutine_self();
200
+ },
303
201
+ { /* end of list */ }
304
trace_qemu_co_mutex_unlock_entry(mutex, self);
202
+ },
305
203
+};
306
- assert(mutex->locked == true);
204
+
307
+ assert(mutex->locked);
205
+static bool preallocate_absorb_opts(PreallocateOpts *dest, QDict *options,
308
assert(mutex->holder == self);
206
+ BlockDriverState *child_bs, Error **errp)
309
assert(qemu_in_coroutine());
207
+{
310
208
+ QemuOpts *opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
311
- mutex->locked = false;
209
+
312
mutex->holder = NULL;
210
+ if (!qemu_opts_absorb_qdict(opts, options, errp)) {
313
self->locks_held--;
211
+ return false;
314
- qemu_co_queue_next(&mutex->queue);
212
+ }
315
+ if (atomic_fetch_dec(&mutex->locked) == 1) {
213
+
316
+ /* No waiting qemu_co_mutex_lock(). Pfew, that was easy! */
214
+ dest->prealloc_align =
215
+ qemu_opt_get_size(opts, PREALLOCATE_OPT_PREALLOC_ALIGN, 1 * MiB);
216
+ dest->prealloc_size =
217
+ qemu_opt_get_size(opts, PREALLOCATE_OPT_PREALLOC_SIZE, 128 * MiB);
218
+
219
+ qemu_opts_del(opts);
220
+
221
+ if (!QEMU_IS_ALIGNED(dest->prealloc_align, BDRV_SECTOR_SIZE)) {
222
+ error_setg(errp, "prealloc-align parameter of preallocate filter "
223
+ "is not aligned to %llu", BDRV_SECTOR_SIZE);
224
+ return false;
225
+ }
226
+
227
+ if (!QEMU_IS_ALIGNED(dest->prealloc_align,
228
+ child_bs->bl.request_alignment)) {
229
+ error_setg(errp, "prealloc-align parameter of preallocate filter "
230
+ "is not aligned to underlying node request alignment "
231
+ "(%" PRIi32 ")", child_bs->bl.request_alignment);
232
+ return false;
233
+ }
234
+
235
+ return true;
236
+}
237
+
238
+static int preallocate_open(BlockDriverState *bs, QDict *options, int flags,
239
+ Error **errp)
240
+{
241
+ BDRVPreallocateState *s = bs->opaque;
242
+
243
+ /*
244
+ * s->data_end and friends should be initialized on permission update.
245
+ * For this to work, mark them invalid.
246
+ */
247
+ s->file_end = s->zero_start = s->data_end = -EINVAL;
248
+
249
+ bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
250
+ BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
251
+ false, errp);
252
+ if (!bs->file) {
253
+ return -EINVAL;
254
+ }
255
+
256
+ if (!preallocate_absorb_opts(&s->opts, options, bs->file->bs, errp)) {
257
+ return -EINVAL;
258
+ }
259
+
260
+ bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED |
261
+ (BDRV_REQ_FUA & bs->file->bs->supported_write_flags);
262
+
263
+ bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
264
+ ((BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK) &
265
+ bs->file->bs->supported_zero_flags);
266
+
267
+ return 0;
268
+}
269
+
270
+static void preallocate_close(BlockDriverState *bs)
271
+{
272
+ int ret;
273
+ BDRVPreallocateState *s = bs->opaque;
274
+
275
+ if (s->data_end < 0) {
276
+ return;
317
+ return;
277
+ }
318
+ }
278
+
319
+
279
+ if (s->file_end < 0) {
320
+ for (;;) {
280
+ s->file_end = bdrv_getlength(bs->file->bs);
321
+ CoWaitRecord *to_wake = pop_waiter(mutex);
281
+ if (s->file_end < 0) {
322
+ unsigned our_handoff;
282
+ return;
323
+
283
+ }
324
+ if (to_wake) {
284
+ }
325
+ Coroutine *co = to_wake->co;
285
+
326
+ aio_co_wake(co);
286
+ if (s->data_end < s->file_end) {
327
+ break;
287
+ ret = bdrv_truncate(bs->file, s->data_end, true, PREALLOC_MODE_OFF, 0,
328
+ }
288
+ NULL);
329
+
289
+ s->file_end = ret < 0 ? ret : s->data_end;
330
+ /* Some concurrent lock() is in progress (we know this because
290
+ }
331
+ * mutex->locked was >1) but it hasn't yet put itself on the wait
291
+}
332
+ * queue. Pick a sequence number for the handoff protocol (not 0).
292
+
333
+ */
293
+
334
+ if (++mutex->sequence == 0) {
294
+/*
335
+ mutex->sequence = 1;
295
+ * Handle reopen.
336
+ }
296
+ *
337
+
297
+ * We must implement reopen handlers, otherwise reopen just doesn't work. Handle
338
+ our_handoff = mutex->sequence;
298
+ * new options and don't care about preallocation state, as it is handled in
339
+ atomic_mb_set(&mutex->handoff, our_handoff);
299
+ * set/check permission handlers.
340
+ if (!has_waiters(mutex)) {
300
+ */
341
+ /* The concurrent lock has not added itself yet, so it
301
+
342
+ * will be able to pick our handoff.
302
+static int preallocate_reopen_prepare(BDRVReopenState *reopen_state,
303
+ BlockReopenQueue *queue, Error **errp)
304
+{
305
+ PreallocateOpts *opts = g_new0(PreallocateOpts, 1);
306
+
307
+ if (!preallocate_absorb_opts(opts, reopen_state->options,
308
+ reopen_state->bs->file->bs, errp)) {
309
+ g_free(opts);
310
+ return -EINVAL;
311
+ }
312
+
313
+ reopen_state->opaque = opts;
314
+
315
+ return 0;
316
+}
317
+
318
+static void preallocate_reopen_commit(BDRVReopenState *state)
319
+{
320
+ BDRVPreallocateState *s = state->bs->opaque;
321
+
322
+ s->opts = *(PreallocateOpts *)state->opaque;
323
+
324
+ g_free(state->opaque);
325
+ state->opaque = NULL;
326
+}
327
+
328
+static void preallocate_reopen_abort(BDRVReopenState *state)
329
+{
330
+ g_free(state->opaque);
331
+ state->opaque = NULL;
332
+}
333
+
334
+static coroutine_fn int preallocate_co_preadv_part(
335
+ BlockDriverState *bs, uint64_t offset, uint64_t bytes,
336
+ QEMUIOVector *qiov, size_t qiov_offset, int flags)
337
+{
338
+ return bdrv_co_preadv_part(bs->file, offset, bytes, qiov, qiov_offset,
339
+ flags);
340
+}
341
+
342
+static int coroutine_fn preallocate_co_pdiscard(BlockDriverState *bs,
343
+ int64_t offset, int bytes)
344
+{
345
+ return bdrv_co_pdiscard(bs->file, offset, bytes);
346
+}
347
+
348
+static bool can_write_resize(uint64_t perm)
349
+{
350
+ return (perm & BLK_PERM_WRITE) && (perm & BLK_PERM_RESIZE);
351
+}
352
+
353
+static bool has_prealloc_perms(BlockDriverState *bs)
354
+{
355
+ BDRVPreallocateState *s = bs->opaque;
356
+
357
+ if (can_write_resize(bs->file->perm)) {
358
+ assert(!(bs->file->shared_perm & BLK_PERM_WRITE));
359
+ assert(!(bs->file->shared_perm & BLK_PERM_RESIZE));
360
+ return true;
361
+ }
362
+
363
+ assert(s->data_end < 0);
364
+ assert(s->zero_start < 0);
365
+ assert(s->file_end < 0);
366
+ return false;
367
+}
368
+
369
+/*
370
+ * Call on each write. Returns true if @want_merge_zero is true and the region
371
+ * [offset, offset + bytes) is zeroed (as a result of this call or earlier
372
+ * preallocation).
373
+ *
374
+ * want_merge_zero is used to merge write-zero request with preallocation in
375
+ * one bdrv_co_pwrite_zeroes() call.
376
+ */
377
+static bool coroutine_fn handle_write(BlockDriverState *bs, int64_t offset,
378
+ int64_t bytes, bool want_merge_zero)
379
+{
380
+ BDRVPreallocateState *s = bs->opaque;
381
+ int64_t end = offset + bytes;
382
+ int64_t prealloc_start, prealloc_end;
383
+ int ret;
384
+
385
+ if (!has_prealloc_perms(bs)) {
386
+ /* We neither have the state nor should we try to recover it */
387
+ return false;
388
+ }
389
+
390
+ if (s->data_end < 0) {
391
+ s->data_end = bdrv_getlength(bs->file->bs);
392
+ if (s->data_end < 0) {
393
+ return false;
394
+ }
395
+
396
+ if (s->file_end < 0) {
397
+ s->file_end = s->data_end;
398
+ }
399
+ }
400
+
401
+ if (end <= s->data_end) {
402
+ return false;
403
+ }
404
+
405
+ /* We have a valid s->data_end, and the request writes beyond it. */
406
+
407
+ s->data_end = end;
408
+ if (s->zero_start < 0 || !want_merge_zero) {
409
+ s->zero_start = end;
410
+ }
411
+
412
+ if (s->file_end < 0) {
413
+ s->file_end = bdrv_getlength(bs->file->bs);
414
+ if (s->file_end < 0) {
415
+ return false;
416
+ }
417
+ }
418
+
419
+ /* Now s->data_end, s->zero_start and s->file_end are valid. */
420
+
421
+ if (end <= s->file_end) {
422
+ /* No preallocation needed. */
423
+ return want_merge_zero && offset >= s->zero_start;
424
+ }
425
+
426
+ /* Now we want new preallocation, as the request writes beyond s->file_end. */
427
+
428
+ prealloc_start = want_merge_zero ? MIN(offset, s->file_end) : s->file_end;
429
+ prealloc_end = QEMU_ALIGN_UP(end + s->opts.prealloc_size,
430
+ s->opts.prealloc_align);
431
+
432
+ ret = bdrv_co_pwrite_zeroes(
433
+ bs->file, prealloc_start, prealloc_end - prealloc_start,
434
+ BDRV_REQ_NO_FALLBACK | BDRV_REQ_SERIALISING | BDRV_REQ_NO_WAIT);
435
+ if (ret < 0) {
436
+ s->file_end = ret;
437
+ return false;
438
+ }
439
+
440
+ s->file_end = prealloc_end;
441
+ return want_merge_zero;
442
+}
443
+
444
+static int coroutine_fn preallocate_co_pwrite_zeroes(BlockDriverState *bs,
445
+ int64_t offset, int bytes, BdrvRequestFlags flags)
446
+{
447
+ bool want_merge_zero =
448
+ !(flags & ~(BDRV_REQ_ZERO_WRITE | BDRV_REQ_NO_FALLBACK));
449
+ if (handle_write(bs, offset, bytes, want_merge_zero)) {
450
+ return 0;
451
+ }
452
+
453
+ return bdrv_co_pwrite_zeroes(bs->file, offset, bytes, flags);
454
+}
455
+
456
+static coroutine_fn int preallocate_co_pwritev_part(BlockDriverState *bs,
457
+ uint64_t offset,
458
+ uint64_t bytes,
459
+ QEMUIOVector *qiov,
460
+ size_t qiov_offset,
461
+ int flags)
462
+{
463
+ handle_write(bs, offset, bytes, false);
464
+
465
+ return bdrv_co_pwritev_part(bs->file, offset, bytes, qiov, qiov_offset,
466
+ flags);
467
+}
468
+
469
+static int coroutine_fn
470
+preallocate_co_truncate(BlockDriverState *bs, int64_t offset,
471
+ bool exact, PreallocMode prealloc,
472
+ BdrvRequestFlags flags, Error **errp)
473
+{
474
+ ERRP_GUARD();
475
+ BDRVPreallocateState *s = bs->opaque;
476
+ int ret;
477
+
478
+ if (s->data_end >= 0 && offset > s->data_end) {
479
+ if (s->file_end < 0) {
480
+ s->file_end = bdrv_getlength(bs->file->bs);
481
+ if (s->file_end < 0) {
482
+ error_setg(errp, "failed to get file length");
483
+ return s->file_end;
484
+ }
485
+ }
486
+
487
+ if (prealloc == PREALLOC_MODE_FALLOC) {
488
+ /*
489
+ * If offset <= s->file_end, the task is already done, just
490
+ * update s->data_end, to move part of "filter preallocation"
491
+ * to "preallocation requested by user".
492
+ * Otherwise just proceed to preallocate missing part.
493
+ */
343
+ */
494
+ if (offset <= s->file_end) {
344
+ break;
495
+ s->data_end = offset;
345
+ }
496
+ return 0;
346
+
497
+ }
347
+ /* Try to do the handoff protocol ourselves; if somebody else has
498
+ } else {
348
+ * already taken it, however, we're done and they're responsible.
499
+ /*
500
+ * We have to drop our preallocation, to
501
+ * - avoid "Cannot use preallocation for shrinking files" in
502
+ * case of offset < file_end
503
+ * - give PREALLOC_MODE_OFF a chance to keep small disk
504
+ * usage
505
+ * - give PREALLOC_MODE_FULL a chance to actually write the
506
+ * whole region as user expects
507
+ */
508
+ if (s->file_end > s->data_end) {
509
+ ret = bdrv_co_truncate(bs->file, s->data_end, true,
510
+ PREALLOC_MODE_OFF, 0, errp);
511
+ if (ret < 0) {
512
+ s->file_end = ret;
513
+ error_prepend(errp, "preallocate-filter: failed to drop "
514
+ "write-zero preallocation: ");
515
+ return ret;
516
+ }
517
+ s->file_end = s->data_end;
518
+ }
519
+ }
520
+
521
+ s->data_end = offset;
522
+ }
523
+
524
+ ret = bdrv_co_truncate(bs->file, offset, exact, prealloc, flags, errp);
525
+ if (ret < 0) {
526
+ s->file_end = s->zero_start = s->data_end = ret;
527
+ return ret;
528
+ }
529
+
530
+ if (has_prealloc_perms(bs)) {
531
+ s->file_end = s->zero_start = s->data_end = offset;
532
+ }
533
+ return 0;
534
+}
535
+
536
+static int coroutine_fn preallocate_co_flush(BlockDriverState *bs)
537
+{
538
+ return bdrv_co_flush(bs->file->bs);
539
+}
540
+
541
+static int64_t preallocate_getlength(BlockDriverState *bs)
542
+{
543
+ int64_t ret;
544
+ BDRVPreallocateState *s = bs->opaque;
545
+
546
+ if (s->data_end >= 0) {
547
+ return s->data_end;
548
+ }
549
+
550
+ ret = bdrv_getlength(bs->file->bs);
551
+
552
+ if (has_prealloc_perms(bs)) {
553
+ s->file_end = s->zero_start = s->data_end = ret;
554
+ }
555
+
556
+ return ret;
557
+}
558
+
559
+static int preallocate_check_perm(BlockDriverState *bs,
560
+ uint64_t perm, uint64_t shared, Error **errp)
561
+{
562
+ BDRVPreallocateState *s = bs->opaque;
563
+
564
+ if (s->data_end >= 0 && !can_write_resize(perm)) {
565
+ /*
566
+ * Lose permissions.
567
+ * We should truncate in check_perm, as in set_perm bs->file->perm will
568
+ * have already been changed, and we should not violate it.
569
+ */
349
+ */
570
+ if (s->file_end < 0) {
350
+ if (atomic_cmpxchg(&mutex->handoff, our_handoff, 0) != our_handoff) {
571
+ s->file_end = bdrv_getlength(bs->file->bs);
351
+ break;
572
+ if (s->file_end < 0) {
352
+ }
573
+ error_setg(errp, "Failed to get file length");
353
+ }
574
+ return s->file_end;
354
575
+ }
355
trace_qemu_co_mutex_unlock_return(mutex, self);
576
+ }
356
}
577
+
357
diff --git a/util/trace-events b/util/trace-events
578
+ if (s->data_end < s->file_end) {
579
+ int ret = bdrv_truncate(bs->file, s->data_end, true,
580
+ PREALLOC_MODE_OFF, 0, NULL);
581
+ if (ret < 0) {
582
+ error_setg(errp, "Failed to drop preallocation");
583
+ s->file_end = ret;
584
+ return ret;
585
+ }
586
+ s->file_end = s->data_end;
587
+ }
588
+ }
589
+
590
+ return 0;
591
+}
592
+
593
+static void preallocate_set_perm(BlockDriverState *bs,
594
+ uint64_t perm, uint64_t shared)
595
+{
596
+ BDRVPreallocateState *s = bs->opaque;
597
+
598
+ if (can_write_resize(perm)) {
599
+ if (s->data_end < 0) {
600
+ s->data_end = s->file_end = s->zero_start =
601
+ bdrv_getlength(bs->file->bs);
602
+ }
603
+ } else {
604
+ /*
605
+ * We drop our permissions, as well as allow shared
606
+ * permissions (see preallocate_child_perm), so anyone will be able to
607
+ * change the child, so mark all states invalid. We'll regain control if we
608
+ * get good permissions back.
609
+ */
610
+ s->data_end = s->file_end = s->zero_start = -EINVAL;
611
+ }
612
+}
613
+
614
+static void preallocate_child_perm(BlockDriverState *bs, BdrvChild *c,
615
+ BdrvChildRole role, BlockReopenQueue *reopen_queue,
616
+ uint64_t perm, uint64_t shared, uint64_t *nperm, uint64_t *nshared)
617
+{
618
+ bdrv_default_perms(bs, c, role, reopen_queue, perm, shared, nperm, nshared);
619
+
620
+ if (can_write_resize(perm)) {
621
+ /* This should come by default, but let's enforce: */
622
+ *nperm |= BLK_PERM_WRITE | BLK_PERM_RESIZE;
623
+
624
+ /*
625
+ * Don't share, to keep our states s->file_end, s->data_end and
626
+ * s->zero_start valid.
627
+ */
628
+ *nshared &= ~(BLK_PERM_WRITE | BLK_PERM_RESIZE);
629
+ }
630
+}
631
+
632
+BlockDriver bdrv_preallocate_filter = {
633
+ .format_name = "preallocate",
634
+ .instance_size = sizeof(BDRVPreallocateState),
635
+
636
+ .bdrv_getlength = preallocate_getlength,
637
+ .bdrv_open = preallocate_open,
638
+ .bdrv_close = preallocate_close,
639
+
640
+ .bdrv_reopen_prepare = preallocate_reopen_prepare,
641
+ .bdrv_reopen_commit = preallocate_reopen_commit,
642
+ .bdrv_reopen_abort = preallocate_reopen_abort,
643
+
644
+ .bdrv_co_preadv_part = preallocate_co_preadv_part,
645
+ .bdrv_co_pwritev_part = preallocate_co_pwritev_part,
646
+ .bdrv_co_pwrite_zeroes = preallocate_co_pwrite_zeroes,
647
+ .bdrv_co_pdiscard = preallocate_co_pdiscard,
648
+ .bdrv_co_flush = preallocate_co_flush,
649
+ .bdrv_co_truncate = preallocate_co_truncate,
650
+
651
+ .bdrv_check_perm = preallocate_check_perm,
652
+ .bdrv_set_perm = preallocate_set_perm,
653
+ .bdrv_child_perm = preallocate_child_perm,
654
+
655
+ .has_variable_length = true,
656
+ .is_filter = true,
657
+};
658
+
659
+static void bdrv_preallocate_init(void)
660
+{
661
+ bdrv_register(&bdrv_preallocate_filter);
662
+}
663
+
664
+block_init(bdrv_preallocate_init);
665
diff --git a/block/meson.build b/block/meson.build
666
index XXXXXXX..XXXXXXX 100644
358
index XXXXXXX..XXXXXXX 100644
667
--- a/block/meson.build
359
--- a/util/trace-events
668
+++ b/block/meson.build
360
+++ b/util/trace-events
669
@@ -XXX,XX +XXX,XX @@ block_ss.add(files(
361
@@ -XXX,XX +XXX,XX @@ qemu_coroutine_terminate(void *co) "self %p"
670
'block-copy.c',
362
671
'commit.c',
363
# util/qemu-coroutine-lock.c
672
'copy-on-read.c',
364
qemu_co_queue_run_restart(void *co) "co %p"
673
+ 'preallocate.c',
365
+qemu_co_mutex_lock_uncontended(void *mutex, void *self) "mutex %p self %p"
674
'create.c',
366
qemu_co_mutex_lock_entry(void *mutex, void *self) "mutex %p self %p"
675
'crypto.c',
367
qemu_co_mutex_lock_return(void *mutex, void *self) "mutex %p self %p"
676
'dirty-bitmap.c',
368
qemu_co_mutex_unlock_entry(void *mutex, void *self) "mutex %p self %p"
677
--
369
--
678
2.29.2
370
2.9.3
679
371
680
372
diff view generated by jsdifflib
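As a side note on the preallocate filter introduced above: the amount of post-EOF
space it reserves is computed in handle_write() as
QEMU_ALIGN_UP(end + prealloc-size, prealloc-align). The following is a minimal
standalone sketch of that arithmetic only, not QEMU code; the input values are
made up, the constants mirror the documented defaults, and align_up() merely
stands in for QEMU's QEMU_ALIGN_UP() macro:

    /*
     * Standalone sketch: reproduces the sizing rule of the preallocate
     * filter's handle_write() with illustrative input values.
     */
    #include <assert.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for QEMU_ALIGN_UP(): round @value up to a multiple of @alignment. */
    static int64_t align_up(int64_t value, int64_t alignment)
    {
        return ((value + alignment - 1) / alignment) * alignment;
    }

    int main(void)
    {
        const int64_t prealloc_size  = 128 * 1024 * 1024; /* default prealloc-size */
        const int64_t prealloc_align = 1 * 1024 * 1024;   /* default prealloc-align */

        int64_t file_end = 10 * 1024 * 1024; /* current length of the protocol file */
        int64_t offset   = 10 * 1024 * 1024; /* guest write starting right at EOF */
        int64_t bytes    = 64 * 1024;        /* ... of 64 KiB */
        int64_t end      = offset + bytes;

        if (end > file_end) {
            /* Reserve well past the request so later post-EOF writes stay cheap. */
            int64_t prealloc_end = align_up(end + prealloc_size, prealloc_align);

            printf("zero-fill [%" PRId64 ", %" PRId64 ")\n", file_end, prealloc_end);
            file_end = prealloc_end;
        }

        assert(file_end >= end);
        return 0;
    }

With the defaults, a 64 KiB write at a 10 MiB EOF grows the file to the next
1 MiB boundary past end + 128 MiB, so the following post-EOF writes need no
further allocation until that region is used up.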
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
Factor out a separate function for this common pattern.
3
Running a very small critical section on pthread_mutex_t and CoMutex
4
shows that pthread_mutex_t is much faster because it doesn't actually
5
go to sleep. What happens is that the critical section is shorter
6
than the latency of entering the kernel and thus FUTEX_WAIT always
7
fails. With CoMutex there is no such latency but you still want to
8
avoid wait and wakeup. So introduce it artificially.
4
9
5
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
10
This only works with one waiter; because CoMutex is fair, it will
6
Message-Id: <20201106124241.16950-5-vsementsov@virtuozzo.com>
11
always have more waits and wakeups than a pthread_mutex_t.
7
[mreitz: Squashed in
12
8
https://lists.nongnu.org/archive/html/qemu-block/2020-11/msg00299.html]
13
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
9
Signed-off-by: Max Reitz <mreitz@redhat.com>
14
Reviewed-by: Fam Zheng <famz@redhat.com>
15
Message-id: 20170213181244.16297-3-pbonzini@redhat.com
16
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
10
---
17
---
11
block.c | 61 +++++++++++++++++++++++++++++----------------------------
18
include/qemu/coroutine.h | 5 +++++
12
1 file changed, 31 insertions(+), 30 deletions(-)
19
util/qemu-coroutine-lock.c | 51 ++++++++++++++++++++++++++++++++++++++++------
20
util/qemu-coroutine.c | 2 +-
21
3 files changed, 51 insertions(+), 7 deletions(-)
13
22
14
diff --git a/block.c b/block.c
23
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
15
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
16
--- a/block.c
25
--- a/include/qemu/coroutine.h
17
+++ b/block.c
26
+++ b/include/qemu/coroutine.h
18
@@ -XXX,XX +XXX,XX @@ static void bdrv_child_abort_perm_update(BdrvChild *c)
27
@@ -XXX,XX +XXX,XX @@ typedef struct CoMutex {
19
bdrv_abort_perm_update(c->bs);
28
*/
29
unsigned locked;
30
31
+ /* Context that is holding the lock. Useful to avoid spinning
32
+ * when two coroutines on the same AioContext try to get the lock. :)
33
+ */
34
+ AioContext *ctx;
35
+
36
/* A queue of waiters. Elements are added atomically in front of
37
* from_push. to_pop is only populated, and popped from, by whoever
38
* is in charge of the next wakeup. This can be an unlocker or,
39
diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
40
index XXXXXXX..XXXXXXX 100644
41
--- a/util/qemu-coroutine-lock.c
42
+++ b/util/qemu-coroutine-lock.c
43
@@ -XXX,XX +XXX,XX @@
44
#include "qemu-common.h"
45
#include "qemu/coroutine.h"
46
#include "qemu/coroutine_int.h"
47
+#include "qemu/processor.h"
48
#include "qemu/queue.h"
49
#include "block/aio.h"
50
#include "trace.h"
51
@@ -XXX,XX +XXX,XX @@ void qemu_co_mutex_init(CoMutex *mutex)
52
memset(mutex, 0, sizeof(*mutex));
20
}
53
}
21
54
22
+static int bdrv_refresh_perms(BlockDriverState *bs, bool *tighten_restrictions,
55
-static void coroutine_fn qemu_co_mutex_lock_slowpath(CoMutex *mutex)
23
+ Error **errp)
56
+static void coroutine_fn qemu_co_mutex_wake(CoMutex *mutex, Coroutine *co)
24
+{
57
+{
25
+ int ret;
58
+ /* Read co before co->ctx; pairs with smp_wmb() in
26
+ uint64_t perm, shared_perm;
59
+ * qemu_coroutine_enter().
27
+
60
+ */
28
+ bdrv_get_cumulative_perm(bs, &perm, &shared_perm);
61
+ smp_read_barrier_depends();
29
+ ret = bdrv_check_perm(bs, NULL, perm, shared_perm, NULL,
62
+ mutex->ctx = co->ctx;
30
+ tighten_restrictions, errp);
63
+ aio_co_wake(co);
31
+ if (ret < 0) {
32
+ bdrv_abort_perm_update(bs);
33
+ return ret;
34
+ }
35
+ bdrv_set_perm(bs, perm, shared_perm);
36
+
37
+ return 0;
38
+}
64
+}
39
+
65
+
40
int bdrv_child_try_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared,
66
+static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx,
41
Error **errp)
67
+ CoMutex *mutex)
42
{
68
{
43
@@ -XXX,XX +XXX,XX @@ static void bdrv_replace_child(BdrvChild *child, BlockDriverState *new_bs)
69
Coroutine *self = qemu_coroutine_self();
70
CoWaitRecord w;
71
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn qemu_co_mutex_lock_slowpath(CoMutex *mutex)
72
if (co == self) {
73
/* We got the lock ourselves! */
74
assert(to_wake == &w);
75
+ mutex->ctx = ctx;
76
return;
77
}
78
79
- aio_co_wake(co);
80
+ qemu_co_mutex_wake(mutex, co);
44
}
81
}
45
82
46
if (old_bs) {
83
qemu_coroutine_yield();
47
- /* Update permissions for old node. This is guaranteed to succeed
84
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn qemu_co_mutex_lock_slowpath(CoMutex *mutex)
48
- * because we're just taking a parent away, so we're loosening
85
49
- * restrictions. */
86
void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
50
bool tighten_restrictions;
51
- int ret;
52
53
- bdrv_get_cumulative_perm(old_bs, &perm, &shared_perm);
54
- ret = bdrv_check_perm(old_bs, NULL, perm, shared_perm, NULL,
55
- &tighten_restrictions, NULL);
56
+ /*
57
+ * Update permissions for old node. We're just taking a parent away, so
58
+ * we're loosening restrictions. Errors from the permission update are not
59
+ * fatal in this case, ignore them.
60
+ */
61
+ bdrv_refresh_perms(old_bs, &tighten_restrictions, NULL);
62
assert(tighten_restrictions == false);
63
- if (ret < 0) {
64
- /* We only tried to loosen restrictions, so errors are not fatal */
65
- bdrv_abort_perm_update(old_bs);
66
- } else {
67
- bdrv_set_perm(old_bs, perm, shared_perm);
68
- }
69
70
/* When the parent requiring a non-default AioContext is removed, the
71
* node moves back to the main AioContext */
72
@@ -XXX,XX +XXX,XX @@ void bdrv_init_with_whitelist(void)
73
int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp)
74
{
87
{
75
BdrvChild *child, *parent;
88
+ AioContext *ctx = qemu_get_current_aio_context();
76
- uint64_t perm, shared_perm;
89
Coroutine *self = qemu_coroutine_self();
77
Error *local_err = NULL;
90
+ int waiters, i;
78
int ret;
91
79
BdrvDirtyBitmap *bm;
92
- if (atomic_fetch_inc(&mutex->locked) == 0) {
80
@@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp)
93
+ /* Running a very small critical section on pthread_mutex_t and CoMutex
94
+ * shows that pthread_mutex_t is much faster because it doesn't actually
95
+ * go to sleep. What happens is that the critical section is shorter
96
+ * than the latency of entering the kernel and thus FUTEX_WAIT always
97
+ * fails. With CoMutex there is no such latency but you still want to
98
+ * avoid wait and wakeup. So introduce it artificially.
99
+ */
100
+ i = 0;
101
+retry_fast_path:
102
+ waiters = atomic_cmpxchg(&mutex->locked, 0, 1);
103
+ if (waiters != 0) {
104
+ while (waiters == 1 && ++i < 1000) {
105
+ if (atomic_read(&mutex->ctx) == ctx) {
106
+ break;
107
+ }
108
+ if (atomic_read(&mutex->locked) == 0) {
109
+ goto retry_fast_path;
110
+ }
111
+ cpu_relax();
112
+ }
113
+ waiters = atomic_fetch_inc(&mutex->locked);
114
+ }
115
+
116
+ if (waiters == 0) {
117
/* Uncontended. */
118
trace_qemu_co_mutex_lock_uncontended(mutex, self);
119
+ mutex->ctx = ctx;
120
} else {
121
- qemu_co_mutex_lock_slowpath(mutex);
122
+ qemu_co_mutex_lock_slowpath(ctx, mutex);
123
}
124
mutex->holder = self;
125
self->locks_held++;
126
@@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
127
assert(mutex->holder == self);
128
assert(qemu_in_coroutine());
129
130
+ mutex->ctx = NULL;
131
mutex->holder = NULL;
132
self->locks_held--;
133
if (atomic_fetch_dec(&mutex->locked) == 1) {
134
@@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
135
unsigned our_handoff;
136
137
if (to_wake) {
138
- Coroutine *co = to_wake->co;
139
- aio_co_wake(co);
140
+ qemu_co_mutex_wake(mutex, to_wake->co);
141
break;
142
}
143
144
diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c
145
index XXXXXXX..XXXXXXX 100644
146
--- a/util/qemu-coroutine.c
147
+++ b/util/qemu-coroutine.c
148
@@ -XXX,XX +XXX,XX @@ void qemu_coroutine_enter(Coroutine *co)
149
co->ctx = qemu_get_current_aio_context();
150
151
/* Store co->ctx before anything that stores co. Matches
152
- * barrier in aio_co_wake.
153
+ * barrier in aio_co_wake and qemu_co_mutex_wake.
81
*/
154
*/
82
if (bs->open_flags & BDRV_O_INACTIVE) {
155
smp_wmb();
83
bs->open_flags &= ~BDRV_O_INACTIVE;
156
84
- bdrv_get_cumulative_perm(bs, &perm, &shared_perm);
85
- ret = bdrv_check_perm(bs, NULL, perm, shared_perm, NULL, NULL, errp);
86
+ ret = bdrv_refresh_perms(bs, NULL, errp);
87
if (ret < 0) {
88
- bdrv_abort_perm_update(bs);
89
bs->open_flags |= BDRV_O_INACTIVE;
90
return ret;
91
}
92
- bdrv_set_perm(bs, perm, shared_perm);
93
94
if (bs->drv->bdrv_co_invalidate_cache) {
95
bs->drv->bdrv_co_invalidate_cache(bs, &local_err);
96
@@ -XXX,XX +XXX,XX @@ static int bdrv_inactivate_recurse(BlockDriverState *bs)
97
{
98
BdrvChild *child, *parent;
99
bool tighten_restrictions;
100
- uint64_t perm, shared_perm;
101
int ret;
102
103
if (!bs->drv) {
104
@@ -XXX,XX +XXX,XX @@ static int bdrv_inactivate_recurse(BlockDriverState *bs)
105
106
bs->open_flags |= BDRV_O_INACTIVE;
107
108
- /* Update permissions, they may differ for inactive nodes */
109
- bdrv_get_cumulative_perm(bs, &perm, &shared_perm);
110
- ret = bdrv_check_perm(bs, NULL, perm, shared_perm, NULL,
111
- &tighten_restrictions, NULL);
112
+ /*
113
+ * Update permissions, they may differ for inactive nodes.
114
+ * We only tried to loosen restrictions, so errors are not fatal, ignore
115
+ * them.
116
+ */
117
+ bdrv_refresh_perms(bs, &tighten_restrictions, NULL);
118
assert(tighten_restrictions == false);
119
- if (ret < 0) {
120
- /* We only tried to loosen restrictions, so errors are not fatal */
121
- bdrv_abort_perm_update(bs);
122
- } else {
123
- bdrv_set_perm(bs, perm, shared_perm);
124
- }
125
-
126
127
/* Recursively inactivate children */
128
QLIST_FOREACH(child, &bs->children, next) {
129
--
157
--
130
2.29.2
158
2.9.3
131
159
132
160
diff view generated by jsdifflib
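For context on the CoMutex changes above, this is roughly how callers are
expected to use the lock. The qemu_co_mutex_*() functions and the coroutine_fn
marker are the real API from include/qemu/coroutine.h, but the MyState
structure and the two functions around them are invented for illustration, and
the snippet only builds inside the QEMU tree:

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"

    /* Hypothetical CoMutex user; only the qemu_co_mutex_*() calls are real API. */
    typedef struct MyState {
        CoMutex lock;      /* protects @counter */
        uint64_t counter;
    } MyState;

    static void my_state_init(MyState *s)
    {
        qemu_co_mutex_init(&s->lock);
        s->counter = 0;
    }

    /*
     * Must run in coroutine context: qemu_co_mutex_lock() yields instead of
     * blocking the thread, and with this series the same CoMutex can be
     * contended by coroutines running in different AioContexts.
     */
    static void coroutine_fn my_state_update(MyState *s)
    {
        qemu_co_mutex_lock(&s->lock);
        s->counter++;
        qemu_co_mutex_unlock(&s->lock);
    }

The point of the spinning patch is that a critical section this short usually
finishes before a sleeping waiter could even be woken, so briefly spinning on
mutex->locked avoids the wait/wakeup round trip entirely.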
1
From: Alberto Garcia <berto@igalia.com>
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
The quorum driver does not implement bdrv_co_block_status() and
3
Add two implementations of the same benchmark as the previous patch,
4
because of that it always reports that it contains data even if all its
4
but using pthreads. One uses a normal QemuMutex, the other is Linux
5
children are known to be empty.
5
only and implements a fair mutex based on MCS locks and futexes.
6
6
This shows that the slower performance of the 5-thread case is due to
7
One consequence of this is that if we, for example, create a quorum with
7
the fairness of CoMutex, rather than to coroutines. If fairness does
8
a size of 10GB and mirror it to a new image, the operation will
8
not matter, as is the case with two threads, CoMutex can actually be
9
write 10GB of actual zeroes to the destination image, wasting a lot of
9
faster than pthreads.
10
time and disk space.
10
11
11
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
12
Since a quorum has an arbitrary number of children of potentially
12
Reviewed-by: Fam Zheng <famz@redhat.com>
13
different formats there is no way to report all possible allocation
13
Message-id: 20170213181244.16297-4-pbonzini@redhat.com
14
status flags in a way that makes sense, so this implementation only
14
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
15
reports when a given region is known to contain zeroes
16
(BDRV_BLOCK_ZERO) or not (BDRV_BLOCK_DATA).
17
18
If all children agree that a region contains zeroes then we can return
19
BDRV_BLOCK_ZERO using the smallest size reported by the children
20
(because all agree that a region of at least that size contains
21
zeroes).
22
23
If at least one child disagrees we have to return BDRV_BLOCK_DATA.
24
In this case we use the largest of the sizes reported by the children
25
that didn't return BDRV_BLOCK_ZERO (because we know that there won't
26
be an agreement for at least that size).
27
28
Signed-off-by: Alberto Garcia <berto@igalia.com>
29
Tested-by: Tao Xu <tao3.xu@intel.com>
30
Reviewed-by: Max Reitz <mreitz@redhat.com>
31
Message-Id: <db83149afcf0f793effc8878089d29af4c46ffe1.1605286097.git.berto@igalia.com>
32
Signed-off-by: Max Reitz <mreitz@redhat.com>
33
---
15
---
34
block/quorum.c | 52 +++++++++++++
16
tests/test-aio-multithread.c | 164 +++++++++++++++++++++++++++++++++++++++++++
35
tests/qemu-iotests/312 | 148 +++++++++++++++++++++++++++++++++++++
17
1 file changed, 164 insertions(+)
36
tests/qemu-iotests/312.out | 67 +++++++++++++++++
18
37
tests/qemu-iotests/group | 1 +
19
diff --git a/tests/test-aio-multithread.c b/tests/test-aio-multithread.c
38
4 files changed, 268 insertions(+)
39
create mode 100755 tests/qemu-iotests/312
40
create mode 100644 tests/qemu-iotests/312.out
41
42
diff --git a/block/quorum.c b/block/quorum.c
43
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
44
--- a/block/quorum.c
21
--- a/tests/test-aio-multithread.c
45
+++ b/block/quorum.c
22
+++ b/tests/test-aio-multithread.c
46
@@ -XXX,XX +XXX,XX @@
23
@@ -XXX,XX +XXX,XX @@ static void test_multi_co_mutex_2_30(void)
47
#include "qemu/module.h"
24
test_multi_co_mutex(2, 30);
48
#include "qemu/option.h"
49
#include "block/block_int.h"
50
+#include "block/coroutines.h"
51
#include "block/qdict.h"
52
#include "qapi/error.h"
53
#include "qapi/qapi-events-block.h"
54
@@ -XXX,XX +XXX,XX @@ static void quorum_child_perm(BlockDriverState *bs, BdrvChild *c,
55
| DEFAULT_PERM_UNCHANGED;
56
}
25
}
57
26
58
+/*
27
+/* Same test with fair mutexes, for performance comparison. */
59
+ * Each one of the children can report different status flags even
28
+
60
+ * when they contain the same data, so what this function does is
29
+#ifdef CONFIG_LINUX
61
+ * return BDRV_BLOCK_ZERO if *all* children agree that a certain
30
+#include "qemu/futex.h"
62
+ * region contains zeroes, and BDRV_BLOCK_DATA otherwise.
31
+
32
+/* The nodes for the mutex reside in this structure (on which we try to avoid
33
+ * false sharing). The head of the mutex is in the "mutex_head" variable.
63
+ */
34
+ */
64
+static int coroutine_fn quorum_co_block_status(BlockDriverState *bs,
35
+static struct {
65
+ bool want_zero,
36
+ int next, locked;
66
+ int64_t offset, int64_t count,
37
+ int padding[14];
67
+ int64_t *pnum, int64_t *map,
38
+} nodes[NUM_CONTEXTS] __attribute__((__aligned__(64)));
68
+ BlockDriverState **file)
39
+
69
+{
40
+static int mutex_head = -1;
70
+ BDRVQuorumState *s = bs->opaque;
41
+
71
+ int i, ret;
42
+static void mcs_mutex_lock(void)
72
+ int64_t pnum_zero = count;
43
+{
73
+ int64_t pnum_data = 0;
44
+ int prev;
74
+
45
+
75
+ for (i = 0; i < s->num_children; i++) {
46
+ nodes[id].next = -1;
76
+ int64_t bytes;
47
+ nodes[id].locked = 1;
77
+ ret = bdrv_co_common_block_status_above(s->children[i]->bs, NULL, false,
48
+ prev = atomic_xchg(&mutex_head, id);
78
+ want_zero, offset, count,
49
+ if (prev != -1) {
79
+ &bytes, NULL, NULL, NULL);
50
+ atomic_set(&nodes[prev].next, id);
80
+ if (ret < 0) {
51
+ qemu_futex_wait(&nodes[id].locked, 1);
81
+ quorum_report_bad(QUORUM_OP_TYPE_READ, offset, count,
52
+ }
82
+ s->children[i]->bs->node_name, ret);
53
+}
83
+ pnum_data = count;
54
+
84
+ break;
55
+static void mcs_mutex_unlock(void)
56
+{
57
+ int next;
58
+ if (nodes[id].next == -1) {
59
+ if (atomic_read(&mutex_head) == id &&
60
+ atomic_cmpxchg(&mutex_head, id, -1) == id) {
61
+ /* Last item in the list, exit. */
62
+ return;
85
+ }
63
+ }
86
+ /*
64
+ while (atomic_read(&nodes[id].next) == -1) {
87
+ * Even if all children agree about whether there are zeroes
65
+ /* mcs_mutex_lock did the xchg, but has not updated
88
+ * or not at @offset they might disagree on the size, so use
66
+ * nodes[prev].next yet.
89
+ * the smallest when reporting BDRV_BLOCK_ZERO and the largest
67
+ */
90
+ * when reporting BDRV_BLOCK_DATA.
91
+ */
92
+ if (ret & BDRV_BLOCK_ZERO) {
93
+ pnum_zero = MIN(pnum_zero, bytes);
94
+ } else {
95
+ pnum_data = MAX(pnum_data, bytes);
96
+ }
68
+ }
97
+ }
69
+ }
98
+
70
+
99
+ if (pnum_data) {
71
+ /* Wake up the next in line. */
100
+ *pnum = pnum_data;
72
+ next = nodes[id].next;
101
+ return BDRV_BLOCK_DATA;
73
+ nodes[next].locked = 0;
102
+ } else {
74
+ qemu_futex_wake(&nodes[next].locked, 1);
103
+ *pnum = pnum_zero;
75
+}
104
+ return BDRV_BLOCK_ZERO;
76
+
105
+ }
77
+static void test_multi_fair_mutex_entry(void *opaque)
106
+}
78
+{
107
+
79
+ while (!atomic_mb_read(&now_stopping)) {
108
static const char *const quorum_strong_runtime_opts[] = {
80
+ mcs_mutex_lock();
109
QUORUM_OPT_VOTE_THRESHOLD,
81
+ counter++;
110
QUORUM_OPT_BLKVERIFY,
82
+ mcs_mutex_unlock();
111
@@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_quorum = {
83
+ atomic_inc(&atomic_counter);
112
.bdrv_close = quorum_close,
84
+ }
113
.bdrv_gather_child_options = quorum_gather_child_options,
85
+ atomic_dec(&running);
114
.bdrv_dirname = quorum_dirname,
86
+}
115
+ .bdrv_co_block_status = quorum_co_block_status,
87
+
116
88
+static void test_multi_fair_mutex(int threads, int seconds)
117
.bdrv_co_flush_to_disk = quorum_co_flush,
89
+{
118
90
+ int i;
119
diff --git a/tests/qemu-iotests/312 b/tests/qemu-iotests/312
91
+
120
new file mode 100755
92
+ assert(mutex_head == -1);
121
index XXXXXXX..XXXXXXX
93
+ counter = 0;
122
--- /dev/null
94
+ atomic_counter = 0;
123
+++ b/tests/qemu-iotests/312
95
+ now_stopping = false;
124
@@ -XXX,XX +XXX,XX @@
96
+
125
+#!/usr/bin/env bash
97
+ create_aio_contexts();
126
+#
98
+ assert(threads <= NUM_CONTEXTS);
127
+# Test drive-mirror with quorum
99
+ running = threads;
128
+#
100
+ for (i = 0; i < threads; i++) {
129
+# The goal of this test is to check how the quorum driver reports
101
+ Coroutine *co1 = qemu_coroutine_create(test_multi_fair_mutex_entry, NULL);
130
+# regions that are known to read as zeroes (BDRV_BLOCK_ZERO). The idea
102
+ aio_co_schedule(ctx[i], co1);
131
+# is that drive-mirror will try the efficient representation of zeroes
103
+ }
132
+# in the destination image instead of writing actual zeroes.
104
+
133
+#
105
+ g_usleep(seconds * 1000000);
134
+# Copyright (C) 2020 Igalia, S.L.
106
+
135
+# Author: Alberto Garcia <berto@igalia.com>
107
+ atomic_mb_set(&now_stopping, true);
136
+#
108
+ while (running > 0) {
137
+# This program is free software; you can redistribute it and/or modify
109
+ g_usleep(100000);
138
+# it under the terms of the GNU General Public License as published by
110
+ }
139
+# the Free Software Foundation; either version 2 of the License, or
111
+
140
+# (at your option) any later version.
112
+ join_aio_contexts();
141
+#
113
+ g_test_message("%d iterations/second\n", counter / seconds);
142
+# This program is distributed in the hope that it will be useful,
114
+ g_assert_cmpint(counter, ==, atomic_counter);
143
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
115
+}
144
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
116
+
145
+# GNU General Public License for more details.
117
+static void test_multi_fair_mutex_1(void)
146
+#
118
+{
147
+# You should have received a copy of the GNU General Public License
119
+ test_multi_fair_mutex(NUM_CONTEXTS, 1);
148
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
120
+}
149
+#
121
+
150
+
122
+static void test_multi_fair_mutex_10(void)
151
+# creator
123
+{
152
+owner=berto@igalia.com
124
+ test_multi_fair_mutex(NUM_CONTEXTS, 10);
153
+
125
+}
154
+seq=`basename $0`
126
+#endif
155
+echo "QA output created by $seq"
127
+
156
+
128
+/* Same test with pthread mutexes, for performance comparison and
157
+status=1    # failure is the default!
129
+ * portability. */
158
+
130
+
159
+_cleanup()
131
+static QemuMutex mutex;
160
+{
132
+
161
+ _rm_test_img "$TEST_IMG.0"
133
+static void test_multi_mutex_entry(void *opaque)
162
+ _rm_test_img "$TEST_IMG.1"
134
+{
163
+ _rm_test_img "$TEST_IMG.2"
135
+ while (!atomic_mb_read(&now_stopping)) {
164
+ _rm_test_img "$TEST_IMG.3"
136
+ qemu_mutex_lock(&mutex);
165
+ _cleanup_qemu
137
+ counter++;
166
+}
138
+ qemu_mutex_unlock(&mutex);
167
+trap "_cleanup; exit \$status" 0 1 2 3 15
139
+ atomic_inc(&atomic_counter);
168
+
140
+ }
169
+# get standard environment, filters and checks
141
+ atomic_dec(&running);
170
+. ./common.rc
142
+}
171
+. ./common.filter
143
+
172
+. ./common.qemu
144
+static void test_multi_mutex(int threads, int seconds)
173
+
145
+{
174
+_supported_fmt qcow2
146
+ int i;
175
+_supported_proto file
147
+
176
+_supported_os Linux
148
+ qemu_mutex_init(&mutex);
177
+_unsupported_imgopts cluster_size data_file
149
+ counter = 0;
178
+
150
+ atomic_counter = 0;
179
+echo
151
+ now_stopping = false;
180
+echo '### Create all images' # three source (quorum), one destination
152
+
181
+echo
153
+ create_aio_contexts();
182
+TEST_IMG="$TEST_IMG.0" _make_test_img -o cluster_size=64k 10M
154
+ assert(threads <= NUM_CONTEXTS);
183
+TEST_IMG="$TEST_IMG.1" _make_test_img -o cluster_size=64k 10M
155
+ running = threads;
184
+TEST_IMG="$TEST_IMG.2" _make_test_img -o cluster_size=64k 10M
156
+ for (i = 0; i < threads; i++) {
185
+TEST_IMG="$TEST_IMG.3" _make_test_img -o cluster_size=64k 10M
157
+ Coroutine *co1 = qemu_coroutine_create(test_multi_mutex_entry, NULL);
186
+
158
+ aio_co_schedule(ctx[i], co1);
187
+quorum="driver=raw,file.driver=quorum,file.vote-threshold=2"
159
+ }
188
+quorum="$quorum,file.children.0.file.filename=$TEST_IMG.0"
160
+
189
+quorum="$quorum,file.children.1.file.filename=$TEST_IMG.1"
161
+ g_usleep(seconds * 1000000);
190
+quorum="$quorum,file.children.2.file.filename=$TEST_IMG.2"
162
+
191
+quorum="$quorum,file.children.0.driver=$IMGFMT"
163
+ atomic_mb_set(&now_stopping, true);
192
+quorum="$quorum,file.children.1.driver=$IMGFMT"
164
+ while (running > 0) {
193
+quorum="$quorum,file.children.2.driver=$IMGFMT"
165
+ g_usleep(100000);
194
+
166
+ }
195
+echo
167
+
196
+echo '### Output of qemu-img map (empty quorum)'
168
+ join_aio_contexts();
197
+echo
169
+ g_test_message("%d iterations/second\n", counter / seconds);
198
+$QEMU_IMG map --image-opts $quorum | _filter_qemu_img_map
170
+ g_assert_cmpint(counter, ==, atomic_counter);
199
+
171
+}
200
+# Now we write data to the quorum. All three images will read as
172
+
201
+# zeroes in all cases, but with different ways to represent them
173
+static void test_multi_mutex_1(void)
202
+# (unallocated clusters, zero clusters, data clusters with zeroes)
174
+{
203
+# that will have an effect on how the data will be mirrored and the
175
+ test_multi_mutex(NUM_CONTEXTS, 1);
204
+# output of qemu-img map on the resulting image.
176
+}
205
+echo
177
+
206
+echo '### Write data to the quorum'
178
+static void test_multi_mutex_10(void)
207
+echo
179
+{
208
+# Test 1: data regions surrounded by unallocated clusters.
180
+ test_multi_mutex(NUM_CONTEXTS, 10);
209
+# Three data regions, the largest one (0x30000) will be picked, end result:
181
+}
210
+# offset 0x10000, length 0x30000 -> data
182
+
211
+$QEMU_IO -c "write -P 0 $((0x10000)) $((0x10000))" "$TEST_IMG.0" | _filter_qemu_io
183
/* End of tests. */
212
+$QEMU_IO -c "write -P 0 $((0x10000)) $((0x30000))" "$TEST_IMG.1" | _filter_qemu_io
184
213
+$QEMU_IO -c "write -P 0 $((0x10000)) $((0x20000))" "$TEST_IMG.2" | _filter_qemu_io
185
int main(int argc, char **argv)
214
+
186
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
215
+# Test 2: zero regions surrounded by data clusters.
187
g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_1);
216
+# First we allocate the data clusters.
188
g_test_add_func("/aio/multi/mutex/contended", test_multi_co_mutex_1);
217
+$QEMU_IO -c "open -o $quorum" -c "write -P 0 $((0x100000)) $((0x40000))" | _filter_qemu_io
189
g_test_add_func("/aio/multi/mutex/handoff", test_multi_co_mutex_2_3);
218
+
190
+#ifdef CONFIG_LINUX
219
+# Three zero regions, the smallest one (0x10000) will be picked, end result:
191
+ g_test_add_func("/aio/multi/mutex/mcs", test_multi_fair_mutex_1);
220
+# offset 0x100000, length 0x10000 -> data
192
+#endif
221
+# offset 0x110000, length 0x10000 -> zeroes
193
+ g_test_add_func("/aio/multi/mutex/pthread", test_multi_mutex_1);
222
+# offset 0x120000, length 0x20000 -> data
194
} else {
223
+$QEMU_IO -c "write -z $((0x110000)) $((0x10000))" "$TEST_IMG.0" | _filter_qemu_io
195
g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_10);
224
+$QEMU_IO -c "write -z $((0x110000)) $((0x30000))" "$TEST_IMG.1" | _filter_qemu_io
196
g_test_add_func("/aio/multi/mutex/contended", test_multi_co_mutex_10);
225
+$QEMU_IO -c "write -z $((0x110000)) $((0x20000))" "$TEST_IMG.2" | _filter_qemu_io
197
g_test_add_func("/aio/multi/mutex/handoff", test_multi_co_mutex_2_30);
226
+
198
+#ifdef CONFIG_LINUX
227
+# Test 3: zero clusters surrounded by unallocated clusters.
199
+ g_test_add_func("/aio/multi/mutex/mcs", test_multi_fair_mutex_10);
228
+# Everything reads as zeroes, no effect on the end result.
200
+#endif
229
+$QEMU_IO -c "write -z $((0x150000)) $((0x10000))" "$TEST_IMG.0" | _filter_qemu_io
201
+ g_test_add_func("/aio/multi/mutex/pthread", test_multi_mutex_10);
230
+$QEMU_IO -c "write -z $((0x150000)) $((0x30000))" "$TEST_IMG.1" | _filter_qemu_io
202
}
231
+$QEMU_IO -c "write -z $((0x150000)) $((0x20000))" "$TEST_IMG.2" | _filter_qemu_io
203
return g_test_run();
232
+
204
}
233
+# Test 4: mix of data and zero clusters.
234
+# The zero region will be ignored in favor of the largest data region
235
+# (0x20000), end result:
236
+# offset 0x200000, length 0x20000 -> data
237
+$QEMU_IO -c "write -P 0 $((0x200000)) $((0x10000))" "$TEST_IMG.0" | _filter_qemu_io
238
+$QEMU_IO -c "write -z $((0x200000)) $((0x30000))" "$TEST_IMG.1" | _filter_qemu_io
239
+$QEMU_IO -c "write -P 0 $((0x200000)) $((0x20000))" "$TEST_IMG.2" | _filter_qemu_io
240
+
241
+echo
242
+echo '### Launch the drive-mirror job'
243
+echo
244
+qemu_comm_method="qmp" _launch_qemu -drive if=virtio,"$quorum"
245
+h=$QEMU_HANDLE
246
+_send_qemu_cmd $h "{ 'execute': 'qmp_capabilities' }" 'return'
247
+
248
+_send_qemu_cmd $h \
249
+ "{'execute': 'drive-mirror',
250
+ 'arguments': {'device': 'virtio0',
251
+ 'format': '$IMGFMT',
252
+ 'target': '$TEST_IMG.3',
253
+ 'sync': 'full',
254
+ 'mode': 'existing' }}" \
255
+ "BLOCK_JOB_READY.*virtio0"
256
+
257
+_send_qemu_cmd $h \
258
+ "{ 'execute': 'block-job-complete',
259
+ 'arguments': { 'device': 'virtio0' } }" \
260
+ 'BLOCK_JOB_COMPLETED'
261
+
262
+_send_qemu_cmd $h "{ 'execute': 'quit' }" ''
263
+
264
+echo
265
+echo '### Output of qemu-img map (destination image)'
266
+echo
267
+$QEMU_IMG map "$TEST_IMG.3" | _filter_qemu_img_map
268
+
269
+# success, all done
270
+echo "*** done"
271
+rm -f $seq.full
272
+status=0
273
diff --git a/tests/qemu-iotests/312.out b/tests/qemu-iotests/312.out
274
new file mode 100644
275
index XXXXXXX..XXXXXXX
276
--- /dev/null
277
+++ b/tests/qemu-iotests/312.out
278
@@ -XXX,XX +XXX,XX @@
279
+QA output created by 312
280
+
281
+### Create all images
282
+
283
+Formatting 'TEST_DIR/t.IMGFMT.0', fmt=IMGFMT size=10485760
284
+Formatting 'TEST_DIR/t.IMGFMT.1', fmt=IMGFMT size=10485760
285
+Formatting 'TEST_DIR/t.IMGFMT.2', fmt=IMGFMT size=10485760
286
+Formatting 'TEST_DIR/t.IMGFMT.3', fmt=IMGFMT size=10485760
287
+
288
+### Output of qemu-img map (empty quorum)
289
+
290
+Offset Length File
291
+
292
+### Write data to the quorum
293
+
294
+wrote 65536/65536 bytes at offset 65536
295
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
296
+wrote 196608/196608 bytes at offset 65536
297
+192 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
298
+wrote 131072/131072 bytes at offset 65536
299
+128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
300
+wrote 262144/262144 bytes at offset 1048576
301
+256 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
302
+wrote 65536/65536 bytes at offset 1114112
303
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
304
+wrote 196608/196608 bytes at offset 1114112
305
+192 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
306
+wrote 131072/131072 bytes at offset 1114112
307
+128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
308
+wrote 65536/65536 bytes at offset 1376256
309
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
310
+wrote 196608/196608 bytes at offset 1376256
311
+192 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
312
+wrote 131072/131072 bytes at offset 1376256
313
+128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
314
+wrote 65536/65536 bytes at offset 2097152
315
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
316
+wrote 196608/196608 bytes at offset 2097152
317
+192 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
318
+wrote 131072/131072 bytes at offset 2097152
319
+128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
320
+
321
+### Launch the drive-mirror job
322
+
323
+{ 'execute': 'qmp_capabilities' }
324
+{"return": {}}
325
+{'execute': 'drive-mirror', 'arguments': {'device': 'virtio0', 'format': 'IMGFMT', 'target': 'TEST_DIR/t.IMGFMT.3', 'sync': 'full', 'mode': 'existing' }}
326
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "virtio0"}}
327
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "virtio0"}}
328
+{"return": {}}
329
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "virtio0"}}
330
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "virtio0", "len": 10485760, "offset": 10485760, "speed": 0, "type": "mirror"}}
331
+{ 'execute': 'block-job-complete', 'arguments': { 'device': 'virtio0' } }
332
+{"return": {}}
333
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "virtio0"}}
334
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "virtio0"}}
335
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "virtio0", "len": 10485760, "offset": 10485760, "speed": 0, "type": "mirror"}}
336
+{ 'execute': 'quit' }
337
+
338
+### Output of qemu-img map (destination image)
339
+
340
+Offset Length File
341
+0x10000 0x30000 TEST_DIR/t.IMGFMT.3
342
+0x100000 0x10000 TEST_DIR/t.IMGFMT.3
343
+0x120000 0x20000 TEST_DIR/t.IMGFMT.3
344
+0x200000 0x20000 TEST_DIR/t.IMGFMT.3
345
+*** done
346
diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group
347
index XXXXXXX..XXXXXXX 100644
348
--- a/tests/qemu-iotests/group
349
+++ b/tests/qemu-iotests/group
350
@@ -XXX,XX +XXX,XX @@
351
307 rw quick export
352
308 rw
353
309 rw auto quick
354
+312 rw auto quick
355
--
2.29.2

--
2.9.3
From: Paolo Bonzini <pbonzini@redhat.com>

This will avoid forward references in the next patch. It is also
more logical because CoQueue is not anymore the basic primitive.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Message-id: 20170213181244.16297-5-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
include/qemu/coroutine.h | 89 ++++++++++++++++++++++++------------------------
1 file changed, 44 insertions(+), 45 deletions(-)

From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>

Performance improvements / degradations are usually discussed in
percentage. Let's make the script calculate it for us.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20201021145859.11201-20-vsementsov@virtuozzo.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
[mreitz: 'seconds' instead of 'secs']
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
scripts/simplebench/results_to_text.py | 67 +++++++++++++++++++++++---
1 file changed, 60 insertions(+), 7 deletions(-)
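
For illustration, here is a minimal standalone sketch of the percentage row
described in the commit message above; the column names and averages are
invented, the real code works on the dict returned by simplebench.bench(),
and the corresponding diff follows.

# Percentage change of one column's average relative to an earlier column,
# rounded the same way as in the patch below.
def diff_percent(new_avg, base_avg):
    return round((new_avg - base_avg) / base_avg * 100)

averages = {'A': 14.2, 'B': 12.5, 'C': 15.0}   # invented per-column averages
for col in ('B', 'C'):
    for base in [c for c in averages if c < col]:
        print(f'{col} vs {base}: {diff_percent(averages[col], averages[base]):+}%')
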
15
diff --git a/scripts/simplebench/results_to_text.py b/scripts/simplebench/results_to_text.py
14
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
16
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
17
--- a/scripts/simplebench/results_to_text.py
16
--- a/include/qemu/coroutine.h
18
+++ b/scripts/simplebench/results_to_text.py
17
+++ b/include/qemu/coroutine.h
19
@@ -XXX,XX +XXX,XX @@
18
@@ -XXX,XX +XXX,XX @@ bool qemu_in_coroutine(void);
20
#
19
*/
21
20
bool qemu_coroutine_entered(Coroutine *co);
22
import math
21
23
+import tabulate
22
-
23
-/**
24
- * CoQueues are a mechanism to queue coroutines in order to continue executing
25
- * them later. They provide the fundamental primitives on which coroutine locks
26
- * are built.
27
- */
28
-typedef struct CoQueue {
29
- QSIMPLEQ_HEAD(, Coroutine) entries;
30
-} CoQueue;
31
-
32
-/**
33
- * Initialise a CoQueue. This must be called before any other operation is used
34
- * on the CoQueue.
35
- */
36
-void qemu_co_queue_init(CoQueue *queue);
37
-
38
-/**
39
- * Adds the current coroutine to the CoQueue and transfers control to the
40
- * caller of the coroutine.
41
- */
42
-void coroutine_fn qemu_co_queue_wait(CoQueue *queue);
43
-
44
-/**
45
- * Restarts the next coroutine in the CoQueue and removes it from the queue.
46
- *
47
- * Returns true if a coroutine was restarted, false if the queue is empty.
48
- */
49
-bool coroutine_fn qemu_co_queue_next(CoQueue *queue);
50
-
51
-/**
52
- * Restarts all coroutines in the CoQueue and leaves the queue empty.
53
- */
54
-void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue);
55
-
56
-/**
57
- * Enter the next coroutine in the queue
58
- */
59
-bool qemu_co_enter_next(CoQueue *queue);
60
-
61
-/**
62
- * Checks if the CoQueue is empty.
63
- */
64
-bool qemu_co_queue_empty(CoQueue *queue);
65
-
66
-
67
/**
68
* Provides a mutex that can be used to synchronise coroutines
69
*/
70
@@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex);
71
*/
72
void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex);
73
24
+
74
+
25
+# We want leading whitespace for difference row cells (see below)
75
+/**
26
+tabulate.PRESERVE_WHITESPACE = True
76
+ * CoQueues are a mechanism to queue coroutines in order to continue executing
27
77
+ * them later.
28
78
+ */
29
def format_value(x, stdev):
79
+typedef struct CoQueue {
30
@@ -XXX,XX +XXX,XX @@ def result_to_text(result):
80
+ QSIMPLEQ_HEAD(, Coroutine) entries;
31
return 'FAILED'
81
+} CoQueue;
32
33
34
-def results_to_text(results):
35
- """Return text representation of bench() returned dict."""
36
- from tabulate import tabulate
37
-
38
+def results_dimension(results):
39
dim = None
40
- tab = [[""] + [c['id'] for c in results['envs']]]
41
for case in results['cases']:
42
- row = [case['id']]
43
for env in results['envs']:
44
res = results['tab'][case['id']][env['id']]
45
if dim is None:
46
dim = res['dimension']
47
else:
48
assert dim == res['dimension']
49
+
82
+
50
+ assert dim in ('iops', 'seconds')
83
+/**
84
+ * Initialise a CoQueue. This must be called before any other operation is used
85
+ * on the CoQueue.
86
+ */
87
+void qemu_co_queue_init(CoQueue *queue);
51
+
88
+
52
+ return dim
89
+/**
90
+ * Adds the current coroutine to the CoQueue and transfers control to the
91
+ * caller of the coroutine.
92
+ */
93
+void coroutine_fn qemu_co_queue_wait(CoQueue *queue);
94
+
95
+/**
96
+ * Restarts the next coroutine in the CoQueue and removes it from the queue.
97
+ *
98
+ * Returns true if a coroutine was restarted, false if the queue is empty.
99
+ */
100
+bool coroutine_fn qemu_co_queue_next(CoQueue *queue);
101
+
102
+/**
103
+ * Restarts all coroutines in the CoQueue and leaves the queue empty.
104
+ */
105
+void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue);
106
+
107
+/**
108
+ * Enter the next coroutine in the queue
109
+ */
110
+bool qemu_co_enter_next(CoQueue *queue);
111
+
112
+/**
113
+ * Checks if the CoQueue is empty.
114
+ */
115
+bool qemu_co_queue_empty(CoQueue *queue);
53
+
116
+
54
+
117
+
55
+def results_to_text(results):
118
typedef struct CoRwlock {
56
+ """Return text representation of bench() returned dict."""
119
bool writer;
57
+ n_columns = len(results['envs'])
120
int reader;
58
+ named_columns = n_columns > 2
59
+ dim = results_dimension(results)
60
+ tab = []
61
+
62
+ if named_columns:
63
+ # Environment columns are named A, B, ...
64
+ tab.append([''] + [chr(ord('A') + i) for i in range(n_columns)])
65
+
66
+ tab.append([''] + [c['id'] for c in results['envs']])
67
+
68
+ for case in results['cases']:
69
+ row = [case['id']]
70
+ case_results = results['tab'][case['id']]
71
+ for env in results['envs']:
72
+ res = case_results[env['id']]
73
row.append(result_to_text(res))
74
tab.append(row)
75
76
- return f'All results are in {dim}\n\n' + tabulate(tab)
77
+ # Add row of difference between columns. For each column starting from
78
+ # B we calculate difference with all previous columns.
79
+ row = ['', ''] # case name and first column
80
+ for i in range(1, n_columns):
81
+ cell = ''
82
+ env = results['envs'][i]
83
+ res = case_results[env['id']]
84
+
85
+ if 'average' not in res:
86
+ # Failed result
87
+ row.append(cell)
88
+ continue
89
+
90
+ for j in range(0, i):
91
+ env_j = results['envs'][j]
92
+ res_j = case_results[env_j['id']]
93
+ cell += ' '
94
+
95
+ if 'average' not in res_j:
96
+ # Failed result
97
+ cell += '--'
98
+ continue
99
+
100
+ col_j = tab[0][j + 1] if named_columns else ''
101
+ diff_pr = round((res['average'] - res_j['average']) /
102
+ res_j['average'] * 100)
103
+ cell += f' {col_j}{diff_pr:+}%'
104
+ row.append(cell)
105
+ tab.append(row)
106
+
107
+ return f'All results are in {dim}\n\n' + tabulate.tabulate(tab)
108
--
121
--
109
2.29.2
122
2.9.3
110
123
111
124
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
We'll need a separate function, which will only "mark" request
3
All that CoQueue needs in order to become thread-safe is help
4
serialising with the specified alignment but not wait for conflicting
4
from an external mutex. Add this to the API.
5
requests. So, it will be like old bdrv_mark_request_serialising(),
5
6
before merging bdrv_wait_serialising_requests_locked() into it.
6
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
7
7
Reviewed-by: Fam Zheng <famz@redhat.com>
8
To reduce the possible mess, let's do the following:
8
Message-id: 20170213181244.16297-6-pbonzini@redhat.com
9
9
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
10
Public function that does both marking and waiting will be called
11
bdrv_make_request_serialising, and private function which will only
12
"mark" will be called tracked_request_set_serialising().
13
14
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
15
Reviewed-by: Max Reitz <mreitz@redhat.com>
16
Message-Id: <20201021145859.11201-6-vsementsov@virtuozzo.com>
17
Signed-off-by: Max Reitz <mreitz@redhat.com>
18
---
10
---
19
include/block/block_int.h | 3 ++-
11
include/qemu/coroutine.h | 8 +++++---
20
block/file-posix.c | 2 +-
12
block/backup.c | 2 +-
21
block/io.c | 35 +++++++++++++++++++++++------------
13
block/io.c | 4 ++--
22
3 files changed, 26 insertions(+), 14 deletions(-)
14
block/nbd-client.c | 2 +-
23
15
block/qcow2-cluster.c | 4 +---
24
diff --git a/include/block/block_int.h b/include/block/block_int.h
16
block/sheepdog.c | 2 +-
25
index XXXXXXX..XXXXXXX 100644
17
block/throttle-groups.c | 2 +-
26
--- a/include/block/block_int.h
18
hw/9pfs/9p.c | 2 +-
27
+++ b/include/block/block_int.h
19
util/qemu-coroutine-lock.c | 24 +++++++++++++++++++++---
28
@@ -XXX,XX +XXX,XX @@ extern unsigned int bdrv_drain_all_count;
20
9 files changed, 34 insertions(+), 16 deletions(-)
29
void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent);
21
30
void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent);
22
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
31
23
index XXXXXXX..XXXXXXX 100644
32
-bool coroutine_fn bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align);
24
--- a/include/qemu/coroutine.h
33
+bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
25
+++ b/include/qemu/coroutine.h
34
+ uint64_t align);
26
@@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex);
35
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs);
27
36
28
/**
37
int get_tmp_filename(char *filename, int size);
29
* CoQueues are a mechanism to queue coroutines in order to continue executing
38
diff --git a/block/file-posix.c b/block/file-posix.c
30
- * them later.
39
index XXXXXXX..XXXXXXX 100644
31
+ * them later. They are similar to condition variables, but they need help
40
--- a/block/file-posix.c
32
+ * from an external mutex in order to maintain thread-safety.
41
+++ b/block/file-posix.c
33
*/
42
@@ -XXX,XX +XXX,XX @@ raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes,
34
typedef struct CoQueue {
43
35
QSIMPLEQ_HEAD(, Coroutine) entries;
44
assert(bdrv_check_request(req->offset, req->bytes) == 0);
36
@@ -XXX,XX +XXX,XX @@ void qemu_co_queue_init(CoQueue *queue);
45
37
46
- bdrv_mark_request_serialising(req, bs->bl.request_alignment);
38
/**
47
+ bdrv_make_request_serialising(req, bs->bl.request_alignment);
39
* Adds the current coroutine to the CoQueue and transfers control to the
48
}
40
- * caller of the coroutine.
49
#endif
41
+ * caller of the coroutine. The mutex is unlocked during the wait and
50
42
+ * locked again afterwards.
43
*/
44
-void coroutine_fn qemu_co_queue_wait(CoQueue *queue);
45
+void coroutine_fn qemu_co_queue_wait(CoQueue *queue, CoMutex *mutex);
46
47
/**
48
* Restarts the next coroutine in the CoQueue and removes it from the queue.
49
diff --git a/block/backup.c b/block/backup.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/block/backup.c
52
+++ b/block/backup.c
53
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
54
retry = false;
55
QLIST_FOREACH(req, &job->inflight_reqs, list) {
56
if (end > req->start && start < req->end) {
57
- qemu_co_queue_wait(&req->wait_queue);
58
+ qemu_co_queue_wait(&req->wait_queue, NULL);
59
retry = true;
60
break;
61
}
51
diff --git a/block/io.c b/block/io.c
62
diff --git a/block/io.c b/block/io.c
52
index XXXXXXX..XXXXXXX 100644
63
index XXXXXXX..XXXXXXX 100644
53
--- a/block/io.c
64
--- a/block/io.c
54
+++ b/block/io.c
65
+++ b/block/io.c
55
@@ -XXX,XX +XXX,XX @@ bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
66
@@ -XXX,XX +XXX,XX @@ static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
56
return waited;
67
* (instead of producing a deadlock in the former case). */
68
if (!req->waiting_for) {
69
self->waiting_for = req;
70
- qemu_co_queue_wait(&req->wait_queue);
71
+ qemu_co_queue_wait(&req->wait_queue, NULL);
72
self->waiting_for = NULL;
73
retry = true;
74
waited = true;
75
@@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
76
77
/* Wait until any previous flushes are completed */
78
while (bs->active_flush_req) {
79
- qemu_co_queue_wait(&bs->flush_queue);
80
+ qemu_co_queue_wait(&bs->flush_queue, NULL);
81
}
82
83
bs->active_flush_req = true;
84
diff --git a/block/nbd-client.c b/block/nbd-client.c
85
index XXXXXXX..XXXXXXX 100644
86
--- a/block/nbd-client.c
87
+++ b/block/nbd-client.c
88
@@ -XXX,XX +XXX,XX @@ static void nbd_coroutine_start(NBDClientSession *s,
89
/* Poor man semaphore. The free_sema is locked when no other request
90
* can be accepted, and unlocked after receiving one reply. */
91
if (s->in_flight == MAX_NBD_REQUESTS) {
92
- qemu_co_queue_wait(&s->free_sema);
93
+ qemu_co_queue_wait(&s->free_sema, NULL);
94
assert(s->in_flight < MAX_NBD_REQUESTS);
95
}
96
s->in_flight++;
97
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
98
index XXXXXXX..XXXXXXX 100644
99
--- a/block/qcow2-cluster.c
100
+++ b/block/qcow2-cluster.c
101
@@ -XXX,XX +XXX,XX @@ static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
102
if (bytes == 0) {
103
/* Wait for the dependency to complete. We need to recheck
104
* the free/allocated clusters when we continue. */
105
- qemu_co_mutex_unlock(&s->lock);
106
- qemu_co_queue_wait(&old_alloc->dependent_requests);
107
- qemu_co_mutex_lock(&s->lock);
108
+ qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
109
return -EAGAIN;
110
}
111
}
112
diff --git a/block/sheepdog.c b/block/sheepdog.c
113
index XXXXXXX..XXXXXXX 100644
114
--- a/block/sheepdog.c
115
+++ b/block/sheepdog.c
116
@@ -XXX,XX +XXX,XX @@ static void wait_for_overlapping_aiocb(BDRVSheepdogState *s, SheepdogAIOCB *acb)
117
retry:
118
QLIST_FOREACH(cb, &s->inflight_aiocb_head, aiocb_siblings) {
119
if (AIOCBOverlapping(acb, cb)) {
120
- qemu_co_queue_wait(&s->overlapping_queue);
121
+ qemu_co_queue_wait(&s->overlapping_queue, NULL);
122
goto retry;
123
}
124
}
125
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
126
index XXXXXXX..XXXXXXX 100644
127
--- a/block/throttle-groups.c
128
+++ b/block/throttle-groups.c
129
@@ -XXX,XX +XXX,XX @@ void coroutine_fn throttle_group_co_io_limits_intercept(BlockBackend *blk,
130
if (must_wait || blkp->pending_reqs[is_write]) {
131
blkp->pending_reqs[is_write]++;
132
qemu_mutex_unlock(&tg->lock);
133
- qemu_co_queue_wait(&blkp->throttled_reqs[is_write]);
134
+ qemu_co_queue_wait(&blkp->throttled_reqs[is_write], NULL);
135
qemu_mutex_lock(&tg->lock);
136
blkp->pending_reqs[is_write]--;
137
}
138
diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
139
index XXXXXXX..XXXXXXX 100644
140
--- a/hw/9pfs/9p.c
141
+++ b/hw/9pfs/9p.c
142
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn v9fs_flush(void *opaque)
143
/*
144
* Wait for pdu to complete.
145
*/
146
- qemu_co_queue_wait(&cancel_pdu->complete);
147
+ qemu_co_queue_wait(&cancel_pdu->complete, NULL);
148
cancel_pdu->cancelled = 0;
149
pdu_free(cancel_pdu);
150
}
151
diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
152
index XXXXXXX..XXXXXXX 100644
153
--- a/util/qemu-coroutine-lock.c
154
+++ b/util/qemu-coroutine-lock.c
155
@@ -XXX,XX +XXX,XX @@ void qemu_co_queue_init(CoQueue *queue)
156
QSIMPLEQ_INIT(&queue->entries);
57
}
157
}
58
158
59
-bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
159
-void coroutine_fn qemu_co_queue_wait(CoQueue *queue)
60
+/* Called with req->bs->reqs_lock held */
160
+void coroutine_fn qemu_co_queue_wait(CoQueue *queue, CoMutex *mutex)
61
+static void tracked_request_set_serialising(BdrvTrackedRequest *req,
62
+ uint64_t align)
63
{
161
{
64
- BlockDriverState *bs = req->bs;
162
Coroutine *self = qemu_coroutine_self();
65
int64_t overlap_offset = req->offset & ~(align - 1);
163
QSIMPLEQ_INSERT_TAIL(&queue->entries, self, co_queue_next);
66
uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
164
+
67
- overlap_offset;
165
+ if (mutex) {
68
- bool waited;
166
+ qemu_co_mutex_unlock(mutex);
69
167
+ }
70
- qemu_co_mutex_lock(&bs->reqs_lock);
168
+
71
if (!req->serialising) {
169
+ /* There is no race condition here. Other threads will call
72
qatomic_inc(&req->bs->serialising_in_flight);
170
+ * aio_co_schedule on our AioContext, which can reenter this
73
req->serialising = true;
171
+ * coroutine but only after this yield and after the main loop
74
@@ -XXX,XX +XXX,XX @@ bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
172
+ * has gone through the next iteration.
75
173
+ */
76
req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
174
qemu_coroutine_yield();
77
req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
175
assert(qemu_in_coroutine());
78
- waited = bdrv_wait_serialising_requests_locked(req);
176
+
79
- qemu_co_mutex_unlock(&bs->reqs_lock);
177
+ /* TODO: OSv implements wait morphing here, where the wakeup
80
- return waited;
178
+ * primitive automatically places the woken coroutine on the
179
+ * mutex's queue. This avoids the thundering herd effect.
180
+ */
181
+ if (mutex) {
182
+ qemu_co_mutex_lock(mutex);
183
+ }
81
}
184
}
82
185
83
/**
186
/**
84
@@ -XXX,XX +XXX,XX @@ static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self
187
@@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_rdlock(CoRwlock *lock)
85
return waited;
188
Coroutine *self = qemu_coroutine_self();
86
}
189
87
190
while (lock->writer) {
88
+bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
191
- qemu_co_queue_wait(&lock->queue);
89
+ uint64_t align)
192
+ qemu_co_queue_wait(&lock->queue, NULL);
90
+{
193
}
91
+ bool waited;
194
lock->reader++;
92
+
195
self->locks_held++;
93
+ qemu_co_mutex_lock(&req->bs->reqs_lock);
196
@@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_wrlock(CoRwlock *lock)
94
+
197
Coroutine *self = qemu_coroutine_self();
95
+ tracked_request_set_serialising(req, align);
198
96
+ waited = bdrv_wait_serialising_requests_locked(req);
199
while (lock->writer || lock->reader) {
97
+
200
- qemu_co_queue_wait(&lock->queue);
98
+ qemu_co_mutex_unlock(&req->bs->reqs_lock);
201
+ qemu_co_queue_wait(&lock->queue, NULL);
99
+
202
}
100
+ return waited;
203
lock->writer = true;
101
+}
204
self->locks_held++;
102
+
103
int bdrv_check_request(int64_t offset, int64_t bytes)
104
{
105
if (offset < 0 || bytes < 0) {
106
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
107
* with each other for the same cluster. For example, in copy-on-read
108
* it ensures that the CoR read and write operations are atomic and
109
* guest writes cannot interleave between them. */
110
- bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
111
+ bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
112
} else {
113
bdrv_wait_serialising_requests(req);
114
}
115
@@ -XXX,XX +XXX,XX @@ bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes,
116
assert(!(flags & ~BDRV_REQ_MASK));
117
118
if (flags & BDRV_REQ_SERIALISING) {
119
- bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
120
+ bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
121
} else {
122
bdrv_wait_serialising_requests(req);
123
}
124
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
125
126
padding = bdrv_init_padding(bs, offset, bytes, &pad);
127
if (padding) {
128
- bdrv_mark_request_serialising(req, align);
129
+ bdrv_make_request_serialising(req, align);
130
131
bdrv_padding_rmw_read(child, req, &pad, true);
132
133
@@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
134
}
135
136
if (bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad)) {
137
- bdrv_mark_request_serialising(&req, align);
138
+ bdrv_make_request_serialising(&req, align);
139
bdrv_padding_rmw_read(child, &req, &pad, false);
140
}
141
142
@@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
143
* new area, we need to make sure that no write requests are made to it
144
* concurrently or they might be overwritten by preallocation. */
145
if (new_bytes) {
146
- bdrv_mark_request_serialising(&req, 1);
147
+ bdrv_make_request_serialising(&req, 1);
148
}
149
if (bs->read_only) {
150
error_setg(errp, "Image is read-only");
151
--
205
--
152
2.29.2
206
2.9.3
153
207
154
208
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
1
3
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
4
Reviewed-by: Max Reitz <mreitz@redhat.com>
5
Message-Id: <20201021145859.11201-11-vsementsov@virtuozzo.com>
6
Signed-off-by: Max Reitz <mreitz@redhat.com>
7
---
8
tests/qemu-iotests/iotests.py | 7 ++++++-
9
1 file changed, 6 insertions(+), 1 deletion(-)
10
11
diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tests/qemu-iotests/iotests.py
14
+++ b/tests/qemu-iotests/iotests.py
15
@@ -XXX,XX +XXX,XX @@ def qemu_io_log(*args):
16
17
def qemu_io_silent(*args):
18
'''Run qemu-io and return the exit code, suppressing stdout'''
19
- args = qemu_io_args + list(args)
20
+ if '-f' in args or '--image-opts' in args:
21
+ default_args = qemu_io_args_no_fmt
22
+ else:
23
+ default_args = qemu_io_args
24
+
25
+ args = default_args + list(args)
26
exitcode = subprocess.call(args, stdout=open('/dev/null', 'w'))
27
if exitcode < 0:
28
sys.stderr.write('qemu-io received signal %i: %s\n' %
29
--
30
2.29.2
31
32
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
1
3
Support benchmarks returning not seconds but iops. We'll use it for
4
further new test.
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Message-Id: <20201021145859.11201-15-vsementsov@virtuozzo.com>
8
Reviewed-by: Max Reitz <mreitz@redhat.com>
9
Signed-off-by: Max Reitz <mreitz@redhat.com>
10
---
11
scripts/simplebench/simplebench.py | 38 ++++++++++++++++++++++--------
12
1 file changed, 28 insertions(+), 10 deletions(-)
13
14
diff --git a/scripts/simplebench/simplebench.py b/scripts/simplebench/simplebench.py
15
index XXXXXXX..XXXXXXX 100644
16
--- a/scripts/simplebench/simplebench.py
17
+++ b/scripts/simplebench/simplebench.py
18
@@ -XXX,XX +XXX,XX @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
19
20
test_func -- benchmarking function with prototype
21
test_func(env, case), which takes test_env and test_case
22
- arguments and returns {'seconds': int} (which is benchmark
23
- result) on success and {'error': str} on error. Returned
24
- dict may contain any other additional fields.
25
+ arguments and on success returns dict with 'seconds' or
26
+ 'iops' (or both) fields, specifying the benchmark result.
27
+ If both 'iops' and 'seconds' provided, the 'iops' is
28
+ considered the main, and 'seconds' is just an additional
29
+ info. On failure test_func should return {'error': str}.
30
+ Returned dict may contain any other additional fields.
31
test_env -- test environment - opaque first argument for test_func
32
test_case -- test case - opaque second argument for test_func
33
count -- how many times to call test_func, to calculate average
34
@@ -XXX,XX +XXX,XX @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
35
36
Returns dict with the following fields:
37
'runs': list of test_func results
38
- 'average': average seconds per run (exists only if at least one run
39
- succeeded)
40
+ 'dimension': dimension of results, may be 'seconds' or 'iops'
41
+ 'average': average value (iops or seconds) per run (exists only if at
42
+ least one run succeeded)
43
'delta': maximum delta between test_func result and the average
44
(exists only if at least one run succeeded)
45
'n-failed': number of failed runs (exists only if at least one run
46
@@ -XXX,XX +XXX,XX @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
47
48
result = {'runs': runs}
49
50
- succeeded = [r for r in runs if ('seconds' in r)]
51
+ succeeded = [r for r in runs if ('seconds' in r or 'iops' in r)]
52
if succeeded:
53
- avg = sum(r['seconds'] for r in succeeded) / len(succeeded)
54
+ if 'iops' in succeeded[0]:
55
+ assert all('iops' in r for r in succeeded)
56
+ dim = 'iops'
57
+ else:
58
+ assert all('seconds' in r for r in succeeded)
59
+ assert all('iops' not in r for r in succeeded)
60
+ dim = 'seconds'
61
+ avg = sum(r[dim] for r in succeeded) / len(succeeded)
62
+ result['dimension'] = dim
63
result['average'] = avg
64
- result['delta'] = max(abs(r['seconds'] - avg) for r in succeeded)
65
+ result['delta'] = max(abs(r[dim] - avg) for r in succeeded)
66
67
if len(succeeded) < count:
68
result['n-failed'] = count - len(succeeded)
69
@@ -XXX,XX +XXX,XX @@ def ascii(results):
70
"""Return ASCII representation of bench() returned dict."""
71
from tabulate import tabulate
72
73
+ dim = None
74
tab = [[""] + [c['id'] for c in results['envs']]]
75
for case in results['cases']:
76
row = [case['id']]
77
for env in results['envs']:
78
- row.append(ascii_one(results['tab'][case['id']][env['id']]))
79
+ res = results['tab'][case['id']][env['id']]
80
+ if dim is None:
81
+ dim = res['dimension']
82
+ else:
83
+ assert dim == res['dimension']
84
+ row.append(ascii_one(res))
85
tab.append(row)
86
87
- return tabulate(tab)
88
+ return f'All results are in {dim}\n\n' + tabulate(tab)
89
--
90
2.29.2
91
92
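
As a standalone illustration of the convention described above: a test
function reports either 'iops' or 'seconds', and the harness derives the
dimension and the average from whichever key is present. The test function
and its numbers below are invented.

# Invented test function in the style bench_one() expects after this change;
# a time-based test would return {'seconds': ...} instead of {'iops': ...}.
def fake_test(env, case):
    return {'iops': 12000 + 500 * env['boost']}

runs = [fake_test({'boost': b}, None) for b in range(3)]
dim = 'iops' if 'iops' in runs[0] else 'seconds'
average = sum(r[dim] for r in runs) / len(runs)
print(f'dimension={dim}, average={average:.1f}')
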
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
1
3
Standard deviation is more usual to see after +- than current maximum
4
of deviations.
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Message-Id: <20201021145859.11201-16-vsementsov@virtuozzo.com>
8
Reviewed-by: Max Reitz <mreitz@redhat.com>
9
Signed-off-by: Max Reitz <mreitz@redhat.com>
10
---
11
scripts/simplebench/simplebench.py | 11 ++++++-----
12
1 file changed, 6 insertions(+), 5 deletions(-)
13
14
diff --git a/scripts/simplebench/simplebench.py b/scripts/simplebench/simplebench.py
15
index XXXXXXX..XXXXXXX 100644
16
--- a/scripts/simplebench/simplebench.py
17
+++ b/scripts/simplebench/simplebench.py
18
@@ -XXX,XX +XXX,XX @@
19
# along with this program. If not, see <http://www.gnu.org/licenses/>.
20
#
21
22
+import statistics
23
+
24
25
def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
26
"""Benchmark one test-case
27
@@ -XXX,XX +XXX,XX @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
28
'dimension': dimension of results, may be 'seconds' or 'iops'
29
'average': average value (iops or seconds) per run (exists only if at
30
least one run succeeded)
31
- 'delta': maximum delta between test_func result and the average
32
+ 'stdev': standard deviation of results
33
(exists only if at least one run succeeded)
34
'n-failed': number of failed runs (exists only if at least one run
35
failed)
36
@@ -XXX,XX +XXX,XX @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
37
assert all('seconds' in r for r in succeeded)
38
assert all('iops' not in r for r in succeeded)
39
dim = 'seconds'
40
- avg = sum(r[dim] for r in succeeded) / len(succeeded)
41
result['dimension'] = dim
42
- result['average'] = avg
43
- result['delta'] = max(abs(r[dim] - avg) for r in succeeded)
44
+ result['average'] = statistics.mean(r[dim] for r in succeeded)
45
+ result['stdev'] = statistics.stdev(r[dim] for r in succeeded)
46
47
if len(succeeded) < count:
48
result['n-failed'] = count - len(succeeded)
49
@@ -XXX,XX +XXX,XX @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
50
def ascii_one(result):
51
"""Return ASCII representation of bench_one() returned dict."""
52
if 'average' in result:
53
- s = '{:.2f} +- {:.2f}'.format(result['average'], result['delta'])
54
+ s = '{:.2f} +- {:.2f}'.format(result['average'], result['stdev'])
55
if 'n-failed' in result:
56
s += '\n({} failed)'.format(result['n-failed'])
57
return s
58
--
59
2.29.2
60
61
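
A quick standalone example of the reporting change above, with invented
per-run timings:

import statistics

# The patch reports mean +- standard deviation instead of
# mean +- maximum deviation from the mean.
runs = [10.1, 9.8, 10.4, 10.0]          # invented results in seconds
print('{:.2f} +- {:.2f}'.format(statistics.mean(runs), statistics.stdev(runs)))
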
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
1
3
Next patch will use utf8 plus-minus symbol, let's use more generic (and
4
more readable) name.
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Message-Id: <20201021145859.11201-17-vsementsov@virtuozzo.com>
8
Reviewed-by: Max Reitz <mreitz@redhat.com>
9
Signed-off-by: Max Reitz <mreitz@redhat.com>
10
---
11
scripts/simplebench/bench-example.py | 2 +-
12
scripts/simplebench/bench_write_req.py | 2 +-
13
scripts/simplebench/simplebench.py | 10 +++++-----
14
3 files changed, 7 insertions(+), 7 deletions(-)
15
16
diff --git a/scripts/simplebench/bench-example.py b/scripts/simplebench/bench-example.py
17
index XXXXXXX..XXXXXXX 100644
18
--- a/scripts/simplebench/bench-example.py
19
+++ b/scripts/simplebench/bench-example.py
20
@@ -XXX,XX +XXX,XX @@ test_envs = [
21
]
22
23
result = simplebench.bench(bench_func, test_envs, test_cases, count=3)
24
-print(simplebench.ascii(result))
25
+print(simplebench.results_to_text(result))
26
diff --git a/scripts/simplebench/bench_write_req.py b/scripts/simplebench/bench_write_req.py
27
index XXXXXXX..XXXXXXX 100755
28
--- a/scripts/simplebench/bench_write_req.py
29
+++ b/scripts/simplebench/bench_write_req.py
30
@@ -XXX,XX +XXX,XX @@ if __name__ == '__main__':
31
32
result = simplebench.bench(bench_func, test_envs, test_cases, count=3,
33
initial_run=False)
34
- print(simplebench.ascii(result))
35
+ print(simplebench.results_to_text(result))
36
diff --git a/scripts/simplebench/simplebench.py b/scripts/simplebench/simplebench.py
37
index XXXXXXX..XXXXXXX 100644
38
--- a/scripts/simplebench/simplebench.py
39
+++ b/scripts/simplebench/simplebench.py
40
@@ -XXX,XX +XXX,XX @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
41
return result
42
43
44
-def ascii_one(result):
45
- """Return ASCII representation of bench_one() returned dict."""
46
+def result_to_text(result):
47
+ """Return text representation of bench_one() returned dict."""
48
if 'average' in result:
49
s = '{:.2f} +- {:.2f}'.format(result['average'], result['stdev'])
50
if 'n-failed' in result:
51
@@ -XXX,XX +XXX,XX @@ def bench(test_func, test_envs, test_cases, *args, **vargs):
52
return results
53
54
55
-def ascii(results):
56
- """Return ASCII representation of bench() returned dict."""
57
+def results_to_text(results):
58
+ """Return text representation of bench() returned dict."""
59
from tabulate import tabulate
60
61
dim = None
62
@@ -XXX,XX +XXX,XX @@ def ascii(results):
63
dim = res['dimension']
64
else:
65
assert dim == res['dimension']
66
- row.append(ascii_one(res))
67
+ row.append(result_to_text(res))
68
tab.append(row)
69
70
return f'All results are in {dim}\n\n' + tabulate(tab)
71
--
72
2.29.2
73
74
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
1
3
Let's keep view part in separate: this way it's better to improve it in
4
the following commits.
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Message-Id: <20201021145859.11201-18-vsementsov@virtuozzo.com>
8
Reviewed-by: Max Reitz <mreitz@redhat.com>
9
Signed-off-by: Max Reitz <mreitz@redhat.com>
10
---
11
scripts/simplebench/bench-example.py | 3 +-
12
scripts/simplebench/bench_write_req.py | 3 +-
13
scripts/simplebench/results_to_text.py | 48 ++++++++++++++++++++++++++
14
scripts/simplebench/simplebench.py | 31 -----------------
15
4 files changed, 52 insertions(+), 33 deletions(-)
16
create mode 100644 scripts/simplebench/results_to_text.py
17
18
diff --git a/scripts/simplebench/bench-example.py b/scripts/simplebench/bench-example.py
19
index XXXXXXX..XXXXXXX 100644
20
--- a/scripts/simplebench/bench-example.py
21
+++ b/scripts/simplebench/bench-example.py
22
@@ -XXX,XX +XXX,XX @@
23
#
24
25
import simplebench
26
+from results_to_text import results_to_text
27
from bench_block_job import bench_block_copy, drv_file, drv_nbd
28
29
30
@@ -XXX,XX +XXX,XX @@ test_envs = [
31
]
32
33
result = simplebench.bench(bench_func, test_envs, test_cases, count=3)
34
-print(simplebench.results_to_text(result))
35
+print(results_to_text(result))
36
diff --git a/scripts/simplebench/bench_write_req.py b/scripts/simplebench/bench_write_req.py
37
index XXXXXXX..XXXXXXX 100755
38
--- a/scripts/simplebench/bench_write_req.py
39
+++ b/scripts/simplebench/bench_write_req.py
40
@@ -XXX,XX +XXX,XX @@ import sys
41
import os
42
import subprocess
43
import simplebench
44
+from results_to_text import results_to_text
45
46
47
def bench_func(env, case):
48
@@ -XXX,XX +XXX,XX @@ if __name__ == '__main__':
49
50
result = simplebench.bench(bench_func, test_envs, test_cases, count=3,
51
initial_run=False)
52
- print(simplebench.results_to_text(result))
53
+ print(results_to_text(result))
54
diff --git a/scripts/simplebench/results_to_text.py b/scripts/simplebench/results_to_text.py
55
new file mode 100644
56
index XXXXXXX..XXXXXXX
57
--- /dev/null
58
+++ b/scripts/simplebench/results_to_text.py
59
@@ -XXX,XX +XXX,XX @@
60
+# Simple benchmarking framework
61
+#
62
+# Copyright (c) 2019 Virtuozzo International GmbH.
63
+#
64
+# This program is free software; you can redistribute it and/or modify
65
+# it under the terms of the GNU General Public License as published by
66
+# the Free Software Foundation; either version 2 of the License, or
67
+# (at your option) any later version.
68
+#
69
+# This program is distributed in the hope that it will be useful,
70
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
71
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
72
+# GNU General Public License for more details.
73
+#
74
+# You should have received a copy of the GNU General Public License
75
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
76
+#
77
+
78
+
79
+def result_to_text(result):
80
+ """Return text representation of bench_one() returned dict."""
81
+ if 'average' in result:
82
+ s = '{:.2f} +- {:.2f}'.format(result['average'], result['stdev'])
83
+ if 'n-failed' in result:
84
+ s += '\n({} failed)'.format(result['n-failed'])
85
+ return s
86
+ else:
87
+ return 'FAILED'
88
+
89
+
90
+def results_to_text(results):
91
+ """Return text representation of bench() returned dict."""
92
+ from tabulate import tabulate
93
+
94
+ dim = None
95
+ tab = [[""] + [c['id'] for c in results['envs']]]
96
+ for case in results['cases']:
97
+ row = [case['id']]
98
+ for env in results['envs']:
99
+ res = results['tab'][case['id']][env['id']]
100
+ if dim is None:
101
+ dim = res['dimension']
102
+ else:
103
+ assert dim == res['dimension']
104
+ row.append(result_to_text(res))
105
+ tab.append(row)
106
+
107
+ return f'All results are in {dim}\n\n' + tabulate(tab)
108
diff --git a/scripts/simplebench/simplebench.py b/scripts/simplebench/simplebench.py
109
index XXXXXXX..XXXXXXX 100644
110
--- a/scripts/simplebench/simplebench.py
111
+++ b/scripts/simplebench/simplebench.py
112
@@ -XXX,XX +XXX,XX @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
113
return result
114
115
116
-def result_to_text(result):
117
- """Return text representation of bench_one() returned dict."""
118
- if 'average' in result:
119
- s = '{:.2f} +- {:.2f}'.format(result['average'], result['stdev'])
120
- if 'n-failed' in result:
121
- s += '\n({} failed)'.format(result['n-failed'])
122
- return s
123
- else:
124
- return 'FAILED'
125
-
126
-
127
def bench(test_func, test_envs, test_cases, *args, **vargs):
128
"""Fill benchmark table
129
130
@@ -XXX,XX +XXX,XX @@ def bench(test_func, test_envs, test_cases, *args, **vargs):
131
132
print('Done')
133
return results
134
-
135
-
136
-def results_to_text(results):
137
- """Return text representation of bench() returned dict."""
138
- from tabulate import tabulate
139
-
140
- dim = None
141
- tab = [[""] + [c['id'] for c in results['envs']]]
142
- for case in results['cases']:
143
- row = [case['id']]
144
- for env in results['envs']:
145
- res = results['tab'][case['id']][env['id']]
146
- if dim is None:
147
- dim = res['dimension']
148
- else:
149
- assert dim == res['dimension']
150
- row.append(result_to_text(res))
151
- tab.append(row)
152
-
153
- return f'All results are in {dim}\n\n' + tabulate(tab)
154
--
155
2.29.2
156
157
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
1
3
Move to generic format for floats and percentage for error.
4
5
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
6
Message-Id: <20201021145859.11201-19-vsementsov@virtuozzo.com>
7
Acked-by: Max Reitz <mreitz@redhat.com>
8
Signed-off-by: Max Reitz <mreitz@redhat.com>
9
---
10
scripts/simplebench/results_to_text.py | 13 ++++++++++++-
11
1 file changed, 12 insertions(+), 1 deletion(-)
12
13
diff --git a/scripts/simplebench/results_to_text.py b/scripts/simplebench/results_to_text.py
14
index XXXXXXX..XXXXXXX 100644
15
--- a/scripts/simplebench/results_to_text.py
16
+++ b/scripts/simplebench/results_to_text.py
17
@@ -XXX,XX +XXX,XX @@
18
# along with this program. If not, see <http://www.gnu.org/licenses/>.
19
#
20
21
+import math
22
+
23
+
24
+def format_value(x, stdev):
25
+ stdev_pr = stdev / x * 100
26
+ if stdev_pr < 1.5:
27
+ # don't care too much
28
+ return f'{x:.2g}'
29
+ else:
30
+ return f'{x:.2g} ± {math.ceil(stdev_pr)}%'
31
+
32
33
def result_to_text(result):
34
"""Return text representation of bench_one() returned dict."""
35
if 'average' in result:
36
- s = '{:.2f} +- {:.2f}'.format(result['average'], result['stdev'])
37
+ s = format_value(result['average'], result['stdev'])
38
if 'n-failed' in result:
39
s += '\n({} failed)'.format(result['n-failed'])
40
return s
41
--
42
2.29.2
43
44
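
For illustration, the formatting rule above applied to two invented values:

import math

# Same rule as format_value() above: append the error as a percentage
# only when it is at least 1.5% of the value.
def fmt(x, stdev):
    stdev_pr = stdev / x * 100
    if stdev_pr < 1.5:
        return f'{x:.2g}'
    return f'{x:.2g} ± {math.ceil(stdev_pr)}%'

print(fmt(10.3, 0.09))   # -> '10'
print(fmt(10.3, 0.8))    # -> '10 ± 8%'
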
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
An NVMe drive cannot be shrunk.
3
This adds a CoMutex around the existing CoQueue. Because the write-side
4
can just take CoMutex, the old "writer" field is not necessary anymore.
5
Instead of removing it altogether, count the number of pending writers
6
during a read-side critical section and forbid further readers from
7
entering.
4
8
5
Since commit c80d8b06cfa we can use the @exact parameter (set
9
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
6
to false) to return success if the block device is larger than
10
Reviewed-by: Fam Zheng <famz@redhat.com>
7
the requested offset (even if the device cannot be shrunk).
11
Message-id: 20170213181244.16297-7-pbonzini@redhat.com
12
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
13
---
14
include/qemu/coroutine.h | 3 ++-
15
util/qemu-coroutine-lock.c | 35 ++++++++++++++++++++++++-----------
16
2 files changed, 26 insertions(+), 12 deletions(-)
8
17
9
Use this parameter to implement the NVMe truncate() coroutine,
18
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
10
similarly to how it is done for the iscsi and file-posix drivers
11
(see commit 82325ae5f2f "Evaluate @exact in protocol drivers").
12
13
Reported-by: Xueqiang Wei <xuwei@redhat.com>
14
Suggested-by: Max Reitz <mreitz@redhat.com>
15
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
16
Message-Id: <20201210125202.858656-1-philmd@redhat.com>
17
Signed-off-by: Max Reitz <mreitz@redhat.com>
18
---
19
block/nvme.c | 24 ++++++++++++++++++++++++
20
1 file changed, 24 insertions(+)
21
22
diff --git a/block/nvme.c b/block/nvme.c
23
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
24
--- a/block/nvme.c
20
--- a/include/qemu/coroutine.h
25
+++ b/block/nvme.c
21
+++ b/include/qemu/coroutine.h
26
@@ -XXX,XX +XXX,XX @@ out:
22
@@ -XXX,XX +XXX,XX @@ bool qemu_co_queue_empty(CoQueue *queue);
27
23
24
25
typedef struct CoRwlock {
26
- bool writer;
27
+ int pending_writer;
28
int reader;
29
+ CoMutex mutex;
30
CoQueue queue;
31
} CoRwlock;
32
33
diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/util/qemu-coroutine-lock.c
36
+++ b/util/qemu-coroutine-lock.c
37
@@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_init(CoRwlock *lock)
38
{
39
memset(lock, 0, sizeof(*lock));
40
qemu_co_queue_init(&lock->queue);
41
+ qemu_co_mutex_init(&lock->mutex);
28
}
42
}
29
43
30
+static int coroutine_fn nvme_co_truncate(BlockDriverState *bs, int64_t offset,
44
void qemu_co_rwlock_rdlock(CoRwlock *lock)
31
+ bool exact, PreallocMode prealloc,
45
{
32
+ BdrvRequestFlags flags, Error **errp)
46
Coroutine *self = qemu_coroutine_self();
33
+{
47
34
+ int64_t cur_length;
48
- while (lock->writer) {
49
- qemu_co_queue_wait(&lock->queue, NULL);
50
+ qemu_co_mutex_lock(&lock->mutex);
51
+ /* For fairness, wait if a writer is in line. */
52
+ while (lock->pending_writer) {
53
+ qemu_co_queue_wait(&lock->queue, &lock->mutex);
54
}
55
lock->reader++;
56
+ qemu_co_mutex_unlock(&lock->mutex);
35
+
57
+
36
+ if (prealloc != PREALLOC_MODE_OFF) {
58
+ /* The rest of the read-side critical section is run without the mutex. */
37
+ error_setg(errp, "Unsupported preallocation mode '%s'",
59
self->locks_held++;
38
+ PreallocMode_str(prealloc));
60
}
39
+ return -ENOTSUP;
61
40
+ }
62
@@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_unlock(CoRwlock *lock)
63
Coroutine *self = qemu_coroutine_self();
64
65
assert(qemu_in_coroutine());
66
- if (lock->writer) {
67
- lock->writer = false;
68
+ if (!lock->reader) {
69
+ /* The critical section started in qemu_co_rwlock_wrlock. */
70
qemu_co_queue_restart_all(&lock->queue);
71
} else {
72
+ self->locks_held--;
41
+
73
+
42
+ cur_length = nvme_getlength(bs);
74
+ qemu_co_mutex_lock(&lock->mutex);
43
+ if (offset != cur_length && exact) {
75
lock->reader--;
44
+ error_setg(errp, "Cannot resize NVMe devices");
76
assert(lock->reader >= 0);
45
+ return -ENOTSUP;
77
/* Wakeup only one waiting writer */
46
+ } else if (offset > cur_length) {
78
@@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_unlock(CoRwlock *lock)
47
+ error_setg(errp, "Cannot grow NVMe devices");
79
qemu_co_queue_next(&lock->queue);
48
+ return -EINVAL;
80
}
49
+ }
81
}
82
- self->locks_held--;
83
+ qemu_co_mutex_unlock(&lock->mutex);
84
}
85
86
void qemu_co_rwlock_wrlock(CoRwlock *lock)
87
{
88
- Coroutine *self = qemu_coroutine_self();
89
-
90
- while (lock->writer || lock->reader) {
91
- qemu_co_queue_wait(&lock->queue, NULL);
92
+ qemu_co_mutex_lock(&lock->mutex);
93
+ lock->pending_writer++;
94
+ while (lock->reader) {
95
+ qemu_co_queue_wait(&lock->queue, &lock->mutex);
96
}
97
- lock->writer = true;
98
- self->locks_held++;
99
+ lock->pending_writer--;
50
+
100
+
51
+ return 0;
101
+ /* The rest of the write-side critical section is run with
52
+}
102
+ * the mutex taken, so that lock->reader remains zero.
53
103
+ * There is no need to update self->locks_held.
54
static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
104
+ */
55
BlockReopenQueue *queue, Error **errp)
105
}
56
@@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_nvme = {
57
.bdrv_close = nvme_close,
58
.bdrv_getlength = nvme_getlength,
59
.bdrv_probe_blocksizes = nvme_probe_blocksizes,
60
+ .bdrv_co_truncate = nvme_co_truncate,
61
62
.bdrv_co_preadv = nvme_co_preadv,
63
.bdrv_co_pwritev = nvme_co_pwritev,
64
--
106
--
65
2.29.2
107
2.9.3
66
108
67
109
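
To make the locking scheme in the CoRwlock patch easier to follow, here is an
analogous standalone sketch using plain threads and a condition variable
instead of coroutines, CoMutex and CoQueue. This is not the QEMU code, only
the same idea: the write side keeps the mutex, and readers defer to pending
writers.

import threading

class FairRWLock:
    """Thread-based analogue of the scheme above: a mutex plus a wait
    queue (condition variable); readers yield to pending writers."""

    def __init__(self):
        self._mutex = threading.Lock()
        self._queue = threading.Condition(self._mutex)
        self._readers = 0
        self._pending_writers = 0

    def rdlock(self):
        with self._mutex:
            # For fairness, wait if a writer is in line.
            while self._pending_writers:
                self._queue.wait()
            self._readers += 1
        # The read-side critical section runs without the mutex.

    def rdunlock(self):
        with self._mutex:
            self._readers -= 1
            if self._readers == 0:
                self._queue.notify_all()

    def wrlock(self):
        self._mutex.acquire()
        self._pending_writers += 1
        while self._readers:
            self._queue.wait()
        self._pending_writers -= 1
        # The write side keeps the mutex, so no new reader can get
        # past rdlock() until wrunlock() releases it.

    def wrunlock(self):
        self._queue.notify_all()
        self._mutex.release()

The patch achieves the same effect with CoMutex and CoQueue inside
coroutines; fairness comes from counting pending writers so that new readers
start waiting as soon as a writer is queued.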