Old cover letter:

The following changes since commit 508ba0f7e2092d3ca56e3f75e894d52d8b94818e:

  Merge remote-tracking branch 'remotes/cohuck/tags/s390x-20171109' into staging (2017-11-13 11:41:47 +0000)

are available in the git repository at:

  git://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 0761562687e0d8135310a94b1d3e08376387c027:

  qemu-iotests: Test I/O limits with removable media (2017-11-13 15:46:26 +0000)

----------------------------------------------------------------
Pull request

The following disk I/O throttling fixes solve recent bugs.

----------------------------------------------------------------

Alberto Garcia (3):
  block: Check for inserted BlockDriverState in blk_io_limits_disable()
  block: Leave valid throttle timers when removing a BDS from a backend
  qemu-iotests: Test I/O limits with removable media

Stefan Hajnoczi (1):
  throttle-groups: drain before detaching ThrottleState

Zhengui (1):
  block: all I/O should be completed before removing throttle timers.

 block/block-backend.c      | 36 ++++++++++++++++++---------
 block/throttle-groups.c    |  6 +++++
 tests/qemu-iotests/093     | 62 ++++++++++++++++++++++++++++++++++++++++++++++
 tests/qemu-iotests/093.out |  4 +--
 4 files changed, 94 insertions(+), 14 deletions(-)

--
2.13.6

New cover letter:

The following changes since commit 887cba855bb6ff4775256f7968409281350b568c:

  configure: Fix cross-building for RISCV host (v5) (2023-07-11 17:56:09 +0100)

are available in the Git repository at:

  https://gitlab.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 75dcb4d790bbe5327169fd72b185960ca58e2fa6:

  virtio-blk: fix host notifier issues during dataplane start/stop (2023-07-12 15:20:32 -0400)

----------------------------------------------------------------
Pull request

----------------------------------------------------------------

Stefan Hajnoczi (1):
  virtio-blk: fix host notifier issues during dataplane start/stop

 hw/block/dataplane/virtio-blk.c | 67 +++++++++++++++++++--------------
 1 file changed, 38 insertions(+), 29 deletions(-)

--
2.40.1
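As a rough illustration of how such a pull request is consumed (this is not part of either cover letter, and the checkout target is only an example), a maintainer would fetch and merge the signed tag along these lines:

  $ git checkout master
  $ git pull https://gitlab.com/stefanha/qemu.git tags/block-pull-request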
Deleted patch

From: Zhengui <lizhengui@huawei.com>

In blk_remove_bs(), all I/O should be completed before removing the
throttle timers. If there is in-flight I/O, removing the throttle
timers here will cause that I/O to never return.
This patch adds bdrv_drained_begin() before
throttle_timers_detach_aio_context() so that all I/O completes before
the throttle timers are removed.

[Moved declaration of bs as suggested by Alberto Garcia
<berto@igalia.com>.
--Stefan]

Signed-off-by: Zhengui <lizhengui@huawei.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Alberto Garcia <berto@igalia.com>
Message-id: 1508564040-120700-1-git-send-email-lizhengui@huawei.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/block-backend.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/block/block-backend.c b/block/block-backend.c
index XXXXXXX..XXXXXXX 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -XXX,XX +XXX,XX @@ BlockBackend *blk_by_public(BlockBackendPublic *public)
  */
 void blk_remove_bs(BlockBackend *blk)
 {
+    BlockDriverState *bs;
     ThrottleTimers *tt;

     notifier_list_notify(&blk->remove_bs_notifiers, blk);
     if (blk->public.throttle_group_member.throttle_state) {
         tt = &blk->public.throttle_group_member.throttle_timers;
+        bs = blk_bs(blk);
+        bdrv_drained_begin(bs);
         throttle_timers_detach_aio_context(tt);
+        bdrv_drained_end(bs);
     }

     blk_update_root_state(blk);
--
2.13.6
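For readers unfamiliar with the QEMU block layer, this fix (and the following patches in the series) relies on the drained-section pattern: bdrv_drained_begin() waits for in-flight requests on the BlockDriverState to complete and holds off new ones until bdrv_drained_end(). A minimal sketch of the idea, where detach_throttle_timers() is a hypothetical stand-in rather than the actual QEMU helper:

  /* Sketch of the drained-section pattern, not the literal QEMU code. */
  static void remove_throttling_safely(BlockDriverState *bs)
  {
      bdrv_drained_begin(bs);      /* wait for in-flight I/O to complete */
      detach_throttle_timers(bs);  /* no request can arm a throttle timer now */
      bdrv_drained_end(bs);        /* resume normal request processing */
  }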
Deleted patch

I/O requests hang after stop/cont commands at least since QEMU 2.10.0
with -drive iops=100:

  (guest)$ dd if=/dev/zero of=/dev/vdb oflag=direct count=1000
  (qemu) stop
  (qemu) cont
  ...I/O is stuck...

This happens because blk_set_aio_context() detaches the ThrottleState
while requests may still be in flight:

  if (tgm->throttle_state) {
      throttle_group_detach_aio_context(tgm);
      throttle_group_attach_aio_context(tgm, new_context);
  }

This patch encloses the detach/attach calls in a drained region so no
I/O request is left hanging. Also add assertions so we don't make the
same mistake again in the future.

Reported-by: Yongxue Hong <yhong@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Alberto Garcia <berto@igalia.com>
Message-id: 20171110151934.16883-1-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/block-backend.c   | 2 ++
 block/throttle-groups.c | 6 ++++++
 2 files changed, 8 insertions(+)

diff --git a/block/block-backend.c b/block/block-backend.c
index XXXXXXX..XXXXXXX 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -XXX,XX +XXX,XX @@ void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)

     if (bs) {
         if (tgm->throttle_state) {
+            bdrv_drained_begin(bs);
             throttle_group_detach_aio_context(tgm);
             throttle_group_attach_aio_context(tgm, new_context);
+            bdrv_drained_end(bs);
         }
         bdrv_set_aio_context(bs, new_context);
     }
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
index XXXXXXX..XXXXXXX 100644
--- a/block/throttle-groups.c
+++ b/block/throttle-groups.c
@@ -XXX,XX +XXX,XX @@ void throttle_group_attach_aio_context(ThrottleGroupMember *tgm,
 void throttle_group_detach_aio_context(ThrottleGroupMember *tgm)
 {
     ThrottleTimers *tt = &tgm->throttle_timers;
+
+    /* Requests must have been drained */
+    assert(tgm->pending_reqs[0] == 0 && tgm->pending_reqs[1] == 0);
+    assert(qemu_co_queue_empty(&tgm->throttled_reqs[0]));
+    assert(qemu_co_queue_empty(&tgm->throttled_reqs[1]));
+
     throttle_timers_detach_aio_context(tt);
     tgm->aio_context = NULL;
 }
--
2.13.6
Deleted patch

From: Alberto Garcia <berto@igalia.com>

When you set I/O limits using block_set_io_throttle or the command
line throttling.* options, they are kept in the BlockBackend regardless
of whether a BlockDriverState is attached to the backend or not.

Therefore, when removing the limits using blk_io_limits_disable() we
need to check if there's a BDS before attempting to drain it, else it
will crash QEMU. This can be reproduced very easily using HMP:

  (qemu) drive_add 0 if=none,throttling.iops-total=5000
  (qemu) drive_del none0

Reported-by: sochin jiang <sochin.jiang@huawei.com>
Signed-off-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 0d3a67ce8d948bb33e08672564714dcfb76a3d8c.1510339534.git.berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/block-backend.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/block/block-backend.c b/block/block-backend.c
index XXXXXXX..XXXXXXX 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -XXX,XX +XXX,XX @@ void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg)

 void blk_io_limits_disable(BlockBackend *blk)
 {
-    assert(blk->public.throttle_group_member.throttle_state);
-    bdrv_drained_begin(blk_bs(blk));
-    throttle_group_unregister_tgm(&blk->public.throttle_group_member);
-    bdrv_drained_end(blk_bs(blk));
+    BlockDriverState *bs = blk_bs(blk);
+    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
+    assert(tgm->throttle_state);
+    if (bs) {
+        bdrv_drained_begin(bs);
+    }
+    throttle_group_unregister_tgm(tgm);
+    if (bs) {
+        bdrv_drained_end(bs);
+    }
 }

 /* should be called before blk_set_io_limits if a limit is set */
--
2.13.6
Deleted patch

From: Alberto Garcia <berto@igalia.com>

If a BlockBackend has I/O limits set then its ThrottleGroupMember
structure uses the AioContext from its attached BlockDriverState.
Those two contexts must be kept in sync manually. This is not
ideal and will be fixed in the future by removing the throttling
configuration from the BlockBackend and storing it in an implicit
filter node instead, but for now we have to live with this.

When you remove the BlockDriverState from the backend then the
throttle timers are destroyed. If a new BlockDriverState is later
inserted then they are created again using the new AioContext.

There are a couple of problems with this:

   a) The code manipulates the timers directly, leaving the
      ThrottleGroupMember.aio_context field in an inconsistent state.

   b) If you remove the I/O limits (e.g. by destroying the backend)
      when the timers are gone then throttle_group_unregister_tgm()
      will attempt to destroy them again, crashing QEMU.

While b) could be fixed easily by allowing the timers to be freed
twice, this would result in a situation in which we can no longer
guarantee that a valid ThrottleState has a valid AioContext and
timers.

This patch ensures that the timers and AioContext are always valid
when I/O limits are set, regardless of whether the BlockBackend has a
BlockDriverState inserted or not.

[Fixed "There'a" typo as suggested by Max Reitz <mreitz@redhat.com>
--Stefan]

Reported-by: sochin jiang <sochin.jiang@huawei.com>
Signed-off-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: e089c66e7c20289b046d782cea4373b765c5bc1d.1510339534.git.berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/block-backend.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/block/block-backend.c b/block/block-backend.c
index XXXXXXX..XXXXXXX 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -XXX,XX +XXX,XX @@ BlockBackend *blk_by_public(BlockBackendPublic *public)
  */
 void blk_remove_bs(BlockBackend *blk)
 {
+    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
     BlockDriverState *bs;
-    ThrottleTimers *tt;

     notifier_list_notify(&blk->remove_bs_notifiers, blk);
-    if (blk->public.throttle_group_member.throttle_state) {
-        tt = &blk->public.throttle_group_member.throttle_timers;
+    if (tgm->throttle_state) {
         bs = blk_bs(blk);
         bdrv_drained_begin(bs);
-        throttle_timers_detach_aio_context(tt);
+        throttle_group_detach_aio_context(tgm);
+        throttle_group_attach_aio_context(tgm, qemu_get_aio_context());
         bdrv_drained_end(bs);
     }

@@ -XXX,XX +XXX,XX @@ void blk_remove_bs(BlockBackend *blk)
  */
 int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
 {
+    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
     blk->root = bdrv_root_attach_child(bs, "root", &child_root,
                                        blk->perm, blk->shared_perm, blk, errp);
     if (blk->root == NULL) {
@@ -XXX,XX +XXX,XX @@ int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
     bdrv_ref(bs);

     notifier_list_notify(&blk->insert_bs_notifiers, blk);
-    if (blk->public.throttle_group_member.throttle_state) {
-        throttle_timers_attach_aio_context(
-            &blk->public.throttle_group_member.throttle_timers,
-            bdrv_get_aio_context(bs));
+    if (tgm->throttle_state) {
+        throttle_group_detach_aio_context(tgm);
+        throttle_group_attach_aio_context(tgm, bdrv_get_aio_context(bs));
     }

     return 0;
--
2.13.6

New patch

The main loop thread can consume 100% CPU when using --device
virtio-blk-pci,iothread=<iothread>. ppoll() constantly returns but
reading virtqueue host notifiers fails with EAGAIN. The file descriptors
are stale and remain registered with the AioContext because of bugs in
the virtio-blk dataplane start/stop code.

The problem is that the dataplane start/stop code involves drain
operations, which call virtio_blk_drained_begin() and
virtio_blk_drained_end() at points where the host notifier is not
operational:
- In virtio_blk_data_plane_start(), blk_set_aio_context() drains after
  vblk->dataplane_started has been set to true but the host notifier has
  not been attached yet.
- In virtio_blk_data_plane_stop(), blk_drain() and blk_set_aio_context()
  drain after the host notifier has already been detached but with
  vblk->dataplane_started still set to true.

I would like to simplify ->ioeventfd_start/stop() to avoid interactions
with drain entirely, but couldn't find a way to do that. Instead, this
patch accepts the fragile nature of the code and reorders it so that
vblk->dataplane_started is false during drain operations. This way the
virtio_blk_drained_begin() and virtio_blk_drained_end() calls don't
touch the host notifier. The result is that
virtio_blk_data_plane_start() and virtio_blk_data_plane_stop() have
complete control over the host notifier and stale file descriptors are
no longer left in the AioContext.

This patch fixes the 100% CPU consumption in the main loop thread and
correctly moves host notifier processing to the IOThread.

Fixes: 1665d9326fd2 ("virtio-blk: implement BlockDevOps->drained_begin()")
Reported-by: Lukáš Doktor <ldoktor@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Tested-by: Lukas Doktor <ldoktor@redhat.com>
Message-id: 20230704151527.193586-1-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 hw/block/dataplane/virtio-blk.c | 67 +++++++++++++++++++--------------
 1 file changed, 38 insertions(+), 29 deletions(-)

diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -XXX,XX +XXX,XX @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)

     memory_region_transaction_commit();

-    /*
-     * These fields are visible to the IOThread so we rely on implicit barriers
-     * in aio_context_acquire() on the write side and aio_notify_accept() on
-     * the read side.
-     */
-    s->starting = false;
-    vblk->dataplane_started = true;
     trace_virtio_blk_data_plane_start(s);

     old_context = blk_get_aio_context(s->conf->conf.blk);
@@ -XXX,XX +XXX,XX @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
         event_notifier_set(virtio_queue_get_host_notifier(vq));
     }

+    /*
+     * These fields must be visible to the IOThread when it processes the
+     * virtqueue, otherwise it will think dataplane has not started yet.
+     *
+     * Make sure ->dataplane_started is false when blk_set_aio_context() is
+     * called above so that draining does not cause the host notifier to be
+     * detached/attached prematurely.
+     */
+    s->starting = false;
+    vblk->dataplane_started = true;
+    smp_wmb(); /* paired with aio_notify_accept() on the read side */
+
     /* Get this show started by hooking up our callbacks */
     if (!blk_in_drain(s->conf->conf.blk)) {
         aio_context_acquire(s->ctx);
@@ -XXX,XX +XXX,XX @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
   fail_guest_notifiers:
     vblk->dataplane_disabled = true;
     s->starting = false;
-    vblk->dataplane_started = true;
     return -ENOSYS;
 }

@@ -XXX,XX +XXX,XX @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev)
         aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s);
     }

+    /*
+     * Batch all the host notifiers in a single transaction to avoid
+     * quadratic time complexity in address_space_update_ioeventfds().
+     */
+    memory_region_transaction_begin();
+
+    for (i = 0; i < nvqs; i++) {
+        virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
+    }
+
+    /*
+     * The transaction expects the ioeventfds to be open when it
+     * commits. Do it now, before the cleanup loop.
+     */
+    memory_region_transaction_commit();
+
+    for (i = 0; i < nvqs; i++) {
+        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
+    }
+
+    /*
+     * Set ->dataplane_started to false before draining so that host notifiers
+     * are not detached/attached anymore.
+     */
+    vblk->dataplane_started = false;
+
     aio_context_acquire(s->ctx);

     /* Wait for virtio_blk_dma_restart_bh() and in flight I/O to complete */
@@ -XXX,XX +XXX,XX @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev)

     aio_context_release(s->ctx);

-    /*
-     * Batch all the host notifiers in a single transaction to avoid
-     * quadratic time complexity in address_space_update_ioeventfds().
-     */
-    memory_region_transaction_begin();
-
-    for (i = 0; i < nvqs; i++) {
-        virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
-    }
-
-    /*
-     * The transaction expects the ioeventfds to be open when it
-     * commits. Do it now, before the cleanup loop.
-     */
-    memory_region_transaction_commit();
-
-    for (i = 0; i < nvqs; i++) {
-        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
-    }
-
     qemu_bh_cancel(s->bh);
     notify_guest_bh(s); /* final chance to notify guest */

     /* Clean up guest notifier (irq) */
     k->set_guest_notifiers(qbus->parent, nvqs, false);

-    vblk->dataplane_started = false;
     s->stopping = false;
 }
--
2.40.1
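For context, the configuration that triggers this bug is a virtio-blk device whose dataplane runs in an IOThread. A command line along these lines reproduces the setup described in the commit message (the image path and format are placeholders, not taken from the original report):

  $ qemu-system-x86_64 \
      -object iothread,id=iothread0 \
      -drive file=disk.img,format=raw,if=none,id=drive0 \
      -device virtio-blk-pci,drive=drive0,iothread=iothread0

With the old ordering, a drain during dataplane start/stop could leave stale host notifier file descriptors registered with the main loop AioContext, producing the 100% CPU spin described above.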
Deleted patch

From: Alberto Garcia <berto@igalia.com>

This test hotplugs a CD drive to a VM and checks that I/O limits can
be set only when the drive has media inserted and that they are kept
when the media is replaced.

This also tests the removal of a device with valid I/O limits set but
no media inserted. This involves deleting and disabling the limits
of a BlockBackend without a BlockDriverState, a scenario that used to
crash until the fixes from the last couple of patches.

[Python PEP8 fixup: "Don't use spaces around the = sign when used to
indicate a keyword argument or a default parameter value"
--Stefan]

Signed-off-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 071eb397118ed207c5a7f01d58766e415ee18d6a.1510339534.git.berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 tests/qemu-iotests/093     | 62 ++++++++++++++++++++++++++++++++++++++++++++++
 tests/qemu-iotests/093.out |  4 +--
 2 files changed, 64 insertions(+), 2 deletions(-)

diff --git a/tests/qemu-iotests/093 b/tests/qemu-iotests/093
index XXXXXXX..XXXXXXX 100755
--- a/tests/qemu-iotests/093
+++ b/tests/qemu-iotests/093
@@ -XXX,XX +XXX,XX @@ class ThrottleTestGroupNames(iotests.QMPTestCase):
             groupname = "group%d" % i
             self.verify_name(devname, groupname)

+class ThrottleTestRemovableMedia(iotests.QMPTestCase):
+    def setUp(self):
+        self.vm = iotests.VM()
+        if iotests.qemu_default_machine == 's390-ccw-virtio':
+            self.vm.add_device("virtio-scsi-ccw,id=virtio-scsi")
+        else:
+            self.vm.add_device("virtio-scsi-pci,id=virtio-scsi")
+        self.vm.launch()
+
+    def tearDown(self):
+        self.vm.shutdown()
+
+    def test_removable_media(self):
+        # Add a couple of dummy nodes named cd0 and cd1
+        result = self.vm.qmp("blockdev-add", driver="null-aio",
+                             node_name="cd0")
+        self.assert_qmp(result, 'return', {})
+        result = self.vm.qmp("blockdev-add", driver="null-aio",
+                             node_name="cd1")
+        self.assert_qmp(result, 'return', {})
+
+        # Attach a CD drive with cd0 inserted
+        result = self.vm.qmp("device_add", driver="scsi-cd",
+                             id="dev0", drive="cd0")
+        self.assert_qmp(result, 'return', {})
+
+        # Set I/O limits
+        args = { "id": "dev0", "iops": 100, "iops_rd": 0, "iops_wr": 0,
+                 "bps": 50, "bps_rd": 0, "bps_wr": 0 }
+        result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **args)
+        self.assert_qmp(result, 'return', {})
+
+        # Check that the I/O limits have been set
+        result = self.vm.qmp("query-block")
+        self.assert_qmp(result, 'return[0]/inserted/iops', 100)
+        self.assert_qmp(result, 'return[0]/inserted/bps', 50)
+
+        # Now eject cd0 and insert cd1
+        result = self.vm.qmp("blockdev-open-tray", id='dev0')
+        self.assert_qmp(result, 'return', {})
+        result = self.vm.qmp("x-blockdev-remove-medium", id='dev0')
+        self.assert_qmp(result, 'return', {})
+        result = self.vm.qmp("x-blockdev-insert-medium", id='dev0', node_name='cd1')
+        self.assert_qmp(result, 'return', {})
+
+        # Check that the I/O limits are still the same
+        result = self.vm.qmp("query-block")
+        self.assert_qmp(result, 'return[0]/inserted/iops', 100)
+        self.assert_qmp(result, 'return[0]/inserted/bps', 50)
+
+        # Eject cd1
+        result = self.vm.qmp("x-blockdev-remove-medium", id='dev0')
+        self.assert_qmp(result, 'return', {})
+
+        # Check that we can't set limits if the device has no medium
+        result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **args)
+        self.assert_qmp(result, 'error/class', 'GenericError')
+
+        # Remove the CD drive
+        result = self.vm.qmp("device_del", id='dev0')
+        self.assert_qmp(result, 'return', {})
+

 if __name__ == '__main__':
     iotests.main(supported_fmts=["raw"])
diff --git a/tests/qemu-iotests/093.out b/tests/qemu-iotests/093.out
index XXXXXXX..XXXXXXX 100644
--- a/tests/qemu-iotests/093.out
+++ b/tests/qemu-iotests/093.out
@@ -XXX,XX +XXX,XX @@
-.......
+........
 ----------------------------------------------------------------------
-Ran 7 tests
+Ran 8 tests

 OK
--
2.13.6
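Assuming the usual qemu-iotests workflow (not spelled out in the patch itself), the new case runs as part of test 093 with the raw format declared in supported_fmts and can be exercised with, for example:

  $ cd tests/qemu-iotests
  $ ./check -raw 093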