The following changes since commit 508ba0f7e2092d3ca56e3f75e894d52d8b94818e:

  Merge remote-tracking branch 'remotes/cohuck/tags/s390x-20171109' into staging (2017-11-13 11:41:47 +0000)

are available in the git repository at:

  git://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 0761562687e0d8135310a94b1d3e08376387c027:

  qemu-iotests: Test I/O limits with removable media (2017-11-13 15:46:26 +0000)

----------------------------------------------------------------
Pull request

The following disk I/O throttling fixes solve recent bugs.

----------------------------------------------------------------

Alberto Garcia (3):
  block: Check for inserted BlockDriverState in blk_io_limits_disable()
  block: Leave valid throttle timers when removing a BDS from a backend
  qemu-iotests: Test I/O limits with removable media

Stefan Hajnoczi (1):
  throttle-groups: drain before detaching ThrottleState

Zhengui (1):
  block: all I/O should be completed before removing throttle timers.

 block/block-backend.c      | 36 ++++++++++++++++++---------
 block/throttle-groups.c    |  6 +++++
 tests/qemu-iotests/093     | 62 ++++++++++++++++++++++++++++++++++++++++++++++
 tests/qemu-iotests/093.out |  4 +--
 4 files changed, 94 insertions(+), 14 deletions(-)

--
2.13.6
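As a sketch of how a pull request like this is typically fetched and
reviewed locally (using the commit hashes from the header above; any
checkout of qemu.git will do):

  $ git fetch git://github.com/stefanha/qemu.git tags/block-pull-request
  $ git log --oneline 508ba0f7e209..FETCH_HEAD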
From: Zhengui <lizhengui@huawei.com>

In blk_remove_bs(), all I/O should be completed before removing the
throttle timers. If there is in-flight I/O, removing the throttle
timers here will cause that I/O to never return.
This patch adds bdrv_drained_begin() before
throttle_timers_detach_aio_context() to let all I/O complete before the
throttle timers are removed.

[Moved declaration of bs as suggested by Alberto Garcia
<berto@igalia.com>.
--Stefan]

Signed-off-by: Zhengui <lizhengui@huawei.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Alberto Garcia <berto@igalia.com>
Message-id: 1508564040-120700-1-git-send-email-lizhengui@huawei.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/block-backend.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/block/block-backend.c b/block/block-backend.c
index XXXXXXX..XXXXXXX 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -XXX,XX +XXX,XX @@ BlockBackend *blk_by_public(BlockBackendPublic *public)
  */
 void blk_remove_bs(BlockBackend *blk)
 {
+    BlockDriverState *bs;
     ThrottleTimers *tt;

     notifier_list_notify(&blk->remove_bs_notifiers, blk);
     if (blk->public.throttle_group_member.throttle_state) {
         tt = &blk->public.throttle_group_member.throttle_timers;
+        bs = blk_bs(blk);
+        bdrv_drained_begin(bs);
         throttle_timers_detach_aio_context(tt);
+        bdrv_drained_end(bs);
     }

     blk_update_root_state(blk);
--
2.13.6
I/O requests hang after stop/cont commands at least since QEMU 2.10.0
with -drive iops=100:

  (guest)$ dd if=/dev/zero of=/dev/vdb oflag=direct count=1000
  (qemu) stop
  (qemu) cont
  ...I/O is stuck...
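As a sketch, a full reproducer command line might look like this (the
image files and virtio devices are illustrative assumptions; the
essential part is the iops=100 throttling option on the data drive):

  $ qemu-system-x86_64 -m 1G \
      -drive file=root.img,if=virtio \
      -drive file=data.img,if=virtio,iops=100 \
      -monitor stdio

The dd command above is then run against the throttled disk inside the
guest while stop/cont are issued at the (qemu) monitor prompt.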

This happens because blk_set_aio_context() detaches the ThrottleState
while requests may still be in flight:

  if (tgm->throttle_state) {
      throttle_group_detach_aio_context(tgm);
      throttle_group_attach_aio_context(tgm, new_context);
  }

This patch encloses the detach/attach calls in a drained region so no
I/O request is left hanging. Also add assertions so we don't make the
same mistake again in the future.

Reported-by: Yongxue Hong <yhong@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Alberto Garcia <berto@igalia.com>
Message-id: 20171110151934.16883-1-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/block-backend.c   | 2 ++
 block/throttle-groups.c | 6 ++++++
 2 files changed, 8 insertions(+)

diff --git a/block/block-backend.c b/block/block-backend.c
index XXXXXXX..XXXXXXX 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -XXX,XX +XXX,XX @@ void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)

     if (bs) {
         if (tgm->throttle_state) {
+            bdrv_drained_begin(bs);
             throttle_group_detach_aio_context(tgm);
             throttle_group_attach_aio_context(tgm, new_context);
+            bdrv_drained_end(bs);
         }
         bdrv_set_aio_context(bs, new_context);
     }
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
index XXXXXXX..XXXXXXX 100644
--- a/block/throttle-groups.c
+++ b/block/throttle-groups.c
@@ -XXX,XX +XXX,XX @@ void throttle_group_attach_aio_context(ThrottleGroupMember *tgm,
 void throttle_group_detach_aio_context(ThrottleGroupMember *tgm)
 {
     ThrottleTimers *tt = &tgm->throttle_timers;
+
+    /* Requests must have been drained */
+    assert(tgm->pending_reqs[0] == 0 && tgm->pending_reqs[1] == 0);
+    assert(qemu_co_queue_empty(&tgm->throttled_reqs[0]));
+    assert(qemu_co_queue_empty(&tgm->throttled_reqs[1]));
+
     throttle_timers_detach_aio_context(tt);
     tgm->aio_context = NULL;
 }
--
2.13.6
From: Alberto Garcia <berto@igalia.com>

When you set I/O limits using block_set_io_throttle or the command
line throttling.* options they are kept in the BlockBackend regardless
of whether a BlockDriverState is attached to the backend or not.

Therefore when removing the limits using blk_io_limits_disable() we
need to check if there's a BDS before attempting to drain it, else it
will crash QEMU. This can be reproduced very easily using HMP:

  (qemu) drive_add 0 if=none,throttling.iops-total=5000
  (qemu) drive_del none0
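For completeness, one convenient way to get to that HMP prompt is a
bare QEMU invocation (a sketch; no guest image is needed, since the
crash happens before any I/O is issued):

  $ qemu-system-x86_64 -nodefaults -display none -monitor stdio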

Reported-by: sochin jiang <sochin.jiang@huawei.com>
Signed-off-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 0d3a67ce8d948bb33e08672564714dcfb76a3d8c.1510339534.git.berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/block-backend.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/block/block-backend.c b/block/block-backend.c
index XXXXXXX..XXXXXXX 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -XXX,XX +XXX,XX @@ void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg)

 void blk_io_limits_disable(BlockBackend *blk)
 {
-    assert(blk->public.throttle_group_member.throttle_state);
-    bdrv_drained_begin(blk_bs(blk));
-    throttle_group_unregister_tgm(&blk->public.throttle_group_member);
-    bdrv_drained_end(blk_bs(blk));
+    BlockDriverState *bs = blk_bs(blk);
+    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
+    assert(tgm->throttle_state);
+    if (bs) {
+        bdrv_drained_begin(bs);
+    }
+    throttle_group_unregister_tgm(tgm);
+    if (bs) {
+        bdrv_drained_end(bs);
+    }
 }

 /* should be called before blk_set_io_limits if a limit is set */
--
2.13.6
From: Alberto Garcia <berto@igalia.com>

If a BlockBackend has I/O limits set then its ThrottleGroupMember
structure uses the AioContext from its attached BlockDriverState.
Those two contexts must be kept in sync manually. This is not
ideal and will be fixed in the future by removing the throttling
configuration from the BlockBackend and storing it in an implicit
filter node instead, but for now we have to live with this.

When you remove the BlockDriverState from the backend then the
throttle timers are destroyed. If a new BlockDriverState is later
inserted then they are created again using the new AioContext.

There are a couple of problems with this:

   a) The code manipulates the timers directly, leaving the
      ThrottleGroupMember.aio_context field in an inconsistent state.

   b) If you remove the I/O limits (e.g. by destroying the backend)
      when the timers are gone then throttle_group_unregister_tgm()
      will attempt to destroy them again, crashing QEMU.

While b) could be fixed easily by allowing the timers to be freed
twice, this would result in a situation in which we can no longer
guarantee that a valid ThrottleState has a valid AioContext and
timers.

This patch ensures that the timers and AioContext are always valid
when I/O limits are set, regardless of whether the BlockBackend has a
BlockDriverState inserted or not.

[Fixed "There'a" typo as suggested by Max Reitz <mreitz@redhat.com>
--Stefan]

Reported-by: sochin jiang <sochin.jiang@huawei.com>
Signed-off-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: e089c66e7c20289b046d782cea4373b765c5bc1d.1510339534.git.berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/block-backend.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/block/block-backend.c b/block/block-backend.c
index XXXXXXX..XXXXXXX 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -XXX,XX +XXX,XX @@ BlockBackend *blk_by_public(BlockBackendPublic *public)
  */
 void blk_remove_bs(BlockBackend *blk)
 {
+    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
     BlockDriverState *bs;
-    ThrottleTimers *tt;

     notifier_list_notify(&blk->remove_bs_notifiers, blk);
-    if (blk->public.throttle_group_member.throttle_state) {
-        tt = &blk->public.throttle_group_member.throttle_timers;
+    if (tgm->throttle_state) {
         bs = blk_bs(blk);
         bdrv_drained_begin(bs);
-        throttle_timers_detach_aio_context(tt);
+        throttle_group_detach_aio_context(tgm);
+        throttle_group_attach_aio_context(tgm, qemu_get_aio_context());
         bdrv_drained_end(bs);
     }

@@ -XXX,XX +XXX,XX @@ void blk_remove_bs(BlockBackend *blk)
  */
 int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
 {
+    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
     blk->root = bdrv_root_attach_child(bs, "root", &child_root,
                                        blk->perm, blk->shared_perm, blk, errp);
     if (blk->root == NULL) {
@@ -XXX,XX +XXX,XX @@ int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
     bdrv_ref(bs);

     notifier_list_notify(&blk->insert_bs_notifiers, blk);
-    if (blk->public.throttle_group_member.throttle_state) {
-        throttle_timers_attach_aio_context(
-            &blk->public.throttle_group_member.throttle_timers,
-            bdrv_get_aio_context(bs));
+    if (tgm->throttle_state) {
+        throttle_group_detach_aio_context(tgm);
+        throttle_group_attach_aio_context(tgm, bdrv_get_aio_context(bs));
     }

     return 0;
--
2.13.6
From: Alberto Garcia <berto@igalia.com>

This test hotplugs a CD drive to a VM and checks that I/O limits can
be set only when the drive has media inserted and that they are kept
when the media is replaced.

This also tests the removal of a device with valid I/O limits set but
no media inserted. This involves deleting and disabling the limits
of a BlockBackend without a BlockDriverState, a scenario that was
crashing until the fixes from the last couple of patches.
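As a usage sketch, the updated test can be run on its own from a built
QEMU source tree with the usual iotests workflow (per its
supported_fmts below, it only supports the raw format):

  $ cd tests/qemu-iotests
  $ ./check -raw 093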

[Python PEP8 fixup: "Don't use spaces around the = sign when used to
indicate a keyword argument or a default parameter value"
--Stefan]

Signed-off-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 071eb397118ed207c5a7f01d58766e415ee18d6a.1510339534.git.berto@igalia.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 tests/qemu-iotests/093     | 62 ++++++++++++++++++++++++++++++++++++++++++++++
 tests/qemu-iotests/093.out |  4 +--
 2 files changed, 64 insertions(+), 2 deletions(-)

diff --git a/tests/qemu-iotests/093 b/tests/qemu-iotests/093
index XXXXXXX..XXXXXXX 100755
--- a/tests/qemu-iotests/093
+++ b/tests/qemu-iotests/093
@@ -XXX,XX +XXX,XX @@ class ThrottleTestGroupNames(iotests.QMPTestCase):
             groupname = "group%d" % i
             self.verify_name(devname, groupname)

+class ThrottleTestRemovableMedia(iotests.QMPTestCase):
+    def setUp(self):
+        self.vm = iotests.VM()
+        if iotests.qemu_default_machine == 's390-ccw-virtio':
+            self.vm.add_device("virtio-scsi-ccw,id=virtio-scsi")
+        else:
+            self.vm.add_device("virtio-scsi-pci,id=virtio-scsi")
+        self.vm.launch()
+
+    def tearDown(self):
+        self.vm.shutdown()
+
+    def test_removable_media(self):
+        # Add a couple of dummy nodes named cd0 and cd1
+        result = self.vm.qmp("blockdev-add", driver="null-aio",
+                             node_name="cd0")
+        self.assert_qmp(result, 'return', {})
+        result = self.vm.qmp("blockdev-add", driver="null-aio",
+                             node_name="cd1")
+        self.assert_qmp(result, 'return', {})
+
+        # Attach a CD drive with cd0 inserted
+        result = self.vm.qmp("device_add", driver="scsi-cd",
+                             id="dev0", drive="cd0")
+        self.assert_qmp(result, 'return', {})
+
+        # Set I/O limits
+        args = { "id": "dev0", "iops": 100, "iops_rd": 0, "iops_wr": 0,
+                 "bps": 50, "bps_rd": 0, "bps_wr": 0 }
+        result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **args)
+        self.assert_qmp(result, 'return', {})
+
+        # Check that the I/O limits have been set
+        result = self.vm.qmp("query-block")
+        self.assert_qmp(result, 'return[0]/inserted/iops', 100)
+        self.assert_qmp(result, 'return[0]/inserted/bps', 50)
+
+        # Now eject cd0 and insert cd1
+        result = self.vm.qmp("blockdev-open-tray", id='dev0')
+        self.assert_qmp(result, 'return', {})
+        result = self.vm.qmp("x-blockdev-remove-medium", id='dev0')
+        self.assert_qmp(result, 'return', {})
+        result = self.vm.qmp("x-blockdev-insert-medium", id='dev0', node_name='cd1')
+        self.assert_qmp(result, 'return', {})
+
+        # Check that the I/O limits are still the same
+        result = self.vm.qmp("query-block")
+        self.assert_qmp(result, 'return[0]/inserted/iops', 100)
+        self.assert_qmp(result, 'return[0]/inserted/bps', 50)
+
+        # Eject cd1
+        result = self.vm.qmp("x-blockdev-remove-medium", id='dev0')
+        self.assert_qmp(result, 'return', {})
+
+        # Check that we can't set limits if the device has no medium
+        result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **args)
+        self.assert_qmp(result, 'error/class', 'GenericError')
+
+        # Remove the CD drive
+        result = self.vm.qmp("device_del", id='dev0')
+        self.assert_qmp(result, 'return', {})
+

 if __name__ == '__main__':
     iotests.main(supported_fmts=["raw"])
diff --git a/tests/qemu-iotests/093.out b/tests/qemu-iotests/093.out
index XXXXXXX..XXXXXXX 100644
--- a/tests/qemu-iotests/093.out
+++ b/tests/qemu-iotests/093.out
@@ -XXX,XX +XXX,XX @@
-.......
+........
 ----------------------------------------------------------------------
-Ran 7 tests
+Ran 8 tests

 OK
--
2.13.6