The following changes since commit df34fe314b5da628bc9a2664fb1b887bc0a6cc6d:

  Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20190708' into staging (2019-07-08 14:23:32 +0100)

are available in the Git repository at:

  git://repo.or.cz/qemu/kevin.git tags/for-upstream

for you to fetch changes up to f7077c9860a438087c2d9a8cc27cb8438c98a748:

  qcow2: Allow -o compat=v3 during qemu-img amend (2019-07-08 16:00:31 +0200)

----------------------------------------------------------------
Block layer patches:

- virtio-scsi: Fix request resubmission after I/O error with iothreads
- qcow2: Fix missing v2/v3 subformat aliases for amend
- qcow(1): More specific error message for wrong format version
- MAINTAINERS: update RBD block maintainer

----------------------------------------------------------------
Eric Blake (1):
      qcow2: Allow -o compat=v3 during qemu-img amend

Jason Dillaman (1):
      MAINTAINERS: update RBD block maintainer

John Snow (1):
      block/qcow: Improve error when opening qcow2 files as qcow

Stefan Hajnoczi (3):
      vl: add qemu_add_vm_change_state_handler_prio()
      qdev: add qdev_add_vm_change_state_handler()
      virtio-scsi: restart DMA after iothread

 include/hw/qdev-core.h            |  5 ++++
 include/sysemu/sysemu.h           |  2 ++
 block/qcow.c                      |  7 ++++-
 block/qcow2.c                     |  6 ++--
 hw/core/vm-change-state-handler.c | 61 +++++++++++++++++++++++++++++++++++++++
 hw/scsi/scsi-bus.c                |  4 +--
 hw/virtio/virtio.c                |  4 +--
 vl.c                              | 59 +++++++++++++++++++++++++++++--------
 MAINTAINERS                       |  2 +-
 hw/core/Makefile.objs             |  1 +
 10 files changed, 130 insertions(+), 21 deletions(-)
 create mode 100644 hw/core/vm-change-state-handler.c

From: Stefan Hajnoczi <stefanha@redhat.com>

Add an API for registering vm change state handlers with a well-defined
ordering. This is necessary when handlers depend on each other.

Small coding style fixes are included to make checkpatch.pl happy.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/sysemu/sysemu.h |  2 ++
 vl.c                    | 59 ++++++++++++++++++++++++++++++--------
 2 files changed, 49 insertions(+), 12 deletions(-)

diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/sysemu/sysemu.h
+++ b/include/sysemu/sysemu.h
@@ -XXX,XX +XXX,XX @@ typedef void VMChangeStateHandler(void *opaque, int running, RunState state);

 VMChangeStateEntry *qemu_add_vm_change_state_handler(VMChangeStateHandler *cb,
                                                      void *opaque);
+VMChangeStateEntry *qemu_add_vm_change_state_handler_prio(
+        VMChangeStateHandler *cb, void *opaque, int priority);
 void qemu_del_vm_change_state_handler(VMChangeStateEntry *e);
 void vm_state_notify(int running, RunState state);

diff --git a/vl.c b/vl.c
index XXXXXXX..XXXXXXX 100644
--- a/vl.c
+++ b/vl.c
@@ -XXX,XX +XXX,XX @@ static int machine_help_func(QemuOpts *opts, MachineState *machine)
 struct vm_change_state_entry {
     VMChangeStateHandler *cb;
     void *opaque;
-    QLIST_ENTRY (vm_change_state_entry) entries;
+    QTAILQ_ENTRY(vm_change_state_entry) entries;
+    int priority;
 };

-static QLIST_HEAD(, vm_change_state_entry) vm_change_state_head;
+static QTAILQ_HEAD(, vm_change_state_entry) vm_change_state_head;

-VMChangeStateEntry *qemu_add_vm_change_state_handler(VMChangeStateHandler *cb,
-                                                     void *opaque)
+/**
+ * qemu_add_vm_change_state_handler_prio:
+ * @cb: the callback to invoke
+ * @opaque: user data passed to the callback
+ * @priority: low priorities execute first when the vm runs and the reverse is
+ *            true when the vm stops
+ *
+ * Register a callback function that is invoked when the vm starts or stops
+ * running.
+ *
+ * Returns: an entry to be freed using qemu_del_vm_change_state_handler()
+ */
+VMChangeStateEntry *qemu_add_vm_change_state_handler_prio(
+        VMChangeStateHandler *cb, void *opaque, int priority)
 {
     VMChangeStateEntry *e;
+    VMChangeStateEntry *other;

-    e = g_malloc0(sizeof (*e));
-
+    e = g_malloc0(sizeof(*e));
     e->cb = cb;
     e->opaque = opaque;
-    QLIST_INSERT_HEAD(&vm_change_state_head, e, entries);
+    e->priority = priority;
+
+    /* Keep list sorted in ascending priority order */
+    QTAILQ_FOREACH(other, &vm_change_state_head, entries) {
+        if (priority < other->priority) {
+            QTAILQ_INSERT_BEFORE(other, e, entries);
+            return e;
+        }
+    }
+
+    QTAILQ_INSERT_TAIL(&vm_change_state_head, e, entries);
     return e;
 }

+VMChangeStateEntry *qemu_add_vm_change_state_handler(VMChangeStateHandler *cb,
+                                                     void *opaque)
+{
+    return qemu_add_vm_change_state_handler_prio(cb, opaque, 0);
+}
+
 void qemu_del_vm_change_state_handler(VMChangeStateEntry *e)
 {
-    QLIST_REMOVE (e, entries);
-    g_free (e);
+    QTAILQ_REMOVE(&vm_change_state_head, e, entries);
+    g_free(e);
 }

 void vm_state_notify(int running, RunState state)
@@ -XXX,XX +XXX,XX @@ void vm_state_notify(int running, RunState state)

     trace_vm_state_notify(running, state, RunState_str(state));

-    QLIST_FOREACH_SAFE(e, &vm_change_state_head, entries, next) {
-        e->cb(e->opaque, running, state);
+    if (running) {
+        QTAILQ_FOREACH_SAFE(e, &vm_change_state_head, entries, next) {
+            e->cb(e->opaque, running, state);
+        }
+    } else {
+        QTAILQ_FOREACH_REVERSE_SAFE(e, &vm_change_state_head, entries, next) {
+            e->cb(e->opaque, running, state);
+        }
     }
 }

@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv, char **envp)
         exit(1);
     }

-    QLIST_INIT (&vm_change_state_head);
+    QTAILQ_INIT(&vm_change_state_head);
    os_setup_early_signal_handling();

    cpu_option = NULL;
-- 
2.20.1

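
The ordering contract above is easiest to see in isolation. The following is
a minimal, self-contained model in plain C -- not QEMU code -- of what
qemu_add_vm_change_state_handler_prio() guarantees: entries stay sorted by
ascending priority, are walked forward when the VM starts running, and
backward when it stops. The handler names and priority values are invented
for illustration.

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Handler {
        const char *name;
        int priority;
        struct Handler *next;
    } Handler;

    static Handler *head;

    /* Keep the list sorted in ascending priority order (stable for ties) */
    static void add_handler(const char *name, int priority)
    {
        Handler *e = calloc(1, sizeof(*e));
        Handler **p = &head;

        e->name = name;
        e->priority = priority;
        while (*p && (*p)->priority <= priority) {
            p = &(*p)->next;
        }
        e->next = *p;
        *p = e;
    }

    /* On stop, callbacks run in reverse (descending priority) order */
    static void notify_stop(Handler *e)
    {
        if (e) {
            notify_stop(e->next);
            printf("stop: %s (prio %d)\n", e->name, e->priority);
        }
    }

    int main(void)
    {
        add_handler("scsi-hd", 3);     /* registered first, but deeper in the tree */
        add_handler("virtio-scsi", 2); /* parent controller */

        /* On start, callbacks run in ascending priority order */
        for (Handler *e = head; e; e = e->next) {
            printf("run:  %s (prio %d)\n", e->name, e->priority);
        }
        notify_stop(head);
        return 0;
    }

Regardless of registration order, this prints the controller before the disk
on start and the reverse on stop -- the property the virtio-scsi fix later in
this series relies on.
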
From: Stefan Hajnoczi <stefanha@redhat.com>

Children sometimes depend on their parent's vm change state handler
having completed. Add a vm change state handler API for devices that
guarantees tree depth ordering.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/hw/qdev-core.h            |  5 +++
 hw/core/vm-change-state-handler.c | 61 +++++++++++++++++++++++++++++++
 hw/core/Makefile.objs             |  1 +
 3 files changed, 67 insertions(+)
 create mode 100644 hw/core/vm-change-state-handler.c

diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/qdev-core.h
+++ b/include/hw/qdev-core.h
@@ -XXX,XX +XXX,XX @@
 #include "qom/object.h"
 #include "hw/irq.h"
 #include "hw/hotplug.h"
+#include "sysemu/sysemu.h"

 enum {
     DEV_NVECTORS_UNSPECIFIED = -1,
@@ -XXX,XX +XXX,XX @@ static inline bool qbus_is_hotpluggable(BusState *bus)
 void device_listener_register(DeviceListener *listener);
 void device_listener_unregister(DeviceListener *listener);

+VMChangeStateEntry *qdev_add_vm_change_state_handler(DeviceState *dev,
+                                                     VMChangeStateHandler *cb,
+                                                     void *opaque);
+
 #endif
diff --git a/hw/core/vm-change-state-handler.c b/hw/core/vm-change-state-handler.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/hw/core/vm-change-state-handler.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * qdev vm change state handlers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/qdev.h"
+
+static int qdev_get_dev_tree_depth(DeviceState *dev)
+{
+    int depth;
+
+    for (depth = 0; dev; depth++) {
+        BusState *bus = dev->parent_bus;
+
+        if (!bus) {
+            break;
+        }
+
+        dev = bus->parent;
+    }
+
+    return depth;
+}
+
+/**
+ * qdev_add_vm_change_state_handler:
+ * @dev: the device that owns this handler
+ * @cb: the callback function to be invoked
+ * @opaque: user data passed to the callback function
+ *
+ * This function works like qemu_add_vm_change_state_handler() except callbacks
+ * are invoked in qdev tree depth order.  Ordering is desirable when callbacks
+ * of children depend on their parent's callback having completed first.
+ *
+ * For example, when qdev_add_vm_change_state_handler() is used, a host
+ * controller's callback is invoked before the children on its bus when the VM
+ * starts running.  The order is reversed when the VM stops running.
+ *
+ * Returns: an entry to be freed with qemu_del_vm_change_state_handler()
+ */
+VMChangeStateEntry *qdev_add_vm_change_state_handler(DeviceState *dev,
+                                                     VMChangeStateHandler *cb,
+                                                     void *opaque)
+{
+    int depth = qdev_get_dev_tree_depth(dev);
+
+    return qemu_add_vm_change_state_handler_prio(cb, opaque, depth);
+}
diff --git a/hw/core/Makefile.objs b/hw/core/Makefile.objs
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/Makefile.objs
+++ b/hw/core/Makefile.objs
@@ -XXX,XX +XXX,XX @@ common-obj-$(CONFIG_SOFTMMU) += fw-path-provider.o
 common-obj-y += irq.o
 common-obj-y += hotplug.o
 common-obj-$(CONFIG_SOFTMMU) += nmi.o
+common-obj-$(CONFIG_SOFTMMU) += vm-change-state-handler.o

 common-obj-$(CONFIG_EMPTY_SLOT) += empty_slot.o
 common-obj-$(CONFIG_XILINX_AXI) += stream.o
-- 
2.20.1

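
The depth rule is mechanical, so a toy version makes it concrete. This
self-contained sketch repeats the walk from qdev_get_dev_tree_depth() over
minimal stand-ins for DeviceState and BusState; the real QEMU structs carry
far more state, and the topology below is hypothetical.

    #include <stdio.h>

    typedef struct BusState BusState;

    /* Toy stand-ins: each device hangs off a parent bus, each bus off a
     * parent device; depth is the number of device levels up to the root. */
    typedef struct DeviceState {
        BusState *parent_bus;
    } DeviceState;

    struct BusState {
        DeviceState *parent;
    };

    static int dev_tree_depth(DeviceState *dev)
    {
        int depth;

        for (depth = 0; dev; depth++) {
            BusState *bus = dev->parent_bus;

            if (!bus) {
                break;
            }
            dev = bus->parent;
        }
        return depth;
    }

    int main(void)
    {
        /* root device <- bus <- controller <- bus <- disk */
        DeviceState root = { .parent_bus = NULL };
        BusState root_bus = { .parent = &root };
        DeviceState controller = { .parent_bus = &root_bus };
        BusState scsi_bus = { .parent = &controller };
        DeviceState disk = { .parent_bus = &scsi_bus };

        printf("controller depth: %d\n", dev_tree_depth(&controller)); /* 1 */
        printf("disk depth:       %d\n", dev_tree_depth(&disk));       /* 2 */
        return 0;
    }

A handler registered through qdev_add_vm_change_state_handler() therefore gets
priority 1 for the controller and 2 for the disk, so with the ordering from
the previous patch the controller's callback runs first when the VM starts
and last when it stops.
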
From: Stefan Hajnoczi <stefanha@redhat.com>

When the 'cont' command resumes guest execution the vm change state
handlers are invoked. Unfortunately there is no explicit ordering
between classic qemu_add_vm_change_state_handler() callbacks. When two
layers of code both use vm change state handlers, we don't control which
handler runs first.

virtio-scsi with iothreads hits a deadlock when a failed SCSI command is
restarted and completes before the iothread is re-initialized.

This patch uses the new qdev_add_vm_change_state_handler() API to
guarantee that virtio-scsi's virtio change state handler executes before
the SCSI bus children. This way DMA is restarted after the iothread has
re-initialized.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 hw/scsi/scsi-bus.c | 4 ++--
 hw/virtio/virtio.c | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/scsi/scsi-bus.c
+++ b/hw/scsi/scsi-bus.c
@@ -XXX,XX +XXX,XX @@ static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
         error_propagate(errp, local_err);
         return;
     }
-    dev->vmsentry = qemu_add_vm_change_state_handler(scsi_dma_restart_cb,
-                                                     dev);
+    dev->vmsentry = qdev_add_vm_change_state_handler(DEVICE(dev),
+                                                     scsi_dma_restart_cb, dev);
 }

 static void scsi_qdev_unrealize(DeviceState *qdev, Error **errp)
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -XXX,XX +XXX,XX @@ void virtio_init(VirtIODevice *vdev, const char *name,
     } else {
         vdev->config = NULL;
     }
-    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
-                                                     vdev);
+    vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev),
+                                                     virtio_vmstate_change, vdev);
     vdev->device_endian = virtio_default_endian();
     vdev->use_guest_notifier_mask = true;
 }
-- 
2.20.1

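
Concretely (device topology assumed for illustration): a virtio-scsi
controller at qdev tree depth N now registers its change state handler with
priority N, while a scsi-hd on the controller's SCSI bus sits one level
deeper and gets priority N+1. On 'cont' the priority-N callback runs first,
so the iothread is re-initialized before scsi_dma_restart_cb resubmits the
failed request; when the VM stops, the order is reversed.
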
From: John Snow <jsnow@redhat.com>

Reported-by: radmehrsaeed7@gmail.com
Fixes: https://bugs.launchpad.net/bugs/1832914
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/qcow.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/block/qcow.c b/block/qcow.c
index XXXXXXX..XXXXXXX 100644
--- a/block/qcow.c
+++ b/block/qcow.c
@@ -XXX,XX +XXX,XX @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
         goto fail;
     }
     if (header.version != QCOW_VERSION) {
-        error_setg(errp, "Unsupported qcow version %" PRIu32, header.version);
+        error_setg(errp, "qcow (v%d) does not support qcow version %" PRIu32,
+                   QCOW_VERSION, header.version);
+        if (header.version == 2 || header.version == 3) {
+            error_append_hint(errp, "Try the 'qcow2' driver instead.\n");
+        }
+
         ret = -ENOTSUP;
         goto fail;
     }
-- 
2.20.1

Deleted patch | |||
---|---|---|---|
1 | bdrv_do_drained_begin/end() already implement everything that | ||
2 | bdrv_drain_all_begin/end() need and currently still do manually: Disable | ||
3 | external events, call parent drain callbacks, call block driver | ||
4 | callbacks. | ||
5 | 1 | ||
6 | They also do two more things: | ||
7 | |||
8 | The first is incrementing bs->quiesce_counter. bdrv_drain_all() already | ||
9 | stood out in the test case by behaving differently from the other drain | ||
10 | variants. Adding this is not only safe, but in fact a bug fix. | ||
11 | |||
12 | The second is calling bdrv_drain_recurse(). We already do that later in | ||
13 | the same function in a loop, so doing an early first iteration | ||
14 | doesn't hurt. | ||
15 | |||
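For illustration, the per-node sequence that bdrv_do_drained_begin() covers
looks roughly like this (a sketch of the steps listed above, not the actual
code; note that the bdrv_drain_invoke() signature still changes later in
this series):

    static void drained_begin_sketch(BlockDriverState *bs)
    {
        if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
            /* Hold back new requests from external sources */
            aio_disable_external(bdrv_get_aio_context(bs));
        }
        bdrv_parent_drained_begin(bs, NULL);  /* parent drain callbacks */
        bdrv_drain_invoke(bs, true);          /* block driver callbacks */
    }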
16 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
17 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
18 | --- | ||
19 | block/io.c | 10 ++-------- | ||
20 | tests/test-bdrv-drain.c | 14 ++++---------- | ||
21 | 2 files changed, 6 insertions(+), 18 deletions(-) | ||
22 | |||
23 | diff --git a/block/io.c b/block/io.c | ||
24 | index XXXXXXX..XXXXXXX 100644 | ||
25 | --- a/block/io.c | ||
26 | +++ b/block/io.c | ||
27 | @@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_begin(void) | ||
28 | for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { | ||
29 | AioContext *aio_context = bdrv_get_aio_context(bs); | ||
30 | |||
31 | - /* Stop things in parent-to-child order */ | ||
32 | aio_context_acquire(aio_context); | ||
33 | - aio_disable_external(aio_context); | ||
34 | - bdrv_parent_drained_begin(bs, NULL); | ||
35 | - bdrv_drain_invoke(bs, true, true); | ||
36 | + bdrv_do_drained_begin(bs, true, NULL); | ||
37 | aio_context_release(aio_context); | ||
38 | |||
39 | if (!g_slist_find(aio_ctxs, aio_context)) { | ||
40 | @@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_end(void) | ||
41 | for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { | ||
42 | AioContext *aio_context = bdrv_get_aio_context(bs); | ||
43 | |||
44 | - /* Re-enable things in child-to-parent order */ | ||
45 | aio_context_acquire(aio_context); | ||
46 | - bdrv_drain_invoke(bs, false, true); | ||
47 | - bdrv_parent_drained_end(bs, NULL); | ||
48 | - aio_enable_external(aio_context); | ||
49 | + bdrv_do_drained_end(bs, true, NULL); | ||
50 | aio_context_release(aio_context); | ||
51 | } | ||
52 | } | ||
53 | diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c | ||
54 | index XXXXXXX..XXXXXXX 100644 | ||
55 | --- a/tests/test-bdrv-drain.c | ||
56 | +++ b/tests/test-bdrv-drain.c | ||
57 | @@ -XXX,XX +XXX,XX @@ static void test_quiesce_common(enum drain_type drain_type, bool recursive) | ||
58 | |||
59 | static void test_quiesce_drain_all(void) | ||
60 | { | ||
61 | - // XXX drain_all doesn't quiesce | ||
62 | - //test_quiesce_common(BDRV_DRAIN_ALL, true); | ||
63 | + test_quiesce_common(BDRV_DRAIN_ALL, true); | ||
64 | } | ||
65 | |||
66 | static void test_quiesce_drain(void) | ||
67 | @@ -XXX,XX +XXX,XX @@ static void test_nested(void) | ||
68 | |||
69 | for (outer = 0; outer < DRAIN_TYPE_MAX; outer++) { | ||
70 | for (inner = 0; inner < DRAIN_TYPE_MAX; inner++) { | ||
71 | - /* XXX bdrv_drain_all() doesn't increase the quiesce_counter */ | ||
72 | - int bs_quiesce = (outer != BDRV_DRAIN_ALL) + | ||
73 | - (inner != BDRV_DRAIN_ALL); | ||
74 | - int backing_quiesce = (outer == BDRV_SUBTREE_DRAIN) + | ||
75 | - (inner == BDRV_SUBTREE_DRAIN); | ||
76 | - int backing_cb_cnt = (outer != BDRV_DRAIN) + | ||
77 | + int backing_quiesce = (outer != BDRV_DRAIN) + | ||
78 | (inner != BDRV_DRAIN); | ||
79 | |||
80 | g_assert_cmpint(bs->quiesce_counter, ==, 0); | ||
81 | @@ -XXX,XX +XXX,XX @@ static void test_nested(void) | ||
82 | do_drain_begin(outer, bs); | ||
83 | do_drain_begin(inner, bs); | ||
84 | |||
85 | - g_assert_cmpint(bs->quiesce_counter, ==, bs_quiesce); | ||
86 | + g_assert_cmpint(bs->quiesce_counter, ==, 2); | ||
87 | g_assert_cmpint(backing->quiesce_counter, ==, backing_quiesce); | ||
88 | g_assert_cmpint(s->drain_count, ==, 2); | ||
89 | - g_assert_cmpint(backing_s->drain_count, ==, backing_cb_cnt); | ||
90 | + g_assert_cmpint(backing_s->drain_count, ==, backing_quiesce); | ||
91 | |||
92 | do_drain_end(inner, bs); | ||
93 | do_drain_end(outer, bs); | ||
94 | -- | ||
95 | 2.13.6 | ||
96 | |||
97 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | All callers pass false for the 'recursive' parameter now. Remove it. | ||
2 | 1 | ||
3 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
4 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
5 | --- | ||
6 | block/io.c | 13 +++---------- | ||
7 | 1 file changed, 3 insertions(+), 10 deletions(-) | ||
8 | |||
9 | diff --git a/block/io.c b/block/io.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/block/io.c | ||
12 | +++ b/block/io.c | ||
13 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn bdrv_drain_invoke_entry(void *opaque) | ||
14 | } | ||
15 | |||
16 | /* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */ | ||
17 | -static void bdrv_drain_invoke(BlockDriverState *bs, bool begin, bool recursive) | ||
18 | +static void bdrv_drain_invoke(BlockDriverState *bs, bool begin) | ||
19 | { | ||
20 | - BdrvChild *child, *tmp; | ||
21 | BdrvCoDrainData data = { .bs = bs, .done = false, .begin = begin}; | ||
22 | |||
23 | if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) || | ||
24 | @@ -XXX,XX +XXX,XX @@ static void bdrv_drain_invoke(BlockDriverState *bs, bool begin, bool recursive) | ||
25 | data.co = qemu_coroutine_create(bdrv_drain_invoke_entry, &data); | ||
26 | bdrv_coroutine_enter(bs, data.co); | ||
27 | BDRV_POLL_WHILE(bs, !data.done); | ||
28 | - | ||
29 | - if (recursive) { | ||
30 | - QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) { | ||
31 | - bdrv_drain_invoke(child->bs, begin, true); | ||
32 | - } | ||
33 | - } | ||
34 | } | ||
35 | |||
36 | static bool bdrv_drain_recurse(BlockDriverState *bs) | ||
37 | @@ -XXX,XX +XXX,XX @@ void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive, | ||
38 | } | ||
39 | |||
40 | bdrv_parent_drained_begin(bs, parent); | ||
41 | - bdrv_drain_invoke(bs, true, false); | ||
42 | + bdrv_drain_invoke(bs, true); | ||
43 | bdrv_drain_recurse(bs); | ||
44 | |||
45 | if (recursive) { | ||
46 | @@ -XXX,XX +XXX,XX @@ void bdrv_do_drained_end(BlockDriverState *bs, bool recursive, | ||
47 | old_quiesce_counter = atomic_fetch_dec(&bs->quiesce_counter); | ||
48 | |||
49 | /* Re-enable things in child-to-parent order */ | ||
50 | - bdrv_drain_invoke(bs, false, false); | ||
51 | + bdrv_drain_invoke(bs, false); | ||
52 | bdrv_parent_drained_end(bs, parent); | ||
53 | if (old_quiesce_counter == 1) { | ||
54 | aio_enable_external(bdrv_get_aio_context(bs)); | ||
55 | -- | ||
56 | 2.13.6 | ||
57 | |||
58 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | All involved nodes are already idle; we called bdrv_do_drained_begin() on | ||
2 | them. | ||
3 | 1 | ||
4 | The comment in the code suggested that this was not correct because the | ||
5 | completion of a request on one node could spawn a new request on a | ||
6 | different node (which might have been drained before, so we wouldn't | ||
7 | drain the new request). In reality, new requests to different nodes | ||
8 | aren't spawned out of nothing, but only in the context of a parent | ||
9 | request, and they aren't submitted to random nodes, but only to child | ||
10 | nodes. As long as we still poll for the completion of the parent request | ||
11 | (which we do), draining each root node separately is good enough. | ||
12 | |||
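Such a parent-to-child request looks roughly like this (a sketch in the
style of the test driver added later in this series; BDRVParentState and
its 'child' field are made up for the example):

    static int coroutine_fn parent_co_preadv(BlockDriverState *bs,
                                             uint64_t offset, uint64_t bytes,
                                             QEMUIOVector *qiov, int flags)
    {
        BDRVParentState *s = bs->opaque;
        /* I/O is only ever submitted to a BdrvChild this node holds */
        return bdrv_co_preadv(s->child, offset, bytes, qiov, flags);
    }

A request like this can only be in flight while the parent request that
issued it is still in flight, which is why polling each root node until it
is idle is sufficient.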
13 | Remove the additional polling code from bdrv_drain_all_begin() and | ||
14 | replace it with an assertion that all nodes are already idle after we | ||
15 | drained them separately. | ||
16 | |||
17 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
18 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
19 | --- | ||
20 | block/io.c | 41 ++++++++++++----------------------------- | ||
21 | 1 file changed, 12 insertions(+), 29 deletions(-) | ||
22 | |||
23 | diff --git a/block/io.c b/block/io.c | ||
24 | index XXXXXXX..XXXXXXX 100644 | ||
25 | --- a/block/io.c | ||
26 | +++ b/block/io.c | ||
27 | @@ -XXX,XX +XXX,XX @@ void bdrv_drain(BlockDriverState *bs) | ||
28 | bdrv_drained_end(bs); | ||
29 | } | ||
30 | |||
31 | +static void bdrv_drain_assert_idle(BlockDriverState *bs) | ||
32 | +{ | ||
33 | + BdrvChild *child, *next; | ||
34 | + | ||
35 | + assert(atomic_read(&bs->in_flight) == 0); | ||
36 | + QLIST_FOREACH_SAFE(child, &bs->children, next, next) { | ||
37 | + bdrv_drain_assert_idle(child->bs); | ||
38 | + } | ||
39 | +} | ||
40 | + | ||
41 | /* | ||
42 | * Wait for pending requests to complete across all BlockDriverStates | ||
43 | * | ||
44 | @@ -XXX,XX +XXX,XX @@ void bdrv_drain(BlockDriverState *bs) | ||
45 | */ | ||
46 | void bdrv_drain_all_begin(void) | ||
47 | { | ||
48 | - /* Always run first iteration so any pending completion BHs run */ | ||
49 | - bool waited = true; | ||
50 | BlockDriverState *bs; | ||
51 | BdrvNextIterator it; | ||
52 | - GSList *aio_ctxs = NULL, *ctx; | ||
53 | |||
54 | /* BDRV_POLL_WHILE() for a node can only be called from its own I/O thread | ||
55 | * or the main loop AioContext. We potentially use BDRV_POLL_WHILE() on | ||
56 | @@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_begin(void) | ||
57 | aio_context_acquire(aio_context); | ||
58 | bdrv_do_drained_begin(bs, true, NULL); | ||
59 | aio_context_release(aio_context); | ||
60 | - | ||
61 | - if (!g_slist_find(aio_ctxs, aio_context)) { | ||
62 | - aio_ctxs = g_slist_prepend(aio_ctxs, aio_context); | ||
63 | - } | ||
64 | } | ||
65 | |||
66 | - /* Note that completion of an asynchronous I/O operation can trigger any | ||
67 | - * number of other I/O operations on other devices---for example a | ||
68 | - * coroutine can submit an I/O request to another device in response to | ||
69 | - * request completion. Therefore we must keep looping until there was no | ||
70 | - * more activity rather than simply draining each device independently. | ||
71 | - */ | ||
72 | - while (waited) { | ||
73 | - waited = false; | ||
74 | - | ||
75 | - for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) { | ||
76 | - AioContext *aio_context = ctx->data; | ||
77 | - | ||
78 | - aio_context_acquire(aio_context); | ||
79 | - for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { | ||
80 | - if (aio_context == bdrv_get_aio_context(bs)) { | ||
81 | - waited |= bdrv_drain_recurse(bs); | ||
82 | - } | ||
83 | - } | ||
84 | - aio_context_release(aio_context); | ||
85 | - } | ||
86 | + for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { | ||
87 | + bdrv_drain_assert_idle(bs); | ||
88 | } | ||
89 | - | ||
90 | - g_slist_free(aio_ctxs); | ||
91 | } | ||
92 | |||
93 | void bdrv_drain_all_end(void) | ||
94 | -- | ||
95 | 2.13.6 | ||
96 | |||
97 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | Since we use bdrv_do_drained_begin/end() for bdrv_drain_all_begin/end(), | ||
2 | coroutine context is automatically left with a BH, preventing the | ||
3 | deadlocks that made bdrv_drain_all*() unsafe in coroutine context. Now | ||
4 | that the old polling code has been removed as dead code, it's obvious | ||
5 | that bdrv_drain_all*() works in coroutine context now. | ||
6 | 1 | ||
7 | Enable the coroutine test cases for bdrv_drain_all(). | ||
8 | |||
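call_in_coroutine() is the helper the test cases below use for this;
roughly, it looks like the following (a sketch that glosses over details):

    static void coroutine_fn call_in_coroutine_entry(void *opaque)
    {
        void (*entry)(void) = opaque;
        entry();
    }

    static void call_in_coroutine(void (*entry)(void))
    {
        Coroutine *co = qemu_coroutine_create(call_in_coroutine_entry, entry);
        qemu_coroutine_enter(co);
    }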
9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
10 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
11 | --- | ||
12 | tests/test-bdrv-drain.c | 16 ++++++++++++++-- | ||
13 | 1 file changed, 14 insertions(+), 2 deletions(-) | ||
14 | |||
15 | diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/tests/test-bdrv-drain.c | ||
18 | +++ b/tests/test-bdrv-drain.c | ||
19 | @@ -XXX,XX +XXX,XX @@ static void test_drv_cb_drain_subtree(void) | ||
20 | test_drv_cb_common(BDRV_SUBTREE_DRAIN, true); | ||
21 | } | ||
22 | |||
23 | +static void test_drv_cb_co_drain_all(void) | ||
24 | +{ | ||
25 | + call_in_coroutine(test_drv_cb_drain_all); | ||
26 | +} | ||
27 | + | ||
28 | static void test_drv_cb_co_drain(void) | ||
29 | { | ||
30 | call_in_coroutine(test_drv_cb_drain); | ||
31 | @@ -XXX,XX +XXX,XX @@ static void test_quiesce_drain_subtree(void) | ||
32 | test_quiesce_common(BDRV_SUBTREE_DRAIN, true); | ||
33 | } | ||
34 | |||
35 | +static void test_quiesce_co_drain_all(void) | ||
36 | +{ | ||
37 | + call_in_coroutine(test_quiesce_drain_all); | ||
38 | +} | ||
39 | + | ||
40 | static void test_quiesce_co_drain(void) | ||
41 | { | ||
42 | call_in_coroutine(test_quiesce_drain); | ||
43 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv) | ||
44 | g_test_add_func("/bdrv-drain/driver-cb/drain_subtree", | ||
45 | test_drv_cb_drain_subtree); | ||
46 | |||
47 | - // XXX bdrv_drain_all() doesn't work in coroutine context | ||
48 | + g_test_add_func("/bdrv-drain/driver-cb/co/drain_all", | ||
49 | + test_drv_cb_co_drain_all); | ||
50 | g_test_add_func("/bdrv-drain/driver-cb/co/drain", test_drv_cb_co_drain); | ||
51 | g_test_add_func("/bdrv-drain/driver-cb/co/drain_subtree", | ||
52 | test_drv_cb_co_drain_subtree); | ||
53 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv) | ||
54 | g_test_add_func("/bdrv-drain/quiesce/drain_subtree", | ||
55 | test_quiesce_drain_subtree); | ||
56 | |||
57 | - // XXX bdrv_drain_all() doesn't work in coroutine context | ||
58 | + g_test_add_func("/bdrv-drain/quiesce/co/drain_all", | ||
59 | + test_quiesce_co_drain_all); | ||
60 | g_test_add_func("/bdrv-drain/quiesce/co/drain", test_quiesce_co_drain); | ||
61 | g_test_add_func("/bdrv-drain/quiesce/co/drain_subtree", | ||
62 | test_quiesce_co_drain_subtree); | ||
63 | -- | ||
64 | 2.13.6 | ||
65 | |||
66 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | Commit 91af091f923 added an additional aio_poll() to BDRV_POLL_WHILE() | ||
2 | in order to make sure that all pending BHs are executed on drain. This | ||
3 | was the wrong place to make the fix, as it is useless overhead for all | ||
4 | other users of the macro and unnecessarily complicates the mechanism. | ||
5 | 1 | ||
6 | This patch effectively reverts said commit (the context has changed a | ||
7 | bit and the code has moved to AIO_WAIT_WHILE()) and instead polls in the | ||
8 | loop condition for drain. | ||
9 | |||
10 | The effect is probably hard to measure in any real-world use case | ||
11 | because actual I/O will dominate, but if I run only the initialisation | ||
12 | part of 'qemu-img convert' where it calls bdrv_block_status() for the | ||
13 | whole image to find out how much data there is to copy, this phase actually | ||
14 | needs only roughly half the time after this patch. | ||
15 | |||
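For reference, a caller of the reworked macro looks roughly like this (a
sketch; in the tree the usual entry points are wrappers such as
BDRV_POLL_WHILE()):

    AioWait wait = { };
    AioContext *ctx = bdrv_get_aio_context(bs);

    /* Polls ctx (or the main context, depending on the calling thread)
     * until the condition evaluates to false */
    AIO_WAIT_WHILE(&wait, ctx, atomic_read(&bs->in_flight) > 0);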
16 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
17 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
18 | --- | ||
19 | include/block/aio-wait.h | 22 ++++++++-------------- | ||
20 | block/io.c | 11 ++++++++++- | ||
21 | 2 files changed, 18 insertions(+), 15 deletions(-) | ||
22 | |||
23 | diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h | ||
24 | index XXXXXXX..XXXXXXX 100644 | ||
25 | --- a/include/block/aio-wait.h | ||
26 | +++ b/include/block/aio-wait.h | ||
27 | @@ -XXX,XX +XXX,XX @@ typedef struct { | ||
28 | */ | ||
29 | #define AIO_WAIT_WHILE(wait, ctx, cond) ({ \ | ||
30 | bool waited_ = false; \ | ||
31 | - bool busy_ = true; \ | ||
32 | AioWait *wait_ = (wait); \ | ||
33 | AioContext *ctx_ = (ctx); \ | ||
34 | if (in_aio_context_home_thread(ctx_)) { \ | ||
35 | - while ((cond) || busy_) { \ | ||
36 | - busy_ = aio_poll(ctx_, (cond)); \ | ||
37 | - waited_ |= !!(cond) | busy_; \ | ||
38 | + while ((cond)) { \ | ||
39 | + aio_poll(ctx_, true); \ | ||
40 | + waited_ = true; \ | ||
41 | } \ | ||
42 | } else { \ | ||
43 | assert(qemu_get_current_aio_context() == \ | ||
44 | qemu_get_aio_context()); \ | ||
45 | /* Increment wait_->num_waiters before evaluating cond. */ \ | ||
46 | atomic_inc(&wait_->num_waiters); \ | ||
47 | - while (busy_) { \ | ||
48 | - if ((cond)) { \ | ||
49 | - waited_ = busy_ = true; \ | ||
50 | - aio_context_release(ctx_); \ | ||
51 | - aio_poll(qemu_get_aio_context(), true); \ | ||
52 | - aio_context_acquire(ctx_); \ | ||
53 | - } else { \ | ||
54 | - busy_ = aio_poll(ctx_, false); \ | ||
55 | - waited_ |= busy_; \ | ||
56 | - } \ | ||
57 | + while ((cond)) { \ | ||
58 | + aio_context_release(ctx_); \ | ||
59 | + aio_poll(qemu_get_aio_context(), true); \ | ||
60 | + aio_context_acquire(ctx_); \ | ||
61 | + waited_ = true; \ | ||
62 | } \ | ||
63 | atomic_dec(&wait_->num_waiters); \ | ||
64 | } \ | ||
65 | diff --git a/block/io.c b/block/io.c | ||
66 | index XXXXXXX..XXXXXXX 100644 | ||
67 | --- a/block/io.c | ||
68 | +++ b/block/io.c | ||
69 | @@ -XXX,XX +XXX,XX @@ static void bdrv_drain_invoke(BlockDriverState *bs, bool begin) | ||
70 | BDRV_POLL_WHILE(bs, !data.done); | ||
71 | } | ||
72 | |||
73 | +/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */ | ||
74 | +static bool bdrv_drain_poll(BlockDriverState *bs) | ||
75 | +{ | ||
76 | + /* Execute pending BHs first and check everything else only after the BHs | ||
77 | + * have executed. */ | ||
78 | + while (aio_poll(bs->aio_context, false)); | ||
79 | + return atomic_read(&bs->in_flight); | ||
80 | +} | ||
81 | + | ||
82 | static bool bdrv_drain_recurse(BlockDriverState *bs) | ||
83 | { | ||
84 | BdrvChild *child, *tmp; | ||
85 | bool waited; | ||
86 | |||
87 | /* Wait for drained requests to finish */ | ||
88 | - waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0); | ||
89 | + waited = BDRV_POLL_WHILE(bs, bdrv_drain_poll(bs)); | ||
90 | |||
91 | QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) { | ||
92 | BlockDriverState *bs = child->bs; | ||
93 | -- | ||
94 | 2.13.6 | ||
95 | |||
96 | diff view generated by jsdifflib |
1 | From: Max Reitz <mreitz@redhat.com> | 1 | From: Jason Dillaman <dillaman@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | This patch adds two bdrv-drain tests for what happens if some BDS goes | 3 | Remove Josh as per his request since he is no longer the upstream RBD |
4 | away during the drain. | 4 | tech lead. Add myself as the maintainer since I am the current RBD tech |
5 | lead. | ||
5 | 6 | ||
6 | The basic idea is that you have a parent BDS with some child nodes. | 7 | Signed-off-by: Jason Dillaman <dillaman@redhat.com> |
7 | Then, you drain one of the children. Because of that, the party who | 8 | Reviewed-by: Josh Durgin <jdurgin@redhat.com> |
8 | actually owns the parent decides to (A) delete it, or (B) detach all its | ||
9 | children from it -- both while the child is still being drained. | ||
10 | |||
11 | A real-world case where this can happen is the mirror block job, which | ||
12 | may exit if you drain one of its children. | ||
13 | |||
14 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
15 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
16 | --- | 10 | --- |
17 | tests/test-bdrv-drain.c | 169 ++++++++++++++++++++++++++++++++++++++++++++++++ | 11 | MAINTAINERS | 2 +- |
18 | 1 file changed, 169 insertions(+) | 12 | 1 file changed, 1 insertion(+), 1 deletion(-) |
19 | 13 | ||
20 | diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c | 14 | diff --git a/MAINTAINERS b/MAINTAINERS |
21 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
22 | --- a/tests/test-bdrv-drain.c | 16 | --- a/MAINTAINERS |
23 | +++ b/tests/test-bdrv-drain.c | 17 | +++ b/MAINTAINERS |
24 | @@ -XXX,XX +XXX,XX @@ static void test_blockjob_drain_subtree(void) | 18 | @@ -XXX,XX +XXX,XX @@ S: Supported |
25 | test_blockjob_common(BDRV_SUBTREE_DRAIN); | 19 | F: block/vmdk.c |
26 | } | 20 | |
27 | 21 | RBD | |
28 | + | 22 | -M: Josh Durgin <jdurgin@redhat.com> |
29 | +typedef struct BDRVTestTopState { | 23 | +M: Jason Dillaman <dillaman@redhat.com> |
30 | + BdrvChild *wait_child; | 24 | L: qemu-block@nongnu.org |
31 | +} BDRVTestTopState; | 25 | S: Supported |
32 | + | 26 | F: block/rbd.c |
33 | +static void bdrv_test_top_close(BlockDriverState *bs) | ||
34 | +{ | ||
35 | + BdrvChild *c, *next_c; | ||
36 | + QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) { | ||
37 | + bdrv_unref_child(bs, c); | ||
38 | + } | ||
39 | +} | ||
40 | + | ||
41 | +static int coroutine_fn bdrv_test_top_co_preadv(BlockDriverState *bs, | ||
42 | + uint64_t offset, uint64_t bytes, | ||
43 | + QEMUIOVector *qiov, int flags) | ||
44 | +{ | ||
45 | + BDRVTestTopState *tts = bs->opaque; | ||
46 | + return bdrv_co_preadv(tts->wait_child, offset, bytes, qiov, flags); | ||
47 | +} | ||
48 | + | ||
49 | +static BlockDriver bdrv_test_top_driver = { | ||
50 | + .format_name = "test_top_driver", | ||
51 | + .instance_size = sizeof(BDRVTestTopState), | ||
52 | + | ||
53 | + .bdrv_close = bdrv_test_top_close, | ||
54 | + .bdrv_co_preadv = bdrv_test_top_co_preadv, | ||
55 | + | ||
56 | + .bdrv_child_perm = bdrv_format_default_perms, | ||
57 | +}; | ||
58 | + | ||
59 | +typedef struct TestCoDeleteByDrainData { | ||
60 | + BlockBackend *blk; | ||
61 | + bool detach_instead_of_delete; | ||
62 | + bool done; | ||
63 | +} TestCoDeleteByDrainData; | ||
64 | + | ||
65 | +static void coroutine_fn test_co_delete_by_drain(void *opaque) | ||
66 | +{ | ||
67 | + TestCoDeleteByDrainData *dbdd = opaque; | ||
68 | + BlockBackend *blk = dbdd->blk; | ||
69 | + BlockDriverState *bs = blk_bs(blk); | ||
70 | + BDRVTestTopState *tts = bs->opaque; | ||
71 | + void *buffer = g_malloc(65536); | ||
72 | + QEMUIOVector qiov; | ||
73 | + struct iovec iov = { | ||
74 | + .iov_base = buffer, | ||
75 | + .iov_len = 65536, | ||
76 | + }; | ||
77 | + | ||
78 | + qemu_iovec_init_external(&qiov, &iov, 1); | ||
79 | + | ||
80 | + /* Pretend some internal I/O operation from parent to child. | ||
81 | + * Important: We have to read from the child, not from the parent! | ||
82 | + * Draining works by first propagating it all up the tree to the | ||
83 | + * root and then waiting for drainage from root to the leaves | ||
84 | + * (protocol nodes). If we have a request waiting on the root, | ||
85 | + * everything will be drained before we go back down the tree, but | ||
86 | + * we do not want that. We want to be in the middle of draining | ||
87 | + * when the following request returns. */ | ||
88 | + bdrv_co_preadv(tts->wait_child, 0, 65536, &qiov, 0); | ||
89 | + | ||
90 | + g_assert_cmpint(bs->refcnt, ==, 1); | ||
91 | + | ||
92 | + if (!dbdd->detach_instead_of_delete) { | ||
93 | + blk_unref(blk); | ||
94 | + } else { | ||
95 | + BdrvChild *c, *next_c; | ||
96 | + QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) { | ||
97 | + bdrv_unref_child(bs, c); | ||
98 | + } | ||
99 | + } | ||
100 | + | ||
101 | + dbdd->done = true; | ||
102 | +} | ||
103 | + | ||
104 | +/** | ||
105 | + * Test what happens when some BDS has some children, you drain one of | ||
106 | + * them and this results in the BDS being deleted. | ||
107 | + * | ||
108 | + * If @detach_instead_of_delete is set, the BDS is not going to be | ||
109 | + * deleted but will only detach all of its children. | ||
110 | + */ | ||
111 | +static void do_test_delete_by_drain(bool detach_instead_of_delete) | ||
112 | +{ | ||
113 | + BlockBackend *blk; | ||
114 | + BlockDriverState *bs, *child_bs, *null_bs; | ||
115 | + BDRVTestTopState *tts; | ||
116 | + TestCoDeleteByDrainData dbdd; | ||
117 | + Coroutine *co; | ||
118 | + | ||
119 | + bs = bdrv_new_open_driver(&bdrv_test_top_driver, "top", BDRV_O_RDWR, | ||
120 | + &error_abort); | ||
121 | + bs->total_sectors = 65536 >> BDRV_SECTOR_BITS; | ||
122 | + tts = bs->opaque; | ||
123 | + | ||
124 | + null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL, | ||
125 | + &error_abort); | ||
126 | + bdrv_attach_child(bs, null_bs, "null-child", &child_file, &error_abort); | ||
127 | + | ||
128 | + /* This child will be the one to pass requests through to, and | ||
129 | + * it will stall until a drain occurs */ | ||
130 | + child_bs = bdrv_new_open_driver(&bdrv_test, "child", BDRV_O_RDWR, | ||
131 | + &error_abort); | ||
132 | + child_bs->total_sectors = 65536 >> BDRV_SECTOR_BITS; | ||
133 | + /* Takes our reference to child_bs */ | ||
134 | + tts->wait_child = bdrv_attach_child(bs, child_bs, "wait-child", &child_file, | ||
135 | + &error_abort); | ||
136 | + | ||
137 | + /* This child is just there to be deleted | ||
138 | + * (for detach_instead_of_delete == true) */ | ||
139 | + null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL, | ||
140 | + &error_abort); | ||
141 | + bdrv_attach_child(bs, null_bs, "null-child", &child_file, &error_abort); | ||
142 | + | ||
143 | + blk = blk_new(BLK_PERM_ALL, BLK_PERM_ALL); | ||
144 | + blk_insert_bs(blk, bs, &error_abort); | ||
145 | + | ||
146 | + /* Referenced by blk now */ | ||
147 | + bdrv_unref(bs); | ||
148 | + | ||
149 | + g_assert_cmpint(bs->refcnt, ==, 1); | ||
150 | + g_assert_cmpint(child_bs->refcnt, ==, 1); | ||
151 | + g_assert_cmpint(null_bs->refcnt, ==, 1); | ||
152 | + | ||
153 | + | ||
154 | + dbdd = (TestCoDeleteByDrainData){ | ||
155 | + .blk = blk, | ||
156 | + .detach_instead_of_delete = detach_instead_of_delete, | ||
157 | + .done = false, | ||
158 | + }; | ||
159 | + co = qemu_coroutine_create(test_co_delete_by_drain, &dbdd); | ||
160 | + qemu_coroutine_enter(co); | ||
161 | + | ||
162 | + /* Drain the child while the read operation is still pending. | ||
163 | + * This should result in the operation finishing and | ||
164 | + * test_co_delete_by_drain() resuming. Thus, @bs will be deleted | ||
165 | + * and the coroutine will exit while this drain operation is still | ||
166 | + * in progress. */ | ||
167 | + bdrv_ref(child_bs); | ||
168 | + bdrv_drain(child_bs); | ||
169 | + bdrv_unref(child_bs); | ||
170 | + | ||
171 | + while (!dbdd.done) { | ||
172 | + aio_poll(qemu_get_aio_context(), true); | ||
173 | + } | ||
174 | + | ||
175 | + if (detach_instead_of_delete) { | ||
176 | + /* Here, the reference has not passed over to the coroutine, | ||
177 | + * so we have to delete the BB ourselves */ | ||
178 | + blk_unref(blk); | ||
179 | + } | ||
180 | +} | ||
181 | + | ||
182 | + | ||
183 | +static void test_delete_by_drain(void) | ||
184 | +{ | ||
185 | + do_test_delete_by_drain(false); | ||
186 | +} | ||
187 | + | ||
188 | +static void test_detach_by_drain(void) | ||
189 | +{ | ||
190 | + do_test_delete_by_drain(true); | ||
191 | +} | ||
192 | + | ||
193 | + | ||
194 | int main(int argc, char **argv) | ||
195 | { | ||
196 | int ret; | ||
197 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv) | ||
198 | g_test_add_func("/bdrv-drain/blockjob/drain_subtree", | ||
199 | test_blockjob_drain_subtree); | ||
200 | |||
201 | + g_test_add_func("/bdrv-drain/deletion", test_delete_by_drain); | ||
202 | + g_test_add_func("/bdrv-drain/detach", test_detach_by_drain); | ||
203 | + | ||
204 | ret = g_test_run(); | ||
205 | qemu_event_destroy(&done_event); | ||
206 | return ret; | ||
207 | -- | 27 | -- |
208 | 2.13.6 | 28 | 2.20.1 |
209 | 29 | ||
210 | 30 | diff view generated by jsdifflib |
1 | We already requested that block jobs be paused in .bdrv_drained_begin, | 1 | From: Eric Blake <eblake@redhat.com> |
---|---|---|---|
2 | but no guarantee was made that the job was actually inactive at the | ||
3 | point where bdrv_drained_begin() returned. | ||
4 | 2 | ||
5 | This introduces a new callback BdrvChildRole.bdrv_drained_poll() and | 3 | Commit b76b4f60 allowed '-o compat=v3' as an alias for the |
6 | uses it to make bdrv_drain_poll() consider block jobs using the node to | 4 | less-appealing '-o compat=1.1' for 'qemu-img create' since we want to |
7 | be drained. | 5 | use the QMP form as much as possible, but forgot to do likewise for |
6 | qemu-img amend. Also, it doesn't help that '-o help' doesn't list our | ||
7 | new preferred spellings. | ||
8 | 8 | ||
9 | For the test case to work as expected, we have to switch from | 9 | Signed-off-by: Eric Blake <eblake@redhat.com> |
10 | block_job_sleep_ns() to qemu_co_sleep_ns() so that the test job is even | ||
11 | considered active and must be waited for when draining the node. | ||
12 | |||
13 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
14 | --- | 11 | --- |
15 | include/block/block.h | 8 ++++++++ | 12 | block/qcow2.c | 6 +++--- |
16 | include/block/block_int.h | 7 +++++++ | 13 | 1 file changed, 3 insertions(+), 3 deletions(-) |
17 | include/block/blockjob_int.h | 8 ++++++++ | ||
18 | block.c | 9 +++++++++ | ||
19 | block/io.c | 40 ++++++++++++++++++++++++++++++++++------ | ||
20 | block/mirror.c | 8 ++++++++ | ||
21 | blockjob.c | 23 +++++++++++++++++++++++ | ||
22 | tests/test-bdrv-drain.c | 18 ++++++++++-------- | ||
23 | 8 files changed, 107 insertions(+), 14 deletions(-) | ||
24 | 14 | ||
25 | diff --git a/include/block/block.h b/include/block/block.h | 15 | diff --git a/block/qcow2.c b/block/qcow2.c |
26 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
27 | --- a/include/block/block.h | 17 | --- a/block/qcow2.c |
28 | +++ b/include/block/block.h | 18 | +++ b/block/qcow2.c |
29 | @@ -XXX,XX +XXX,XX @@ void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore); | 19 | @@ -XXX,XX +XXX,XX @@ static int qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts, |
30 | void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore); | 20 | compat = qemu_opt_get(opts, BLOCK_OPT_COMPAT_LEVEL); |
31 | 21 | if (!compat) { | |
32 | /** | 22 | /* preserve default */ |
33 | + * bdrv_drain_poll: | 23 | - } else if (!strcmp(compat, "0.10")) { |
34 | + * | 24 | + } else if (!strcmp(compat, "0.10") || !strcmp(compat, "v2")) { |
35 | + * Poll for pending requests in @bs and its parents (except for | 25 | new_version = 2; |
36 | + * @ignore_parent). This is part of bdrv_drained_begin. | 26 | - } else if (!strcmp(compat, "1.1")) { |
37 | + */ | 27 | + } else if (!strcmp(compat, "1.1") || !strcmp(compat, "v3")) { |
38 | +bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent); | 28 | new_version = 3; |
39 | + | 29 | } else { |
40 | +/** | 30 | error_setg(errp, "Unknown compatibility level %s", compat); |
41 | * bdrv_drained_begin: | 31 | @@ -XXX,XX +XXX,XX @@ static QemuOptsList qcow2_create_opts = { |
42 | * | 32 | { |
43 | * Begin a quiesced section for exclusive access to the BDS, by disabling | 33 | .name = BLOCK_OPT_COMPAT_LEVEL, |
44 | diff --git a/include/block/block_int.h b/include/block/block_int.h | 34 | .type = QEMU_OPT_STRING, |
45 | index XXXXXXX..XXXXXXX 100644 | 35 | - .help = "Compatibility level (0.10 or 1.1)" |
46 | --- a/include/block/block_int.h | 36 | + .help = "Compatibility level (v2 [0.10] or v3 [1.1])" |
47 | +++ b/include/block/block_int.h | 37 | }, |
48 | @@ -XXX,XX +XXX,XX @@ struct BdrvChildRole { | 38 | { |
49 | void (*drained_begin)(BdrvChild *child); | 39 | .name = BLOCK_OPT_BACKING_FILE, |
50 | void (*drained_end)(BdrvChild *child); | ||
51 | |||
52 | + /* | ||
53 | + * Returns whether the parent has pending requests for the child. This | ||
54 | + * callback is polled after .drained_begin() has been called until all | ||
55 | + * activity on the child has stopped. | ||
56 | + */ | ||
57 | + bool (*drained_poll)(BdrvChild *child); | ||
58 | + | ||
59 | /* Notifies the parent that the child has been activated/inactivated (e.g. | ||
60 | * when migration is completing) and it can start/stop requesting | ||
61 | * permissions and doing I/O on it. */ | ||
62 | diff --git a/include/block/blockjob_int.h b/include/block/blockjob_int.h | ||
63 | index XXXXXXX..XXXXXXX 100644 | ||
64 | --- a/include/block/blockjob_int.h | ||
65 | +++ b/include/block/blockjob_int.h | ||
66 | @@ -XXX,XX +XXX,XX @@ struct BlockJobDriver { | ||
67 | JobDriver job_driver; | ||
68 | |||
69 | /* | ||
70 | + * Returns whether the job has pending requests for the child or will | ||
71 | + * submit new requests before the next pause point. This callback is polled | ||
72 | + * in the context of draining a job node after requesting that the job be | ||
73 | + * paused, until all activity on the child has stopped. | ||
74 | + */ | ||
75 | + bool (*drained_poll)(BlockJob *job); | ||
76 | + | ||
77 | + /* | ||
78 | * If the callback is not NULL, it will be invoked before the job is | ||
79 | * resumed in a new AioContext. This is the place to move any resources | ||
80 | * besides job->blk to the new AioContext. | ||
81 | diff --git a/block.c b/block.c | ||
82 | index XXXXXXX..XXXXXXX 100644 | ||
83 | --- a/block.c | ||
84 | +++ b/block.c | ||
85 | @@ -XXX,XX +XXX,XX @@ static void bdrv_child_cb_drained_begin(BdrvChild *child) | ||
86 | bdrv_drained_begin(bs); | ||
87 | } | ||
88 | |||
89 | +static bool bdrv_child_cb_drained_poll(BdrvChild *child) | ||
90 | +{ | ||
91 | + BlockDriverState *bs = child->opaque; | ||
92 | + return bdrv_drain_poll(bs, NULL); | ||
93 | +} | ||
94 | + | ||
95 | static void bdrv_child_cb_drained_end(BdrvChild *child) | ||
96 | { | ||
97 | BlockDriverState *bs = child->opaque; | ||
98 | @@ -XXX,XX +XXX,XX @@ const BdrvChildRole child_file = { | ||
99 | .get_parent_desc = bdrv_child_get_parent_desc, | ||
100 | .inherit_options = bdrv_inherited_options, | ||
101 | .drained_begin = bdrv_child_cb_drained_begin, | ||
102 | + .drained_poll = bdrv_child_cb_drained_poll, | ||
103 | .drained_end = bdrv_child_cb_drained_end, | ||
104 | .attach = bdrv_child_cb_attach, | ||
105 | .detach = bdrv_child_cb_detach, | ||
106 | @@ -XXX,XX +XXX,XX @@ const BdrvChildRole child_format = { | ||
107 | .get_parent_desc = bdrv_child_get_parent_desc, | ||
108 | .inherit_options = bdrv_inherited_fmt_options, | ||
109 | .drained_begin = bdrv_child_cb_drained_begin, | ||
110 | + .drained_poll = bdrv_child_cb_drained_poll, | ||
111 | .drained_end = bdrv_child_cb_drained_end, | ||
112 | .attach = bdrv_child_cb_attach, | ||
113 | .detach = bdrv_child_cb_detach, | ||
114 | @@ -XXX,XX +XXX,XX @@ const BdrvChildRole child_backing = { | ||
115 | .detach = bdrv_backing_detach, | ||
116 | .inherit_options = bdrv_backing_options, | ||
117 | .drained_begin = bdrv_child_cb_drained_begin, | ||
118 | + .drained_poll = bdrv_child_cb_drained_poll, | ||
119 | .drained_end = bdrv_child_cb_drained_end, | ||
120 | .inactivate = bdrv_child_cb_inactivate, | ||
121 | .update_filename = bdrv_backing_update_filename, | ||
122 | diff --git a/block/io.c b/block/io.c | ||
123 | index XXXXXXX..XXXXXXX 100644 | ||
124 | --- a/block/io.c | ||
125 | +++ b/block/io.c | ||
126 | @@ -XXX,XX +XXX,XX @@ void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore) | ||
127 | } | ||
128 | } | ||
129 | |||
130 | +static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore) | ||
131 | +{ | ||
132 | + BdrvChild *c, *next; | ||
133 | + bool busy = false; | ||
134 | + | ||
135 | + QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) { | ||
136 | + if (c == ignore) { | ||
137 | + continue; | ||
138 | + } | ||
139 | + if (c->role->drained_poll) { | ||
140 | + busy |= c->role->drained_poll(c); | ||
141 | + } | ||
142 | + } | ||
143 | + | ||
144 | + return busy; | ||
145 | +} | ||
146 | + | ||
147 | static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src) | ||
148 | { | ||
149 | dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer); | ||
150 | @@ -XXX,XX +XXX,XX @@ static void bdrv_drain_invoke(BlockDriverState *bs, bool begin) | ||
151 | } | ||
152 | |||
153 | /* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */ | ||
154 | -static bool bdrv_drain_poll(BlockDriverState *bs) | ||
155 | +bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent) | ||
156 | +{ | ||
157 | + if (bdrv_parent_drained_poll(bs, ignore_parent)) { | ||
158 | + return true; | ||
159 | + } | ||
160 | + | ||
161 | + return atomic_read(&bs->in_flight); | ||
162 | +} | ||
163 | + | ||
164 | +static bool bdrv_drain_poll_top_level(BlockDriverState *bs, | ||
165 | + BdrvChild *ignore_parent) | ||
166 | { | ||
167 | /* Execute pending BHs first and check everything else only after the BHs | ||
168 | * have executed. */ | ||
169 | while (aio_poll(bs->aio_context, false)); | ||
170 | - return atomic_read(&bs->in_flight); | ||
171 | + | ||
172 | + return bdrv_drain_poll(bs, ignore_parent); | ||
173 | } | ||
174 | |||
175 | -static bool bdrv_drain_recurse(BlockDriverState *bs) | ||
176 | +static bool bdrv_drain_recurse(BlockDriverState *bs, BdrvChild *parent) | ||
177 | { | ||
178 | BdrvChild *child, *tmp; | ||
179 | bool waited; | ||
180 | |||
181 | /* Wait for drained requests to finish */ | ||
182 | - waited = BDRV_POLL_WHILE(bs, bdrv_drain_poll(bs)); | ||
183 | + waited = BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, parent)); | ||
184 | |||
185 | QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) { | ||
186 | BlockDriverState *bs = child->bs; | ||
187 | @@ -XXX,XX +XXX,XX @@ static bool bdrv_drain_recurse(BlockDriverState *bs) | ||
188 | */ | ||
189 | bdrv_ref(bs); | ||
190 | } | ||
191 | - waited |= bdrv_drain_recurse(bs); | ||
192 | + waited |= bdrv_drain_recurse(bs, child); | ||
193 | if (in_main_loop) { | ||
194 | bdrv_unref(bs); | ||
195 | } | ||
196 | @@ -XXX,XX +XXX,XX @@ void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive, | ||
197 | |||
198 | bdrv_parent_drained_begin(bs, parent); | ||
199 | bdrv_drain_invoke(bs, true); | ||
200 | - bdrv_drain_recurse(bs); | ||
201 | + bdrv_drain_recurse(bs, parent); | ||
202 | |||
203 | if (recursive) { | ||
204 | bs->recursive_quiesce_counter++; | ||
205 | diff --git a/block/mirror.c b/block/mirror.c | ||
206 | index XXXXXXX..XXXXXXX 100644 | ||
207 | --- a/block/mirror.c | ||
208 | +++ b/block/mirror.c | ||
209 | @@ -XXX,XX +XXX,XX @@ static void mirror_pause(Job *job) | ||
210 | mirror_wait_for_all_io(s); | ||
211 | } | ||
212 | |||
213 | +static bool mirror_drained_poll(BlockJob *job) | ||
214 | +{ | ||
215 | + MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); | ||
216 | + return !!s->in_flight; | ||
217 | +} | ||
218 | + | ||
219 | static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context) | ||
220 | { | ||
221 | MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); | ||
222 | @@ -XXX,XX +XXX,XX @@ static const BlockJobDriver mirror_job_driver = { | ||
223 | .pause = mirror_pause, | ||
224 | .complete = mirror_complete, | ||
225 | }, | ||
226 | + .drained_poll = mirror_drained_poll, | ||
227 | .attached_aio_context = mirror_attached_aio_context, | ||
228 | .drain = mirror_drain, | ||
229 | }; | ||
230 | @@ -XXX,XX +XXX,XX @@ static const BlockJobDriver commit_active_job_driver = { | ||
231 | .pause = mirror_pause, | ||
232 | .complete = mirror_complete, | ||
233 | }, | ||
234 | + .drained_poll = mirror_drained_poll, | ||
235 | .attached_aio_context = mirror_attached_aio_context, | ||
236 | .drain = mirror_drain, | ||
237 | }; | ||
238 | diff --git a/blockjob.c b/blockjob.c | ||
239 | index XXXXXXX..XXXXXXX 100644 | ||
240 | --- a/blockjob.c | ||
241 | +++ b/blockjob.c | ||
242 | @@ -XXX,XX +XXX,XX @@ static void child_job_drained_begin(BdrvChild *c) | ||
243 | job_pause(&job->job); | ||
244 | } | ||
245 | |||
246 | +static bool child_job_drained_poll(BdrvChild *c) | ||
247 | +{ | ||
248 | + BlockJob *bjob = c->opaque; | ||
249 | + Job *job = &bjob->job; | ||
250 | + const BlockJobDriver *drv = block_job_driver(bjob); | ||
251 | + | ||
252 | + /* An inactive or completed job doesn't have any pending requests. Jobs | ||
253 | + * with !job->busy are either already paused or have a pause point after | ||
254 | + * being reentered, so no job driver code will run before they pause. */ | ||
255 | + if (!job->busy || job_is_completed(job) || job->deferred_to_main_loop) { | ||
256 | + return false; | ||
257 | + } | ||
258 | + | ||
259 | + /* Otherwise, assume that it isn't fully stopped yet, but allow the job to | ||
260 | + * override this assumption. */ | ||
261 | + if (drv->drained_poll) { | ||
262 | + return drv->drained_poll(bjob); | ||
263 | + } else { | ||
264 | + return true; | ||
265 | + } | ||
266 | +} | ||
267 | + | ||
268 | static void child_job_drained_end(BdrvChild *c) | ||
269 | { | ||
270 | BlockJob *job = c->opaque; | ||
271 | @@ -XXX,XX +XXX,XX @@ static void child_job_drained_end(BdrvChild *c) | ||
272 | static const BdrvChildRole child_job = { | ||
273 | .get_parent_desc = child_job_get_parent_desc, | ||
274 | .drained_begin = child_job_drained_begin, | ||
275 | + .drained_poll = child_job_drained_poll, | ||
276 | .drained_end = child_job_drained_end, | ||
277 | .stay_at_node = true, | ||
278 | }; | ||
279 | diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c | ||
280 | index XXXXXXX..XXXXXXX 100644 | ||
281 | --- a/tests/test-bdrv-drain.c | ||
282 | +++ b/tests/test-bdrv-drain.c | ||
283 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn test_job_start(void *opaque) | ||
284 | |||
285 | job_transition_to_ready(&s->common.job); | ||
286 | while (!s->should_complete) { | ||
287 | - job_sleep_ns(&s->common.job, 100000); | ||
288 | + /* Avoid block_job_sleep_ns() because it marks the job as !busy. We | ||
289 | + * want to emulate some actual activity (probably some I/O) here so | ||
290 | + * that drain has to wait for this activity to stop. */ | ||
291 | + qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000); | ||
292 | + job_pause_point(&s->common.job); | ||
293 | } | ||
294 | |||
295 | job_defer_to_main_loop(&s->common.job, test_job_completed, NULL); | ||
296 | @@ -XXX,XX +XXX,XX @@ static void test_blockjob_common(enum drain_type drain_type) | ||
297 | |||
298 | g_assert_cmpint(job->job.pause_count, ==, 0); | ||
299 | g_assert_false(job->job.paused); | ||
300 | - g_assert_false(job->job.busy); /* We're in job_sleep_ns() */ | ||
301 | + g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */ | ||
302 | |||
303 | do_drain_begin(drain_type, src); | ||
304 | |||
305 | @@ -XXX,XX +XXX,XX @@ static void test_blockjob_common(enum drain_type drain_type) | ||
306 | } else { | ||
307 | g_assert_cmpint(job->job.pause_count, ==, 1); | ||
308 | } | ||
309 | - /* XXX We don't wait until the job is actually paused. Is this okay? */ | ||
310 | - /* g_assert_true(job->job.paused); */ | ||
311 | + g_assert_true(job->job.paused); | ||
312 | g_assert_false(job->job.busy); /* The job is paused */ | ||
313 | |||
314 | do_drain_end(drain_type, src); | ||
315 | |||
316 | g_assert_cmpint(job->job.pause_count, ==, 0); | ||
317 | g_assert_false(job->job.paused); | ||
318 | - g_assert_false(job->job.busy); /* We're in job_sleep_ns() */ | ||
319 | + g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */ | ||
320 | |||
321 | do_drain_begin(drain_type, target); | ||
322 | |||
323 | @@ -XXX,XX +XXX,XX @@ static void test_blockjob_common(enum drain_type drain_type) | ||
324 | } else { | ||
325 | g_assert_cmpint(job->job.pause_count, ==, 1); | ||
326 | } | ||
327 | - /* XXX We don't wait until the job is actually paused. Is this okay? */ | ||
328 | - /* g_assert_true(job->job.paused); */ | ||
329 | + g_assert_true(job->job.paused); | ||
330 | g_assert_false(job->job.busy); /* The job is paused */ | ||
331 | |||
332 | do_drain_end(drain_type, target); | ||
333 | |||
334 | g_assert_cmpint(job->job.pause_count, ==, 0); | ||
335 | g_assert_false(job->job.paused); | ||
336 | - g_assert_false(job->job.busy); /* We're in job_sleep_ns() */ | ||
337 | + g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */ | ||
338 | |||
339 | ret = job_complete_sync(&job->job, &error_abort); | ||
340 | g_assert_cmpint(ret, ==, 0); | ||
341 | -- | 40 | -- |
342 | 2.13.6 | 41 | 2.20.1 |
343 | 42 | ||
344 | 43 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | For bdrv_drain(), recursively waiting for child node requests is | ||
2 | pointless because we didn't quiesce their parents, so new requests could | ||
3 | come in anyway. Letting the function work only on a single node makes it | ||
4 | more consistent. | ||
5 | 1 | ||
6 | For subtree drains and drain_all, we already have the recursion in | ||
7 | bdrv_do_drained_begin(), so the extra recursion doesn't add anything | ||
8 | either. | ||
9 | |||
10 | Remove the useless code. | ||
11 | |||
12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
13 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
14 | --- | ||
15 | block/io.c | 36 +++--------------------------------- | ||
16 | 1 file changed, 3 insertions(+), 33 deletions(-) | ||
17 | |||
18 | diff --git a/block/io.c b/block/io.c | ||
19 | index XXXXXXX..XXXXXXX 100644 | ||
20 | --- a/block/io.c | ||
21 | +++ b/block/io.c | ||
22 | @@ -XXX,XX +XXX,XX @@ static bool bdrv_drain_poll_top_level(BlockDriverState *bs, | ||
23 | return bdrv_drain_poll(bs, ignore_parent); | ||
24 | } | ||
25 | |||
26 | -static bool bdrv_drain_recurse(BlockDriverState *bs, BdrvChild *parent) | ||
27 | -{ | ||
28 | - BdrvChild *child, *tmp; | ||
29 | - bool waited; | ||
30 | - | ||
31 | - /* Wait for drained requests to finish */ | ||
32 | - waited = BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, parent)); | ||
33 | - | ||
34 | - QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) { | ||
35 | - BlockDriverState *bs = child->bs; | ||
36 | - bool in_main_loop = | ||
37 | - qemu_get_current_aio_context() == qemu_get_aio_context(); | ||
38 | - assert(bs->refcnt > 0); | ||
39 | - if (in_main_loop) { | ||
40 | - /* In case the recursive bdrv_drain_recurse processes a | ||
41 | - * block_job_defer_to_main_loop BH and modifies the graph, | ||
42 | - * let's hold a reference to bs until we are done. | ||
43 | - * | ||
44 | - * IOThread doesn't have such a BH, and it is not safe to call | ||
45 | - * bdrv_unref without BQL, so skip doing it there. | ||
46 | - */ | ||
47 | - bdrv_ref(bs); | ||
48 | - } | ||
49 | - waited |= bdrv_drain_recurse(bs, child); | ||
50 | - if (in_main_loop) { | ||
51 | - bdrv_unref(bs); | ||
52 | - } | ||
53 | - } | ||
54 | - | ||
55 | - return waited; | ||
56 | -} | ||
57 | - | ||
58 | static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive, | ||
59 | BdrvChild *parent); | ||
60 | static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive, | ||
61 | @@ -XXX,XX +XXX,XX @@ void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive, | ||
62 | |||
63 | bdrv_parent_drained_begin(bs, parent); | ||
64 | bdrv_drain_invoke(bs, true); | ||
65 | - bdrv_drain_recurse(bs, parent); | ||
66 | + | ||
67 | + /* Wait for drained requests to finish */ | ||
68 | + BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, parent)); | ||
69 | |||
70 | if (recursive) { | ||
71 | bs->recursive_quiesce_counter++; | ||
72 | -- | ||
73 | 2.13.6 | ||
74 | |||
75 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | If bdrv_do_drained_begin() polls during its subtree recursion, the graph | ||
2 | can change and mess up the bs->children iteration. Test that this | ||
3 | doesn't happen. | ||
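The problematic pattern is, in a sketch:

    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        /* If the recursive drain below polls, a BH can modify the graph:
         * both 'child' and the saved 'next' pointer may be detached or
         * freed, leaving the iteration with dangling pointers. */
        bdrv_do_drained_begin(child->bs, true, NULL);
    }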
4 | 1 | ||
5 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
6 | --- | ||
7 | tests/test-bdrv-drain.c | 38 +++++++++++++++++++++++++++++--------- | ||
8 | 1 file changed, 29 insertions(+), 9 deletions(-) | ||
9 | |||
10 | diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/tests/test-bdrv-drain.c | ||
13 | +++ b/tests/test-bdrv-drain.c | ||
14 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn test_co_delete_by_drain(void *opaque) | ||
15 | * If @detach_instead_of_delete is set, the BDS is not going to be | ||
16 | * deleted but will only detach all of its children. | ||
17 | */ | ||
18 | -static void do_test_delete_by_drain(bool detach_instead_of_delete) | ||
19 | +static void do_test_delete_by_drain(bool detach_instead_of_delete, | ||
20 | + enum drain_type drain_type) | ||
21 | { | ||
22 | BlockBackend *blk; | ||
23 | BlockDriverState *bs, *child_bs, *null_bs; | ||
24 | @@ -XXX,XX +XXX,XX @@ static void do_test_delete_by_drain(bool detach_instead_of_delete) | ||
25 | * test_co_delete_by_drain() resuming. Thus, @bs will be deleted | ||
26 | * and the coroutine will exit while this drain operation is still | ||
27 | * in progress. */ | ||
28 | - bdrv_ref(child_bs); | ||
29 | - bdrv_drain(child_bs); | ||
30 | - bdrv_unref(child_bs); | ||
31 | + switch (drain_type) { | ||
32 | + case BDRV_DRAIN: | ||
33 | + bdrv_ref(child_bs); | ||
34 | + bdrv_drain(child_bs); | ||
35 | + bdrv_unref(child_bs); | ||
36 | + break; | ||
37 | + case BDRV_SUBTREE_DRAIN: | ||
38 | + /* Would have to ref/unref bs here for !detach_instead_of_delete, but | ||
39 | + * then the whole test becomes pointless because the graph changes | ||
40 | + * don't occur during the drain any more. */ | ||
41 | + assert(detach_instead_of_delete); | ||
42 | + bdrv_subtree_drained_begin(bs); | ||
43 | + bdrv_subtree_drained_end(bs); | ||
44 | + break; | ||
45 | + default: | ||
46 | + g_assert_not_reached(); | ||
47 | + } | ||
48 | |||
49 | while (!dbdd.done) { | ||
50 | aio_poll(qemu_get_aio_context(), true); | ||
51 | @@ -XXX,XX +XXX,XX @@ static void do_test_delete_by_drain(bool detach_instead_of_delete) | ||
52 | } | ||
53 | } | ||
54 | |||
55 | - | ||
56 | static void test_delete_by_drain(void) | ||
57 | { | ||
58 | - do_test_delete_by_drain(false); | ||
59 | + do_test_delete_by_drain(false, BDRV_DRAIN); | ||
60 | } | ||
61 | |||
62 | static void test_detach_by_drain(void) | ||
63 | { | ||
64 | - do_test_delete_by_drain(true); | ||
65 | + do_test_delete_by_drain(true, BDRV_DRAIN); | ||
66 | +} | ||
67 | + | ||
68 | +static void test_detach_by_drain_subtree(void) | ||
69 | +{ | ||
70 | + do_test_delete_by_drain(true, BDRV_SUBTREE_DRAIN); | ||
71 | } | ||
72 | |||
73 | |||
74 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv) | ||
75 | g_test_add_func("/bdrv-drain/blockjob/drain_subtree", | ||
76 | test_blockjob_drain_subtree); | ||
77 | |||
78 | - g_test_add_func("/bdrv-drain/deletion", test_delete_by_drain); | ||
79 | - g_test_add_func("/bdrv-drain/detach", test_detach_by_drain); | ||
80 | + g_test_add_func("/bdrv-drain/deletion/drain", test_delete_by_drain); | ||
81 | + g_test_add_func("/bdrv-drain/detach/drain", test_detach_by_drain); | ||
82 | + g_test_add_func("/bdrv-drain/detach/drain_subtree", test_detach_by_drain_subtree); | ||
83 | |||
84 | ret = g_test_run(); | ||
85 | qemu_event_destroy(&done_event); | ||
86 | -- | ||
87 | 2.13.6 | ||
88 | |||
89 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | bdrv_do_drained_begin() is only safe if we have a single | ||
2 | BDRV_POLL_WHILE() after quiescing all affected nodes. We cannot allow | ||
3 | parent callbacks to introduce a nested polling loop that could cause | ||
4 | graph changes while we're traversing the graph. | ||
5 | 1 | ||
6 | Split off bdrv_do_drained_begin_quiesce(), which only quiesces a single | ||
7 | node without waiting for its requests to complete. These requests will | ||
8 | be waited for in the BDRV_POLL_WHILE() call down the call chain. | ||
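The begin path thus becomes "quiesce everything first, poll once at the
end", along these lines (a sketch, not the actual code):

    bdrv_do_drained_begin_quiesce(bs, parent);   /* no polling in here */
    if (recursive) {
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            /* recurse into the children, still without polling */
        }
    }
    if (poll) {
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, parent));
    }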
9 | |||
10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
11 | --- | ||
12 | include/block/block.h | 9 +++++++++ | ||
13 | block.c | 2 +- | ||
14 | block/io.c | 24 ++++++++++++++++-------- | ||
15 | 3 files changed, 26 insertions(+), 9 deletions(-) | ||
16 | |||
17 | diff --git a/include/block/block.h b/include/block/block.h | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/include/block/block.h | ||
20 | +++ b/include/block/block.h | ||
21 | @@ -XXX,XX +XXX,XX @@ bool bdrv_drain_poll(BlockDriverState *bs, bool recursive, | ||
22 | void bdrv_drained_begin(BlockDriverState *bs); | ||
23 | |||
24 | /** | ||
25 | + * bdrv_do_drained_begin_quiesce: | ||
26 | + * | ||
27 | + * Quiesces a BDS like bdrv_drained_begin(), but does not wait for already | ||
28 | + * running requests to complete. | ||
29 | + */ | ||
30 | +void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, | ||
31 | + BdrvChild *parent); | ||
32 | + | ||
33 | +/** | ||
34 | * Like bdrv_drained_begin, but recursively begins a quiesced section for | ||
35 | * exclusive access to all child nodes as well. | ||
36 | */ | ||
37 | diff --git a/block.c b/block.c | ||
38 | index XXXXXXX..XXXXXXX 100644 | ||
39 | --- a/block.c | ||
40 | +++ b/block.c | ||
41 | @@ -XXX,XX +XXX,XX @@ static char *bdrv_child_get_parent_desc(BdrvChild *c) | ||
42 | static void bdrv_child_cb_drained_begin(BdrvChild *child) | ||
43 | { | ||
44 | BlockDriverState *bs = child->opaque; | ||
45 | - bdrv_drained_begin(bs); | ||
46 | + bdrv_do_drained_begin_quiesce(bs, NULL); | ||
47 | } | ||
48 | |||
49 | static bool bdrv_child_cb_drained_poll(BdrvChild *child) | ||
50 | diff --git a/block/io.c b/block/io.c | ||
51 | index XXXXXXX..XXXXXXX 100644 | ||
52 | --- a/block/io.c | ||
53 | +++ b/block/io.c | ||
54 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs, | ||
55 | assert(data.done); | ||
56 | } | ||
57 | |||
58 | -void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive, | ||
59 | - BdrvChild *parent, bool poll) | ||
60 | +void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, | ||
61 | + BdrvChild *parent) | ||
62 | { | ||
63 | - BdrvChild *child, *next; | ||
64 | - | ||
65 | - if (qemu_in_coroutine()) { | ||
66 | - bdrv_co_yield_to_drain(bs, true, recursive, parent, poll); | ||
67 | - return; | ||
68 | - } | ||
69 | + assert(!qemu_in_coroutine()); | ||
70 | |||
71 | /* Stop things in parent-to-child order */ | ||
72 | if (atomic_fetch_inc(&bs->quiesce_counter) == 0) { | ||
73 | @@ -XXX,XX +XXX,XX @@ void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive, | ||
74 | |||
75 | bdrv_parent_drained_begin(bs, parent); | ||
76 | bdrv_drain_invoke(bs, true); | ||
77 | +} | ||
78 | + | ||
79 | +static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive, | ||
80 | + BdrvChild *parent, bool poll) | ||
81 | +{ | ||
82 | + BdrvChild *child, *next; | ||
83 | + | ||
84 | + if (qemu_in_coroutine()) { | ||
85 | + bdrv_co_yield_to_drain(bs, true, recursive, parent, poll); | ||
86 | + return; | ||
87 | + } | ||
88 | + | ||
89 | + bdrv_do_drained_begin_quiesce(bs, parent); | ||
90 | |||
91 | if (recursive) { | ||
92 | bs->recursive_quiesce_counter++; | ||
93 | -- | ||
94 | 2.13.6 | ||
95 | |||
96 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
2 | --- | ||
3 | tests/test-bdrv-drain.c | 130 ++++++++++++++++++++++++++++++++++++++++++++++++ | ||
4 | 1 file changed, 130 insertions(+) | ||
5 | 1 | ||
6 | diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c | ||
7 | index XXXXXXX..XXXXXXX 100644 | ||
8 | --- a/tests/test-bdrv-drain.c | ||
9 | +++ b/tests/test-bdrv-drain.c | ||
10 | @@ -XXX,XX +XXX,XX @@ static void test_detach_by_drain_subtree(void) | ||
11 | } | ||
12 | |||
13 | |||
14 | +struct detach_by_parent_data { | ||
15 | + BlockDriverState *parent_b; | ||
16 | + BdrvChild *child_b; | ||
17 | + BlockDriverState *c; | ||
18 | + BdrvChild *child_c; | ||
19 | +}; | ||
20 | + | ||
21 | +static void detach_by_parent_aio_cb(void *opaque, int ret) | ||
22 | +{ | ||
23 | + struct detach_by_parent_data *data = opaque; | ||
24 | + | ||
25 | + g_assert_cmpint(ret, ==, 0); | ||
26 | + bdrv_unref_child(data->parent_b, data->child_b); | ||
27 | + | ||
28 | + bdrv_ref(data->c); | ||
29 | + data->child_c = bdrv_attach_child(data->parent_b, data->c, "PB-C", | ||
30 | + &child_file, &error_abort); | ||
31 | +} | ||
32 | + | ||
33 | +/* | ||
34 | + * Initial graph: | ||
35 | + * | ||
36 | + * PA PB | ||
37 | + * \ / \ | ||
38 | + * A B C | ||
39 | + * | ||
40 | + * PA has a pending read request whose callback changes the child nodes of PB: | ||
41 | + * It removes B and adds C instead. The subtree of PB is drained, which will | ||
42 | + * indirectly drain the read request, too. | ||
43 | + */ | ||
44 | +static void test_detach_by_parent_cb(void) | ||
45 | +{ | ||
46 | + BlockBackend *blk; | ||
47 | + BlockDriverState *parent_a, *parent_b, *a, *b, *c; | ||
48 | + BdrvChild *child_a, *child_b; | ||
49 | + BlockAIOCB *acb; | ||
50 | + struct detach_by_parent_data data; | ||
51 | + | ||
52 | + QEMUIOVector qiov; | ||
53 | + struct iovec iov = { | ||
54 | + .iov_base = NULL, | ||
55 | + .iov_len = 0, | ||
56 | + }; | ||
57 | + qemu_iovec_init_external(&qiov, &iov, 1); | ||
58 | + | ||
59 | + /* Create all involved nodes */ | ||
60 | + parent_a = bdrv_new_open_driver(&bdrv_test, "parent-a", BDRV_O_RDWR, | ||
61 | + &error_abort); | ||
62 | + parent_b = bdrv_new_open_driver(&bdrv_test, "parent-b", 0, | ||
63 | + &error_abort); | ||
64 | + | ||
65 | + a = bdrv_new_open_driver(&bdrv_test, "a", BDRV_O_RDWR, &error_abort); | ||
66 | + b = bdrv_new_open_driver(&bdrv_test, "b", BDRV_O_RDWR, &error_abort); | ||
67 | + c = bdrv_new_open_driver(&bdrv_test, "c", BDRV_O_RDWR, &error_abort); | ||
68 | + | ||
69 | + /* blk is a BB for parent-a */ | ||
70 | + blk = blk_new(BLK_PERM_ALL, BLK_PERM_ALL); | ||
71 | + blk_insert_bs(blk, parent_a, &error_abort); | ||
72 | + bdrv_unref(parent_a); | ||
73 | + | ||
74 | + /* Set child relationships */ | ||
75 | + bdrv_ref(b); | ||
76 | + bdrv_ref(a); | ||
77 | + child_b = bdrv_attach_child(parent_b, b, "PB-B", &child_file, &error_abort); | ||
78 | + child_a = bdrv_attach_child(parent_b, a, "PB-A", &child_backing, &error_abort); | ||
79 | + | ||
80 | + bdrv_ref(a); | ||
81 | + bdrv_attach_child(parent_a, a, "PA-A", &child_file, &error_abort); | ||
82 | + | ||
83 | + g_assert_cmpint(parent_a->refcnt, ==, 1); | ||
84 | + g_assert_cmpint(parent_b->refcnt, ==, 1); | ||
85 | + g_assert_cmpint(a->refcnt, ==, 3); | ||
86 | + g_assert_cmpint(b->refcnt, ==, 2); | ||
87 | + g_assert_cmpint(c->refcnt, ==, 1); | ||
88 | + | ||
89 | + g_assert(QLIST_FIRST(&parent_b->children) == child_a); | ||
90 | + g_assert(QLIST_NEXT(child_a, next) == child_b); | ||
91 | + g_assert(QLIST_NEXT(child_b, next) == NULL); | ||
92 | + | ||
93 | + /* Start the evil write request */ | ||
94 | + data = (struct detach_by_parent_data) { | ||
95 | + .parent_b = parent_b, | ||
96 | + .child_b = child_b, | ||
97 | + .c = c, | ||
98 | + }; | ||
99 | + acb = blk_aio_preadv(blk, 0, &qiov, 0, detach_by_parent_aio_cb, &data); | ||
100 | + g_assert(acb != NULL); | ||
101 | + | ||
102 | + /* Drain and check the expected result */ | ||
103 | + bdrv_subtree_drained_begin(parent_b); | ||
104 | + | ||
105 | + g_assert(data.child_c != NULL); | ||
106 | + | ||
107 | + g_assert_cmpint(parent_a->refcnt, ==, 1); | ||
108 | + g_assert_cmpint(parent_b->refcnt, ==, 1); | ||
109 | + g_assert_cmpint(a->refcnt, ==, 3); | ||
110 | + g_assert_cmpint(b->refcnt, ==, 1); | ||
111 | + g_assert_cmpint(c->refcnt, ==, 2); | ||
112 | + | ||
113 | + g_assert(QLIST_FIRST(&parent_b->children) == data.child_c); | ||
114 | + g_assert(QLIST_NEXT(data.child_c, next) == child_a); | ||
115 | + g_assert(QLIST_NEXT(child_a, next) == NULL); | ||
116 | + | ||
117 | + g_assert_cmpint(parent_a->quiesce_counter, ==, 1); | ||
118 | + g_assert_cmpint(parent_b->quiesce_counter, ==, 1); | ||
119 | + g_assert_cmpint(a->quiesce_counter, ==, 1); | ||
120 | + g_assert_cmpint(b->quiesce_counter, ==, 0); | ||
121 | + g_assert_cmpint(c->quiesce_counter, ==, 1); | ||
122 | + | ||
123 | + bdrv_subtree_drained_end(parent_b); | ||
124 | + | ||
125 | + bdrv_unref(parent_b); | ||
126 | + blk_unref(blk); | ||
127 | + | ||
128 | + /* XXX Once bdrv_close() unref's children instead of just detaching them, | ||
129 | + * this won't be necessary any more. */ | ||
130 | + bdrv_unref(a); | ||
131 | + bdrv_unref(a); | ||
132 | + bdrv_unref(c); | ||
133 | + | ||
134 | + g_assert_cmpint(a->refcnt, ==, 1); | ||
135 | + g_assert_cmpint(b->refcnt, ==, 1); | ||
136 | + g_assert_cmpint(c->refcnt, ==, 1); | ||
137 | + bdrv_unref(a); | ||
138 | + bdrv_unref(b); | ||
139 | + bdrv_unref(c); | ||
140 | +} | ||
141 | + | ||
142 | + | ||
143 | int main(int argc, char **argv) | ||
144 | { | ||
145 | int ret; | ||
146 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv) | ||
147 | g_test_add_func("/bdrv-drain/deletion/drain", test_delete_by_drain); | ||
148 | g_test_add_func("/bdrv-drain/detach/drain", test_detach_by_drain); | ||
149 | g_test_add_func("/bdrv-drain/detach/drain_subtree", test_detach_by_drain_subtree); | ||
150 | + g_test_add_func("/bdrv-drain/detach/parent_cb", test_detach_by_parent_cb); | ||
151 | |||
152 | ret = g_test_run(); | ||
153 | qemu_event_destroy(&done_event); | ||
154 | -- | ||
155 | 2.13.6 | ||
156 | |||
Deleted patch | |||
---|---|---|---|
1 | This adds a test case that goes wrong if bdrv_drain_invoke() calls | ||
2 | aio_poll(). | ||
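As an illustration of the hazard, here is a minimal standalone C model (all names below are made up for the sketch; none of it is QEMU API): a per-node quiesce callback schedules deferred work that detaches another node, and whether that work runs inside the callback or in a single final poll loop decides whether the iteration sees a half-changed graph.

    #include <stdbool.h>
    #include <stdio.h>

    static bool node_present[2] = { true, true };
    static bool bh_pending;

    /* Stands in for aio_poll(): runs whatever bottom half is pending. */
    static void run_pending_bhs(void)
    {
        if (bh_pending) {
            bh_pending = false;
            node_present[1] = false;    /* the "graph change": drop node 1 */
        }
    }

    /* Stands in for bdrv_drain_invoke() on one node: the driver callback
     * schedules a BH; polling here lets it run mid-iteration. */
    static void quiesce_node(int i, bool poll_in_callback)
    {
        (void)i;
        bh_pending = true;
        if (poll_in_callback) {
            run_pending_bhs();
        }
    }

    static void drain(bool poll_in_callback)
    {
        for (int i = 0; i < 2; i++) {
            if (!node_present[i]) {
                printf("  node %d vanished mid-iteration\n", i);
                continue;
            }
            quiesce_node(i, poll_in_callback);
        }
        run_pending_bhs();    /* the single final poll is always safe */
    }

    int main(void)
    {
        printf("polling inside the callback:\n");
        drain(true);

        node_present[1] = true;
        printf("polling once at the end:\n");
        drain(false);
        return 0;
    }

In the first variant the loop trips over a node that was detached under its feet; in the second, all graph changes happen only after the whole quiesce pass, which is what the test below checks for the real code.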
3 | 1 | ||
4 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
5 | --- | ||
6 | tests/test-bdrv-drain.c | 102 +++++++++++++++++++++++++++++++++++++++++------- | ||
7 | 1 file changed, 88 insertions(+), 14 deletions(-) | ||
8 | |||
9 | diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/tests/test-bdrv-drain.c | ||
12 | +++ b/tests/test-bdrv-drain.c | ||
13 | @@ -XXX,XX +XXX,XX @@ static QemuEvent done_event; | ||
14 | typedef struct BDRVTestState { | ||
15 | int drain_count; | ||
16 | AioContext *bh_indirection_ctx; | ||
17 | + bool sleep_in_drain_begin; | ||
18 | } BDRVTestState; | ||
19 | |||
20 | static void coroutine_fn bdrv_test_co_drain_begin(BlockDriverState *bs) | ||
21 | { | ||
22 | BDRVTestState *s = bs->opaque; | ||
23 | s->drain_count++; | ||
24 | + if (s->sleep_in_drain_begin) { | ||
25 | + qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000); | ||
26 | + } | ||
27 | } | ||
28 | |||
29 | static void coroutine_fn bdrv_test_co_drain_end(BlockDriverState *bs) | ||
30 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs, | ||
31 | return 0; | ||
32 | } | ||
33 | |||
34 | +static void bdrv_test_child_perm(BlockDriverState *bs, BdrvChild *c, | ||
35 | + const BdrvChildRole *role, | ||
36 | + BlockReopenQueue *reopen_queue, | ||
37 | + uint64_t perm, uint64_t shared, | ||
38 | + uint64_t *nperm, uint64_t *nshared) | ||
39 | +{ | ||
40 | + /* bdrv_format_default_perms() accepts only these two, so disguise | ||
41 | + * detach_by_driver_cb_role as one of them. */ | ||
42 | + if (role != &child_file && role != &child_backing) { | ||
43 | + role = &child_file; | ||
44 | + } | ||
45 | + | ||
46 | + bdrv_format_default_perms(bs, c, role, reopen_queue, perm, shared, | ||
47 | + nperm, nshared); | ||
48 | +} | ||
49 | + | ||
50 | static BlockDriver bdrv_test = { | ||
51 | .format_name = "test", | ||
52 | .instance_size = sizeof(BDRVTestState), | ||
53 | @@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_test = { | ||
54 | .bdrv_co_drain_begin = bdrv_test_co_drain_begin, | ||
55 | .bdrv_co_drain_end = bdrv_test_co_drain_end, | ||
56 | |||
57 | - .bdrv_child_perm = bdrv_format_default_perms, | ||
58 | + .bdrv_child_perm = bdrv_test_child_perm, | ||
59 | }; | ||
60 | |||
61 | static void aio_ret_cb(void *opaque, int ret) | ||
62 | @@ -XXX,XX +XXX,XX @@ struct detach_by_parent_data { | ||
63 | BdrvChild *child_b; | ||
64 | BlockDriverState *c; | ||
65 | BdrvChild *child_c; | ||
66 | + bool by_parent_cb; | ||
67 | }; | ||
68 | +static struct detach_by_parent_data detach_by_parent_data; | ||
69 | |||
70 | -static void detach_by_parent_aio_cb(void *opaque, int ret) | ||
71 | +static void detach_indirect_bh(void *opaque) | ||
72 | { | ||
73 | struct detach_by_parent_data *data = opaque; | ||
74 | |||
75 | - g_assert_cmpint(ret, ==, 0); | ||
76 | bdrv_unref_child(data->parent_b, data->child_b); | ||
77 | |||
78 | bdrv_ref(data->c); | ||
79 | @@ -XXX,XX +XXX,XX @@ static void detach_by_parent_aio_cb(void *opaque, int ret) | ||
80 | &child_file, &error_abort); | ||
81 | } | ||
82 | |||
83 | +static void detach_by_parent_aio_cb(void *opaque, int ret) | ||
84 | +{ | ||
85 | + struct detach_by_parent_data *data = &detach_by_parent_data; | ||
86 | + | ||
87 | + g_assert_cmpint(ret, ==, 0); | ||
88 | + if (data->by_parent_cb) { | ||
89 | + detach_indirect_bh(data); | ||
90 | + } | ||
91 | +} | ||
92 | + | ||
93 | +static void detach_by_driver_cb_drained_begin(BdrvChild *child) | ||
94 | +{ | ||
95 | + aio_bh_schedule_oneshot(qemu_get_current_aio_context(), | ||
96 | + detach_indirect_bh, &detach_by_parent_data); | ||
97 | + child_file.drained_begin(child); | ||
98 | +} | ||
99 | + | ||
100 | +static BdrvChildRole detach_by_driver_cb_role; | ||
101 | + | ||
102 | /* | ||
103 | * Initial graph: | ||
104 | * | ||
105 | @@ -XXX,XX +XXX,XX @@ static void detach_by_parent_aio_cb(void *opaque, int ret) | ||
106 | * \ / \ | ||
107 | * A B C | ||
108 | * | ||
109 | - * PA has a pending write request whose callback changes the child nodes of PB: | ||
110 | - * It removes B and adds C instead. The subtree of PB is drained, which will | ||
111 | - * indirectly drain the write request, too. | ||
112 | + * by_parent_cb == true: Test that parent callbacks don't poll | ||
113 | + * | ||
114 | + * PA has a pending write request whose callback changes the child nodes of | ||
115 | + * PB: It removes B and adds C instead. The subtree of PB is drained, which | ||
116 | + * will indirectly drain the write request, too. | ||
117 | + * | ||
118 | + * by_parent_cb == false: Test that bdrv_drain_invoke() doesn't poll | ||
119 | + * | ||
120 | + * PA's BdrvChildRole has a .drained_begin callback that schedules a BH | ||
121 | + * that does the same graph change. If bdrv_drain_invoke() calls it, the | ||
122 | + * state is messed up, but if it is only polled in the single | ||
123 | + * BDRV_POLL_WHILE() at the end of the drain, this should work fine. | ||
124 | */ | ||
125 | -static void test_detach_by_parent_cb(void) | ||
126 | +static void test_detach_indirect(bool by_parent_cb) | ||
127 | { | ||
128 | BlockBackend *blk; | ||
129 | BlockDriverState *parent_a, *parent_b, *a, *b, *c; | ||
130 | BdrvChild *child_a, *child_b; | ||
131 | BlockAIOCB *acb; | ||
132 | - struct detach_by_parent_data data; | ||
133 | |||
134 | QEMUIOVector qiov; | ||
135 | struct iovec iov = { | ||
136 | @@ -XXX,XX +XXX,XX @@ static void test_detach_by_parent_cb(void) | ||
137 | }; | ||
138 | qemu_iovec_init_external(&qiov, &iov, 1); | ||
139 | |||
140 | + if (!by_parent_cb) { | ||
141 | + detach_by_driver_cb_role = child_file; | ||
142 | + detach_by_driver_cb_role.drained_begin = | ||
143 | + detach_by_driver_cb_drained_begin; | ||
144 | + } | ||
145 | + | ||
146 | /* Create all involved nodes */ | ||
147 | parent_a = bdrv_new_open_driver(&bdrv_test, "parent-a", BDRV_O_RDWR, | ||
148 | &error_abort); | ||
149 | @@ -XXX,XX +XXX,XX @@ static void test_detach_by_parent_cb(void) | ||
150 | blk_insert_bs(blk, parent_a, &error_abort); | ||
151 | bdrv_unref(parent_a); | ||
152 | |||
153 | + /* If we want to get bdrv_drain_invoke() to call aio_poll(), the driver | ||
154 | + * callback must not return immediately. */ | ||
155 | + if (!by_parent_cb) { | ||
156 | + BDRVTestState *s = parent_a->opaque; | ||
157 | + s->sleep_in_drain_begin = true; | ||
158 | + } | ||
159 | + | ||
160 | /* Set child relationships */ | ||
161 | bdrv_ref(b); | ||
162 | bdrv_ref(a); | ||
163 | @@ -XXX,XX +XXX,XX @@ static void test_detach_by_parent_cb(void) | ||
164 | child_a = bdrv_attach_child(parent_b, a, "PB-A", &child_backing, &error_abort); | ||
165 | |||
166 | bdrv_ref(a); | ||
167 | - bdrv_attach_child(parent_a, a, "PA-A", &child_file, &error_abort); | ||
168 | + bdrv_attach_child(parent_a, a, "PA-A", | ||
169 | + by_parent_cb ? &child_file : &detach_by_driver_cb_role, | ||
170 | + &error_abort); | ||
171 | |||
172 | g_assert_cmpint(parent_a->refcnt, ==, 1); | ||
173 | g_assert_cmpint(parent_b->refcnt, ==, 1); | ||
174 | @@ -XXX,XX +XXX,XX @@ static void test_detach_by_parent_cb(void) | ||
175 | g_assert(QLIST_NEXT(child_b, next) == NULL); | ||
176 | |||
177 | /* Start the evil write request */ | ||
178 | - data = (struct detach_by_parent_data) { | ||
179 | + detach_by_parent_data = (struct detach_by_parent_data) { | ||
180 | .parent_b = parent_b, | ||
181 | .child_b = child_b, | ||
182 | .c = c, | ||
183 | + .by_parent_cb = by_parent_cb, | ||
184 | }; | ||
185 | - acb = blk_aio_preadv(blk, 0, &qiov, 0, detach_by_parent_aio_cb, &data); | ||
186 | + acb = blk_aio_preadv(blk, 0, &qiov, 0, detach_by_parent_aio_cb, NULL); | ||
187 | g_assert(acb != NULL); | ||
188 | |||
189 | /* Drain and check the expected result */ | ||
190 | bdrv_subtree_drained_begin(parent_b); | ||
191 | |||
192 | - g_assert(data.child_c != NULL); | ||
193 | + g_assert(detach_by_parent_data.child_c != NULL); | ||
194 | |||
195 | g_assert_cmpint(parent_a->refcnt, ==, 1); | ||
196 | g_assert_cmpint(parent_b->refcnt, ==, 1); | ||
197 | @@ -XXX,XX +XXX,XX @@ static void test_detach_by_parent_cb(void) | ||
198 | g_assert_cmpint(b->refcnt, ==, 1); | ||
199 | g_assert_cmpint(c->refcnt, ==, 2); | ||
200 | |||
201 | - g_assert(QLIST_FIRST(&parent_b->children) == data.child_c); | ||
202 | - g_assert(QLIST_NEXT(data.child_c, next) == child_a); | ||
203 | + g_assert(QLIST_FIRST(&parent_b->children) == detach_by_parent_data.child_c); | ||
204 | + g_assert(QLIST_NEXT(detach_by_parent_data.child_c, next) == child_a); | ||
205 | g_assert(QLIST_NEXT(child_a, next) == NULL); | ||
206 | |||
207 | g_assert_cmpint(parent_a->quiesce_counter, ==, 1); | ||
208 | @@ -XXX,XX +XXX,XX @@ static void test_detach_by_parent_cb(void) | ||
209 | bdrv_unref(c); | ||
210 | } | ||
211 | |||
212 | +static void test_detach_by_parent_cb(void) | ||
213 | +{ | ||
214 | + test_detach_indirect(true); | ||
215 | +} | ||
216 | + | ||
217 | +static void test_detach_by_driver_cb(void) | ||
218 | +{ | ||
219 | + test_detach_indirect(false); | ||
220 | +} | ||
221 | |||
222 | int main(int argc, char **argv) | ||
223 | { | ||
224 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv) | ||
225 | g_test_add_func("/bdrv-drain/detach/drain", test_detach_by_drain); | ||
226 | g_test_add_func("/bdrv-drain/detach/drain_subtree", test_detach_by_drain_subtree); | ||
227 | g_test_add_func("/bdrv-drain/detach/parent_cb", test_detach_by_parent_cb); | ||
228 | + g_test_add_func("/bdrv-drain/detach/driver_cb", test_detach_by_driver_cb); | ||
229 | |||
230 | ret = g_test_run(); | ||
231 | qemu_event_destroy(&done_event); | ||
232 | -- | ||
233 | 2.13.6 | ||
234 | |||
Deleted patch | |||
---|---|---|---|
1 | bdrv_drain_all() wants to have a single polling loop for draining the | ||
2 | in-flight requests of all nodes. This means that the AIO_WAIT_WHILE() | ||
3 | condition relies on activity in multiple AioContexts, which is polled | ||
4 | from the mainloop context. We must therefore call AIO_WAIT_WHILE() from | ||
5 | the mainloop thread and use the AioWait notification mechanism. | ||
6 | 1 | ||
7 | Just randomly picking the AioContext of any non-mainloop thread would | ||
8 | work, but instead of bothering to find such a context in the caller, we | ||
9 | can just as well accept NULL for ctx. | ||
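Modelled as a plain C function with stub types standing in for AioContext, aio_poll() and the context lock (none of this is the real API), the resulting control flow looks roughly like this:

    #include <stdbool.h>
    #include <stddef.h>

    /* Stubs; only the NULL-ctx control flow is the point of this sketch. */
    typedef struct Ctx Ctx;
    static Ctx *main_ctx;
    static bool in_home_thread(Ctx *ctx) { (void)ctx; return true; }
    static void poll_ctx(Ctx *ctx, bool blocking) { (void)ctx; (void)blocking; }
    static void ctx_acquire(Ctx *ctx) { (void)ctx; }
    static void ctx_release(Ctx *ctx) { (void)ctx; }

    static bool wait_while(Ctx *ctx, bool (*cond)(void))
    {
        bool waited = false;

        if (ctx && in_home_thread(ctx)) {
            /* Single-context case: poll that context directly. */
            while (cond()) {
                poll_ctx(ctx, true);
                waited = true;
            }
        } else {
            /* ctx == NULL: the condition spans several contexts, none of
             * which the caller has locked, so poll the main loop and only
             * release/acquire a lock when a context was actually given. */
            while (cond()) {
                if (ctx) {
                    ctx_release(ctx);
                }
                poll_ctx(main_ctx, true);
                if (ctx) {
                    ctx_acquire(ctx);
                }
                waited = true;
            }
        }
        return waited;
    }

    static int budget = 3;
    static bool still_busy(void) { return budget-- > 0; }

    int main(void)
    {
        return wait_while(NULL, still_busy) ? 0 : 1;
    }

The key point is the ctx == NULL path: it never touches a per-context lock, which matches the drain_all use case where the caller holds none.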
10 | |||
11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
12 | --- | ||
13 | include/block/aio-wait.h | 13 +++++++++---- | ||
14 | 1 file changed, 9 insertions(+), 4 deletions(-) | ||
15 | |||
16 | diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/include/block/aio-wait.h | ||
19 | +++ b/include/block/aio-wait.h | ||
20 | @@ -XXX,XX +XXX,XX @@ typedef struct { | ||
21 | /** | ||
22 | * AIO_WAIT_WHILE: | ||
23 | * @wait: the aio wait object | ||
24 | - * @ctx: the aio context | ||
25 | + * @ctx: the aio context, or NULL if multiple aio contexts (for which the | ||
26 | + * caller does not hold a lock) are involved in the polling condition. | ||
27 | * @cond: wait while this conditional expression is true | ||
28 | * | ||
29 | * Wait while a condition is true. Use this to implement synchronous | ||
30 | @@ -XXX,XX +XXX,XX @@ typedef struct { | ||
31 | bool waited_ = false; \ | ||
32 | AioWait *wait_ = (wait); \ | ||
33 | AioContext *ctx_ = (ctx); \ | ||
34 | - if (in_aio_context_home_thread(ctx_)) { \ | ||
35 | + if (ctx_ && in_aio_context_home_thread(ctx_)) { \ | ||
36 | while ((cond)) { \ | ||
37 | aio_poll(ctx_, true); \ | ||
38 | waited_ = true; \ | ||
39 | @@ -XXX,XX +XXX,XX @@ typedef struct { | ||
40 | /* Increment wait_->num_waiters before evaluating cond. */ \ | ||
41 | atomic_inc(&wait_->num_waiters); \ | ||
42 | while ((cond)) { \ | ||
43 | - aio_context_release(ctx_); \ | ||
44 | + if (ctx_) { \ | ||
45 | + aio_context_release(ctx_); \ | ||
46 | + } \ | ||
47 | aio_poll(qemu_get_aio_context(), true); \ | ||
48 | - aio_context_acquire(ctx_); \ | ||
49 | + if (ctx_) { \ | ||
50 | + aio_context_acquire(ctx_); \ | ||
51 | + } \ | ||
52 | waited_ = true; \ | ||
53 | } \ | ||
54 | atomic_dec(&wait_->num_waiters); \ | ||
55 | -- | ||
56 | 2.13.6 | ||
57 | |||
Deleted patch | |||
---|---|---|---|
1 | Before we can introduce a single polling loop for all nodes in | ||
2 | bdrv_drain_all_begin(), we must make sure to run it outside of coroutine | ||
3 | context like we already do for bdrv_do_drained_begin(). | ||
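Reduced to a toy standalone C model (the coroutine and bottom-half machinery are only simulated here, not real QEMU primitives), the bounce-to-main-loop pattern is approximately:

    #include <stdbool.h>
    #include <stdio.h>

    /* in_coroutine and the "bottom half" are stand-ins: the real code uses
     * qemu_in_coroutine(), aio_bh_schedule_oneshot() and a yield/wake pair. */
    static bool in_coroutine;

    struct drain_data {
        bool done;
    };

    static void drain_all_begin(void);

    static void drain_bh_cb(void *opaque)
    {
        struct drain_data *data = opaque;

        drain_all_begin();          /* re-entered from the main loop */
        data->done = true;          /* would wake the yielded coroutine */
    }

    static void drain_all_begin(void)
    {
        if (in_coroutine) {
            struct drain_data data = { .done = false };

            /* Schedule the BH, then "yield"; here the main loop is
             * modelled by simply running the BH before we return. */
            in_coroutine = false;
            drain_bh_cb(&data);
            printf("coroutine resumed, done=%d\n", data.done);
            return;
        }

        printf("quiescing all nodes from main-loop context\n");
    }

    int main(void)
    {
        in_coroutine = true;
        drain_all_begin();
        return 0;
    }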
4 | 1 | ||
5 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
6 | --- | ||
7 | block/io.c | 22 +++++++++++++++++----- | ||
8 | 1 file changed, 17 insertions(+), 5 deletions(-) | ||
9 | |||
10 | diff --git a/block/io.c b/block/io.c | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/block/io.c | ||
13 | +++ b/block/io.c | ||
14 | @@ -XXX,XX +XXX,XX @@ static void bdrv_co_drain_bh_cb(void *opaque) | ||
15 | Coroutine *co = data->co; | ||
16 | BlockDriverState *bs = data->bs; | ||
17 | |||
18 | - bdrv_dec_in_flight(bs); | ||
19 | - if (data->begin) { | ||
20 | - bdrv_do_drained_begin(bs, data->recursive, data->parent, data->poll); | ||
21 | + if (bs) { | ||
22 | + bdrv_dec_in_flight(bs); | ||
23 | + if (data->begin) { | ||
24 | + bdrv_do_drained_begin(bs, data->recursive, data->parent, data->poll); | ||
25 | + } else { | ||
26 | + bdrv_do_drained_end(bs, data->recursive, data->parent); | ||
27 | + } | ||
28 | } else { | ||
29 | - bdrv_do_drained_end(bs, data->recursive, data->parent); | ||
30 | + assert(data->begin); | ||
31 | + bdrv_drain_all_begin(); | ||
32 | } | ||
33 | |||
34 | data->done = true; | ||
35 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs, | ||
36 | .parent = parent, | ||
37 | .poll = poll, | ||
38 | }; | ||
39 | - bdrv_inc_in_flight(bs); | ||
40 | + if (bs) { | ||
41 | + bdrv_inc_in_flight(bs); | ||
42 | + } | ||
43 | aio_bh_schedule_oneshot(bdrv_get_aio_context(bs), | ||
44 | bdrv_co_drain_bh_cb, &data); | ||
45 | |||
46 | @@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_begin(void) | ||
47 | BlockDriverState *bs; | ||
48 | BdrvNextIterator it; | ||
49 | |||
50 | + if (qemu_in_coroutine()) { | ||
51 | + bdrv_co_yield_to_drain(NULL, true, false, NULL, true); | ||
52 | + return; | ||
53 | + } | ||
54 | + | ||
55 | /* BDRV_POLL_WHILE() for a node can only be called from its own I/O thread | ||
56 | * or the main loop AioContext. We potentially use BDRV_POLL_WHILE() on | ||
57 | * nodes in several different AioContexts, so make sure we're in the main | ||
58 | -- | ||
59 | 2.13.6 | ||
60 | |||
Deleted patch | |||
---|---|---|---|
1 | In the future, bdrv_drain_all_begin/end() will drain all individual | ||
2 | nodes separately rather than whole subtrees. This means that we don't | ||
3 | want to propagate the drain to all parents any more: If the parent is a | ||
4 | BDS, it will already be drained separately. Recursing to all parents is | ||
5 | unnecessary work and would make it an O(n²) operation. | ||
6 | 1 | ||
7 | Prepare the drain function for the changed drain_all by adding an | ||
8 | ignore_bds_parents parameter to the internal implementation that | ||
9 | prevents the propagation of the drain to BDS parents. We still (have to) | ||
10 | propagate it to non-BDS parents like BlockBackends or Jobs because those | ||
11 | are not drained separately. | ||
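The core of the skip logic, as a self-contained sketch where struct parent and its is_bds flag are simplified stand-ins for BdrvChild and role->parent_is_bds:

    #include <stdbool.h>
    #include <stdio.h>

    struct parent {
        const char *name;
        bool is_bds;
    };

    static void parent_drained_begin(struct parent *parents, int n,
                                     bool ignore_bds_parents)
    {
        for (int i = 0; i < n; i++) {
            if (ignore_bds_parents && parents[i].is_bds) {
                /* drain_all quiesces this node separately; skipping it
                 * here avoids propagating the drain up through every
                 * BDS parent. */
                continue;
            }
            printf("quiescing parent %s\n", parents[i].name);
        }
    }

    int main(void)
    {
        struct parent parents[] = {
            { "block-backend", false },
            { "format-node",   true  },
            { "block-job",     false },
        };

        parent_drained_begin(parents, 3, true);   /* skips "format-node" */
        return 0;
    }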
12 | |||
13 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
14 | --- | ||
15 | include/block/block.h | 16 ++++++--- | ||
16 | include/block/block_int.h | 6 ++++ | ||
17 | block.c | 11 +++--- | ||
18 | block/io.c | 88 ++++++++++++++++++++++++++++------------------- | ||
19 | block/vvfat.c | 1 + | ||
20 | 5 files changed, 78 insertions(+), 44 deletions(-) | ||
21 | |||
22 | diff --git a/include/block/block.h b/include/block/block.h | ||
23 | index XXXXXXX..XXXXXXX 100644 | ||
24 | --- a/include/block/block.h | ||
25 | +++ b/include/block/block.h | ||
26 | @@ -XXX,XX +XXX,XX @@ void bdrv_io_unplug(BlockDriverState *bs); | ||
27 | * Begin a quiesced section of all users of @bs. This is part of | ||
28 | * bdrv_drained_begin. | ||
29 | */ | ||
30 | -void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore); | ||
31 | +void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore, | ||
32 | + bool ignore_bds_parents); | ||
33 | |||
34 | /** | ||
35 | * bdrv_parent_drained_end: | ||
36 | @@ -XXX,XX +XXX,XX @@ void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore); | ||
37 | * End a quiesced section of all users of @bs. This is part of | ||
38 | * bdrv_drained_end. | ||
39 | */ | ||
40 | -void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore); | ||
41 | +void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore, | ||
42 | + bool ignore_bds_parents); | ||
43 | |||
44 | /** | ||
45 | * bdrv_drain_poll: | ||
46 | * | ||
47 | * Poll for pending requests in @bs, its parents (except for @ignore_parent), | ||
48 | - * and if @recursive is true its children as well. | ||
49 | + * and if @recursive is true its children as well (used for subtree drain). | ||
50 | + * | ||
51 | + * If @ignore_bds_parents is true, parents that are BlockDriverStates must | ||
52 | + * ignore the drain request because they will be drained separately (used for | ||
53 | + * drain_all). | ||
54 | * | ||
55 | * This is part of bdrv_drained_begin. | ||
56 | */ | ||
57 | bool bdrv_drain_poll(BlockDriverState *bs, bool recursive, | ||
58 | - BdrvChild *ignore_parent); | ||
59 | + BdrvChild *ignore_parent, bool ignore_bds_parents); | ||
60 | |||
61 | /** | ||
62 | * bdrv_drained_begin: | ||
63 | @@ -XXX,XX +XXX,XX @@ void bdrv_drained_begin(BlockDriverState *bs); | ||
64 | * running requests to complete. | ||
65 | */ | ||
66 | void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, | ||
67 | - BdrvChild *parent); | ||
68 | + BdrvChild *parent, bool ignore_bds_parents); | ||
69 | |||
70 | /** | ||
71 | * Like bdrv_drained_begin, but recursively begins a quiesced section for | ||
72 | diff --git a/include/block/block_int.h b/include/block/block_int.h | ||
73 | index XXXXXXX..XXXXXXX 100644 | ||
74 | --- a/include/block/block_int.h | ||
75 | +++ b/include/block/block_int.h | ||
76 | @@ -XXX,XX +XXX,XX @@ struct BdrvChildRole { | ||
77 | * points to. */ | ||
78 | bool stay_at_node; | ||
79 | |||
80 | + /* If true, the parent is a BlockDriverState and bdrv_next_all_states() | ||
81 | + * will return it. This information is used for drain_all, where every node | ||
82 | + * will be drained separately, so the drain only needs to be propagated to | ||
83 | + * non-BDS parents. */ | ||
84 | + bool parent_is_bds; | ||
85 | + | ||
86 | void (*inherit_options)(int *child_flags, QDict *child_options, | ||
87 | int parent_flags, QDict *parent_options); | ||
88 | |||
89 | diff --git a/block.c b/block.c | ||
90 | index XXXXXXX..XXXXXXX 100644 | ||
91 | --- a/block.c | ||
92 | +++ b/block.c | ||
93 | @@ -XXX,XX +XXX,XX @@ static char *bdrv_child_get_parent_desc(BdrvChild *c) | ||
94 | static void bdrv_child_cb_drained_begin(BdrvChild *child) | ||
95 | { | ||
96 | BlockDriverState *bs = child->opaque; | ||
97 | - bdrv_do_drained_begin_quiesce(bs, NULL); | ||
98 | + bdrv_do_drained_begin_quiesce(bs, NULL, false); | ||
99 | } | ||
100 | |||
101 | static bool bdrv_child_cb_drained_poll(BdrvChild *child) | ||
102 | { | ||
103 | BlockDriverState *bs = child->opaque; | ||
104 | - return bdrv_drain_poll(bs, false, NULL); | ||
105 | + return bdrv_drain_poll(bs, false, NULL, false); | ||
106 | } | ||
107 | |||
108 | static void bdrv_child_cb_drained_end(BdrvChild *child) | ||
109 | @@ -XXX,XX +XXX,XX @@ static void bdrv_inherited_options(int *child_flags, QDict *child_options, | ||
110 | } | ||
111 | |||
112 | const BdrvChildRole child_file = { | ||
113 | + .parent_is_bds = true, | ||
114 | .get_parent_desc = bdrv_child_get_parent_desc, | ||
115 | .inherit_options = bdrv_inherited_options, | ||
116 | .drained_begin = bdrv_child_cb_drained_begin, | ||
117 | @@ -XXX,XX +XXX,XX @@ static void bdrv_inherited_fmt_options(int *child_flags, QDict *child_options, | ||
118 | } | ||
119 | |||
120 | const BdrvChildRole child_format = { | ||
121 | + .parent_is_bds = true, | ||
122 | .get_parent_desc = bdrv_child_get_parent_desc, | ||
123 | .inherit_options = bdrv_inherited_fmt_options, | ||
124 | .drained_begin = bdrv_child_cb_drained_begin, | ||
125 | @@ -XXX,XX +XXX,XX @@ static int bdrv_backing_update_filename(BdrvChild *c, BlockDriverState *base, | ||
126 | } | ||
127 | |||
128 | const BdrvChildRole child_backing = { | ||
129 | + .parent_is_bds = true, | ||
130 | .get_parent_desc = bdrv_child_get_parent_desc, | ||
131 | .attach = bdrv_backing_attach, | ||
132 | .detach = bdrv_backing_detach, | ||
133 | @@ -XXX,XX +XXX,XX @@ void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context) | ||
134 | AioContext *ctx = bdrv_get_aio_context(bs); | ||
135 | |||
136 | aio_disable_external(ctx); | ||
137 | - bdrv_parent_drained_begin(bs, NULL); | ||
138 | + bdrv_parent_drained_begin(bs, NULL, false); | ||
139 | bdrv_drain(bs); /* ensure there are no in-flight requests */ | ||
140 | |||
141 | while (aio_poll(ctx, false)) { | ||
142 | @@ -XXX,XX +XXX,XX @@ void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context) | ||
143 | */ | ||
144 | aio_context_acquire(new_context); | ||
145 | bdrv_attach_aio_context(bs, new_context); | ||
146 | - bdrv_parent_drained_end(bs, NULL); | ||
147 | + bdrv_parent_drained_end(bs, NULL, false); | ||
148 | aio_enable_external(ctx); | ||
149 | aio_context_release(new_context); | ||
150 | } | ||
151 | diff --git a/block/io.c b/block/io.c | ||
152 | index XXXXXXX..XXXXXXX 100644 | ||
153 | --- a/block/io.c | ||
154 | +++ b/block/io.c | ||
155 | @@ -XXX,XX +XXX,XX @@ | ||
156 | static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, | ||
157 | int64_t offset, int bytes, BdrvRequestFlags flags); | ||
158 | |||
159 | -void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore) | ||
160 | +void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore, | ||
161 | + bool ignore_bds_parents) | ||
162 | { | ||
163 | BdrvChild *c, *next; | ||
164 | |||
165 | QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) { | ||
166 | - if (c == ignore) { | ||
167 | + if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) { | ||
168 | continue; | ||
169 | } | ||
170 | if (c->role->drained_begin) { | ||
171 | @@ -XXX,XX +XXX,XX @@ void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore) | ||
172 | } | ||
173 | } | ||
174 | |||
175 | -void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore) | ||
176 | +void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore, | ||
177 | + bool ignore_bds_parents) | ||
178 | { | ||
179 | BdrvChild *c, *next; | ||
180 | |||
181 | QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) { | ||
182 | - if (c == ignore) { | ||
183 | + if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) { | ||
184 | continue; | ||
185 | } | ||
186 | if (c->role->drained_end) { | ||
187 | @@ -XXX,XX +XXX,XX @@ void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore) | ||
188 | } | ||
189 | } | ||
190 | |||
191 | -static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore) | ||
192 | +static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore, | ||
193 | + bool ignore_bds_parents) | ||
194 | { | ||
195 | BdrvChild *c, *next; | ||
196 | bool busy = false; | ||
197 | |||
198 | QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) { | ||
199 | - if (c == ignore) { | ||
200 | + if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) { | ||
201 | continue; | ||
202 | } | ||
203 | if (c->role->drained_poll) { | ||
204 | @@ -XXX,XX +XXX,XX @@ typedef struct { | ||
205 | bool recursive; | ||
206 | bool poll; | ||
207 | BdrvChild *parent; | ||
208 | + bool ignore_bds_parents; | ||
209 | } BdrvCoDrainData; | ||
210 | |||
211 | static void coroutine_fn bdrv_drain_invoke_entry(void *opaque) | ||
212 | @@ -XXX,XX +XXX,XX @@ static void bdrv_drain_invoke(BlockDriverState *bs, bool begin) | ||
213 | |||
214 | /* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */ | ||
215 | bool bdrv_drain_poll(BlockDriverState *bs, bool recursive, | ||
216 | - BdrvChild *ignore_parent) | ||
217 | + BdrvChild *ignore_parent, bool ignore_bds_parents) | ||
218 | { | ||
219 | BdrvChild *child, *next; | ||
220 | |||
221 | - if (bdrv_parent_drained_poll(bs, ignore_parent)) { | ||
222 | + if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) { | ||
223 | return true; | ||
224 | } | ||
225 | |||
226 | @@ -XXX,XX +XXX,XX @@ bool bdrv_drain_poll(BlockDriverState *bs, bool recursive, | ||
227 | } | ||
228 | |||
229 | if (recursive) { | ||
230 | + assert(!ignore_bds_parents); | ||
231 | QLIST_FOREACH_SAFE(child, &bs->children, next, next) { | ||
232 | - if (bdrv_drain_poll(child->bs, recursive, child)) { | ||
233 | + if (bdrv_drain_poll(child->bs, recursive, child, false)) { | ||
234 | return true; | ||
235 | } | ||
236 | } | ||
237 | @@ -XXX,XX +XXX,XX @@ static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive, | ||
238 | * have executed. */ | ||
239 | while (aio_poll(bs->aio_context, false)); | ||
240 | |||
241 | - return bdrv_drain_poll(bs, recursive, ignore_parent); | ||
242 | + return bdrv_drain_poll(bs, recursive, ignore_parent, false); | ||
243 | } | ||
244 | |||
245 | static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive, | ||
246 | - BdrvChild *parent, bool poll); | ||
247 | + BdrvChild *parent, bool ignore_bds_parents, | ||
248 | + bool poll); | ||
249 | static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive, | ||
250 | - BdrvChild *parent); | ||
251 | + BdrvChild *parent, bool ignore_bds_parents); | ||
252 | |||
253 | static void bdrv_co_drain_bh_cb(void *opaque) | ||
254 | { | ||
255 | @@ -XXX,XX +XXX,XX @@ static void bdrv_co_drain_bh_cb(void *opaque) | ||
256 | if (bs) { | ||
257 | bdrv_dec_in_flight(bs); | ||
258 | if (data->begin) { | ||
259 | - bdrv_do_drained_begin(bs, data->recursive, data->parent, data->poll); | ||
260 | + bdrv_do_drained_begin(bs, data->recursive, data->parent, | ||
261 | + data->ignore_bds_parents, data->poll); | ||
262 | } else { | ||
263 | - bdrv_do_drained_end(bs, data->recursive, data->parent); | ||
264 | + bdrv_do_drained_end(bs, data->recursive, data->parent, | ||
265 | + data->ignore_bds_parents); | ||
266 | } | ||
267 | } else { | ||
268 | assert(data->begin); | ||
269 | @@ -XXX,XX +XXX,XX @@ static void bdrv_co_drain_bh_cb(void *opaque) | ||
270 | |||
271 | static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs, | ||
272 | bool begin, bool recursive, | ||
273 | - BdrvChild *parent, bool poll) | ||
274 | + BdrvChild *parent, | ||
275 | + bool ignore_bds_parents, | ||
276 | + bool poll) | ||
277 | { | ||
278 | BdrvCoDrainData data; | ||
279 | |||
280 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs, | ||
281 | .begin = begin, | ||
282 | .recursive = recursive, | ||
283 | .parent = parent, | ||
284 | + .ignore_bds_parents = ignore_bds_parents, | ||
285 | .poll = poll, | ||
286 | }; | ||
287 | if (bs) { | ||
288 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs, | ||
289 | } | ||
290 | |||
291 | void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, | ||
292 | - BdrvChild *parent) | ||
293 | + BdrvChild *parent, bool ignore_bds_parents) | ||
294 | { | ||
295 | assert(!qemu_in_coroutine()); | ||
296 | |||
297 | @@ -XXX,XX +XXX,XX @@ void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, | ||
298 | aio_disable_external(bdrv_get_aio_context(bs)); | ||
299 | } | ||
300 | |||
301 | - bdrv_parent_drained_begin(bs, parent); | ||
302 | + bdrv_parent_drained_begin(bs, parent, ignore_bds_parents); | ||
303 | bdrv_drain_invoke(bs, true); | ||
304 | } | ||
305 | |||
306 | static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive, | ||
307 | - BdrvChild *parent, bool poll) | ||
308 | + BdrvChild *parent, bool ignore_bds_parents, | ||
309 | + bool poll) | ||
310 | { | ||
311 | BdrvChild *child, *next; | ||
312 | |||
313 | if (qemu_in_coroutine()) { | ||
314 | - bdrv_co_yield_to_drain(bs, true, recursive, parent, poll); | ||
315 | + bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents, | ||
316 | + poll); | ||
317 | return; | ||
318 | } | ||
319 | |||
320 | - bdrv_do_drained_begin_quiesce(bs, parent); | ||
321 | + bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents); | ||
322 | |||
323 | if (recursive) { | ||
324 | + assert(!ignore_bds_parents); | ||
325 | bs->recursive_quiesce_counter++; | ||
326 | QLIST_FOREACH_SAFE(child, &bs->children, next, next) { | ||
327 | - bdrv_do_drained_begin(child->bs, true, child, false); | ||
328 | + bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents, | ||
329 | + false); | ||
330 | } | ||
331 | } | ||
332 | |||
333 | @@ -XXX,XX +XXX,XX @@ static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive, | ||
334 | * nodes. | ||
335 | */ | ||
336 | if (poll) { | ||
337 | + assert(!ignore_bds_parents); | ||
338 | BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent)); | ||
339 | } | ||
340 | } | ||
341 | |||
342 | void bdrv_drained_begin(BlockDriverState *bs) | ||
343 | { | ||
344 | - bdrv_do_drained_begin(bs, false, NULL, true); | ||
345 | + bdrv_do_drained_begin(bs, false, NULL, false, true); | ||
346 | } | ||
347 | |||
348 | void bdrv_subtree_drained_begin(BlockDriverState *bs) | ||
349 | { | ||
350 | - bdrv_do_drained_begin(bs, true, NULL, true); | ||
351 | + bdrv_do_drained_begin(bs, true, NULL, false, true); | ||
352 | } | ||
353 | |||
354 | -void bdrv_do_drained_end(BlockDriverState *bs, bool recursive, | ||
355 | - BdrvChild *parent) | ||
356 | +static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive, | ||
357 | + BdrvChild *parent, bool ignore_bds_parents) | ||
358 | { | ||
359 | BdrvChild *child, *next; | ||
360 | int old_quiesce_counter; | ||
361 | |||
362 | if (qemu_in_coroutine()) { | ||
363 | - bdrv_co_yield_to_drain(bs, false, recursive, parent, false); | ||
364 | + bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents, | ||
365 | + false); | ||
366 | return; | ||
367 | } | ||
368 | assert(bs->quiesce_counter > 0); | ||
369 | @@ -XXX,XX +XXX,XX @@ void bdrv_do_drained_end(BlockDriverState *bs, bool recursive, | ||
370 | |||
371 | /* Re-enable things in child-to-parent order */ | ||
372 | bdrv_drain_invoke(bs, false); | ||
373 | - bdrv_parent_drained_end(bs, parent); | ||
374 | + bdrv_parent_drained_end(bs, parent, ignore_bds_parents); | ||
375 | if (old_quiesce_counter == 1) { | ||
376 | aio_enable_external(bdrv_get_aio_context(bs)); | ||
377 | } | ||
378 | |||
379 | if (recursive) { | ||
380 | + assert(!ignore_bds_parents); | ||
381 | bs->recursive_quiesce_counter--; | ||
382 | QLIST_FOREACH_SAFE(child, &bs->children, next, next) { | ||
383 | - bdrv_do_drained_end(child->bs, true, child); | ||
384 | + bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents); | ||
385 | } | ||
386 | } | ||
387 | } | ||
388 | |||
389 | void bdrv_drained_end(BlockDriverState *bs) | ||
390 | { | ||
391 | - bdrv_do_drained_end(bs, false, NULL); | ||
392 | + bdrv_do_drained_end(bs, false, NULL, false); | ||
393 | } | ||
394 | |||
395 | void bdrv_subtree_drained_end(BlockDriverState *bs) | ||
396 | { | ||
397 | - bdrv_do_drained_end(bs, true, NULL); | ||
398 | + bdrv_do_drained_end(bs, true, NULL, false); | ||
399 | } | ||
400 | |||
401 | void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent) | ||
402 | @@ -XXX,XX +XXX,XX @@ void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent) | ||
403 | int i; | ||
404 | |||
405 | for (i = 0; i < new_parent->recursive_quiesce_counter; i++) { | ||
406 | - bdrv_do_drained_begin(child->bs, true, child, true); | ||
407 | + bdrv_do_drained_begin(child->bs, true, child, false, true); | ||
408 | } | ||
409 | } | ||
410 | |||
411 | @@ -XXX,XX +XXX,XX @@ void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent) | ||
412 | int i; | ||
413 | |||
414 | for (i = 0; i < old_parent->recursive_quiesce_counter; i++) { | ||
415 | - bdrv_do_drained_end(child->bs, true, child); | ||
416 | + bdrv_do_drained_end(child->bs, true, child, false); | ||
417 | } | ||
418 | } | ||
419 | |||
420 | @@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_begin(void) | ||
421 | BdrvNextIterator it; | ||
422 | |||
423 | if (qemu_in_coroutine()) { | ||
424 | - bdrv_co_yield_to_drain(NULL, true, false, NULL, true); | ||
425 | + bdrv_co_yield_to_drain(NULL, true, false, NULL, false, true); | ||
426 | return; | ||
427 | } | ||
428 | |||
429 | @@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_begin(void) | ||
430 | AioContext *aio_context = bdrv_get_aio_context(bs); | ||
431 | |||
432 | aio_context_acquire(aio_context); | ||
433 | - bdrv_do_drained_begin(bs, true, NULL, true); | ||
434 | + bdrv_do_drained_begin(bs, true, NULL, false, true); | ||
435 | aio_context_release(aio_context); | ||
436 | } | ||
437 | |||
438 | @@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_end(void) | ||
439 | AioContext *aio_context = bdrv_get_aio_context(bs); | ||
440 | |||
441 | aio_context_acquire(aio_context); | ||
442 | - bdrv_do_drained_end(bs, true, NULL); | ||
443 | + bdrv_do_drained_end(bs, true, NULL, false); | ||
444 | aio_context_release(aio_context); | ||
445 | } | ||
446 | } | ||
447 | diff --git a/block/vvfat.c b/block/vvfat.c | ||
448 | index XXXXXXX..XXXXXXX 100644 | ||
449 | --- a/block/vvfat.c | ||
450 | +++ b/block/vvfat.c | ||
451 | @@ -XXX,XX +XXX,XX @@ static void vvfat_qcow_options(int *child_flags, QDict *child_options, | ||
452 | } | ||
453 | |||
454 | static const BdrvChildRole child_vvfat_qcow = { | ||
455 | + .parent_is_bds = true, | ||
456 | .inherit_options = vvfat_qcow_options, | ||
457 | }; | ||
458 | |||
459 | -- | ||
460 | 2.13.6 | ||
461 | |||
Deleted patch | |||
---|---|---|---|
1 | bdrv_drain_all_*() used bdrv_next() to iterate over all root nodes and | ||
2 | did a subtree drain for each of them. This works fine as long as the | ||
3 | graph is static, but sadly, reality looks different. | ||
4 | 1 | ||
5 | If the graph changes so that root nodes are added or removed, we would | ||
6 | have to compensate for this. bdrv_next() returns each root node only | ||
7 | once even if it's the root node for multiple BlockBackends or for a | ||
8 | monitor-owned block driver tree, which would only complicate things. | ||
9 | |||
10 | The much easier and more obviously correct way is to fundamentally | ||
11 | change the way the functions work: Iterate over all BlockDriverStates, | ||
12 | no matter who owns them, and drain them individually. Compensation is | ||
13 | only necessary when a new BDS is created inside a drain_all section. | ||
14 | Removal of a BDS doesn't require any action because it's gone afterwards | ||
15 | anyway. | ||
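The compensation rule fits in a few lines of standalone C; drain_all_count and struct node below are stand-ins for bdrv_drain_all_count and BlockDriverState:

    #include <stdio.h>

    static unsigned drain_all_count;

    struct node {
        unsigned quiesce_counter;
    };

    static void node_init(struct node *n)
    {
        /* A node created inside a drain_all section must start out as
         * quiesced as every other node already is. */
        n->quiesce_counter = drain_all_count;
    }

    int main(void)
    {
        struct node fresh;

        drain_all_count++;          /* bdrv_drain_all_begin() */
        node_init(&fresh);
        printf("new node starts with quiesce_counter = %u\n",
               fresh.quiesce_counter);
        drain_all_count--;          /* bdrv_drain_all_end() */

        return 0;
    }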
16 | |||
17 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
18 | --- | ||
19 | include/block/block.h | 1 + | ||
20 | include/block/block_int.h | 1 + | ||
21 | block.c | 34 ++++++++++++++++++++++++--- | ||
22 | block/io.c | 60 ++++++++++++++++++++++++++++++++++++----------- | ||
23 | 4 files changed, 79 insertions(+), 17 deletions(-) | ||
24 | |||
25 | diff --git a/include/block/block.h b/include/block/block.h | ||
26 | index XXXXXXX..XXXXXXX 100644 | ||
27 | --- a/include/block/block.h | ||
28 | +++ b/include/block/block.h | ||
29 | @@ -XXX,XX +XXX,XX @@ BlockDriverState *bdrv_lookup_bs(const char *device, | ||
30 | Error **errp); | ||
31 | bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base); | ||
32 | BlockDriverState *bdrv_next_node(BlockDriverState *bs); | ||
33 | +BlockDriverState *bdrv_next_all_states(BlockDriverState *bs); | ||
34 | |||
35 | typedef struct BdrvNextIterator { | ||
36 | enum { | ||
37 | diff --git a/include/block/block_int.h b/include/block/block_int.h | ||
38 | index XXXXXXX..XXXXXXX 100644 | ||
39 | --- a/include/block/block_int.h | ||
40 | +++ b/include/block/block_int.h | ||
41 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_pwritev(BdrvChild *child, | ||
42 | int64_t offset, unsigned int bytes, QEMUIOVector *qiov, | ||
43 | BdrvRequestFlags flags); | ||
44 | |||
45 | +extern unsigned int bdrv_drain_all_count; | ||
46 | void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent); | ||
47 | void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent); | ||
48 | |||
49 | diff --git a/block.c b/block.c | ||
50 | index XXXXXXX..XXXXXXX 100644 | ||
51 | --- a/block.c | ||
52 | +++ b/block.c | ||
53 | @@ -XXX,XX +XXX,XX @@ BlockDriverState *bdrv_new(void) | ||
54 | |||
55 | qemu_co_queue_init(&bs->flush_queue); | ||
56 | |||
57 | + for (i = 0; i < bdrv_drain_all_count; i++) { | ||
58 | + bdrv_drained_begin(bs); | ||
59 | + } | ||
60 | + | ||
61 | QTAILQ_INSERT_TAIL(&all_bdrv_states, bs, bs_list); | ||
62 | |||
63 | return bs; | ||
64 | @@ -XXX,XX +XXX,XX @@ static int bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, | ||
65 | int open_flags, Error **errp) | ||
66 | { | ||
67 | Error *local_err = NULL; | ||
68 | - int ret; | ||
69 | + int i, ret; | ||
70 | |||
71 | bdrv_assign_node_name(bs, node_name, &local_err); | ||
72 | if (local_err) { | ||
73 | @@ -XXX,XX +XXX,XX @@ static int bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, | ||
74 | assert(bdrv_min_mem_align(bs) != 0); | ||
75 | assert(is_power_of_2(bs->bl.request_alignment)); | ||
76 | |||
77 | + for (i = 0; i < bs->quiesce_counter; i++) { | ||
78 | + if (drv->bdrv_co_drain_begin) { | ||
79 | + drv->bdrv_co_drain_begin(bs); | ||
80 | + } | ||
81 | + } | ||
82 | + | ||
83 | return 0; | ||
84 | open_failed: | ||
85 | bs->drv = NULL; | ||
86 | @@ -XXX,XX +XXX,XX @@ static void bdrv_replace_child_noperm(BdrvChild *child, | ||
87 | child->role->detach(child); | ||
88 | } | ||
89 | if (old_bs->quiesce_counter && child->role->drained_end) { | ||
90 | - for (i = 0; i < old_bs->quiesce_counter; i++) { | ||
91 | + int num = old_bs->quiesce_counter; | ||
92 | + if (child->role->parent_is_bds) { | ||
93 | + num -= bdrv_drain_all_count; | ||
94 | + } | ||
95 | + assert(num >= 0); | ||
96 | + for (i = 0; i < num; i++) { | ||
97 | child->role->drained_end(child); | ||
98 | } | ||
99 | } | ||
100 | @@ -XXX,XX +XXX,XX @@ static void bdrv_replace_child_noperm(BdrvChild *child, | ||
101 | if (new_bs) { | ||
102 | QLIST_INSERT_HEAD(&new_bs->parents, child, next_parent); | ||
103 | if (new_bs->quiesce_counter && child->role->drained_begin) { | ||
104 | - for (i = 0; i < new_bs->quiesce_counter; i++) { | ||
105 | + int num = new_bs->quiesce_counter; | ||
106 | + if (child->role->parent_is_bds) { | ||
107 | + num -= bdrv_drain_all_count; | ||
108 | + } | ||
109 | + assert(num >= 0); | ||
110 | + for (i = 0; i < num; i++) { | ||
111 | child->role->drained_begin(child); | ||
112 | } | ||
113 | } | ||
114 | @@ -XXX,XX +XXX,XX @@ BlockDriverState *bdrv_next_node(BlockDriverState *bs) | ||
115 | return QTAILQ_NEXT(bs, node_list); | ||
116 | } | ||
117 | |||
118 | +BlockDriverState *bdrv_next_all_states(BlockDriverState *bs) | ||
119 | +{ | ||
120 | + if (!bs) { | ||
121 | + return QTAILQ_FIRST(&all_bdrv_states); | ||
122 | + } | ||
123 | + return QTAILQ_NEXT(bs, bs_list); | ||
124 | +} | ||
125 | + | ||
126 | const char *bdrv_get_node_name(const BlockDriverState *bs) | ||
127 | { | ||
128 | return bs->node_name; | ||
129 | diff --git a/block/io.c b/block/io.c | ||
130 | index XXXXXXX..XXXXXXX 100644 | ||
131 | --- a/block/io.c | ||
132 | +++ b/block/io.c | ||
133 | @@ -XXX,XX +XXX,XX @@ | ||
134 | /* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */ | ||
135 | #define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS) | ||
136 | |||
137 | +static AioWait drain_all_aio_wait; | ||
138 | + | ||
139 | static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, | ||
140 | int64_t offset, int bytes, BdrvRequestFlags flags); | ||
141 | |||
142 | @@ -XXX,XX +XXX,XX @@ static void bdrv_drain_assert_idle(BlockDriverState *bs) | ||
143 | } | ||
144 | } | ||
145 | |||
146 | +unsigned int bdrv_drain_all_count = 0; | ||
147 | + | ||
148 | +static bool bdrv_drain_all_poll(void) | ||
149 | +{ | ||
150 | + BlockDriverState *bs = NULL; | ||
151 | + bool result = false; | ||
152 | + | ||
153 | + /* Execute pending BHs first (may modify the graph) and check everything | ||
154 | + * else only after the BHs have executed. */ | ||
155 | + while (aio_poll(qemu_get_aio_context(), false)); | ||
156 | + | ||
157 | + /* bdrv_drain_poll() can't make changes to the graph and we are holding the | ||
158 | + * main AioContext lock, so iterating bdrv_next_all_states() is safe. */ | ||
159 | + while ((bs = bdrv_next_all_states(bs))) { | ||
160 | + AioContext *aio_context = bdrv_get_aio_context(bs); | ||
161 | + aio_context_acquire(aio_context); | ||
162 | + result |= bdrv_drain_poll(bs, false, NULL, true); | ||
163 | + aio_context_release(aio_context); | ||
164 | + } | ||
165 | + | ||
166 | + return result; | ||
167 | +} | ||
168 | + | ||
169 | /* | ||
170 | * Wait for pending requests to complete across all BlockDriverStates | ||
171 | * | ||
172 | @@ -XXX,XX +XXX,XX @@ static void bdrv_drain_assert_idle(BlockDriverState *bs) | ||
173 | */ | ||
174 | void bdrv_drain_all_begin(void) | ||
175 | { | ||
176 | - BlockDriverState *bs; | ||
177 | - BdrvNextIterator it; | ||
178 | + BlockDriverState *bs = NULL; | ||
179 | |||
180 | if (qemu_in_coroutine()) { | ||
181 | - bdrv_co_yield_to_drain(NULL, true, false, NULL, false, true); | ||
182 | + bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true); | ||
183 | return; | ||
184 | } | ||
185 | |||
186 | - /* BDRV_POLL_WHILE() for a node can only be called from its own I/O thread | ||
187 | - * or the main loop AioContext. We potentially use BDRV_POLL_WHILE() on | ||
188 | - * nodes in several different AioContexts, so make sure we're in the main | ||
189 | - * context. */ | ||
190 | + /* AIO_WAIT_WHILE() with a NULL context can only be called from the main | ||
191 | + * loop AioContext, so make sure we're in the main context. */ | ||
192 | assert(qemu_get_current_aio_context() == qemu_get_aio_context()); | ||
193 | + assert(bdrv_drain_all_count < INT_MAX); | ||
194 | + bdrv_drain_all_count++; | ||
195 | |||
196 | - for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { | ||
197 | + /* Quiesce all nodes, without polling in-flight requests yet. The graph | ||
198 | + * cannot change during this loop. */ | ||
199 | + while ((bs = bdrv_next_all_states(bs))) { | ||
200 | AioContext *aio_context = bdrv_get_aio_context(bs); | ||
201 | |||
202 | aio_context_acquire(aio_context); | ||
203 | - bdrv_do_drained_begin(bs, true, NULL, false, true); | ||
204 | + bdrv_do_drained_begin(bs, false, NULL, true, false); | ||
205 | aio_context_release(aio_context); | ||
206 | } | ||
207 | |||
208 | - for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { | ||
209 | + /* Now poll the in-flight requests */ | ||
210 | + AIO_WAIT_WHILE(&drain_all_aio_wait, NULL, bdrv_drain_all_poll()); | ||
211 | + | ||
212 | + while ((bs = bdrv_next_all_states(bs))) { | ||
213 | bdrv_drain_assert_idle(bs); | ||
214 | } | ||
215 | } | ||
216 | |||
217 | void bdrv_drain_all_end(void) | ||
218 | { | ||
219 | - BlockDriverState *bs; | ||
220 | - BdrvNextIterator it; | ||
221 | + BlockDriverState *bs = NULL; | ||
222 | |||
223 | - for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { | ||
224 | + while ((bs = bdrv_next_all_states(bs))) { | ||
225 | AioContext *aio_context = bdrv_get_aio_context(bs); | ||
226 | |||
227 | aio_context_acquire(aio_context); | ||
228 | - bdrv_do_drained_end(bs, true, NULL, false); | ||
229 | + bdrv_do_drained_end(bs, false, NULL, true); | ||
230 | aio_context_release(aio_context); | ||
231 | } | ||
232 | + | ||
233 | + assert(bdrv_drain_all_count > 0); | ||
234 | + bdrv_drain_all_count--; | ||
235 | } | ||
236 | |||
237 | void bdrv_drain_all(void) | ||
238 | @@ -XXX,XX +XXX,XX @@ void bdrv_inc_in_flight(BlockDriverState *bs) | ||
239 | void bdrv_wakeup(BlockDriverState *bs) | ||
240 | { | ||
241 | aio_wait_kick(bdrv_get_aio_wait(bs)); | ||
242 | + aio_wait_kick(&drain_all_aio_wait); | ||
243 | } | ||
244 | |||
245 | void bdrv_dec_in_flight(BlockDriverState *bs) | ||
246 | -- | ||
247 | 2.13.6 | ||
248 | |||
Deleted patch | |||
---|---|---|---|
1 | This tests both adding and removing a node between bdrv_drain_all_begin() | ||
2 | and bdrv_drain_all_end(), and enables the existing detach test for | ||
3 | drain_all. | ||
4 | 1 | ||
5 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
6 | --- | ||
7 | tests/test-bdrv-drain.c | 75 +++++++++++++++++++++++++++++++++++++++++++++++-- | ||
8 | 1 file changed, 73 insertions(+), 2 deletions(-) | ||
9 | |||
10 | diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c | ||
11 | index XXXXXXX..XXXXXXX 100644 | ||
12 | --- a/tests/test-bdrv-drain.c | ||
13 | +++ b/tests/test-bdrv-drain.c | ||
14 | @@ -XXX,XX +XXX,XX @@ static void test_multiparent(void) | ||
15 | blk_unref(blk_b); | ||
16 | } | ||
17 | |||
18 | -static void test_graph_change(void) | ||
19 | +static void test_graph_change_drain_subtree(void) | ||
20 | { | ||
21 | BlockBackend *blk_a, *blk_b; | ||
22 | BlockDriverState *bs_a, *bs_b, *backing; | ||
23 | @@ -XXX,XX +XXX,XX @@ static void test_graph_change(void) | ||
24 | blk_unref(blk_b); | ||
25 | } | ||
26 | |||
27 | +static void test_graph_change_drain_all(void) | ||
28 | +{ | ||
29 | + BlockBackend *blk_a, *blk_b; | ||
30 | + BlockDriverState *bs_a, *bs_b; | ||
31 | + BDRVTestState *a_s, *b_s; | ||
32 | + | ||
33 | + /* Create node A with a BlockBackend */ | ||
34 | + blk_a = blk_new(BLK_PERM_ALL, BLK_PERM_ALL); | ||
35 | + bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR, | ||
36 | + &error_abort); | ||
37 | + a_s = bs_a->opaque; | ||
38 | + blk_insert_bs(blk_a, bs_a, &error_abort); | ||
39 | + | ||
40 | + g_assert_cmpint(bs_a->quiesce_counter, ==, 0); | ||
41 | + g_assert_cmpint(a_s->drain_count, ==, 0); | ||
42 | + | ||
43 | + /* Call bdrv_drain_all_begin() */ | ||
44 | + bdrv_drain_all_begin(); | ||
45 | + | ||
46 | + g_assert_cmpint(bs_a->quiesce_counter, ==, 1); | ||
47 | + g_assert_cmpint(a_s->drain_count, ==, 1); | ||
48 | + | ||
49 | + /* Create node B with a BlockBackend */ | ||
50 | + blk_b = blk_new(BLK_PERM_ALL, BLK_PERM_ALL); | ||
51 | + bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR, | ||
52 | + &error_abort); | ||
53 | + b_s = bs_b->opaque; | ||
54 | + blk_insert_bs(blk_b, bs_b, &error_abort); | ||
55 | + | ||
56 | + g_assert_cmpint(bs_a->quiesce_counter, ==, 1); | ||
57 | + g_assert_cmpint(bs_b->quiesce_counter, ==, 1); | ||
58 | + g_assert_cmpint(a_s->drain_count, ==, 1); | ||
59 | + g_assert_cmpint(b_s->drain_count, ==, 1); | ||
60 | + | ||
61 | + /* Unref and finally delete node A */ | ||
62 | + blk_unref(blk_a); | ||
63 | + | ||
64 | + g_assert_cmpint(bs_a->quiesce_counter, ==, 1); | ||
65 | + g_assert_cmpint(bs_b->quiesce_counter, ==, 1); | ||
66 | + g_assert_cmpint(a_s->drain_count, ==, 1); | ||
67 | + g_assert_cmpint(b_s->drain_count, ==, 1); | ||
68 | + | ||
69 | + bdrv_unref(bs_a); | ||
70 | + | ||
71 | + g_assert_cmpint(bs_b->quiesce_counter, ==, 1); | ||
72 | + g_assert_cmpint(b_s->drain_count, ==, 1); | ||
73 | + | ||
74 | + /* End the drained section */ | ||
75 | + bdrv_drain_all_end(); | ||
76 | + | ||
77 | + g_assert_cmpint(bs_b->quiesce_counter, ==, 0); | ||
78 | + g_assert_cmpint(b_s->drain_count, ==, 0); | ||
79 | + | ||
80 | + bdrv_unref(bs_b); | ||
81 | + blk_unref(blk_b); | ||
82 | +} | ||
83 | + | ||
84 | struct test_iothread_data { | ||
85 | BlockDriverState *bs; | ||
86 | enum drain_type drain_type; | ||
87 | @@ -XXX,XX +XXX,XX @@ static void do_test_delete_by_drain(bool detach_instead_of_delete, | ||
88 | bdrv_subtree_drained_begin(bs); | ||
89 | bdrv_subtree_drained_end(bs); | ||
90 | break; | ||
91 | + case BDRV_DRAIN_ALL: | ||
92 | + bdrv_drain_all_begin(); | ||
93 | + bdrv_drain_all_end(); | ||
94 | + break; | ||
95 | default: | ||
96 | g_assert_not_reached(); | ||
97 | } | ||
98 | @@ -XXX,XX +XXX,XX @@ static void test_delete_by_drain(void) | ||
99 | do_test_delete_by_drain(false, BDRV_DRAIN); | ||
100 | } | ||
101 | |||
102 | +static void test_detach_by_drain_all(void) | ||
103 | +{ | ||
104 | + do_test_delete_by_drain(true, BDRV_DRAIN_ALL); | ||
105 | +} | ||
106 | + | ||
107 | static void test_detach_by_drain(void) | ||
108 | { | ||
109 | do_test_delete_by_drain(true, BDRV_DRAIN); | ||
110 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv) | ||
111 | |||
112 | g_test_add_func("/bdrv-drain/nested", test_nested); | ||
113 | g_test_add_func("/bdrv-drain/multiparent", test_multiparent); | ||
114 | - g_test_add_func("/bdrv-drain/graph-change", test_graph_change); | ||
115 | + | ||
116 | + g_test_add_func("/bdrv-drain/graph-change/drain_subtree", | ||
117 | + test_graph_change_drain_subtree); | ||
118 | + g_test_add_func("/bdrv-drain/graph-change/drain_all", | ||
119 | + test_graph_change_drain_all); | ||
120 | |||
121 | g_test_add_func("/bdrv-drain/iothread/drain_all", test_iothread_drain_all); | ||
122 | g_test_add_func("/bdrv-drain/iothread/drain", test_iothread_drain); | ||
123 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv) | ||
124 | test_blockjob_drain_subtree); | ||
125 | |||
126 | g_test_add_func("/bdrv-drain/deletion/drain", test_delete_by_drain); | ||
127 | + g_test_add_func("/bdrv-drain/detach/drain_all", test_detach_by_drain_all); | ||
128 | g_test_add_func("/bdrv-drain/detach/drain", test_detach_by_drain); | ||
129 | g_test_add_func("/bdrv-drain/detach/drain_subtree", test_detach_by_drain_subtree); | ||
130 | g_test_add_func("/bdrv-drain/detach/parent_cb", test_detach_by_parent_cb); | ||
131 | -- | ||
132 | 2.13.6 | ||
133 | |||
Deleted patch | |||
---|---|---|---|
1 | From: Greg Kurz <groug@kaod.org> | ||
2 | 1 | ||
3 | Removing a drive with drive_del while it is being used to run an I/O | ||
4 | intensive workload can cause QEMU to crash. | ||
5 | |||
6 | An AIO flush can yield at some point: | ||
7 | |||
8 | blk_aio_flush_entry() | ||
9 | blk_co_flush(blk) | ||
10 | bdrv_co_flush(blk->root->bs) | ||
11 | ... | ||
12 | qemu_coroutine_yield() | ||
13 | |||
14 | and let the HMP command run, free blk->root and give control | ||
15 | back to the AIO flush: | ||
16 | |||
17 | hmp_drive_del() | ||
18 | blk_remove_bs() | ||
19 | bdrv_root_unref_child(blk->root) | ||
20 | child_bs = blk->root->bs | ||
21 | bdrv_detach_child(blk->root) | ||
22 | bdrv_replace_child(blk->root, NULL) | ||
23 | blk->root->bs = NULL | ||
24 | g_free(blk->root) <============== blk->root becomes stale | ||
25 | bdrv_unref(child_bs) | ||
26 | bdrv_delete(child_bs) | ||
27 | bdrv_close() | ||
28 | bdrv_drained_begin() | ||
29 | bdrv_do_drained_begin() | ||
30 | bdrv_drain_recurse() | ||
31 | aio_poll() | ||
32 | ... | ||
33 | qemu_coroutine_switch() | ||
34 | |||
35 | and the AIO flush completion ends up dereferencing blk->root: | ||
36 | |||
37 | blk_aio_complete() | ||
38 | scsi_aio_complete() | ||
39 | blk_get_aio_context(blk) | ||
40 | bs = blk_bs(blk) | ||
41 | ie, bs = blk->root ? blk->root->bs : NULL | ||
42 | ^^^^^ | ||
43 | stale | ||
44 | |||
45 | The problem is that we should avoid making block driver graph | ||
46 | changes while we have in-flight requests. Let's drain all I/O | ||
47 | for this BB before calling bdrv_root_unref_child(). | ||
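The shape of the fix, as a self-contained sketch with purely illustrative names (struct backend stands in for BlockBackend; nothing here is QEMU API):

    #include <stdio.h>
    #include <stdlib.h>

    struct child {
        int unused;
    };

    struct backend {
        struct child *root;
        int in_flight;
    };

    static void drain(struct backend *blk)
    {
        /* In QEMU this is a poll loop; here requests simply "complete". */
        while (blk->in_flight > 0) {
            blk->in_flight--;
        }
    }

    static void remove_bs(struct backend *blk)
    {
        drain(blk);                 /* the fix: nothing can still see root */
        free(blk->root);            /* bdrv_root_unref_child() analogue */
        blk->root = NULL;
    }

    int main(void)
    {
        struct backend blk = { malloc(sizeof(struct child)), 2 };

        remove_bs(&blk);
        printf("root released with no requests in flight\n");
        return 0;
    }

Draining first guarantees that no completion path can dereference the root pointer after it has been freed, which is exactly the use-after-free shown in the trace above.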
48 | |||
49 | Signed-off-by: Greg Kurz <groug@kaod.org> | ||
50 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
51 | --- | ||
52 | block/block-backend.c | 5 +++++ | ||
53 | 1 file changed, 5 insertions(+) | ||
54 | |||
55 | diff --git a/block/block-backend.c b/block/block-backend.c | ||
56 | index XXXXXXX..XXXXXXX 100644 | ||
57 | --- a/block/block-backend.c | ||
58 | +++ b/block/block-backend.c | ||
59 | @@ -XXX,XX +XXX,XX @@ void blk_remove_bs(BlockBackend *blk) | ||
60 | |||
61 | blk_update_root_state(blk); | ||
62 | |||
63 | + /* bdrv_root_unref_child() will cause blk->root to become stale and may | ||
64 | + * switch to a completion coroutine later on. Let's drain all I/O here | ||
65 | + * to avoid that and a potential QEMU crash. | ||
66 | + */ | ||
67 | + blk_drain(blk); | ||
68 | bdrv_root_unref_child(blk->root); | ||
69 | blk->root = NULL; | ||
70 | } | ||
71 | -- | ||
72 | 2.13.6 | ||
73 | |||
Deleted patch | |||
1 | From: Max Reitz <mreitz@redhat.com> | ||
2 | 1 | ||
3 | When converting mirror's I/O to coroutines, we are going to need a point | ||
4 | where these coroutines are created. mirror_perform() is going to be | ||
5 | that point. | ||
6 | |||
7 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
8 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
9 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
10 | Reviewed-by: Jeff Cody <jcody@redhat.com> | ||
11 | Reviewed-by: Alberto Garcia <berto@igalia.com> | ||
12 | Message-id: 20180613181823.13618-2-mreitz@redhat.com | ||
13 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
14 | --- | ||
15 | block/mirror.c | 51 +++++++++++++++++++++++++++++---------------------- | ||
16 | 1 file changed, 29 insertions(+), 22 deletions(-) | ||
17 | |||
18 | diff --git a/block/mirror.c b/block/mirror.c | ||
19 | index XXXXXXX..XXXXXXX 100644 | ||
20 | --- a/block/mirror.c | ||
21 | +++ b/block/mirror.c | ||
22 | @@ -XXX,XX +XXX,XX @@ typedef struct MirrorOp { | ||
23 | uint64_t bytes; | ||
24 | } MirrorOp; | ||
25 | |||
26 | +typedef enum MirrorMethod { | ||
27 | + MIRROR_METHOD_COPY, | ||
28 | + MIRROR_METHOD_ZERO, | ||
29 | + MIRROR_METHOD_DISCARD, | ||
30 | +} MirrorMethod; | ||
31 | + | ||
32 | static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read, | ||
33 | int error) | ||
34 | { | ||
35 | @@ -XXX,XX +XXX,XX @@ static void mirror_do_zero_or_discard(MirrorBlockJob *s, | ||
36 | } | ||
37 | } | ||
38 | |||
39 | +static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset, | ||
40 | + unsigned bytes, MirrorMethod mirror_method) | ||
41 | +{ | ||
42 | + switch (mirror_method) { | ||
43 | + case MIRROR_METHOD_COPY: | ||
44 | + return mirror_do_read(s, offset, bytes); | ||
45 | + case MIRROR_METHOD_ZERO: | ||
46 | + case MIRROR_METHOD_DISCARD: | ||
47 | + mirror_do_zero_or_discard(s, offset, bytes, | ||
48 | + mirror_method == MIRROR_METHOD_DISCARD); | ||
49 | + return bytes; | ||
50 | + default: | ||
51 | + abort(); | ||
52 | + } | ||
53 | +} | ||
54 | + | ||
55 | static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) | ||
56 | { | ||
57 | BlockDriverState *source = s->source; | ||
58 | @@ -XXX,XX +XXX,XX @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) | ||
59 | int ret; | ||
60 | int64_t io_bytes; | ||
61 | int64_t io_bytes_acct; | ||
62 | - enum MirrorMethod { | ||
63 | - MIRROR_METHOD_COPY, | ||
64 | - MIRROR_METHOD_ZERO, | ||
65 | - MIRROR_METHOD_DISCARD | ||
66 | - } mirror_method = MIRROR_METHOD_COPY; | ||
67 | + MirrorMethod mirror_method = MIRROR_METHOD_COPY; | ||
68 | |||
69 | assert(!(offset % s->granularity)); | ||
70 | ret = bdrv_block_status_above(source, NULL, offset, | ||
71 | @@ -XXX,XX +XXX,XX @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) | ||
72 | } | ||
73 | |||
74 | io_bytes = mirror_clip_bytes(s, offset, io_bytes); | ||
75 | - switch (mirror_method) { | ||
76 | - case MIRROR_METHOD_COPY: | ||
77 | - io_bytes = io_bytes_acct = mirror_do_read(s, offset, io_bytes); | ||
78 | - break; | ||
79 | - case MIRROR_METHOD_ZERO: | ||
80 | - case MIRROR_METHOD_DISCARD: | ||
81 | - mirror_do_zero_or_discard(s, offset, io_bytes, | ||
82 | - mirror_method == MIRROR_METHOD_DISCARD); | ||
83 | - if (write_zeroes_ok) { | ||
84 | - io_bytes_acct = 0; | ||
85 | - } else { | ||
86 | - io_bytes_acct = io_bytes; | ||
87 | - } | ||
88 | - break; | ||
89 | - default: | ||
90 | - abort(); | ||
91 | + io_bytes = mirror_perform(s, offset, io_bytes, mirror_method); | ||
92 | + if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) { | ||
93 | + io_bytes_acct = 0; | ||
94 | + } else { | ||
95 | + io_bytes_acct = io_bytes; | ||
96 | } | ||
97 | assert(io_bytes); | ||
98 | offset += io_bytes; | ||
99 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s) | ||
100 | continue; | ||
101 | } | ||
102 | |||
103 | - mirror_do_zero_or_discard(s, offset, bytes, false); | ||
104 | + mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO); | ||
105 | offset += bytes; | ||
106 | } | ||
107 | |||
108 | -- | ||
109 | 2.13.6 | ||
110 | |||
Deleted patch | |||
1 | From: Max Reitz <mreitz@redhat.com> | ||
2 | 1 | ||
3 | In order to talk to the source BDS (and maybe in the future to the | ||
4 | target BDS as well) directly, we need to convert our existing AIO | ||
5 | requests into coroutine I/O requests. | ||
6 | |||
7 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
8 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
9 | Message-id: 20180613181823.13618-3-mreitz@redhat.com | ||
10 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
11 | --- | ||
12 | block/mirror.c | 152 ++++++++++++++++++++++++++++++++++----------------------- | ||
13 | 1 file changed, 90 insertions(+), 62 deletions(-) | ||
14 | |||
15 | diff --git a/block/mirror.c b/block/mirror.c | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/block/mirror.c | ||
18 | +++ b/block/mirror.c | ||
19 | @@ -XXX,XX +XXX,XX @@ typedef struct MirrorOp { | ||
20 | QEMUIOVector qiov; | ||
21 | int64_t offset; | ||
22 | uint64_t bytes; | ||
23 | + | ||
24 | + /* The pointee is set by mirror_co_read(), mirror_co_zero(), and | ||
25 | + * mirror_co_discard() before yielding for the first time */ | ||
26 | + int64_t *bytes_handled; | ||
27 | } MirrorOp; | ||
28 | |||
29 | typedef enum MirrorMethod { | ||
30 | @@ -XXX,XX +XXX,XX @@ static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read, | ||
31 | } | ||
32 | } | ||
33 | |||
34 | -static void mirror_iteration_done(MirrorOp *op, int ret) | ||
35 | +static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret) | ||
36 | { | ||
37 | MirrorBlockJob *s = op->s; | ||
38 | struct iovec *iov; | ||
39 | @@ -XXX,XX +XXX,XX @@ static void mirror_iteration_done(MirrorOp *op, int ret) | ||
40 | } | ||
41 | } | ||
42 | |||
43 | -static void mirror_write_complete(void *opaque, int ret) | ||
44 | +static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret) | ||
45 | { | ||
46 | - MirrorOp *op = opaque; | ||
47 | MirrorBlockJob *s = op->s; | ||
48 | |||
49 | aio_context_acquire(blk_get_aio_context(s->common.blk)); | ||
50 | @@ -XXX,XX +XXX,XX @@ static void mirror_write_complete(void *opaque, int ret) | ||
51 | aio_context_release(blk_get_aio_context(s->common.blk)); | ||
52 | } | ||
53 | |||
54 | -static void mirror_read_complete(void *opaque, int ret) | ||
55 | +static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret) | ||
56 | { | ||
57 | - MirrorOp *op = opaque; | ||
58 | MirrorBlockJob *s = op->s; | ||
59 | |||
60 | aio_context_acquire(blk_get_aio_context(s->common.blk)); | ||
61 | @@ -XXX,XX +XXX,XX @@ static void mirror_read_complete(void *opaque, int ret) | ||
62 | |||
63 | mirror_iteration_done(op, ret); | ||
64 | } else { | ||
65 | - blk_aio_pwritev(s->target, op->offset, &op->qiov, | ||
66 | - 0, mirror_write_complete, op); | ||
67 | + ret = blk_co_pwritev(s->target, op->offset, | ||
68 | + op->qiov.size, &op->qiov, 0); | ||
69 | + mirror_write_complete(op, ret); | ||
70 | } | ||
71 | aio_context_release(blk_get_aio_context(s->common.blk)); | ||
72 | } | ||
73 | @@ -XXX,XX +XXX,XX @@ static inline void mirror_wait_for_io(MirrorBlockJob *s) | ||
74 | s->waiting_for_io = false; | ||
75 | } | ||
76 | |||
77 | -/* Submit async read while handling COW. | ||
78 | - * Returns: The number of bytes copied after and including offset, | ||
79 | - * excluding any bytes copied prior to offset due to alignment. | ||
80 | - * This will be @bytes if no alignment is necessary, or | ||
81 | - * (new_end - offset) if tail is rounded up or down due to | ||
82 | - * alignment or buffer limit. | ||
83 | +/* Perform a mirror copy operation. | ||
84 | + * | ||
85 | + * *op->bytes_handled is set to the number of bytes copied after and | ||
86 | + * including offset, excluding any bytes copied prior to offset due | ||
87 | + * to alignment. This will be op->bytes if no alignment is necessary, | ||
88 | + * or (new_end - op->offset) if the tail is rounded up or down due to | ||
89 | + * alignment or buffer limit. | ||
90 | */ | ||
91 | -static uint64_t mirror_do_read(MirrorBlockJob *s, int64_t offset, | ||
92 | - uint64_t bytes) | ||
93 | +static void coroutine_fn mirror_co_read(void *opaque) | ||
94 | { | ||
95 | + MirrorOp *op = opaque; | ||
96 | + MirrorBlockJob *s = op->s; | ||
97 | BlockBackend *source = s->common.blk; | ||
98 | int nb_chunks; | ||
99 | uint64_t ret; | ||
100 | - MirrorOp *op; | ||
101 | uint64_t max_bytes; | ||
102 | |||
103 | max_bytes = s->granularity * s->max_iov; | ||
104 | |||
105 | /* We can only handle as much as buf_size at a time. */ | ||
106 | - bytes = MIN(s->buf_size, MIN(max_bytes, bytes)); | ||
107 | - assert(bytes); | ||
108 | - assert(bytes < BDRV_REQUEST_MAX_BYTES); | ||
109 | - ret = bytes; | ||
110 | + op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes)); | ||
111 | + assert(op->bytes); | ||
112 | + assert(op->bytes < BDRV_REQUEST_MAX_BYTES); | ||
113 | + *op->bytes_handled = op->bytes; | ||
114 | |||
115 | if (s->cow_bitmap) { | ||
116 | - ret += mirror_cow_align(s, &offset, &bytes); | ||
117 | + *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes); | ||
118 | } | ||
119 | - assert(bytes <= s->buf_size); | ||
120 | + /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */ | ||
121 | + assert(*op->bytes_handled <= UINT_MAX); | ||
122 | + assert(op->bytes <= s->buf_size); | ||
123 | /* The offset is granularity-aligned because: | ||
124 | * 1) Caller passes in aligned values; | ||
125 | * 2) mirror_cow_align is used only when target cluster is larger. */ | ||
126 | - assert(QEMU_IS_ALIGNED(offset, s->granularity)); | ||
127 | + assert(QEMU_IS_ALIGNED(op->offset, s->granularity)); | ||
128 | /* The range is sector-aligned, since bdrv_getlength() rounds up. */ | ||
129 | - assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE)); | ||
130 | - nb_chunks = DIV_ROUND_UP(bytes, s->granularity); | ||
131 | + assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE)); | ||
132 | + nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity); | ||
133 | |||
134 | while (s->buf_free_count < nb_chunks) { | ||
135 | - trace_mirror_yield_in_flight(s, offset, s->in_flight); | ||
136 | + trace_mirror_yield_in_flight(s, op->offset, s->in_flight); | ||
137 | mirror_wait_for_io(s); | ||
138 | } | ||
139 | |||
140 | - /* Allocate a MirrorOp that is used as an AIO callback. */ | ||
141 | - op = g_new(MirrorOp, 1); | ||
142 | - op->s = s; | ||
143 | - op->offset = offset; | ||
144 | - op->bytes = bytes; | ||
145 | - | ||
146 | /* Now make a QEMUIOVector taking enough granularity-sized chunks | ||
147 | * from s->buf_free. | ||
148 | */ | ||
149 | qemu_iovec_init(&op->qiov, nb_chunks); | ||
150 | while (nb_chunks-- > 0) { | ||
151 | MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free); | ||
152 | - size_t remaining = bytes - op->qiov.size; | ||
153 | + size_t remaining = op->bytes - op->qiov.size; | ||
154 | |||
155 | QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next); | ||
156 | s->buf_free_count--; | ||
157 | @@ -XXX,XX +XXX,XX @@ static uint64_t mirror_do_read(MirrorBlockJob *s, int64_t offset, | ||
158 | |||
159 | /* Copy the dirty cluster. */ | ||
160 | s->in_flight++; | ||
161 | - s->bytes_in_flight += bytes; | ||
162 | - trace_mirror_one_iteration(s, offset, bytes); | ||
163 | + s->bytes_in_flight += op->bytes; | ||
164 | + trace_mirror_one_iteration(s, op->offset, op->bytes); | ||
165 | |||
166 | - blk_aio_preadv(source, offset, &op->qiov, 0, mirror_read_complete, op); | ||
167 | - return ret; | ||
168 | + ret = blk_co_preadv(source, op->offset, op->bytes, &op->qiov, 0); | ||
169 | + mirror_read_complete(op, ret); | ||
170 | } | ||
171 | |||
172 | -static void mirror_do_zero_or_discard(MirrorBlockJob *s, | ||
173 | - int64_t offset, | ||
174 | - uint64_t bytes, | ||
175 | - bool is_discard) | ||
176 | +static void coroutine_fn mirror_co_zero(void *opaque) | ||
177 | { | ||
178 | - MirrorOp *op; | ||
179 | + MirrorOp *op = opaque; | ||
180 | + int ret; | ||
181 | |||
182 | - /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed | ||
183 | - * so the freeing in mirror_iteration_done is nop. */ | ||
184 | - op = g_new0(MirrorOp, 1); | ||
185 | - op->s = s; | ||
186 | - op->offset = offset; | ||
187 | - op->bytes = bytes; | ||
188 | + op->s->in_flight++; | ||
189 | + op->s->bytes_in_flight += op->bytes; | ||
190 | + *op->bytes_handled = op->bytes; | ||
191 | |||
192 | - s->in_flight++; | ||
193 | - s->bytes_in_flight += bytes; | ||
194 | - if (is_discard) { | ||
195 | - blk_aio_pdiscard(s->target, offset, | ||
196 | - op->bytes, mirror_write_complete, op); | ||
197 | - } else { | ||
198 | - blk_aio_pwrite_zeroes(s->target, offset, | ||
199 | - op->bytes, s->unmap ? BDRV_REQ_MAY_UNMAP : 0, | ||
200 | - mirror_write_complete, op); | ||
201 | - } | ||
202 | + ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes, | ||
203 | + op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0); | ||
204 | + mirror_write_complete(op, ret); | ||
205 | +} | ||
206 | + | ||
207 | +static void coroutine_fn mirror_co_discard(void *opaque) | ||
208 | +{ | ||
209 | + MirrorOp *op = opaque; | ||
210 | + int ret; | ||
211 | + | ||
212 | + op->s->in_flight++; | ||
213 | + op->s->bytes_in_flight += op->bytes; | ||
214 | + *op->bytes_handled = op->bytes; | ||
215 | + | ||
216 | + ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes); | ||
217 | + mirror_write_complete(op, ret); | ||
218 | } | ||
219 | |||
220 | static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset, | ||
221 | unsigned bytes, MirrorMethod mirror_method) | ||
222 | { | ||
223 | + MirrorOp *op; | ||
224 | + Coroutine *co; | ||
225 | + int64_t bytes_handled = -1; | ||
226 | + | ||
227 | + op = g_new(MirrorOp, 1); | ||
228 | + *op = (MirrorOp){ | ||
229 | + .s = s, | ||
230 | + .offset = offset, | ||
231 | + .bytes = bytes, | ||
232 | + .bytes_handled = &bytes_handled, | ||
233 | + }; | ||
234 | + | ||
235 | switch (mirror_method) { | ||
236 | case MIRROR_METHOD_COPY: | ||
237 | - return mirror_do_read(s, offset, bytes); | ||
238 | + co = qemu_coroutine_create(mirror_co_read, op); | ||
239 | + break; | ||
240 | case MIRROR_METHOD_ZERO: | ||
241 | + co = qemu_coroutine_create(mirror_co_zero, op); | ||
242 | + break; | ||
243 | case MIRROR_METHOD_DISCARD: | ||
244 | - mirror_do_zero_or_discard(s, offset, bytes, | ||
245 | - mirror_method == MIRROR_METHOD_DISCARD); | ||
246 | - return bytes; | ||
247 | + co = qemu_coroutine_create(mirror_co_discard, op); | ||
248 | + break; | ||
249 | default: | ||
250 | abort(); | ||
251 | } | ||
252 | + | ||
253 | + qemu_coroutine_enter(co); | ||
254 | + /* At this point, ownership of op has been moved to the coroutine | ||
255 | + * and the object may already be freed */ | ||
256 | + | ||
257 | + /* Assert that this value has been set */ | ||
258 | + assert(bytes_handled >= 0); | ||
259 | + | ||
260 | + /* Same assertion as in mirror_co_read() (and for mirror_co_read() | ||
261 | + * and mirror_co_discard(), bytes_handled == op->bytes, which | ||
262 | + * is the @bytes parameter given to this function) */ | ||
263 | + assert(bytes_handled <= UINT_MAX); | ||
264 | + return bytes_handled; | ||
265 | } | ||
266 | |||
267 | static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) | ||
268 | -- | ||
269 | 2.13.6 | ||
270 | |||
Deleted patch | |||
1 | From: Max Reitz <mreitz@redhat.com> | ||
2 | 1 | ||
3 | Attach a CoQueue to each in-flight operation so that, if we need to | ||
4 | wait for any of them, we can use it to wait instead of just blindly | ||
5 | yielding and hoping for some operation to wake us. | ||
6 | |||
7 | A later patch will use this infrastructure to allow requests accessing | ||
8 | the same area of the virtual disk to specifically wait for each other. | ||
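
The resulting pattern is small: each operation initializes its own
CoQueue, waiters block on exactly the operation they care about, and
completion wakes them all. In outline (a sketch condensed from the
diff below):

    qemu_co_queue_init(&op->waiting_requests);        /* at creation */

    qemu_co_queue_wait(&op->waiting_requests, NULL);  /* waiter side */

    qemu_co_queue_restart_all(&op->waiting_requests); /* on completion,
                                                         before g_free(op) */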
9 | |||
10 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
11 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
12 | Message-id: 20180613181823.13618-4-mreitz@redhat.com | ||
13 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
14 | --- | ||
15 | block/mirror.c | 34 +++++++++++++++++++++++----------- | ||
16 | 1 file changed, 23 insertions(+), 11 deletions(-) | ||
17 | |||
18 | diff --git a/block/mirror.c b/block/mirror.c | ||
19 | index XXXXXXX..XXXXXXX 100644 | ||
20 | --- a/block/mirror.c | ||
21 | +++ b/block/mirror.c | ||
22 | @@ -XXX,XX +XXX,XX @@ | ||
23 | |||
24 | #include "qemu/osdep.h" | ||
25 | #include "qemu/cutils.h" | ||
26 | +#include "qemu/coroutine.h" | ||
27 | #include "trace.h" | ||
28 | #include "block/blockjob_int.h" | ||
29 | #include "block/block_int.h" | ||
30 | @@ -XXX,XX +XXX,XX @@ typedef struct MirrorBuffer { | ||
31 | QSIMPLEQ_ENTRY(MirrorBuffer) next; | ||
32 | } MirrorBuffer; | ||
33 | |||
34 | +typedef struct MirrorOp MirrorOp; | ||
35 | + | ||
36 | typedef struct MirrorBlockJob { | ||
37 | BlockJob common; | ||
38 | BlockBackend *target; | ||
39 | @@ -XXX,XX +XXX,XX @@ typedef struct MirrorBlockJob { | ||
40 | unsigned long *in_flight_bitmap; | ||
41 | int in_flight; | ||
42 | int64_t bytes_in_flight; | ||
43 | + QTAILQ_HEAD(MirrorOpList, MirrorOp) ops_in_flight; | ||
44 | int ret; | ||
45 | bool unmap; | ||
46 | - bool waiting_for_io; | ||
47 | int target_cluster_size; | ||
48 | int max_iov; | ||
49 | bool initial_zeroing_ongoing; | ||
50 | } MirrorBlockJob; | ||
51 | |||
52 | -typedef struct MirrorOp { | ||
53 | +struct MirrorOp { | ||
54 | MirrorBlockJob *s; | ||
55 | QEMUIOVector qiov; | ||
56 | int64_t offset; | ||
57 | @@ -XXX,XX +XXX,XX @@ typedef struct MirrorOp { | ||
58 | /* The pointee is set by mirror_co_read(), mirror_co_zero(), and | ||
59 | * mirror_co_discard() before yielding for the first time */ | ||
60 | int64_t *bytes_handled; | ||
61 | -} MirrorOp; | ||
62 | + | ||
63 | + CoQueue waiting_requests; | ||
64 | + | ||
65 | + QTAILQ_ENTRY(MirrorOp) next; | ||
66 | +}; | ||
67 | |||
68 | typedef enum MirrorMethod { | ||
69 | MIRROR_METHOD_COPY, | ||
70 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret) | ||
71 | |||
72 | chunk_num = op->offset / s->granularity; | ||
73 | nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity); | ||
74 | + | ||
75 | bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks); | ||
76 | + QTAILQ_REMOVE(&s->ops_in_flight, op, next); | ||
77 | if (ret >= 0) { | ||
78 | if (s->cow_bitmap) { | ||
79 | bitmap_set(s->cow_bitmap, chunk_num, nb_chunks); | ||
80 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret) | ||
81 | } | ||
82 | } | ||
83 | qemu_iovec_destroy(&op->qiov); | ||
84 | - g_free(op); | ||
85 | |||
86 | - if (s->waiting_for_io) { | ||
87 | - qemu_coroutine_enter(s->common.job.co); | ||
88 | - } | ||
89 | + qemu_co_queue_restart_all(&op->waiting_requests); | ||
90 | + g_free(op); | ||
91 | } | ||
92 | |||
93 | static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret) | ||
94 | @@ -XXX,XX +XXX,XX @@ static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset, | ||
95 | |||
96 | static inline void mirror_wait_for_io(MirrorBlockJob *s) | ||
97 | { | ||
98 | - assert(!s->waiting_for_io); | ||
99 | - s->waiting_for_io = true; | ||
100 | - qemu_coroutine_yield(); | ||
101 | - s->waiting_for_io = false; | ||
102 | + MirrorOp *op; | ||
103 | + | ||
104 | + op = QTAILQ_FIRST(&s->ops_in_flight); | ||
105 | + assert(op); | ||
106 | + qemu_co_queue_wait(&op->waiting_requests, NULL); | ||
107 | } | ||
108 | |||
109 | /* Perform a mirror copy operation. | ||
110 | @@ -XXX,XX +XXX,XX @@ static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset, | ||
111 | .bytes = bytes, | ||
112 | .bytes_handled = &bytes_handled, | ||
113 | }; | ||
114 | + qemu_co_queue_init(&op->waiting_requests); | ||
115 | |||
116 | switch (mirror_method) { | ||
117 | case MIRROR_METHOD_COPY: | ||
118 | @@ -XXX,XX +XXX,XX @@ static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset, | ||
119 | abort(); | ||
120 | } | ||
121 | |||
122 | + QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next); | ||
123 | qemu_coroutine_enter(co); | ||
124 | /* At this point, ownership of op has been moved to the coroutine | ||
125 | * and the object may already be freed */ | ||
126 | @@ -XXX,XX +XXX,XX @@ static void mirror_start_job(const char *job_id, BlockDriverState *bs, | ||
127 | } | ||
128 | } | ||
129 | |||
130 | + QTAILQ_INIT(&s->ops_in_flight); | ||
131 | + | ||
132 | trace_mirror_start(bs, s, opaque); | ||
133 | job_start(&s->common.job); | ||
134 | return; | ||
135 | -- | ||
136 | 2.13.6 | ||
137 | |||
Deleted patch | |||
1 | From: Max Reitz <mreitz@redhat.com> | ||
2 | 1 | ||
3 | This patch makes the mirror code differentiate between simply waiting | ||
4 | for any operation to complete (mirror_wait_for_free_in_flight_slot()) | ||
5 | and specifically waiting for all operations touching a certain range of | ||
6 | the virtual disk to complete (mirror_wait_on_conflicts()). | ||
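
The conflict check is range-based: a request only has to wait for
in-flight operations whose chunk ranges overlap its own. In outline
(condensed from mirror_wait_on_conflicts() in the diff below):

    /* Wait while any other in-flight op overlaps our chunk range. */
    QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
        if (op != self &&
            ranges_overlap(self_start_chunk, self_nb_chunks,
                           op_start_chunk, op_nb_chunks)) {
            qemu_co_queue_wait(&op->waiting_requests, NULL);
            break;  /* re-evaluate from the top after waking up */
        }
    }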
7 | |||
8 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
9 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
10 | Message-id: 20180613181823.13618-5-mreitz@redhat.com | ||
11 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
12 | --- | ||
13 | block/mirror.c | 102 +++++++++++++++++++++++++++++++++++++++++++++++---------- | ||
14 | 1 file changed, 84 insertions(+), 18 deletions(-) | ||
15 | |||
16 | diff --git a/block/mirror.c b/block/mirror.c | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/block/mirror.c | ||
19 | +++ b/block/mirror.c | ||
20 | @@ -XXX,XX +XXX,XX @@ | ||
21 | #include "qemu/osdep.h" | ||
22 | #include "qemu/cutils.h" | ||
23 | #include "qemu/coroutine.h" | ||
24 | +#include "qemu/range.h" | ||
25 | #include "trace.h" | ||
26 | #include "block/blockjob_int.h" | ||
27 | #include "block/block_int.h" | ||
28 | @@ -XXX,XX +XXX,XX @@ struct MirrorOp { | ||
29 | * mirror_co_discard() before yielding for the first time */ | ||
30 | int64_t *bytes_handled; | ||
31 | |||
32 | + bool is_pseudo_op; | ||
33 | CoQueue waiting_requests; | ||
34 | |||
35 | QTAILQ_ENTRY(MirrorOp) next; | ||
36 | @@ -XXX,XX +XXX,XX @@ static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read, | ||
37 | } | ||
38 | } | ||
39 | |||
40 | +static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self, | ||
41 | + MirrorBlockJob *s, | ||
42 | + uint64_t offset, | ||
43 | + uint64_t bytes) | ||
44 | +{ | ||
45 | + uint64_t self_start_chunk = offset / s->granularity; | ||
46 | + uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity); | ||
47 | + uint64_t self_nb_chunks = self_end_chunk - self_start_chunk; | ||
48 | + | ||
49 | + while (find_next_bit(s->in_flight_bitmap, self_end_chunk, | ||
50 | + self_start_chunk) < self_end_chunk && | ||
51 | + s->ret >= 0) | ||
52 | + { | ||
53 | + MirrorOp *op; | ||
54 | + | ||
55 | + QTAILQ_FOREACH(op, &s->ops_in_flight, next) { | ||
56 | + uint64_t op_start_chunk = op->offset / s->granularity; | ||
57 | + uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes, | ||
58 | + s->granularity) - | ||
59 | + op_start_chunk; | ||
60 | + | ||
61 | + if (op == self) { | ||
62 | + continue; | ||
63 | + } | ||
64 | + | ||
65 | + if (ranges_overlap(self_start_chunk, self_nb_chunks, | ||
66 | + op_start_chunk, op_nb_chunks)) | ||
67 | + { | ||
68 | + qemu_co_queue_wait(&op->waiting_requests, NULL); | ||
69 | + break; | ||
70 | + } | ||
71 | + } | ||
72 | + } | ||
73 | +} | ||
74 | + | ||
75 | static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret) | ||
76 | { | ||
77 | MirrorBlockJob *s = op->s; | ||
78 | @@ -XXX,XX +XXX,XX @@ static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset, | ||
79 | return ret; | ||
80 | } | ||
81 | |||
82 | -static inline void mirror_wait_for_io(MirrorBlockJob *s) | ||
83 | +static inline void mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s) | ||
84 | { | ||
85 | MirrorOp *op; | ||
86 | |||
87 | - op = QTAILQ_FIRST(&s->ops_in_flight); | ||
88 | - assert(op); | ||
89 | - qemu_co_queue_wait(&op->waiting_requests, NULL); | ||
90 | + QTAILQ_FOREACH(op, &s->ops_in_flight, next) { | ||
91 | + /* Do not wait on pseudo ops, because it may in turn wait on | ||
92 | + * some other operation to start, which may in fact be the | ||
93 | + * caller of this function. Since there is only one pseudo op | ||
94 | + * at any given time, we will always find some real operation | ||
95 | + * to wait on. */ | ||
96 | + if (!op->is_pseudo_op) { | ||
97 | + qemu_co_queue_wait(&op->waiting_requests, NULL); | ||
98 | + return; | ||
99 | + } | ||
100 | + } | ||
101 | + abort(); | ||
102 | } | ||
103 | |||
104 | /* Perform a mirror copy operation. | ||
105 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn mirror_co_read(void *opaque) | ||
106 | |||
107 | while (s->buf_free_count < nb_chunks) { | ||
108 | trace_mirror_yield_in_flight(s, op->offset, s->in_flight); | ||
109 | - mirror_wait_for_io(s); | ||
110 | + mirror_wait_for_free_in_flight_slot(s); | ||
111 | } | ||
112 | |||
113 | /* Now make a QEMUIOVector taking enough granularity-sized chunks | ||
114 | @@ -XXX,XX +XXX,XX @@ static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset, | ||
115 | static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) | ||
116 | { | ||
117 | BlockDriverState *source = s->source; | ||
118 | - int64_t offset, first_chunk; | ||
119 | - uint64_t delay_ns = 0; | ||
120 | + MirrorOp *pseudo_op; | ||
121 | + int64_t offset; | ||
122 | + uint64_t delay_ns = 0, ret = 0; | ||
123 | /* At least the first dirty chunk is mirrored in one iteration. */ | ||
124 | int nb_chunks = 1; | ||
125 | bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target)); | ||
126 | @@ -XXX,XX +XXX,XX @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) | ||
127 | } | ||
128 | bdrv_dirty_bitmap_unlock(s->dirty_bitmap); | ||
129 | |||
130 | - first_chunk = offset / s->granularity; | ||
131 | - while (test_bit(first_chunk, s->in_flight_bitmap)) { | ||
132 | - trace_mirror_yield_in_flight(s, offset, s->in_flight); | ||
133 | - mirror_wait_for_io(s); | ||
134 | - } | ||
135 | + mirror_wait_on_conflicts(NULL, s, offset, 1); | ||
136 | |||
137 | job_pause_point(&s->common.job); | ||
138 | |||
139 | @@ -XXX,XX +XXX,XX @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) | ||
140 | nb_chunks * s->granularity); | ||
141 | bdrv_dirty_bitmap_unlock(s->dirty_bitmap); | ||
142 | |||
143 | + /* Before claiming an area in the in-flight bitmap, we have to | ||
144 | + * create a MirrorOp for it so that conflicting requests can wait | ||
145 | + * for it. mirror_perform() will create the real MirrorOps later, | ||
146 | + * for now we just create a pseudo operation that will wake up all | ||
147 | + * conflicting requests once all real operations have been | ||
148 | + * launched. */ | ||
149 | + pseudo_op = g_new(MirrorOp, 1); | ||
150 | + *pseudo_op = (MirrorOp){ | ||
151 | + .offset = offset, | ||
152 | + .bytes = nb_chunks * s->granularity, | ||
153 | + .is_pseudo_op = true, | ||
154 | + }; | ||
155 | + qemu_co_queue_init(&pseudo_op->waiting_requests); | ||
156 | + QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next); | ||
157 | + | ||
158 | bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks); | ||
159 | while (nb_chunks > 0 && offset < s->bdev_length) { | ||
160 | int ret; | ||
161 | @@ -XXX,XX +XXX,XX @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) | ||
162 | |||
163 | while (s->in_flight >= MAX_IN_FLIGHT) { | ||
164 | trace_mirror_yield_in_flight(s, offset, s->in_flight); | ||
165 | - mirror_wait_for_io(s); | ||
166 | + mirror_wait_for_free_in_flight_slot(s); | ||
167 | } | ||
168 | |||
169 | if (s->ret < 0) { | ||
170 | - return 0; | ||
171 | + ret = 0; | ||
172 | + goto fail; | ||
173 | } | ||
174 | |||
175 | io_bytes = mirror_clip_bytes(s, offset, io_bytes); | ||
176 | @@ -XXX,XX +XXX,XX @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) | ||
177 | nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity); | ||
178 | delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct); | ||
179 | } | ||
180 | - return delay_ns; | ||
181 | + | ||
182 | + ret = delay_ns; | ||
183 | +fail: | ||
184 | + QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next); | ||
185 | + qemu_co_queue_restart_all(&pseudo_op->waiting_requests); | ||
186 | + g_free(pseudo_op); | ||
187 | + | ||
188 | + return ret; | ||
189 | } | ||
190 | |||
191 | static void mirror_free_init(MirrorBlockJob *s) | ||
192 | @@ -XXX,XX +XXX,XX @@ static void mirror_free_init(MirrorBlockJob *s) | ||
193 | static void mirror_wait_for_all_io(MirrorBlockJob *s) | ||
194 | { | ||
195 | while (s->in_flight > 0) { | ||
196 | - mirror_wait_for_io(s); | ||
197 | + mirror_wait_for_free_in_flight_slot(s); | ||
198 | } | ||
199 | } | ||
200 | |||
201 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s) | ||
202 | if (s->in_flight >= MAX_IN_FLIGHT) { | ||
203 | trace_mirror_yield(s, UINT64_MAX, s->buf_free_count, | ||
204 | s->in_flight); | ||
205 | - mirror_wait_for_io(s); | ||
206 | + mirror_wait_for_free_in_flight_slot(s); | ||
207 | continue; | ||
208 | } | ||
209 | |||
210 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn mirror_run(void *opaque) | ||
211 | if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 || | ||
212 | (cnt == 0 && s->in_flight > 0)) { | ||
213 | trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight); | ||
214 | - mirror_wait_for_io(s); | ||
215 | + mirror_wait_for_free_in_flight_slot(s); | ||
216 | continue; | ||
217 | } else if (cnt != 0) { | ||
218 | delay_ns = mirror_iteration(s); | ||
219 | -- | ||
220 | 2.13.6 | ||
221 | |||
Deleted patch | |||
1 | From: Max Reitz <mreitz@redhat.com> | ||
2 | 1 | ||
3 | With this, the mirror_top_bs is no longer just a technically required | ||
4 | node in the BDS graph but actually represents the block job operation. | ||
5 | |||
6 | Also, drop MirrorBlockJob.source, as we can reach it through | ||
7 | mirror_top_bs->backing. | ||
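
In other words, the source node can now be recovered from the graph
itself; a hypothetical accessor (not part of the patch) would be a
one-liner:

    static BlockDriverState *mirror_source_bs(MirrorBlockJob *s)
    {
        /* The source is the backing child of the mirror filter node. */
        return s->mirror_top_bs->backing->bs;
    }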
8 | |||
9 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
10 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
11 | Reviewed-by: Alberto Garcia <berto@igalia.com> | ||
12 | Message-id: 20180613181823.13618-6-mreitz@redhat.com | ||
13 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
14 | --- | ||
15 | block/mirror.c | 14 ++++++-------- | ||
16 | 1 file changed, 6 insertions(+), 8 deletions(-) | ||
17 | |||
18 | diff --git a/block/mirror.c b/block/mirror.c | ||
19 | index XXXXXXX..XXXXXXX 100644 | ||
20 | --- a/block/mirror.c | ||
21 | +++ b/block/mirror.c | ||
22 | @@ -XXX,XX +XXX,XX @@ typedef struct MirrorBlockJob { | ||
23 | BlockJob common; | ||
24 | BlockBackend *target; | ||
25 | BlockDriverState *mirror_top_bs; | ||
26 | - BlockDriverState *source; | ||
27 | BlockDriverState *base; | ||
28 | |||
29 | /* The name of the graph node to replace */ | ||
30 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn mirror_co_read(void *opaque) | ||
31 | { | ||
32 | MirrorOp *op = opaque; | ||
33 | MirrorBlockJob *s = op->s; | ||
34 | - BlockBackend *source = s->common.blk; | ||
35 | int nb_chunks; | ||
36 | uint64_t ret; | ||
37 | uint64_t max_bytes; | ||
38 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn mirror_co_read(void *opaque) | ||
39 | s->bytes_in_flight += op->bytes; | ||
40 | trace_mirror_one_iteration(s, op->offset, op->bytes); | ||
41 | |||
42 | - ret = blk_co_preadv(source, op->offset, op->bytes, &op->qiov, 0); | ||
43 | + ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes, | ||
44 | + &op->qiov, 0); | ||
45 | mirror_read_complete(op, ret); | ||
46 | } | ||
47 | |||
48 | @@ -XXX,XX +XXX,XX @@ static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset, | ||
49 | |||
50 | static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) | ||
51 | { | ||
52 | - BlockDriverState *source = s->source; | ||
53 | + BlockDriverState *source = s->mirror_top_bs->backing->bs; | ||
54 | MirrorOp *pseudo_op; | ||
55 | int64_t offset; | ||
56 | uint64_t delay_ns = 0, ret = 0; | ||
57 | @@ -XXX,XX +XXX,XX @@ static void mirror_exit(Job *job, void *opaque) | ||
58 | BlockJob *bjob = &s->common; | ||
59 | MirrorExitData *data = opaque; | ||
60 | AioContext *replace_aio_context = NULL; | ||
61 | - BlockDriverState *src = s->source; | ||
62 | + BlockDriverState *src = s->mirror_top_bs->backing->bs; | ||
63 | BlockDriverState *target_bs = blk_bs(s->target); | ||
64 | BlockDriverState *mirror_top_bs = s->mirror_top_bs; | ||
65 | Error *local_err = NULL; | ||
66 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s) | ||
67 | { | ||
68 | int64_t offset; | ||
69 | BlockDriverState *base = s->base; | ||
70 | - BlockDriverState *bs = s->source; | ||
71 | + BlockDriverState *bs = s->mirror_top_bs->backing->bs; | ||
72 | BlockDriverState *target_bs = blk_bs(s->target); | ||
73 | int ret; | ||
74 | int64_t count; | ||
75 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn mirror_run(void *opaque) | ||
76 | { | ||
77 | MirrorBlockJob *s = opaque; | ||
78 | MirrorExitData *data; | ||
79 | - BlockDriverState *bs = s->source; | ||
80 | + BlockDriverState *bs = s->mirror_top_bs->backing->bs; | ||
81 | BlockDriverState *target_bs = blk_bs(s->target); | ||
82 | bool need_drain = true; | ||
83 | int64_t length; | ||
84 | @@ -XXX,XX +XXX,XX @@ static void mirror_start_job(const char *job_id, BlockDriverState *bs, | ||
85 | /* The block job now has a reference to this node */ | ||
86 | bdrv_unref(mirror_top_bs); | ||
87 | |||
88 | - s->source = bs; | ||
89 | s->mirror_top_bs = mirror_top_bs; | ||
90 | |||
91 | /* No resize for the target either; while the mirror is still running, a | ||
92 | -- | ||
93 | 2.13.6 | ||
94 | |||
Deleted patch | |||
1 | From: Max Reitz <mreitz@redhat.com> | ||
2 | 1 | ||
3 | Currently, bdrv_replace_node() refuses to create loops from one BDS to | ||
4 | itself if the BDS to be replaced is the backing node of the BDS to | ||
5 | replace it: Say there is a node A and a node B. Replacing B by A means | ||
6 | making all references to B point to A. If B is a child of A (i.e. A has | ||
7 | a reference to B), that would mean we would have to make this reference | ||
8 | point to A itself -- so we'd create a loop. | ||
9 | |||
10 | bdrv_replace_node() (through should_update_child()) refuses to do so if | ||
11 | B is the backing node of A. But there is no reason to permit such | ||
12 | loops when B is some other child of A, either. The BDS graph should | ||
13 | never contain loops, so we should always refuse to create them. | ||
14 | |||
15 | If B is a child of A and B is to be replaced by A, we should simply | ||
16 | leave B in place there because it is the most sensible choice. | ||
17 | |||
18 | A more specific argument would be: Putting filter drivers into the BDS | ||
19 | graph is basically the same as appending an overlay to a backing chain. | ||
20 | But the main child BDS of a filter driver is not "backing" but "file", | ||
21 | so restricting the no-loop rule to backing nodes would fail here. | ||
22 | |||
23 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
24 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
25 | Reviewed-by: Alberto Garcia <berto@igalia.com> | ||
26 | Message-id: 20180613181823.13618-7-mreitz@redhat.com | ||
27 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
28 | --- | ||
29 | block.c | 44 ++++++++++++++++++++++++++++++++++---------- | ||
30 | 1 file changed, 34 insertions(+), 10 deletions(-) | ||
31 | |||
32 | diff --git a/block.c b/block.c | ||
33 | index XXXXXXX..XXXXXXX 100644 | ||
34 | --- a/block.c | ||
35 | +++ b/block.c | ||
36 | @@ -XXX,XX +XXX,XX @@ static bool should_update_child(BdrvChild *c, BlockDriverState *to) | ||
37 | return false; | ||
38 | } | ||
39 | |||
40 | - if (c->role == &child_backing) { | ||
41 | - /* If @from is a backing file of @to, ignore the child to avoid | ||
42 | - * creating a loop. We only want to change the pointer of other | ||
43 | - * parents. */ | ||
44 | - QLIST_FOREACH(to_c, &to->children, next) { | ||
45 | - if (to_c == c) { | ||
46 | - break; | ||
47 | - } | ||
48 | - } | ||
49 | - if (to_c) { | ||
50 | + /* If the child @c belongs to the BDS @to, replacing the current | ||
51 | + * c->bs by @to would mean to create a loop. | ||
52 | + * | ||
53 | + * Such a case occurs when appending a BDS to a backing chain. | ||
54 | + * For instance, imagine the following chain: | ||
55 | + * | ||
56 | + * guest device -> node A -> further backing chain... | ||
57 | + * | ||
58 | + * Now we create a new BDS B which we want to put on top of this | ||
59 | + * chain, so we first attach A as its backing node: | ||
60 | + * | ||
61 | + * node B | ||
62 | + * | | ||
63 | + * v | ||
64 | + * guest device -> node A -> further backing chain... | ||
65 | + * | ||
66 | + * Finally we want to replace A by B. When doing that, we want to | ||
67 | + * replace all pointers to A by pointers to B -- except for the | ||
68 | + * pointer from B because (1) that would create a loop, and (2) | ||
69 | + * that pointer should simply stay intact: | ||
70 | + * | ||
71 | + * guest device -> node B | ||
72 | + * | | ||
73 | + * v | ||
74 | + * node A -> further backing chain... | ||
75 | + * | ||
76 | + * In general, when replacing a node A (c->bs) by a node B (@to), | ||
77 | + * if A is a child of B, that means we cannot replace A by B there | ||
78 | + * because that would create a loop. Silently detaching A from B | ||
79 | + * is also not really an option. So overall just leaving A in | ||
80 | + * place there is the most sensible choice. */ | ||
81 | + QLIST_FOREACH(to_c, &to->children, next) { | ||
82 | + if (to_c == c) { | ||
83 | return false; | ||
84 | } | ||
85 | } | ||
86 | @@ -XXX,XX +XXX,XX @@ void bdrv_replace_node(BlockDriverState *from, BlockDriverState *to, | ||
87 | |||
88 | /* Put all parents into @list and calculate their cumulative permissions */ | ||
89 | QLIST_FOREACH_SAFE(c, &from->parents, next_parent, next) { | ||
90 | + assert(c->bs == from); | ||
91 | if (!should_update_child(c, to)) { | ||
92 | continue; | ||
93 | } | ||
94 | -- | ||
95 | 2.13.6 | ||
96 | |||
Deleted patch | |||
1 | From: Max Reitz <mreitz@redhat.com> | ||
2 | 1 | ||
3 | This new parameter allows the caller to just query the next dirty | ||
4 | position without moving the iterator. | ||
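
For example, a caller can now peek at the next dirty position without
consuming it (hypothetical usage, assuming an initialized iterator hbi
over a bitmap that is not modified in between):

    int64_t peek = hbitmap_iter_next(&hbi, false); /* does not advance */
    int64_t next = hbitmap_iter_next(&hbi, true);  /* same bit, consumed */
    assert(peek == next);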
5 | |||
6 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
7 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
8 | Reviewed-by: John Snow <jsnow@redhat.com> | ||
9 | Message-id: 20180613181823.13618-8-mreitz@redhat.com | ||
10 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
11 | --- | ||
12 | include/qemu/hbitmap.h | 5 ++++- | ||
13 | block/backup.c | 2 +- | ||
14 | block/dirty-bitmap.c | 2 +- | ||
15 | tests/test-hbitmap.c | 26 +++++++++++++------------- | ||
16 | util/hbitmap.c | 10 +++++++--- | ||
17 | 5 files changed, 26 insertions(+), 19 deletions(-) | ||
18 | |||
19 | diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h | ||
20 | index XXXXXXX..XXXXXXX 100644 | ||
21 | --- a/include/qemu/hbitmap.h | ||
22 | +++ b/include/qemu/hbitmap.h | ||
23 | @@ -XXX,XX +XXX,XX @@ void hbitmap_free_meta(HBitmap *hb); | ||
24 | /** | ||
25 | * hbitmap_iter_next: | ||
26 | * @hbi: HBitmapIter to operate on. | ||
27 | + * @advance: If true, advance the iterator. Otherwise, the next call | ||
28 | + * of this function will return the same result (if that | ||
29 | + * position is still dirty). | ||
30 | * | ||
31 | * Return the next bit that is set in @hbi's associated HBitmap, | ||
32 | * or -1 if all remaining bits are zero. | ||
33 | */ | ||
34 | -int64_t hbitmap_iter_next(HBitmapIter *hbi); | ||
35 | +int64_t hbitmap_iter_next(HBitmapIter *hbi, bool advance); | ||
36 | |||
37 | /** | ||
38 | * hbitmap_iter_next_word: | ||
39 | diff --git a/block/backup.c b/block/backup.c | ||
40 | index XXXXXXX..XXXXXXX 100644 | ||
41 | --- a/block/backup.c | ||
42 | +++ b/block/backup.c | ||
43 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job) | ||
44 | HBitmapIter hbi; | ||
45 | |||
46 | hbitmap_iter_init(&hbi, job->copy_bitmap, 0); | ||
47 | - while ((cluster = hbitmap_iter_next(&hbi)) != -1) { | ||
48 | + while ((cluster = hbitmap_iter_next(&hbi, true)) != -1) { | ||
49 | do { | ||
50 | if (yield_and_check(job)) { | ||
51 | return 0; | ||
52 | diff --git a/block/dirty-bitmap.c b/block/dirty-bitmap.c | ||
53 | index XXXXXXX..XXXXXXX 100644 | ||
54 | --- a/block/dirty-bitmap.c | ||
55 | +++ b/block/dirty-bitmap.c | ||
56 | @@ -XXX,XX +XXX,XX @@ void bdrv_dirty_iter_free(BdrvDirtyBitmapIter *iter) | ||
57 | |||
58 | int64_t bdrv_dirty_iter_next(BdrvDirtyBitmapIter *iter) | ||
59 | { | ||
60 | - return hbitmap_iter_next(&iter->hbi); | ||
61 | + return hbitmap_iter_next(&iter->hbi, true); | ||
62 | } | ||
63 | |||
64 | /* Called within bdrv_dirty_bitmap_lock..unlock */ | ||
65 | diff --git a/tests/test-hbitmap.c b/tests/test-hbitmap.c | ||
66 | index XXXXXXX..XXXXXXX 100644 | ||
67 | --- a/tests/test-hbitmap.c | ||
68 | +++ b/tests/test-hbitmap.c | ||
69 | @@ -XXX,XX +XXX,XX @@ static void hbitmap_test_check(TestHBitmapData *data, | ||
70 | |||
71 | i = first; | ||
72 | for (;;) { | ||
73 | - next = hbitmap_iter_next(&hbi); | ||
74 | + next = hbitmap_iter_next(&hbi, true); | ||
75 | if (next < 0) { | ||
76 | next = data->size; | ||
77 | } | ||
78 | @@ -XXX,XX +XXX,XX @@ static void test_hbitmap_iter_granularity(TestHBitmapData *data, | ||
79 | /* Note that hbitmap_test_check has to be invoked manually in this test. */ | ||
80 | hbitmap_test_init(data, 131072 << 7, 7); | ||
81 | hbitmap_iter_init(&hbi, data->hb, 0); | ||
82 | - g_assert_cmpint(hbitmap_iter_next(&hbi), <, 0); | ||
83 | + g_assert_cmpint(hbitmap_iter_next(&hbi, true), <, 0); | ||
84 | |||
85 | hbitmap_test_set(data, ((L2 + L1 + 1) << 7) + 8, 8); | ||
86 | hbitmap_iter_init(&hbi, data->hb, 0); | ||
87 | - g_assert_cmpint(hbitmap_iter_next(&hbi), ==, (L2 + L1 + 1) << 7); | ||
88 | - g_assert_cmpint(hbitmap_iter_next(&hbi), <, 0); | ||
89 | + g_assert_cmpint(hbitmap_iter_next(&hbi, true), ==, (L2 + L1 + 1) << 7); | ||
90 | + g_assert_cmpint(hbitmap_iter_next(&hbi, true), <, 0); | ||
91 | |||
92 | hbitmap_iter_init(&hbi, data->hb, (L2 + L1 + 2) << 7); | ||
93 | - g_assert_cmpint(hbitmap_iter_next(&hbi), <, 0); | ||
94 | + g_assert_cmpint(hbitmap_iter_next(&hbi, true), <, 0); | ||
95 | |||
96 | hbitmap_test_set(data, (131072 << 7) - 8, 8); | ||
97 | hbitmap_iter_init(&hbi, data->hb, 0); | ||
98 | - g_assert_cmpint(hbitmap_iter_next(&hbi), ==, (L2 + L1 + 1) << 7); | ||
99 | - g_assert_cmpint(hbitmap_iter_next(&hbi), ==, 131071 << 7); | ||
100 | - g_assert_cmpint(hbitmap_iter_next(&hbi), <, 0); | ||
101 | + g_assert_cmpint(hbitmap_iter_next(&hbi, true), ==, (L2 + L1 + 1) << 7); | ||
102 | + g_assert_cmpint(hbitmap_iter_next(&hbi, true), ==, 131071 << 7); | ||
103 | + g_assert_cmpint(hbitmap_iter_next(&hbi, true), <, 0); | ||
104 | |||
105 | hbitmap_iter_init(&hbi, data->hb, (L2 + L1 + 2) << 7); | ||
106 | - g_assert_cmpint(hbitmap_iter_next(&hbi), ==, 131071 << 7); | ||
107 | - g_assert_cmpint(hbitmap_iter_next(&hbi), <, 0); | ||
108 | + g_assert_cmpint(hbitmap_iter_next(&hbi, true), ==, 131071 << 7); | ||
109 | + g_assert_cmpint(hbitmap_iter_next(&hbi, true), <, 0); | ||
110 | } | ||
111 | |||
112 | static void hbitmap_test_set_boundary_bits(TestHBitmapData *data, ssize_t diff) | ||
113 | @@ -XXX,XX +XXX,XX @@ static void test_hbitmap_serialize_zeroes(TestHBitmapData *data, | ||
114 | for (i = 0; i < num_positions; i++) { | ||
115 | hbitmap_deserialize_zeroes(data->hb, positions[i], min_l1, true); | ||
116 | hbitmap_iter_init(&iter, data->hb, 0); | ||
117 | - next = hbitmap_iter_next(&iter); | ||
118 | + next = hbitmap_iter_next(&iter, true); | ||
119 | if (i == num_positions - 1) { | ||
120 | g_assert_cmpint(next, ==, -1); | ||
121 | } else { | ||
122 | @@ -XXX,XX +XXX,XX @@ static void test_hbitmap_iter_and_reset(TestHBitmapData *data, | ||
123 | |||
124 | hbitmap_iter_init(&hbi, data->hb, BITS_PER_LONG - 1); | ||
125 | |||
126 | - hbitmap_iter_next(&hbi); | ||
127 | + hbitmap_iter_next(&hbi, true); | ||
128 | |||
129 | hbitmap_reset_all(data->hb); | ||
130 | - hbitmap_iter_next(&hbi); | ||
131 | + hbitmap_iter_next(&hbi, true); | ||
132 | } | ||
133 | |||
134 | static void test_hbitmap_next_zero_check(TestHBitmapData *data, int64_t start) | ||
135 | diff --git a/util/hbitmap.c b/util/hbitmap.c | ||
136 | index XXXXXXX..XXXXXXX 100644 | ||
137 | --- a/util/hbitmap.c | ||
138 | +++ b/util/hbitmap.c | ||
139 | @@ -XXX,XX +XXX,XX @@ unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi) | ||
140 | return cur; | ||
141 | } | ||
142 | |||
143 | -int64_t hbitmap_iter_next(HBitmapIter *hbi) | ||
144 | +int64_t hbitmap_iter_next(HBitmapIter *hbi, bool advance) | ||
145 | { | ||
146 | unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1] & | ||
147 | hbi->hb->levels[HBITMAP_LEVELS - 1][hbi->pos]; | ||
148 | @@ -XXX,XX +XXX,XX @@ int64_t hbitmap_iter_next(HBitmapIter *hbi) | ||
149 | } | ||
150 | } | ||
151 | |||
152 | - /* The next call will resume work from the next bit. */ | ||
153 | - hbi->cur[HBITMAP_LEVELS - 1] = cur & (cur - 1); | ||
154 | + if (advance) { | ||
155 | + /* The next call will resume work from the next bit. */ | ||
156 | + hbi->cur[HBITMAP_LEVELS - 1] = cur & (cur - 1); | ||
157 | + } else { | ||
158 | + hbi->cur[HBITMAP_LEVELS - 1] = cur; | ||
159 | + } | ||
160 | item = ((uint64_t)hbi->pos << BITS_PER_LEVEL) + ctzl(cur); | ||
161 | |||
162 | return item << hbi->granularity; | ||
163 | -- | ||
164 | 2.13.6 | ||
165 | |||
Deleted patch | |||
1 | From: Max Reitz <mreitz@redhat.com> | ||
2 | 1 | ||
3 | Add a function that wraps hbitmap_iter_next() and always calls it in | ||
4 | non-advancing mode first, and in advancing mode next. The result should | ||
5 | always be the same. | ||
6 | |||
7 | By using this function everywhere we called hbitmap_iter_next() before, | ||
8 | we should get good test coverage for non-advancing hbitmap_iter_next(). | ||
9 | |||
10 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
11 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
12 | Reviewed-by: John Snow <jsnow@redhat.com> | ||
13 | Message-id: 20180613181823.13618-9-mreitz@redhat.com | ||
14 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
15 | --- | ||
16 | tests/test-hbitmap.c | 36 ++++++++++++++++++++++++------------ | ||
17 | 1 file changed, 24 insertions(+), 12 deletions(-) | ||
18 | |||
19 | diff --git a/tests/test-hbitmap.c b/tests/test-hbitmap.c | ||
20 | index XXXXXXX..XXXXXXX 100644 | ||
21 | --- a/tests/test-hbitmap.c | ||
22 | +++ b/tests/test-hbitmap.c | ||
23 | @@ -XXX,XX +XXX,XX @@ typedef struct TestHBitmapData { | ||
24 | } TestHBitmapData; | ||
25 | |||
26 | |||
27 | +static int64_t check_hbitmap_iter_next(HBitmapIter *hbi) | ||
28 | +{ | ||
29 | + int next0, next1; | ||
30 | + | ||
31 | + next0 = hbitmap_iter_next(hbi, false); | ||
32 | + next1 = hbitmap_iter_next(hbi, true); | ||
33 | + | ||
34 | + g_assert_cmpint(next0, ==, next1); | ||
35 | + | ||
36 | + return next0; | ||
37 | +} | ||
38 | + | ||
39 | /* Check that the HBitmap and the shadow bitmap contain the same data, | ||
40 | * ignoring the same "first" bits. | ||
41 | */ | ||
42 | @@ -XXX,XX +XXX,XX @@ static void hbitmap_test_check(TestHBitmapData *data, | ||
43 | |||
44 | i = first; | ||
45 | for (;;) { | ||
46 | - next = hbitmap_iter_next(&hbi, true); | ||
47 | + next = check_hbitmap_iter_next(&hbi); | ||
48 | if (next < 0) { | ||
49 | next = data->size; | ||
50 | } | ||
51 | @@ -XXX,XX +XXX,XX @@ static void test_hbitmap_iter_granularity(TestHBitmapData *data, | ||
52 | /* Note that hbitmap_test_check has to be invoked manually in this test. */ | ||
53 | hbitmap_test_init(data, 131072 << 7, 7); | ||
54 | hbitmap_iter_init(&hbi, data->hb, 0); | ||
55 | - g_assert_cmpint(hbitmap_iter_next(&hbi, true), <, 0); | ||
56 | + g_assert_cmpint(check_hbitmap_iter_next(&hbi), <, 0); | ||
57 | |||
58 | hbitmap_test_set(data, ((L2 + L1 + 1) << 7) + 8, 8); | ||
59 | hbitmap_iter_init(&hbi, data->hb, 0); | ||
60 | - g_assert_cmpint(hbitmap_iter_next(&hbi, true), ==, (L2 + L1 + 1) << 7); | ||
61 | - g_assert_cmpint(hbitmap_iter_next(&hbi, true), <, 0); | ||
62 | + g_assert_cmpint(check_hbitmap_iter_next(&hbi), ==, (L2 + L1 + 1) << 7); | ||
63 | + g_assert_cmpint(check_hbitmap_iter_next(&hbi), <, 0); | ||
64 | |||
65 | hbitmap_iter_init(&hbi, data->hb, (L2 + L1 + 2) << 7); | ||
66 | g_assert_cmpint(hbitmap_iter_next(&hbi, true), <, 0); | ||
67 | |||
68 | hbitmap_test_set(data, (131072 << 7) - 8, 8); | ||
69 | hbitmap_iter_init(&hbi, data->hb, 0); | ||
70 | - g_assert_cmpint(hbitmap_iter_next(&hbi, true), ==, (L2 + L1 + 1) << 7); | ||
71 | - g_assert_cmpint(hbitmap_iter_next(&hbi, true), ==, 131071 << 7); | ||
72 | - g_assert_cmpint(hbitmap_iter_next(&hbi, true), <, 0); | ||
73 | + g_assert_cmpint(check_hbitmap_iter_next(&hbi), ==, (L2 + L1 + 1) << 7); | ||
74 | + g_assert_cmpint(check_hbitmap_iter_next(&hbi), ==, 131071 << 7); | ||
75 | + g_assert_cmpint(check_hbitmap_iter_next(&hbi), <, 0); | ||
76 | |||
77 | hbitmap_iter_init(&hbi, data->hb, (L2 + L1 + 2) << 7); | ||
78 | - g_assert_cmpint(hbitmap_iter_next(&hbi, true), ==, 131071 << 7); | ||
79 | - g_assert_cmpint(hbitmap_iter_next(&hbi, true), <, 0); | ||
80 | + g_assert_cmpint(check_hbitmap_iter_next(&hbi), ==, 131071 << 7); | ||
81 | + g_assert_cmpint(check_hbitmap_iter_next(&hbi), <, 0); | ||
82 | } | ||
83 | |||
84 | static void hbitmap_test_set_boundary_bits(TestHBitmapData *data, ssize_t diff) | ||
85 | @@ -XXX,XX +XXX,XX @@ static void test_hbitmap_serialize_zeroes(TestHBitmapData *data, | ||
86 | for (i = 0; i < num_positions; i++) { | ||
87 | hbitmap_deserialize_zeroes(data->hb, positions[i], min_l1, true); | ||
88 | hbitmap_iter_init(&iter, data->hb, 0); | ||
89 | - next = hbitmap_iter_next(&iter, true); | ||
90 | + next = check_hbitmap_iter_next(&iter); | ||
91 | if (i == num_positions - 1) { | ||
92 | g_assert_cmpint(next, ==, -1); | ||
93 | } else { | ||
94 | @@ -XXX,XX +XXX,XX @@ static void test_hbitmap_iter_and_reset(TestHBitmapData *data, | ||
95 | |||
96 | hbitmap_iter_init(&hbi, data->hb, BITS_PER_LONG - 1); | ||
97 | |||
98 | - hbitmap_iter_next(&hbi, true); | ||
99 | + check_hbitmap_iter_next(&hbi); | ||
100 | |||
101 | hbitmap_reset_all(data->hb); | ||
102 | - hbitmap_iter_next(&hbi, true); | ||
103 | + check_hbitmap_iter_next(&hbi); | ||
104 | } | ||
105 | |||
106 | static void test_hbitmap_next_zero_check(TestHBitmapData *data, int64_t start) | ||
107 | -- | ||
108 | 2.13.6 | ||
109 | |||
Deleted patch | |||
1 | From: Max Reitz <mreitz@redhat.com> | ||
2 | 1 | ||
3 | This new function allows the caller to look for a consecutively | ||
4 | dirty area in a dirty bitmap. | ||
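
A typical use is to walk dirty extents instead of single bits
(hypothetical usage; handle_extent() is a placeholder, not a QEMU
function):

    uint64_t offset;
    int bytes;

    while (bdrv_dirty_iter_next_area(iter, max_offset, &offset, &bytes)) {
        handle_extent(offset, bytes); /* per-area work goes here */
    }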
5 | |||
6 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
7 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
8 | Reviewed-by: John Snow <jsnow@redhat.com> | ||
9 | Message-id: 20180613181823.13618-10-mreitz@redhat.com | ||
10 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
11 | --- | ||
12 | include/block/dirty-bitmap.h | 2 ++ | ||
13 | block/dirty-bitmap.c | 55 ++++++++++++++++++++++++++++++++++++++++++++ | ||
14 | 2 files changed, 57 insertions(+) | ||
15 | |||
16 | diff --git a/include/block/dirty-bitmap.h b/include/block/dirty-bitmap.h | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/include/block/dirty-bitmap.h | ||
19 | +++ b/include/block/dirty-bitmap.h | ||
20 | @@ -XXX,XX +XXX,XX @@ void bdrv_set_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap, | ||
21 | void bdrv_reset_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap, | ||
22 | int64_t offset, int64_t bytes); | ||
23 | int64_t bdrv_dirty_iter_next(BdrvDirtyBitmapIter *iter); | ||
24 | +bool bdrv_dirty_iter_next_area(BdrvDirtyBitmapIter *iter, uint64_t max_offset, | ||
25 | + uint64_t *offset, int *bytes); | ||
26 | void bdrv_set_dirty_iter(BdrvDirtyBitmapIter *hbi, int64_t offset); | ||
27 | int64_t bdrv_get_dirty_count(BdrvDirtyBitmap *bitmap); | ||
28 | int64_t bdrv_get_meta_dirty_count(BdrvDirtyBitmap *bitmap); | ||
29 | diff --git a/block/dirty-bitmap.c b/block/dirty-bitmap.c | ||
30 | index XXXXXXX..XXXXXXX 100644 | ||
31 | --- a/block/dirty-bitmap.c | ||
32 | +++ b/block/dirty-bitmap.c | ||
33 | @@ -XXX,XX +XXX,XX @@ int64_t bdrv_dirty_iter_next(BdrvDirtyBitmapIter *iter) | ||
34 | return hbitmap_iter_next(&iter->hbi, true); | ||
35 | } | ||
36 | |||
37 | +/** | ||
38 | + * Return the next consecutively dirty area in the dirty bitmap | ||
39 | + * belonging to the given iterator @iter. | ||
40 | + * | ||
41 | + * @max_offset: Maximum value that may be returned for | ||
42 | + * *offset + *bytes | ||
43 | + * @offset: Will contain the start offset of the next dirty area | ||
44 | + * @bytes: Will contain the length of the next dirty area | ||
45 | + * | ||
46 | + * Returns: True if a dirty area could be found before max_offset | ||
47 | + * (which means that *offset and *bytes then contain valid | ||
48 | + * values), false otherwise. | ||
49 | + * | ||
50 | + * Note that @iter is never advanced if false is returned. If an area | ||
51 | + * is found (which means that true is returned), it will be advanced | ||
52 | + * past that area. | ||
53 | + */ | ||
54 | +bool bdrv_dirty_iter_next_area(BdrvDirtyBitmapIter *iter, uint64_t max_offset, | ||
55 | + uint64_t *offset, int *bytes) | ||
56 | +{ | ||
57 | + uint32_t granularity = bdrv_dirty_bitmap_granularity(iter->bitmap); | ||
58 | + uint64_t gran_max_offset; | ||
59 | + int64_t ret; | ||
60 | + int size; | ||
61 | + | ||
62 | + if (max_offset == iter->bitmap->size) { | ||
63 | + /* If max_offset points to the image end, round it up by the | ||
64 | + * bitmap granularity */ | ||
65 | + gran_max_offset = ROUND_UP(max_offset, granularity); | ||
66 | + } else { | ||
67 | + gran_max_offset = max_offset; | ||
68 | + } | ||
69 | + | ||
70 | + ret = hbitmap_iter_next(&iter->hbi, false); | ||
71 | + if (ret < 0 || ret + granularity > gran_max_offset) { | ||
72 | + return false; | ||
73 | + } | ||
74 | + | ||
75 | + *offset = ret; | ||
76 | + size = 0; | ||
77 | + | ||
78 | + assert(granularity <= INT_MAX); | ||
79 | + | ||
80 | + do { | ||
81 | + /* Advance iterator */ | ||
82 | + ret = hbitmap_iter_next(&iter->hbi, true); | ||
83 | + size += granularity; | ||
84 | + } while (ret + granularity <= gran_max_offset && | ||
85 | + hbitmap_iter_next(&iter->hbi, false) == ret + granularity && | ||
86 | + size <= INT_MAX - granularity); | ||
87 | + | ||
88 | + *bytes = MIN(size, max_offset - *offset); | ||
89 | + return true; | ||
90 | +} | ||
91 | + | ||
92 | /* Called within bdrv_dirty_bitmap_lock..unlock */ | ||
93 | void bdrv_set_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap, | ||
94 | int64_t offset, int64_t bytes) | ||
95 | -- | ||
96 | 2.13.6 | ||
97 | |||
Deleted patch | |||
1 | From: Max Reitz <mreitz@redhat.com> | ||
2 | 1 | ||
3 | This will allow us to access the block job data when the mirror block | ||
4 | driver becomes more complex. | ||
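
The mechanism is a plain back-pointer from the filter node's opaque
data to the job; a hypothetical accessor (not part of the patch)
illustrates it:

    static MirrorBlockJob *mirror_job_of(BlockDriverState *mirror_top_bs)
    {
        MirrorBDSOpaque *opaque = mirror_top_bs->opaque;

        /* NULL once the job has ended: mirror_exit() clears ->job. */
        return opaque ? opaque->job : NULL;
    }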
5 | |||
6 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
7 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
8 | Message-id: 20180613181823.13618-11-mreitz@redhat.com | ||
9 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
10 | --- | ||
11 | block/mirror.c | 12 ++++++++++++ | ||
12 | 1 file changed, 12 insertions(+) | ||
13 | |||
14 | diff --git a/block/mirror.c b/block/mirror.c | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/block/mirror.c | ||
17 | +++ b/block/mirror.c | ||
18 | @@ -XXX,XX +XXX,XX @@ typedef struct MirrorBlockJob { | ||
19 | bool initial_zeroing_ongoing; | ||
20 | } MirrorBlockJob; | ||
21 | |||
22 | +typedef struct MirrorBDSOpaque { | ||
23 | + MirrorBlockJob *job; | ||
24 | +} MirrorBDSOpaque; | ||
25 | + | ||
26 | struct MirrorOp { | ||
27 | MirrorBlockJob *s; | ||
28 | QEMUIOVector qiov; | ||
29 | @@ -XXX,XX +XXX,XX @@ static void mirror_exit(Job *job, void *opaque) | ||
30 | MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); | ||
31 | BlockJob *bjob = &s->common; | ||
32 | MirrorExitData *data = opaque; | ||
33 | + MirrorBDSOpaque *bs_opaque = s->mirror_top_bs->opaque; | ||
34 | AioContext *replace_aio_context = NULL; | ||
35 | BlockDriverState *src = s->mirror_top_bs->backing->bs; | ||
36 | BlockDriverState *target_bs = blk_bs(s->target); | ||
37 | @@ -XXX,XX +XXX,XX @@ static void mirror_exit(Job *job, void *opaque) | ||
38 | blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort); | ||
39 | blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort); | ||
40 | |||
41 | + bs_opaque->job = NULL; | ||
42 | job_completed(job, data->ret, NULL); | ||
43 | |||
44 | g_free(data); | ||
45 | @@ -XXX,XX +XXX,XX @@ static void mirror_start_job(const char *job_id, BlockDriverState *bs, | ||
46 | Error **errp) | ||
47 | { | ||
48 | MirrorBlockJob *s; | ||
49 | + MirrorBDSOpaque *bs_opaque; | ||
50 | BlockDriverState *mirror_top_bs; | ||
51 | bool target_graph_mod; | ||
52 | bool target_is_backing; | ||
53 | @@ -XXX,XX +XXX,XX @@ static void mirror_start_job(const char *job_id, BlockDriverState *bs, | ||
54 | mirror_top_bs->total_sectors = bs->total_sectors; | ||
55 | mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED; | ||
56 | mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED; | ||
57 | + bs_opaque = g_new0(MirrorBDSOpaque, 1); | ||
58 | + mirror_top_bs->opaque = bs_opaque; | ||
59 | bdrv_set_aio_context(mirror_top_bs, bdrv_get_aio_context(bs)); | ||
60 | |||
61 | /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep | ||
62 | @@ -XXX,XX +XXX,XX @@ static void mirror_start_job(const char *job_id, BlockDriverState *bs, | ||
63 | if (!s) { | ||
64 | goto fail; | ||
65 | } | ||
66 | + bs_opaque->job = s; | ||
67 | + | ||
68 | /* The block job now has a reference to this node */ | ||
69 | bdrv_unref(mirror_top_bs); | ||
70 | |||
71 | @@ -XXX,XX +XXX,XX @@ fail: | ||
72 | |||
73 | g_free(s->replaces); | ||
74 | blk_unref(s->target); | ||
75 | + bs_opaque->job = NULL; | ||
76 | job_early_fail(&s->common.job); | ||
77 | } | ||
78 | |||
79 | -- | ||
80 | 2.13.6 | ||
81 | |||
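
The back-pointer pattern this patch introduces -- a small heap-allocated
struct hung off the filter node's opaque field so that driver callbacks can
reach the owning job -- can be sketched as follows (example_driver_cb() is
invented for illustration; the real mirror callbacks do more):

    typedef struct MirrorBDSOpaque {
        MirrorBlockJob *job;    /* cleared once the job is gone */
    } MirrorBDSOpaque;

    /* Hypothetical callback invoked on the mirror filter node */
    static int example_driver_cb(BlockDriverState *bs)
    {
        MirrorBDSOpaque *s = bs->opaque;

        if (!s->job) {
            /* mirror_exit() or the failure path already detached the job */
            return 0;
        }
        /* ... access s->job state here ... */
        return 0;
    }
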
Deleted patch | |||
---|---|---|---|
1 | From: Max Reitz <mreitz@redhat.com> | ||
2 | 1 | ||
3 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
4 | Message-id: 20180613181823.13618-12-mreitz@redhat.com | ||
5 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
6 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
7 | --- | ||
8 | include/qemu/job.h | 15 +++++++++++++++ | ||
9 | job.c | 5 +++++ | ||
10 | 2 files changed, 20 insertions(+) | ||
11 | |||
12 | diff --git a/include/qemu/job.h b/include/qemu/job.h | ||
13 | index XXXXXXX..XXXXXXX 100644 | ||
14 | --- a/include/qemu/job.h | ||
15 | +++ b/include/qemu/job.h | ||
16 | @@ -XXX,XX +XXX,XX @@ void job_progress_update(Job *job, uint64_t done); | ||
17 | */ | ||
18 | void job_progress_set_remaining(Job *job, uint64_t remaining); | ||
19 | |||
20 | +/** | ||
21 | + * @job: The job whose expected progress end value is updated | ||
22 | + * @delta: Value which is to be added to the current expected end | ||
23 | + * value | ||
24 | + * | ||
25 | + * Increases the expected end value of the progress counter of a job. | ||
26 | + * This is useful for parenthesis operations: If a job has to | ||
27 | + * conditionally perform a high-priority operation as part of its | ||
28 | + * progress, it calls this function with the expected operation's | ||
29 | + * length before, and job_progress_update() afterwards. | ||
30 | + * (So the operation acts as a parenthesis in regards to the main job | ||
31 | + * operation running in background.) | ||
32 | + */ | ||
33 | +void job_progress_increase_remaining(Job *job, uint64_t delta); | ||
34 | + | ||
35 | /** To be called when a cancelled job is finalised. */ | ||
36 | void job_event_cancelled(Job *job); | ||
37 | |||
38 | diff --git a/job.c b/job.c | ||
39 | index XXXXXXX..XXXXXXX 100644 | ||
40 | --- a/job.c | ||
41 | +++ b/job.c | ||
42 | @@ -XXX,XX +XXX,XX @@ void job_progress_set_remaining(Job *job, uint64_t remaining) | ||
43 | job->progress_total = job->progress_current + remaining; | ||
44 | } | ||
45 | |||
46 | +void job_progress_increase_remaining(Job *job, uint64_t delta) | ||
47 | +{ | ||
48 | + job->progress_total += delta; | ||
49 | +} | ||
50 | + | ||
51 | void job_event_cancelled(Job *job) | ||
52 | { | ||
53 | notifier_list_notify(&job->on_finalize_cancelled, job); | ||
54 | -- | ||
55 | 2.13.6 | ||
56 | |||
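
A short sketch of the "parenthesis" usage described in the new comment (the
function name do_priority_op() and the byte count are made up; only
job_progress_increase_remaining() and job_progress_update() are real):

    static void do_priority_op(Job *job, uint64_t nbytes)
    {
        /* Open the parenthesis: grow the expected total up front so
         * progress_current cannot overtake progress_total. */
        job_progress_increase_remaining(job, nbytes);

        /* ... perform the high-priority operation ... */

        /* Close the parenthesis: account the same amount as done. */
        job_progress_update(job, nbytes);
    }
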
Deleted patch | |||
---|---|---|---|
1 | From: Max Reitz <mreitz@redhat.com> | ||
2 | 1 | ||
3 | This patch allows the user to specify whether mirror block jobs | ||
4 | copy data actively (blocking guest writes) or only in the background. | ||
5 | Currently, this setting remains constant for the entire block job. | ||
6 | |||
7 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
8 | Reviewed-by: Alberto Garcia <berto@igalia.com> | ||
9 | Message-id: 20180613181823.13618-14-mreitz@redhat.com | ||
10 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
11 | --- | ||
12 | qapi/block-core.json | 11 +++++++++-- | ||
13 | include/block/block_int.h | 4 +++- | ||
14 | block/mirror.c | 12 +++++++----- | ||
15 | blockdev.c | 9 ++++++++- | ||
16 | 4 files changed, 27 insertions(+), 9 deletions(-) | ||
17 | |||
18 | diff --git a/qapi/block-core.json b/qapi/block-core.json | ||
19 | index XXXXXXX..XXXXXXX 100644 | ||
20 | --- a/qapi/block-core.json | ||
21 | +++ b/qapi/block-core.json | ||
22 | @@ -XXX,XX +XXX,XX @@ | ||
23 | # written. Both will result in identical contents. | ||
24 | # Default is true. (Since 2.4) | ||
25 | # | ||
26 | +# @copy-mode: when to copy data to the destination; defaults to 'background' | ||
27 | +# (Since: 3.0) | ||
28 | +# | ||
29 | # Since: 1.3 | ||
30 | ## | ||
31 | { 'struct': 'DriveMirror', | ||
32 | @@ -XXX,XX +XXX,XX @@ | ||
33 | '*speed': 'int', '*granularity': 'uint32', | ||
34 | '*buf-size': 'int', '*on-source-error': 'BlockdevOnError', | ||
35 | '*on-target-error': 'BlockdevOnError', | ||
36 | - '*unmap': 'bool' } } | ||
37 | + '*unmap': 'bool', '*copy-mode': 'MirrorCopyMode' } } | ||
38 | |||
39 | ## | ||
40 | # @BlockDirtyBitmap: | ||
41 | @@ -XXX,XX +XXX,XX @@ | ||
42 | # above @device. If this option is not given, a node name is | ||
43 | # autogenerated. (Since: 2.9) | ||
44 | # | ||
45 | +# @copy-mode: when to copy data to the destination; defaults to 'background' | ||
46 | +# (Since: 3.0) | ||
47 | +# | ||
48 | # Returns: nothing on success. | ||
49 | # | ||
50 | # Since: 2.6 | ||
51 | @@ -XXX,XX +XXX,XX @@ | ||
52 | '*speed': 'int', '*granularity': 'uint32', | ||
53 | '*buf-size': 'int', '*on-source-error': 'BlockdevOnError', | ||
54 | '*on-target-error': 'BlockdevOnError', | ||
55 | - '*filter-node-name': 'str' } } | ||
56 | + '*filter-node-name': 'str', | ||
57 | + '*copy-mode': 'MirrorCopyMode' } } | ||
58 | |||
59 | ## | ||
60 | # @block_set_io_throttle: | ||
61 | diff --git a/include/block/block_int.h b/include/block/block_int.h | ||
62 | index XXXXXXX..XXXXXXX 100644 | ||
63 | --- a/include/block/block_int.h | ||
64 | +++ b/include/block/block_int.h | ||
65 | @@ -XXX,XX +XXX,XX @@ void commit_active_start(const char *job_id, BlockDriverState *bs, | ||
66 | * @filter_node_name: The node name that should be assigned to the filter | ||
67 | * driver that the mirror job inserts into the graph above @bs. NULL means that | ||
68 | * a node name should be autogenerated. | ||
69 | + * @copy_mode: When to trigger writes to the target. | ||
70 | * @errp: Error object. | ||
71 | * | ||
72 | * Start a mirroring operation on @bs. Clusters that are allocated | ||
73 | @@ -XXX,XX +XXX,XX @@ void mirror_start(const char *job_id, BlockDriverState *bs, | ||
74 | MirrorSyncMode mode, BlockMirrorBackingMode backing_mode, | ||
75 | BlockdevOnError on_source_error, | ||
76 | BlockdevOnError on_target_error, | ||
77 | - bool unmap, const char *filter_node_name, Error **errp); | ||
78 | + bool unmap, const char *filter_node_name, | ||
79 | + MirrorCopyMode copy_mode, Error **errp); | ||
80 | |||
81 | /* | ||
82 | * backup_job_create: | ||
83 | diff --git a/block/mirror.c b/block/mirror.c | ||
84 | index XXXXXXX..XXXXXXX 100644 | ||
85 | --- a/block/mirror.c | ||
86 | +++ b/block/mirror.c | ||
87 | @@ -XXX,XX +XXX,XX @@ static void mirror_start_job(const char *job_id, BlockDriverState *bs, | ||
88 | const BlockJobDriver *driver, | ||
89 | bool is_none_mode, BlockDriverState *base, | ||
90 | bool auto_complete, const char *filter_node_name, | ||
91 | - bool is_mirror, | ||
92 | + bool is_mirror, MirrorCopyMode copy_mode, | ||
93 | Error **errp) | ||
94 | { | ||
95 | MirrorBlockJob *s; | ||
96 | @@ -XXX,XX +XXX,XX @@ static void mirror_start_job(const char *job_id, BlockDriverState *bs, | ||
97 | s->on_target_error = on_target_error; | ||
98 | s->is_none_mode = is_none_mode; | ||
99 | s->backing_mode = backing_mode; | ||
100 | - s->copy_mode = MIRROR_COPY_MODE_BACKGROUND; | ||
101 | + s->copy_mode = copy_mode; | ||
102 | s->base = base; | ||
103 | s->granularity = granularity; | ||
104 | s->buf_size = ROUND_UP(buf_size, granularity); | ||
105 | @@ -XXX,XX +XXX,XX @@ void mirror_start(const char *job_id, BlockDriverState *bs, | ||
106 | MirrorSyncMode mode, BlockMirrorBackingMode backing_mode, | ||
107 | BlockdevOnError on_source_error, | ||
108 | BlockdevOnError on_target_error, | ||
109 | - bool unmap, const char *filter_node_name, Error **errp) | ||
110 | + bool unmap, const char *filter_node_name, | ||
111 | + MirrorCopyMode copy_mode, Error **errp) | ||
112 | { | ||
113 | bool is_none_mode; | ||
114 | BlockDriverState *base; | ||
115 | @@ -XXX,XX +XXX,XX @@ void mirror_start(const char *job_id, BlockDriverState *bs, | ||
116 | speed, granularity, buf_size, backing_mode, | ||
117 | on_source_error, on_target_error, unmap, NULL, NULL, | ||
118 | &mirror_job_driver, is_none_mode, base, false, | ||
119 | - filter_node_name, true, errp); | ||
120 | + filter_node_name, true, copy_mode, errp); | ||
121 | } | ||
122 | |||
123 | void commit_active_start(const char *job_id, BlockDriverState *bs, | ||
124 | @@ -XXX,XX +XXX,XX @@ void commit_active_start(const char *job_id, BlockDriverState *bs, | ||
125 | MIRROR_LEAVE_BACKING_CHAIN, | ||
126 | on_error, on_error, true, cb, opaque, | ||
127 | &commit_active_job_driver, false, base, auto_complete, | ||
128 | - filter_node_name, false, &local_err); | ||
129 | + filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND, | ||
130 | + &local_err); | ||
131 | if (local_err) { | ||
132 | error_propagate(errp, local_err); | ||
133 | goto error_restore_flags; | ||
134 | diff --git a/blockdev.c b/blockdev.c | ||
135 | index XXXXXXX..XXXXXXX 100644 | ||
136 | --- a/blockdev.c | ||
137 | +++ b/blockdev.c | ||
138 | @@ -XXX,XX +XXX,XX @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs, | ||
139 | bool has_unmap, bool unmap, | ||
140 | bool has_filter_node_name, | ||
141 | const char *filter_node_name, | ||
142 | + bool has_copy_mode, MirrorCopyMode copy_mode, | ||
143 | Error **errp) | ||
144 | { | ||
145 | |||
146 | @@ -XXX,XX +XXX,XX @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs, | ||
147 | if (!has_filter_node_name) { | ||
148 | filter_node_name = NULL; | ||
149 | } | ||
150 | + if (!has_copy_mode) { | ||
151 | + copy_mode = MIRROR_COPY_MODE_BACKGROUND; | ||
152 | + } | ||
153 | |||
154 | if (granularity != 0 && (granularity < 512 || granularity > 1048576 * 64)) { | ||
155 | error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "granularity", | ||
156 | @@ -XXX,XX +XXX,XX @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs, | ||
157 | has_replaces ? replaces : NULL, | ||
158 | speed, granularity, buf_size, sync, backing_mode, | ||
159 | on_source_error, on_target_error, unmap, filter_node_name, | ||
160 | - errp); | ||
161 | + copy_mode, errp); | ||
162 | } | ||
163 | |||
164 | void qmp_drive_mirror(DriveMirror *arg, Error **errp) | ||
165 | @@ -XXX,XX +XXX,XX @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) | ||
166 | arg->has_on_target_error, arg->on_target_error, | ||
167 | arg->has_unmap, arg->unmap, | ||
168 | false, NULL, | ||
169 | + arg->has_copy_mode, arg->copy_mode, | ||
170 | &local_err); | ||
171 | bdrv_unref(target_bs); | ||
172 | error_propagate(errp, local_err); | ||
173 | @@ -XXX,XX +XXX,XX @@ void qmp_blockdev_mirror(bool has_job_id, const char *job_id, | ||
174 | BlockdevOnError on_target_error, | ||
175 | bool has_filter_node_name, | ||
176 | const char *filter_node_name, | ||
177 | + bool has_copy_mode, MirrorCopyMode copy_mode, | ||
178 | Error **errp) | ||
179 | { | ||
180 | BlockDriverState *bs; | ||
181 | @@ -XXX,XX +XXX,XX @@ void qmp_blockdev_mirror(bool has_job_id, const char *job_id, | ||
182 | has_on_target_error, on_target_error, | ||
183 | true, true, | ||
184 | has_filter_node_name, filter_node_name, | ||
185 | + has_copy_mode, copy_mode, | ||
186 | &local_err); | ||
187 | error_propagate(errp, local_err); | ||
188 | |||
189 | -- | ||
190 | 2.13.6 | ||
191 | |||
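
To make the difference between the two copy modes concrete, here is a
conceptual sketch of what a guest write does on the mirror filter node in
each mode. This is not the actual implementation -- filter_write() is
invented, and details such as in-flight-operation tracking and byte
accounting are omitted:

    static int coroutine_fn filter_write(BlockDriverState *bs, uint64_t offset,
                                         uint64_t bytes, QEMUIOVector *qiov)
    {
        MirrorBlockJob *job = ((MirrorBDSOpaque *)bs->opaque)->job;
        /* The write always goes to the source (the filter's backing file) */
        int ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, 0);

        if (ret < 0) {
            return ret;
        }
        if (job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) {
            /* Active mode: mirror the same range to the target before
             * completing the guest write */
            ret = blk_co_pwritev(job->target, offset, bytes, qiov, 0);
        } else {
            /* Background mode: just mark the range dirty; the job's main
             * loop will copy it out later */
            bdrv_set_dirty_bitmap(job->dirty_bitmap, offset, bytes);
        }
        return ret;
    }
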
Deleted patch | |||
---|---|---|---|
1 | From: Max Reitz <mreitz@redhat.com> | ||
2 | 1 | ||
3 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
4 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
5 | Reviewed-by: Alberto Garcia <berto@igalia.com> | ||
6 | Message-id: 20180613181823.13618-15-mreitz@redhat.com | ||
7 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
8 | --- | ||
9 | tests/qemu-iotests/151 | 120 +++++++++++++++++++++++++++++++++++++++++++++ | ||
10 | tests/qemu-iotests/151.out | 5 ++ | ||
11 | tests/qemu-iotests/group | 1 + | ||
12 | 3 files changed, 126 insertions(+) | ||
13 | create mode 100755 tests/qemu-iotests/151 | ||
14 | create mode 100644 tests/qemu-iotests/151.out | ||
15 | |||
16 | diff --git a/tests/qemu-iotests/151 b/tests/qemu-iotests/151 | ||
17 | new file mode 100755 | ||
18 | index XXXXXXX..XXXXXXX | ||
19 | --- /dev/null | ||
20 | +++ b/tests/qemu-iotests/151 | ||
21 | @@ -XXX,XX +XXX,XX @@ | ||
22 | +#!/usr/bin/env python | ||
23 | +# | ||
24 | +# Tests for active mirroring | ||
25 | +# | ||
26 | +# Copyright (C) 2018 Red Hat, Inc. | ||
27 | +# | ||
28 | +# This program is free software; you can redistribute it and/or modify | ||
29 | +# it under the terms of the GNU General Public License as published by | ||
30 | +# the Free Software Foundation; either version 2 of the License, or | ||
31 | +# (at your option) any later version. | ||
32 | +# | ||
33 | +# This program is distributed in the hope that it will be useful, | ||
34 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
35 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
36 | +# GNU General Public License for more details. | ||
37 | +# | ||
38 | +# You should have received a copy of the GNU General Public License | ||
39 | +# along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
40 | +# | ||
41 | + | ||
42 | +import os | ||
43 | +import iotests | ||
44 | +from iotests import qemu_img | ||
45 | + | ||
46 | +source_img = os.path.join(iotests.test_dir, 'source.' + iotests.imgfmt) | ||
47 | +target_img = os.path.join(iotests.test_dir, 'target.' + iotests.imgfmt) | ||
48 | + | ||
49 | +class TestActiveMirror(iotests.QMPTestCase): | ||
50 | + image_len = 128 * 1024 * 1024 # 128 MB | ||
51 | + potential_writes_in_flight = True | ||
52 | + | ||
53 | + def setUp(self): | ||
54 | + qemu_img('create', '-f', iotests.imgfmt, source_img, '128M') | ||
55 | + qemu_img('create', '-f', iotests.imgfmt, target_img, '128M') | ||
56 | + | ||
57 | + blk_source = {'id': 'source', | ||
58 | + 'if': 'none', | ||
59 | + 'node-name': 'source-node', | ||
60 | + 'driver': iotests.imgfmt, | ||
61 | + 'file': {'driver': 'file', | ||
62 | + 'filename': source_img}} | ||
63 | + | ||
64 | + blk_target = {'node-name': 'target-node', | ||
65 | + 'driver': iotests.imgfmt, | ||
66 | + 'file': {'driver': 'file', | ||
67 | + 'filename': target_img}} | ||
68 | + | ||
69 | + self.vm = iotests.VM() | ||
70 | + self.vm.add_drive_raw(self.vm.qmp_to_opts(blk_source)) | ||
71 | + self.vm.add_blockdev(self.vm.qmp_to_opts(blk_target)) | ||
72 | + self.vm.add_device('virtio-blk,drive=source') | ||
73 | + self.vm.launch() | ||
74 | + | ||
75 | + def tearDown(self): | ||
76 | + self.vm.shutdown() | ||
77 | + | ||
78 | + if not self.potential_writes_in_flight: | ||
79 | + self.assertTrue(iotests.compare_images(source_img, target_img), | ||
80 | + 'mirror target does not match source') | ||
81 | + | ||
82 | + os.remove(source_img) | ||
83 | + os.remove(target_img) | ||
84 | + | ||
85 | + def doActiveIO(self, sync_source_and_target): | ||
86 | + # Fill the source image | ||
87 | + self.vm.hmp_qemu_io('source', | ||
88 | + 'write -P 1 0 %i' % self.image_len) | ||
89 | + | ||
90 | + # Start some background requests | ||
91 | + for offset in range(1 * self.image_len // 8, 3 * self.image_len // 8, 1024 * 1024): | ||
92 | + self.vm.hmp_qemu_io('source', 'aio_write -P 2 %i 1M' % offset) | ||
93 | + for offset in range(2 * self.image_len // 8, 3 * self.image_len // 8, 1024 * 1024): | ||
94 | + self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset) | ||
95 | + | ||
96 | + # Start the block job | ||
97 | + result = self.vm.qmp('blockdev-mirror', | ||
98 | + job_id='mirror', | ||
99 | + filter_node_name='mirror-node', | ||
100 | + device='source-node', | ||
101 | + target='target-node', | ||
102 | + sync='full', | ||
103 | + copy_mode='write-blocking') | ||
104 | + self.assert_qmp(result, 'return', {}) | ||
105 | + | ||
106 | + # Start some more requests | ||
107 | + for offset in range(3 * self.image_len // 8, 5 * self.image_len // 8, 1024 * 1024): | ||
108 | + self.vm.hmp_qemu_io('source', 'aio_write -P 3 %i 1M' % offset) | ||
109 | + for offset in range(4 * self.image_len // 8, 5 * self.image_len // 8, 1024 * 1024): | ||
110 | + self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset) | ||
111 | + | ||
112 | + # Wait for the READY event | ||
113 | + self.wait_ready(drive='mirror') | ||
114 | + | ||
115 | + # Now start some final requests; all of these (which land on | ||
116 | + # the source) should be settled using the active mechanism. | ||
117 | + # The mirror code itself asserts that the source BDS's dirty | ||
118 | + # bitmap will stay clean between READY and COMPLETED. | ||
119 | + for offset in range(5 * self.image_len // 8, 7 * self.image_len // 8, 1024 * 1024): | ||
120 | + self.vm.hmp_qemu_io('source', 'aio_write -P 3 %i 1M' % offset) | ||
121 | + for offset in range(6 * self.image_len // 8, 7 * self.image_len // 8, 1024 * 1024): | ||
122 | + self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset) | ||
123 | + | ||
124 | + if sync_source_and_target: | ||
125 | + # If source and target should be in sync after the mirror, | ||
126 | + # we have to flush before completion | ||
127 | + self.vm.hmp_qemu_io('source', 'aio_flush') | ||
128 | + self.potential_writes_in_flight = False | ||
129 | + | ||
130 | + self.complete_and_wait(drive='mirror', wait_ready=False) | ||
131 | + | ||
132 | + def testActiveIO(self): | ||
133 | + self.doActiveIO(False) | ||
134 | + | ||
135 | + def testActiveIOFlushed(self): | ||
136 | + self.doActiveIO(True) | ||
137 | + | ||
138 | + | ||
139 | + | ||
140 | +if __name__ == '__main__': | ||
141 | + iotests.main(supported_fmts=['qcow2', 'raw']) | ||
142 | diff --git a/tests/qemu-iotests/151.out b/tests/qemu-iotests/151.out | ||
143 | new file mode 100644 | ||
144 | index XXXXXXX..XXXXXXX | ||
145 | --- /dev/null | ||
146 | +++ b/tests/qemu-iotests/151.out | ||
147 | @@ -XXX,XX +XXX,XX @@ | ||
148 | +.. | ||
149 | +---------------------------------------------------------------------- | ||
150 | +Ran 2 tests | ||
151 | + | ||
152 | +OK | ||
153 | diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group | ||
154 | index XXXXXXX..XXXXXXX 100644 | ||
155 | --- a/tests/qemu-iotests/group | ||
156 | +++ b/tests/qemu-iotests/group | ||
157 | @@ -XXX,XX +XXX,XX @@ | ||
158 | 148 rw auto quick | ||
159 | 149 rw auto sudo | ||
160 | 150 rw auto quick | ||
161 | +151 rw auto | ||
162 | 152 rw auto quick | ||
163 | 153 rw auto quick | ||
164 | 154 rw auto backing quick | ||
165 | -- | ||
166 | 2.13.6 | ||
167 | |||