The following changes since commit f1d33f55c47dfdaf8daacd618588ad3ae4c452d1:

  Merge tag 'pull-testing-gdbstub-plugins-gitdm-061022-3' of https://github.com/stsquad/qemu into staging (2022-10-06 07:11:56 -0400)

are available in the Git repository at:

  git://repo.or.cz/qemu/kevin.git tags/for-upstream

for you to fetch changes up to a7ca2eb488ff149c898f43abe103f8bd8e3ca3c4:

  file-posix: Remove unused s->discard_zeroes (2022-10-07 12:11:41 +0200)

----------------------------------------------------------------
Block layer patches

- job: replace AioContext lock with job_mutex
- Fixes to make coroutine_fn annotations more accurate
- QAPI schema: Fix incorrect example
- Code cleanup

----------------------------------------------------------------
Alberto Faria (1):
      coroutine: Drop coroutine_fn annotation from qemu_coroutine_self()

Emanuele Giuseppe Esposito (20):
      job.c: make job_mutex and job_lock/unlock() public
      job.h: categorize fields in struct Job
      job.c: API functions not used outside should be static
      aio-wait.h: introduce AIO_WAIT_WHILE_UNLOCKED
      job.c: add job_lock/unlock while keeping job.h intact
      job: move and update comments from blockjob.c
      blockjob: introduce block_job _locked() APIs
      jobs: add job lock in find_* functions
      jobs: use job locks also in the unit tests
      block/mirror.c: use of job helpers in drivers
      jobs: group together API calls under the same job lock
      jobs: protect job.aio_context with BQL and job_mutex
      blockjob.h: categorize fields in struct BlockJob
      blockjob: rename notifier callbacks as _locked
      blockjob: protect iostatus field in BlockJob struct
      job.h: categorize JobDriver callbacks that need the AioContext lock
      job.c: enable job lock/unlock and remove Aiocontext locks
      block_job_query: remove atomic read
      blockjob: remove unused functions
      job: remove unused functions

Kevin Wolf (2):
      quorum: Remove unnecessary forward declaration
      file-posix: Remove unused s->discard_zeroes

Marc-André Lureau (3):
      9p: add missing coroutine_fn annotations
      migration: add missing coroutine_fn annotations
      test-coroutine: add missing coroutine_fn annotations

Markus Armbruster (1):
      Revert "qapi: fix examples of blockdev-add with qcow2"

Paolo Bonzini (23):
      block/nvme: separate nvme_get_free_req cases for coroutine/non-coroutine context
      block: add missing coroutine_fn annotations
      qcow2: remove incorrect coroutine_fn annotations
      nbd: remove incorrect coroutine_fn annotations
      coroutine: remove incorrect coroutine_fn annotations
      blkverify: add missing coroutine_fn annotations
      file-posix: add missing coroutine_fn annotations
      iscsi: add missing coroutine_fn annotations
      nbd: add missing coroutine_fn annotations
      nfs: add missing coroutine_fn annotations
      nvme: add missing coroutine_fn annotations
      parallels: add missing coroutine_fn annotations
      qcow2: add missing coroutine_fn annotations
      copy-before-write: add missing coroutine_fn annotations
      curl: add missing coroutine_fn annotations
      qed: add missing coroutine_fn annotations
      quorum: add missing coroutine_fn annotations
      throttle: add missing coroutine_fn annotations
      vmdk: add missing coroutine_fn annotations
      job: add missing coroutine_fn annotations
      coroutine-lock: add missing coroutine_fn annotations
      raw-format: add missing coroutine_fn annotations
      job: detect change of aiocontext within job coroutine

 qapi/block-core.json             |  10 +-
 block/qcow2.h                    |  19 +-
 hw/9pfs/9p.h                     |   9 +-
 include/block/aio-wait.h         |  17 +-
 include/block/blockjob.h         |  59 +++-
 include/block/nbd.h              |   2 +-
 include/qemu/coroutine.h         |   4 +-
 include/qemu/job.h               | 306 +++++++++++++-----
 block.c                          |  24 +-
 block/blkverify.c                |   2 +-
 block/block-backend.c            |  10 +-
 block/copy-before-write.c        |   9 +-
 block/curl.c                     |   2 +-
 block/file-posix.c               |  11 +-
 block/io.c                       |  22 +-
 block/iscsi.c                    |   3 +-
 block/mirror.c                   |  19 +-
 block/nbd.c                      |  11 +-
 block/nfs.c                      |   2 +-
 block/nvme.c                     |  54 ++--
 block/parallels.c                |   5 +-
 block/qcow2-cluster.c            |  21 +-
 block/qcow2-refcount.c           |   6 +-
 block/qcow2.c                    |   5 +-
 block/qed.c                      |   4 +-
 block/quorum.c                   |  38 +--
 block/raw-format.c               |   3 +-
 block/replication.c              |   3 +
 block/throttle.c                 |   2 +-
 block/vmdk.c                     |  22 +-
 blockdev.c                       | 129 ++++----
 blockjob.c                       | 132 ++++----
 job-qmp.c                        |  92 +++---
 job.c                            | 674 +++++++++++++++++++++++++--------------
 migration/migration.c            |   3 +-
 monitor/qmp-cmds.c               |   7 +-
 qemu-img.c                       |  17 +-
 tests/unit/test-bdrv-drain.c     |  80 +++--
 tests/unit/test-block-iothread.c |   8 +-
 tests/unit/test-blockjob-txn.c   |  24 +-
 tests/unit/test-blockjob.c       | 136 ++++----
 tests/unit/test-coroutine.c      |   2 +-
 util/qemu-coroutine-lock.c       |  14 +-
 util/qemu-coroutine.c            |   2 +-
 44 files changed, 1237 insertions(+), 787 deletions(-)
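
A minimal sketch of the locking convention the job patches below move to, assuming the job_lock()/job_unlock() helpers that this series makes public; example_query_status() is an illustrative name only and is not part of the series:

    #include "qemu/osdep.h"
    #include "qemu/job.h"

    /*
     * Illustration only: job fields are protected by the global job_mutex
     * via job_lock()/job_unlock() instead of the per-job AioContext lock.
     */
    static JobStatus example_query_status(Job *job)
    {
        JobStatus status;

        job_lock();
        status = job->status;   /* no aio_context_acquire() needed here */
        job_unlock();

        return status;
    }
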
From: Markus Armbruster <armbru@redhat.com>

This reverts commit b6522938327141235b97ab38e40c6c4512587373.

Kevin Wolf NAKed this patch, because:

'file' is a required member (defined in BlockdevOptionsGenericFormat),
removing it makes the example invalid. 'data-file' is only an additional
optional member to be used for external data files (i.e. when the guest
data is kept separate from the metadata in the .qcow2 file).

However, it had already been merged then. Revert.

Signed-off-by: Markus Armbruster <armbru@redhat.com>
Message-Id: <20220930171908.846769-1-armbru@redhat.com>
Reviewed-by: Victor Toso <victortoso@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 qapi/block-core.json | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/qapi/block-core.json b/qapi/block-core.json
index XXXXXXX..XXXXXXX 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -XXX,XX +XXX,XX @@
 # -> { "execute": "blockdev-add",
 #      "arguments": { "driver": "qcow2",
 #                     "node-name": "node1534",
-#                     "data-file": { "driver": "file",
-#                                    "filename": "hd1.qcow2" },
+#                     "file": { "driver": "file",
+#                               "filename": "hd1.qcow2" },
 #      "backing": null } }
 #
 # <- { "return": {} }
@@ -XXX,XX +XXX,XX @@
 #      "arguments": {
 #          "driver": "qcow2",
 #          "node-name": "test1",
-#          "data-file": {
+#          "file": {
 #              "driver": "file",
 #              "filename": "test.qcow2"
 #          }
@@ -XXX,XX +XXX,XX @@
 #          "cache": {
 #             "direct": true
 #          },
-#           "data-file": {
+#           "file": {
 #              "driver": "file",
 #              "filename": "/tmp/test.qcow2"
 #          },
@@ -XXX,XX +XXX,XX @@
 #      "arguments": {
 #          "driver": "qcow2",
 #          "node-name": "node0",
-#          "data-file": {
+#          "file": {
 #              "driver": "file",
 #              "filename": "test.qcow2"
 #          }
--
2.37.3

From: Alberto Faria <afaria@redhat.com>

qemu_coroutine_self() can be called from outside coroutine context,
returning the leader coroutine, and several such invocations currently
exist (mostly in qcow2 tracing calls).

Signed-off-by: Alberto Faria <afaria@redhat.com>
Message-Id: <20221005175209.975797-1-afaria@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/qemu/coroutine.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/coroutine.h
+++ b/include/qemu/coroutine.h
@@ -XXX,XX +XXX,XX @@ AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co);
 /**
  * Get the currently executing coroutine
  */
-Coroutine *coroutine_fn qemu_coroutine_self(void);
+Coroutine *qemu_coroutine_self(void);
 
 /**
  * Return whether or not currently inside a coroutine
--
2.37.3

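
A minimal sketch of why the annotation can be dropped, assuming only the public coroutine API; example_trace_point() is an illustrative name and not part of the patch:

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"

    /*
     * Illustration only: this call is legal from both coroutine and
     * non-coroutine context.  Outside a coroutine it returns the leader
     * coroutine, which is what the qcow2 tracing calls rely on.
     */
    static void example_trace_point(void)
    {
        Coroutine *co = qemu_coroutine_self();

        fprintf(stderr, "running in %s context, co=%p\n",
                qemu_in_coroutine() ? "coroutine" : "non-coroutine",
                (void *)co);
    }
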
From: Paolo Bonzini <pbonzini@redhat.com>

nvme_get_free_req has very different semantics when called in
coroutine context (where it waits) and in non-coroutine context
(where it doesn't). Split the two cases to make it clear what
is being requested.

Cc: qemu-block@nongnu.org
Reviewed-by: Alberto Faria <afaria@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-2-pbonzini@redhat.com>
[kwolf: Fixed up coding style]
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/nvme.c | 48 ++++++++++++++++++++++++++++--------------------
 1 file changed, 28 insertions(+), 20 deletions(-)

diff --git a/block/nvme.c b/block/nvme.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -XXX,XX +XXX,XX @@ static void nvme_kick(NVMeQueuePair *q)
     q->need_kick = 0;
 }
 
-/* Find a free request element if any, otherwise:
- *   a) if in coroutine context, try to wait for one to become available;
- *   b) if not in coroutine, return NULL;
- */
-static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
+static NVMeRequest *nvme_get_free_req_nofail_locked(NVMeQueuePair *q)
 {
     NVMeRequest *req;
 
-    qemu_mutex_lock(&q->lock);
-
-    while (q->free_req_head == -1) {
-        if (qemu_in_coroutine()) {
-            trace_nvme_free_req_queue_wait(q->s, q->index);
-            qemu_co_queue_wait(&q->free_req_queue, &q->lock);
-        } else {
-            qemu_mutex_unlock(&q->lock);
-            return NULL;
-        }
-    }
-
     req = &q->reqs[q->free_req_head];
     q->free_req_head = req->free_req_next;
     req->free_req_next = -1;
-
-    qemu_mutex_unlock(&q->lock);
     return req;
 }
 
+/* Return a free request element if any, otherwise return NULL. */
+static NVMeRequest *nvme_get_free_req_nowait(NVMeQueuePair *q)
+{
+    QEMU_LOCK_GUARD(&q->lock);
+    if (q->free_req_head == -1) {
+        return NULL;
+    }
+    return nvme_get_free_req_nofail_locked(q);
+}
+
+/*
+ * Wait for a free request to become available if necessary, then
+ * return it.
+ */
+static coroutine_fn NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
+{
+    QEMU_LOCK_GUARD(&q->lock);
+
+    while (q->free_req_head == -1) {
+        trace_nvme_free_req_queue_wait(q->s, q->index);
+        qemu_co_queue_wait(&q->free_req_queue, &q->lock);
+    }
+
+    return nvme_get_free_req_nofail_locked(q);
+}
+
 /* With q->lock */
 static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
 {
@@ -XXX,XX +XXX,XX @@ static int nvme_admin_cmd_sync(BlockDriverState *bs, NvmeCmd *cmd)
     AioContext *aio_context = bdrv_get_aio_context(bs);
     NVMeRequest *req;
     int ret = -EINPROGRESS;
-    req = nvme_get_free_req(q);
+    req = nvme_get_free_req_nowait(q);
     if (!req) {
         return -EBUSY;
     }
--
2.37.3

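
A minimal sketch of the caller-side contract created by the split above, assuming the NVMeQueuePair definitions from block/nvme.c; only nvme_get_free_req() and nvme_get_free_req_nowait() come from the patch, the example_* helpers are made up:

    /* Coroutine context: may yield on free_req_queue until a slot is free. */
    static void coroutine_fn example_coroutine_submit(NVMeQueuePair *q)
    {
        NVMeRequest *req = nvme_get_free_req(q);

        /* ... fill in the NvmeCmd and submit it ... */
        (void)req;
    }

    /* Non-coroutine context: must not wait, so handle the NULL case. */
    static int example_sync_submit(NVMeQueuePair *q)
    {
        NVMeRequest *req = nvme_get_free_req_nowait(q);

        if (!req) {
            return -EBUSY;   /* same policy as nvme_admin_cmd_sync() */
        }
        /* ... fill in the NvmeCmd and submit it ... */
        return 0;
    }
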
From: Paolo Bonzini <pbonzini@redhat.com>

Callers of coroutine_fn must be coroutine_fn themselves, or the call
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
functions where this holds.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-3-pbonzini@redhat.com>
[kwolf: Fixed up coding style]
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block.c               |  7 ++++---
 block/block-backend.c | 10 +++++-----
 block/io.c            | 22 +++++++++++-----------
 3 files changed, 20 insertions(+), 19 deletions(-)

diff --git a/block.c b/block.c
index XXXXXXX..XXXXXXX 100644
--- a/block.c
+++ b/block.c
@@ -XXX,XX +XXX,XX @@ static int64_t create_file_fallback_truncate(BlockBackend *blk,
  * Helper function for bdrv_create_file_fallback(): Zero the first
  * sector to remove any potentially pre-existing image header.
  */
-static int create_file_fallback_zero_first_sector(BlockBackend *blk,
-                                                  int64_t current_size,
-                                                  Error **errp)
+static int coroutine_fn
+create_file_fallback_zero_first_sector(BlockBackend *blk,
+                                       int64_t current_size,
+                                       Error **errp)
 {
     int64_t bytes_to_clear;
     int ret;
diff --git a/block/block-backend.c b/block/block-backend.c
index XXXXXXX..XXXXXXX 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -XXX,XX +XXX,XX @@ static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset,
     return &acb->common;
 }
 
-static void blk_aio_read_entry(void *opaque)
+static void coroutine_fn blk_aio_read_entry(void *opaque)
 {
     BlkAioEmAIOCB *acb = opaque;
     BlkRwCo *rwco = &acb->rwco;
@@ -XXX,XX +XXX,XX @@ static void blk_aio_read_entry(void *opaque)
     blk_aio_complete(acb);
 }
 
-static void blk_aio_write_entry(void *opaque)
+static void coroutine_fn blk_aio_write_entry(void *opaque)
 {
     BlkAioEmAIOCB *acb = opaque;
     BlkRwCo *rwco = &acb->rwco;
@@ -XXX,XX +XXX,XX @@ int coroutine_fn blk_co_ioctl(BlockBackend *blk, unsigned long int req,
     return ret;
 }
 
-static void blk_aio_ioctl_entry(void *opaque)
+static void coroutine_fn blk_aio_ioctl_entry(void *opaque)
 {
     BlkAioEmAIOCB *acb = opaque;
     BlkRwCo *rwco = &acb->rwco;
@@ -XXX,XX +XXX,XX @@ blk_co_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes)
     return bdrv_co_pdiscard(blk->root, offset, bytes);
 }
 
-static void blk_aio_pdiscard_entry(void *opaque)
+static void coroutine_fn blk_aio_pdiscard_entry(void *opaque)
 {
     BlkAioEmAIOCB *acb = opaque;
     BlkRwCo *rwco = &acb->rwco;
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn blk_co_do_flush(BlockBackend *blk)
     return bdrv_co_flush(blk_bs(blk));
 }
 
-static void blk_aio_flush_entry(void *opaque)
+static void coroutine_fn blk_aio_flush_entry(void *opaque)
 {
     BlkAioEmAIOCB *acb = opaque;
     BlkRwCo *rwco = &acb->rwco;
diff --git a/block/io.c b/block/io.c
index XXXXXXX..XXXXXXX 100644
--- a/block/io.c
+++ b/block/io.c
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req)
 /**
  * Add an active request to the tracked requests list
  */
-static void tracked_request_begin(BdrvTrackedRequest *req,
-                                  BlockDriverState *bs,
-                                  int64_t offset,
-                                  int64_t bytes,
-                                  enum BdrvTrackedRequestType type)
+static void coroutine_fn tracked_request_begin(BdrvTrackedRequest *req,
+                                               BlockDriverState *bs,
+                                               int64_t offset,
+                                               int64_t bytes,
+                                               enum BdrvTrackedRequestType type)
 {
     bdrv_check_request(offset, bytes, &error_abort);
 
@@ -XXX,XX +XXX,XX @@ static bool tracked_request_overlaps(BdrvTrackedRequest *req,
 }
 
 /* Called with self->bs->reqs_lock held */
-static BdrvTrackedRequest *
+static coroutine_fn BdrvTrackedRequest *
 bdrv_find_conflicting_request(BdrvTrackedRequest *self)
 {
     BdrvTrackedRequest *req;
@@ -XXX,XX +XXX,XX @@ static bool bdrv_init_padding(BlockDriverState *bs,
     return true;
 }
 
-static int bdrv_padding_rmw_read(BdrvChild *child,
-                                 BdrvTrackedRequest *req,
-                                 BdrvRequestPadding *pad,
-                                 bool zero_middle)
+static coroutine_fn int bdrv_padding_rmw_read(BdrvChild *child,
+                                              BdrvTrackedRequest *req,
+                                              BdrvRequestPadding *pad,
+                                              bool zero_middle)
 {
     QEMUIOVector local_qiov;
     BlockDriverState *bs = child->bs;
@@ -XXX,XX +XXX,XX @@ out:
     return ret;
 }
 
-int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
+int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
 {
     BlockDriver *drv = bs->drv;
     CoroutineIOCompletion co = {
--
2.37.3

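
A minimal sketch of the rule stated above, using only public block-layer calls (bdrv_co_flush(), bdrv_flush(), qemu_in_coroutine()); both example_* functions are illustrative and not from the patch:

    /* Legal: the caller is itself coroutine_fn. */
    static int coroutine_fn example_co_caller(BlockDriverState *bs)
    {
        return bdrv_co_flush(bs);
    }

    /* Legal: the coroutine_fn call is guarded by qemu_in_coroutine(). */
    static int example_mixed_caller(BlockDriverState *bs)
    {
        if (qemu_in_coroutine()) {
            return bdrv_co_flush(bs);
        }
        return bdrv_flush(bs);   /* plain context takes the synchronous path */
    }
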
142 | diff view generated by jsdifflib |
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Add additional check that node parents do not interfere with each | 3 | This is incorrect because qcow2_mark_clean() calls qcow2_flush_caches(). |
4 | other. This should not hurt existing callers and allows in further | 4 | qcow2_mark_clean() is called from non-coroutine context in |
5 | patch use bdrv_refresh_perms() to update a subtree of changed | 5 | qcow2_inactivate() and qcow2_amend_options(). |
6 | BdrvChild (check that change is correct). | ||
7 | 6 | ||
8 | New check will substitute bdrv_check_update_perm() in following | 7 | Reviewed-by: Alberto Faria <afaria@redhat.com> |
9 | permissions refactoring, so keep error messages the same to avoid | 8 | Reviewed-by: Eric Blake <eblake@redhat.com> |
10 | unit test result changes. | 9 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
11 | 10 | Message-Id: <20220922084924.201610-4-pbonzini@redhat.com> | |
12 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
13 | Reviewed-by: Alberto Garcia <berto@igalia.com> | ||
14 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
15 | Message-Id: <20210428151804.439460-10-vsementsov@virtuozzo.com> | ||
16 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
17 | --- | 13 | --- |
18 | block.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++++--------- | 14 | block/qcow2.h | 4 ++-- |
19 | 1 file changed, 54 insertions(+), 9 deletions(-) | 15 | block/qcow2-refcount.c | 4 ++-- |
16 | 2 files changed, 4 insertions(+), 4 deletions(-) | ||
20 | 17 | ||
21 | diff --git a/block.c b/block.c | 18 | diff --git a/block/qcow2.h b/block/qcow2.h |
22 | index XXXXXXX..XXXXXXX 100644 | 19 | index XXXXXXX..XXXXXXX 100644 |
23 | --- a/block.c | 20 | --- a/block/qcow2.h |
24 | +++ b/block.c | 21 | +++ b/block/qcow2.h |
25 | @@ -XXX,XX +XXX,XX @@ bool bdrv_is_writable(BlockDriverState *bs) | 22 | @@ -XXX,XX +XXX,XX @@ void qcow2_free_any_cluster(BlockDriverState *bs, uint64_t l2_entry, |
26 | return bdrv_is_writable_after_reopen(bs, NULL); | 23 | int qcow2_update_snapshot_refcount(BlockDriverState *bs, |
24 | int64_t l1_table_offset, int l1_size, int addend); | ||
25 | |||
26 | -int coroutine_fn qcow2_flush_caches(BlockDriverState *bs); | ||
27 | -int coroutine_fn qcow2_write_caches(BlockDriverState *bs); | ||
28 | +int qcow2_flush_caches(BlockDriverState *bs); | ||
29 | +int qcow2_write_caches(BlockDriverState *bs); | ||
30 | int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res, | ||
31 | BdrvCheckMode fix); | ||
32 | |||
33 | diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c | ||
34 | index XXXXXXX..XXXXXXX 100644 | ||
35 | --- a/block/qcow2-refcount.c | ||
36 | +++ b/block/qcow2-refcount.c | ||
37 | @@ -XXX,XX +XXX,XX @@ void qcow2_free_any_cluster(BlockDriverState *bs, uint64_t l2_entry, | ||
38 | } | ||
27 | } | 39 | } |
28 | 40 | ||
29 | +static char *bdrv_child_user_desc(BdrvChild *c) | 41 | -int coroutine_fn qcow2_write_caches(BlockDriverState *bs) |
30 | +{ | 42 | +int qcow2_write_caches(BlockDriverState *bs) |
31 | + if (c->klass->get_parent_desc) { | 43 | { |
32 | + return c->klass->get_parent_desc(c); | 44 | BDRVQcow2State *s = bs->opaque; |
33 | + } | 45 | int ret; |
34 | + | 46 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn qcow2_write_caches(BlockDriverState *bs) |
35 | + return g_strdup("another user"); | 47 | return 0; |
36 | +} | ||
37 | + | ||
38 | +static bool bdrv_a_allow_b(BdrvChild *a, BdrvChild *b, Error **errp) | ||
39 | +{ | ||
40 | + g_autofree char *user = NULL; | ||
41 | + g_autofree char *perm_names = NULL; | ||
42 | + | ||
43 | + if ((b->perm & a->shared_perm) == b->perm) { | ||
44 | + return true; | ||
45 | + } | ||
46 | + | ||
47 | + perm_names = bdrv_perm_names(b->perm & ~a->shared_perm); | ||
48 | + user = bdrv_child_user_desc(a); | ||
49 | + error_setg(errp, "Conflicts with use by %s as '%s', which does not " | ||
50 | + "allow '%s' on %s", | ||
51 | + user, a->name, perm_names, bdrv_get_node_name(b->bs)); | ||
52 | + | ||
53 | + return false; | ||
54 | +} | ||
55 | + | ||
56 | +static bool bdrv_parent_perms_conflict(BlockDriverState *bs, Error **errp) | ||
57 | +{ | ||
58 | + BdrvChild *a, *b; | ||
59 | + | ||
60 | + /* | ||
61 | + * During the loop we'll look at each pair twice. That's correct because | ||
62 | + * bdrv_a_allow_b() is asymmetric and we should check each pair in both | ||
63 | + * directions. | ||
64 | + */ | ||
65 | + QLIST_FOREACH(a, &bs->parents, next_parent) { | ||
66 | + QLIST_FOREACH(b, &bs->parents, next_parent) { | ||
67 | + if (a == b) { | ||
68 | + continue; | ||
69 | + } | ||
70 | + | ||
71 | + if (!bdrv_a_allow_b(a, b, errp)) { | ||
72 | + return true; | ||
73 | + } | ||
74 | + } | ||
75 | + } | ||
76 | + | ||
77 | + return false; | ||
78 | +} | ||
79 | + | ||
80 | static void bdrv_child_perm(BlockDriverState *bs, BlockDriverState *child_bs, | ||
81 | BdrvChild *c, BdrvChildRole role, | ||
82 | BlockReopenQueue *reopen_queue, | ||
83 | @@ -XXX,XX +XXX,XX @@ void bdrv_get_cumulative_perm(BlockDriverState *bs, uint64_t *perm, | ||
84 | *shared_perm = cumulative_shared_perms; | ||
85 | } | 48 | } |
86 | 49 | ||
87 | -static char *bdrv_child_user_desc(BdrvChild *c) | 50 | -int coroutine_fn qcow2_flush_caches(BlockDriverState *bs) |
88 | -{ | 51 | +int qcow2_flush_caches(BlockDriverState *bs) |
89 | - if (c->klass->get_parent_desc) { | ||
90 | - return c->klass->get_parent_desc(c); | ||
91 | - } | ||
92 | - | ||
93 | - return g_strdup("another user"); | ||
94 | -} | ||
95 | - | ||
96 | char *bdrv_perm_names(uint64_t perm) | ||
97 | { | 52 | { |
98 | struct perm_name { | 53 | int ret = qcow2_write_caches(bs); |
99 | @@ -XXX,XX +XXX,XX @@ static int bdrv_refresh_perms(BlockDriverState *bs, Error **errp) | ||
100 | int ret; | ||
101 | uint64_t perm, shared_perm; | ||
102 | |||
103 | + if (bdrv_parent_perms_conflict(bs, errp)) { | ||
104 | + return -EPERM; | ||
105 | + } | ||
106 | bdrv_get_cumulative_perm(bs, &perm, &shared_perm); | ||
107 | ret = bdrv_check_perm(bs, NULL, perm, shared_perm, NULL, errp); | ||
108 | if (ret < 0) { | 54 | if (ret < 0) { |
109 | -- | 55 | -- |
110 | 2.30.2 | 56 | 2.37.3 |
111 | |||
112 | diff view generated by jsdifflib |
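
A minimal sketch of the reverse direction of the same rule, assuming only the qcow2_flush_caches() prototype shown in the patch; example_inactivate_path() is a made-up stand-in for the qcow2_inactivate() -> qcow2_mark_clean() chain named in the commit message:

    /*
     * Illustration only: because this chain runs in plain (non-coroutine)
     * context, anything it reaches must not be coroutine_fn, which is why
     * the annotations above had to be removed.
     */
    static int example_inactivate_path(BlockDriverState *bs)
    {
        assert(!qemu_in_coroutine());

        return qcow2_flush_caches(bs);
    }
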
From: Paolo Bonzini <pbonzini@redhat.com>

nbd_co_establish_connection_cancel() cancels a coroutine but is not called
from coroutine context itself, for example in nbd_cancel_in_flight()
and in timer callbacks reconnect_delay_timer_cb() and open_timer_cb().

Reviewed-by: Alberto Faria <afaria@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-5-pbonzini@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/block/nbd.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/block/nbd.h b/include/block/nbd.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/nbd.h
+++ b/include/block/nbd.h
@@ -XXX,XX +XXX,XX @@ QIOChannel *coroutine_fn
 nbd_co_establish_connection(NBDClientConnection *conn, NBDExportInfo *info,
                             bool blocking, Error **errp);
 
-void coroutine_fn nbd_co_establish_connection_cancel(NBDClientConnection *conn);
+void nbd_co_establish_connection_cancel(NBDClientConnection *conn);
 
 #endif
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

qemu_coroutine_get_aio_context inspects a coroutine, but it does
not have to be called from the coroutine itself (or from any
coroutine).

Reviewed-by: Alberto Faria <afaria@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-6-pbonzini@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/qemu/coroutine.h | 2 +-
 util/qemu-coroutine.c    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/coroutine.h
+++ b/include/qemu/coroutine.h
@@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_coroutine_yield(void);
 /**
  * Get the AioContext of the given coroutine
  */
-AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co);
+AioContext *qemu_coroutine_get_aio_context(Coroutine *co);
 
 /**
  * Get the currently executing coroutine
diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c
index XXXXXXX..XXXXXXX 100644
--- a/util/qemu-coroutine.c
+++ b/util/qemu-coroutine.c
@@ -XXX,XX +XXX,XX @@ bool qemu_coroutine_entered(Coroutine *co)
    return co->caller;
 }
 
-AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co)
+AioContext *qemu_coroutine_get_aio_context(Coroutine *co)
 {
     return co->ctx;
 }
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

Callers of coroutine_fn must be coroutine_fn themselves, or the call
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
functions where this holds.

Reviewed-by: Alberto Faria <afaria@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-8-pbonzini@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/blkverify.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/blkverify.c b/block/blkverify.c
index XXXXXXX..XXXXXXX 100644
--- a/block/blkverify.c
+++ b/block/blkverify.c
@@ -XXX,XX +XXX,XX @@ blkverify_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
     return blkverify_co_prwv(bs, &r, offset, bytes, qiov, qiov, flags, true);
 }
 
-static int blkverify_co_flush(BlockDriverState *bs)
+static int coroutine_fn blkverify_co_flush(BlockDriverState *bs)
 {
     BDRVBlkverifyState *s = bs->opaque;
 
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

Callers of coroutine_fn must be coroutine_fn themselves, or the call
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
functions where this holds.

Reviewed-by: Alberto Faria <afaria@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-9-pbonzini@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/file-posix.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
288 | + ret = bdrv_set_backing_noperm(bs, new_backing_bs, set_backings_tran, | ||
289 | + errp); | ||
290 | + if (ret < 0) { | ||
291 | + return ret; | ||
292 | } | ||
293 | } | ||
294 | |||
295 | @@ -XXX,XX +XXX,XX @@ static int bdrv_reopen_parse_backing(BDRVReopenState *reopen_state, | ||
296 | * | ||
297 | */ | ||
298 | static int bdrv_reopen_prepare(BDRVReopenState *reopen_state, | ||
299 | - BlockReopenQueue *queue, Error **errp) | ||
300 | + BlockReopenQueue *queue, | ||
301 | + Transaction *set_backings_tran, Error **errp) | ||
302 | { | ||
303 | int ret = -1; | ||
304 | int old_flags; | ||
305 | @@ -XXX,XX +XXX,XX @@ static int bdrv_reopen_prepare(BDRVReopenState *reopen_state, | ||
306 | goto error; | ||
307 | } | ||
308 | |||
309 | - /* Calculate required permissions after reopening */ | ||
310 | - bdrv_reopen_perm(queue, reopen_state->bs, | ||
311 | - &reopen_state->perm, &reopen_state->shared_perm); | ||
312 | - | ||
313 | if (drv->bdrv_reopen_prepare) { | ||
314 | /* | ||
315 | * If a driver-specific option is missing, it means that we | ||
316 | @@ -XXX,XX +XXX,XX @@ static int bdrv_reopen_prepare(BDRVReopenState *reopen_state, | ||
317 | * either a reference to an existing node (using its node name) | ||
318 | * or NULL to simply detach the current backing file. | ||
319 | */ | ||
320 | - ret = bdrv_reopen_parse_backing(reopen_state, errp); | ||
321 | + ret = bdrv_reopen_parse_backing(reopen_state, set_backings_tran, errp); | ||
322 | if (ret < 0) { | ||
323 | goto error; | ||
324 | } | ||
325 | @@ -XXX,XX +XXX,XX @@ static void bdrv_reopen_commit(BDRVReopenState *reopen_state) | ||
326 | qdict_del(bs->explicit_options, child->name); | ||
327 | qdict_del(bs->options, child->name); | ||
328 | } | ||
329 | - | ||
330 | - /* | ||
331 | - * Change the backing file if a new one was specified. We do this | ||
332 | - * after updating bs->options, so bdrv_refresh_filename() (called | ||
333 | - * from bdrv_set_backing_hd()) has the new values. | ||
334 | - */ | ||
335 | - if (reopen_state->replace_backing_bs) { | ||
336 | - BlockDriverState *old_backing_bs = child_bs(bs->backing); | ||
337 | - assert(!old_backing_bs || !old_backing_bs->implicit); | ||
338 | - /* Abort the permission update on the backing bs we're detaching */ | ||
339 | - if (old_backing_bs) { | ||
340 | - bdrv_abort_perm_update(old_backing_bs); | ||
341 | - } | ||
342 | - bdrv_set_backing_hd(bs, reopen_state->new_backing_bs, &error_abort); | ||
343 | - } | ||
344 | - | ||
345 | bdrv_refresh_limits(bs, NULL, NULL); | ||
346 | } | ||
347 | |||
348 | diff --git a/block/file-posix.c b/block/file-posix.c | 17 | diff --git a/block/file-posix.c b/block/file-posix.c |
349 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
350 | --- a/block/file-posix.c | 19 | --- a/block/file-posix.c |
351 | +++ b/block/file-posix.c | 20 | +++ b/block/file-posix.c |
352 | @@ -XXX,XX +XXX,XX @@ typedef struct BDRVRawState { | 21 | @@ -XXX,XX +XXX,XX @@ static void raw_aio_unplug(BlockDriverState *bs) |
353 | } BDRVRawState; | 22 | #endif |
354 | 23 | } | |
355 | typedef struct BDRVRawReopenState { | 24 | |
356 | - int fd; | 25 | -static int raw_co_flush_to_disk(BlockDriverState *bs) |
357 | int open_flags; | 26 | +static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs) |
358 | bool drop_cache; | ||
359 | bool check_cache_dropped; | ||
360 | @@ -XXX,XX +XXX,XX @@ static int raw_reopen_prepare(BDRVReopenState *state, | ||
361 | BDRVRawReopenState *rs; | ||
362 | QemuOpts *opts; | ||
363 | int ret; | ||
364 | - Error *local_err = NULL; | ||
365 | |||
366 | assert(state != NULL); | ||
367 | assert(state->bs != NULL); | ||
368 | @@ -XXX,XX +XXX,XX @@ static int raw_reopen_prepare(BDRVReopenState *state, | ||
369 | * bdrv_reopen_prepare() will detect changes and complain. */ | ||
370 | qemu_opts_to_qdict(opts, state->options); | ||
371 | |||
372 | - rs->fd = raw_reconfigure_getfd(state->bs, state->flags, &rs->open_flags, | ||
373 | - state->perm, true, &local_err); | ||
374 | - if (local_err) { | ||
375 | - error_propagate(errp, local_err); | ||
376 | - ret = -1; | ||
377 | - goto out; | ||
378 | - } | ||
379 | - | ||
380 | - /* Fail already reopen_prepare() if we can't get a working O_DIRECT | ||
381 | - * alignment with the new fd. */ | ||
382 | - if (rs->fd != -1) { | ||
383 | - raw_probe_alignment(state->bs, rs->fd, &local_err); | ||
384 | - if (local_err) { | ||
385 | - error_propagate(errp, local_err); | ||
386 | - ret = -EINVAL; | ||
387 | - goto out_fd; | ||
388 | - } | ||
389 | - } | ||
390 | + /* | ||
391 | + * As part of reopen prepare we also want to create a new fd via | ||
392 | + * raw_reconfigure_getfd(). But it needs the updated "perm", while in | ||
393 | + * bdrv_reopen_multiple() the .bdrv_reopen_prepare() callback is called prior | ||
394 | + * to the permission update. Happily, the permission update is always a part | ||
395 | + * (a separate stage) of bdrv_reopen_multiple(), so we can rely on this fact | ||
396 | + * and reconfigure the fd in raw_check_perm(). | ||
397 | + */ | ||
398 | |||
399 | s->reopen_state = state; | ||
400 | ret = 0; | ||
401 | -out_fd: | ||
402 | - if (ret < 0) { | ||
403 | - qemu_close(rs->fd); | ||
404 | - rs->fd = -1; | ||
405 | - } | ||
406 | + | ||
407 | out: | ||
408 | qemu_opts_del(opts); | ||
409 | return ret; | ||
410 | @@ -XXX,XX +XXX,XX @@ static void raw_reopen_commit(BDRVReopenState *state) | ||
411 | s->drop_cache = rs->drop_cache; | ||
412 | s->check_cache_dropped = rs->check_cache_dropped; | ||
413 | s->open_flags = rs->open_flags; | ||
414 | - | ||
415 | - qemu_close(s->fd); | ||
416 | - s->fd = rs->fd; | ||
417 | - | ||
418 | g_free(state->opaque); | ||
419 | state->opaque = NULL; | ||
420 | |||
421 | @@ -XXX,XX +XXX,XX @@ static void raw_reopen_abort(BDRVReopenState *state) | ||
422 | return; | ||
423 | } | ||
424 | |||
425 | - if (rs->fd >= 0) { | ||
426 | - qemu_close(rs->fd); | ||
427 | - rs->fd = -1; | ||
428 | - } | ||
429 | g_free(state->opaque); | ||
430 | state->opaque = NULL; | ||
431 | |||
432 | @@ -XXX,XX +XXX,XX @@ static int raw_check_perm(BlockDriverState *bs, uint64_t perm, uint64_t shared, | ||
433 | Error **errp) | ||
434 | { | 27 | { |
435 | BDRVRawState *s = bs->opaque; | 28 | BDRVRawState *s = bs->opaque; |
436 | - BDRVRawReopenState *rs = NULL; | 29 | RawPosixAIOData acb; |
437 | + int input_flags = s->reopen_state ? s->reopen_state->flags : bs->open_flags; | ||
438 | int open_flags; | ||
439 | int ret; | ||
440 | |||
441 | - if (s->perm_change_fd) { | ||
442 | + /* We may need a new fd if auto-read-only switches the mode */ | ||
443 | + ret = raw_reconfigure_getfd(bs, input_flags, &open_flags, perm, | ||
444 | + false, errp); | ||
445 | + if (ret < 0) { | ||
446 | + return ret; | ||
447 | + } else if (ret != s->fd) { | ||
448 | + Error *local_err = NULL; | ||
449 | + | ||
450 | /* | ||
451 | - * In the context of reopen, this function may be called several times | ||
452 | - * (directly and recursively while change permissions of the parent). | ||
453 | - * This is even true for children that don't inherit from the original | ||
454 | - * reopen node, so s->reopen_state is not set. | ||
455 | - * | ||
456 | - * Ignore all but the first call. | ||
457 | + * Fail already check_perm() if we can't get a working O_DIRECT | ||
458 | + * alignment with the new fd. | ||
459 | */ | ||
460 | - return 0; | ||
461 | - } | ||
462 | - | ||
463 | - if (s->reopen_state) { | ||
464 | - /* We already have a new file descriptor to set permissions for */ | ||
465 | - assert(s->reopen_state->perm == perm); | ||
466 | - assert(s->reopen_state->shared_perm == shared); | ||
467 | - rs = s->reopen_state->opaque; | ||
468 | - s->perm_change_fd = rs->fd; | ||
469 | - s->perm_change_flags = rs->open_flags; | ||
470 | - } else { | ||
471 | - /* We may need a new fd if auto-read-only switches the mode */ | ||
472 | - ret = raw_reconfigure_getfd(bs, bs->open_flags, &open_flags, perm, | ||
473 | - false, errp); | ||
474 | - if (ret < 0) { | ||
475 | - return ret; | ||
476 | - } else if (ret != s->fd) { | ||
477 | - s->perm_change_fd = ret; | ||
478 | - s->perm_change_flags = open_flags; | ||
479 | + raw_probe_alignment(bs, ret, &local_err); | ||
480 | + if (local_err) { | ||
481 | + error_propagate(errp, local_err); | ||
482 | + return -EINVAL; | ||
483 | } | ||
484 | + | ||
485 | + s->perm_change_fd = ret; | ||
486 | + s->perm_change_flags = open_flags; | ||
487 | } | ||
488 | |||
489 | /* Prepare permissions on old fd to avoid conflicts between old and new, | ||
490 | @@ -XXX,XX +XXX,XX @@ static int raw_check_perm(BlockDriverState *bs, uint64_t perm, uint64_t shared, | ||
491 | return 0; | ||
492 | |||
493 | fail: | ||
494 | - if (s->perm_change_fd && !s->reopen_state) { | ||
495 | + if (s->perm_change_fd) { | ||
496 | qemu_close(s->perm_change_fd); | ||
497 | } | ||
498 | s->perm_change_fd = 0; | ||
499 | @@ -XXX,XX +XXX,XX @@ static void raw_abort_perm_update(BlockDriverState *bs) | ||
500 | |||
501 | /* For reopen, .bdrv_reopen_abort is called afterwards and will close | ||
502 | * the file descriptor. */ | ||
503 | - if (s->perm_change_fd && !s->reopen_state) { | ||
504 | + if (s->perm_change_fd) { | ||
505 | qemu_close(s->perm_change_fd); | ||
506 | } | ||
507 | s->perm_change_fd = 0; | ||
508 | -- | 30 | -- |
509 | 2.30.2 | 31 | 2.37.3 |
510 | |||
511 | diff view generated by jsdifflib |
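The reopen rework above hinges on QEMU's commit/abort transaction helpers (tran_add()/tran_commit()/tran_abort()): every graph or permission change registers an action, and bdrv_reopen_multiple() then either commits the whole queue or rolls all of it back. The sketch below is a standalone, simplified illustration of that pattern with made-up types, not the actual QEMU helpers; in the real code the per-action state is, for example, the old BdrvChild permissions, as later hunks in this series show.

    #include <stdio.h>
    #include <stdlib.h>

    /* One registered step of a transaction. */
    typedef struct Action {
        void (*commit)(void *opaque);   /* finalize the step on success */
        void (*abort)(void *opaque);    /* undo the step on failure */
        void (*clean)(void *opaque);    /* always runs last, frees state */
        void *opaque;
        struct Action *next;
    } Action;

    typedef struct Transaction {
        Action *list;                   /* LIFO: later steps are undone first */
    } Transaction;

    static void tran_add(Transaction *t, Action *a)
    {
        a->next = t->list;
        t->list = a;
    }

    /* ret == 0 plays the role of tran_commit(), ret < 0 of tran_abort(). */
    static void tran_finalize(Transaction *t, int ret)
    {
        Action *a;

        while ((a = t->list) != NULL) {
            t->list = a->next;
            if (ret == 0 && a->commit) {
                a->commit(a->opaque);
            } else if (ret < 0 && a->abort) {
                a->abort(a->opaque);
            }
            if (a->clean) {
                a->clean(a->opaque);
            }
            free(a);
        }
    }

    /* Example action: set an int, restore the old value if the transaction aborts. */
    typedef struct SetIntState { int *var; int old; } SetIntState;

    static void set_int_abort(void *opaque)
    {
        SetIntState *s = opaque;
        *s->var = s->old;
    }

    static void set_int(Transaction *t, int *var, int value)
    {
        SetIntState *s = malloc(sizeof(*s));
        Action *a = calloc(1, sizeof(*a));

        s->var = var;
        s->old = *var;
        *var = value;

        a->abort = set_int_abort;
        a->clean = free;                /* frees the SetIntState */
        a->opaque = s;
        tran_add(t, a);
    }

    int main(void)
    {
        Transaction t = { NULL };
        int perm = 0;

        set_int(&t, &perm, 42);
        tran_finalize(&t, -1);          /* simulated failure: the value rolls back */
        printf("perm after abort: %d\n", perm);
        return 0;
    }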
1 | Normally, blk_new_open() just shares all permissions. This was fine | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | originally when permissions only protected against uses in the same | ||
3 | process because no other part of the code would actually get to access | ||
4 | the block nodes opened with blk_new_open(). However, since we use it for | ||
5 | file locking now, unsharing permissions becomes desirable. | ||
6 | 2 | ||
7 | Add a new BDRV_O_NO_SHARE flag that is used in blk_new_open() to unshare | 3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call |
8 | any permissions that can be unshared. | 4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to |
5 | functions where this holds. | ||
9 | 6 | ||
10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 7 | Reviewed-by: Alberto Faria <afaria@redhat.com> |
11 | Message-Id: <20210422164344.283389-2-kwolf@redhat.com> | ||
12 | Reviewed-by: Eric Blake <eblake@redhat.com> | 8 | Reviewed-by: Eric Blake <eblake@redhat.com> |
13 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 9 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
10 | Message-Id: <20220922084924.201610-10-pbonzini@redhat.com> | ||
11 | [kwolf: Fixed up coding style] | ||
12 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
14 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 13 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
15 | --- | 14 | --- |
16 | include/block/block.h | 1 + | 15 | block/iscsi.c | 3 ++- |
17 | block/block-backend.c | 19 +++++++++++++------ | 16 | 1 file changed, 2 insertions(+), 1 deletion(-) |
18 | 2 files changed, 14 insertions(+), 6 deletions(-) | ||
19 | 17 | ||
20 | diff --git a/include/block/block.h b/include/block/block.h | 18 | diff --git a/block/iscsi.c b/block/iscsi.c |
21 | index XXXXXXX..XXXXXXX 100644 | 19 | index XXXXXXX..XXXXXXX 100644 |
22 | --- a/include/block/block.h | 20 | --- a/block/iscsi.c |
23 | +++ b/include/block/block.h | 21 | +++ b/block/iscsi.c |
24 | @@ -XXX,XX +XXX,XX @@ typedef struct HDGeometry { | 22 | @@ -XXX,XX +XXX,XX @@ iscsi_co_generic_cb(struct iscsi_context *iscsi, int status, |
25 | uint32_t cylinders; | ||
26 | } HDGeometry; | ||
27 | |||
28 | +#define BDRV_O_NO_SHARE 0x0001 /* don't share permissions */ | ||
29 | #define BDRV_O_RDWR 0x0002 | ||
30 | #define BDRV_O_RESIZE 0x0004 /* request permission for resizing the node */ | ||
31 | #define BDRV_O_SNAPSHOT 0x0008 /* open the file read only and save writes in a snapshot */ | ||
32 | diff --git a/block/block-backend.c b/block/block-backend.c | ||
33 | index XXXXXXX..XXXXXXX 100644 | ||
34 | --- a/block/block-backend.c | ||
35 | +++ b/block/block-backend.c | ||
36 | @@ -XXX,XX +XXX,XX @@ BlockBackend *blk_new_open(const char *filename, const char *reference, | ||
37 | BlockBackend *blk; | ||
38 | BlockDriverState *bs; | ||
39 | uint64_t perm = 0; | ||
40 | + uint64_t shared = BLK_PERM_ALL; | ||
41 | |||
42 | - /* blk_new_open() is mainly used in .bdrv_create implementations and the | ||
43 | - * tools where sharing isn't a concern because the BDS stays private, so we | ||
44 | - * just request permission according to the flags. | ||
45 | + /* | ||
46 | + * blk_new_open() is mainly used in .bdrv_create implementations and the | ||
47 | + * tools where sharing isn't a major concern because the BDS stays private | ||
48 | + * and the file is generally not supposed to be used by a second process, | ||
49 | + * so we just request permission according to the flags. | ||
50 | * | ||
51 | * The exceptions are xen_disk and blockdev_init(); in these cases, the | ||
52 | * caller of blk_new_open() doesn't make use of the permissions, but they | ||
53 | * shouldn't hurt either. We can still share everything here because the | ||
54 | - * guest devices will add their own blockers if they can't share. */ | ||
55 | + * guest devices will add their own blockers if they can't share. | ||
56 | + */ | ||
57 | if ((flags & BDRV_O_NO_IO) == 0) { | ||
58 | perm |= BLK_PERM_CONSISTENT_READ; | ||
59 | if (flags & BDRV_O_RDWR) { | ||
60 | @@ -XXX,XX +XXX,XX @@ BlockBackend *blk_new_open(const char *filename, const char *reference, | ||
61 | if (flags & BDRV_O_RESIZE) { | ||
62 | perm |= BLK_PERM_RESIZE; | ||
63 | } | 23 | } |
64 | + if (flags & BDRV_O_NO_SHARE) { | 24 | } |
65 | + shared = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED; | 25 | |
66 | + } | 26 | -static void iscsi_co_init_iscsitask(IscsiLun *iscsilun, struct IscsiTask *iTask) |
67 | 27 | +static void coroutine_fn | |
68 | - blk = blk_new(qemu_get_aio_context(), perm, BLK_PERM_ALL); | 28 | +iscsi_co_init_iscsitask(IscsiLun *iscsilun, struct IscsiTask *iTask) |
69 | + blk = blk_new(qemu_get_aio_context(), perm, shared); | 29 | { |
70 | bs = bdrv_open(filename, reference, options, flags, errp); | 30 | *iTask = (struct IscsiTask) { |
71 | if (!bs) { | 31 | .co = qemu_coroutine_self(), |
72 | blk_unref(blk); | ||
73 | @@ -XXX,XX +XXX,XX @@ BlockBackend *blk_new_open(const char *filename, const char *reference, | ||
74 | |||
75 | blk->root = bdrv_root_attach_child(bs, "root", &child_root, | ||
76 | BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, | ||
77 | - perm, BLK_PERM_ALL, blk, errp); | ||
78 | + perm, shared, blk, errp); | ||
79 | if (!blk->root) { | ||
80 | blk_unref(blk); | ||
81 | return NULL; | ||
82 | -- | 32 | -- |
83 | 2.30.2 | 33 | 2.37.3 |
84 | |||
85 | diff view generated by jsdifflib |
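To make the new flag concrete: a hypothetical read-only user (say, an inspection tool) that must not tolerate concurrent writers could open its image with BDRV_O_NO_SHARE. This is only a fragment sketched against the interface added above; filename, options and the enclosing function are assumed to exist in the caller.

    BlockBackend *blk;
    Error *local_err = NULL;

    /* Request read access only, and do not share write access with others. */
    blk = blk_new_open(filename, NULL, options, BDRV_O_NO_SHARE, &local_err);
    if (!blk) {
        error_reportf_err(local_err, "Could not open '%s': ", filename);
        return -1;
    }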
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call | ||
4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to | ||
5 | functions where this holds. | ||
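In other words, a coroutine_fn may only be entered from coroutine context, and a caller that can run in either context has to check first. A rough sketch of a caller that follows this rule (nbd_do_something_co() is a made-up name; qemu_in_coroutine(), qemu_coroutine_create() and qemu_coroutine_enter() are the real helpers):

    static int coroutine_fn nbd_do_something_co(BDRVNBDState *s);   /* made up */

    static int nbd_do_something(BDRVNBDState *s)
    {
        if (qemu_in_coroutine()) {
            /* Already in coroutine context: the direct call is legal. */
            return nbd_do_something_co(s);
        }

        /*
         * Outside coroutine context the coroutine_fn must not be called
         * directly; spawn a coroutine instead (qemu_coroutine_create() +
         * qemu_coroutine_enter()) and wait for it to finish.
         */
        return -ENOTSUP;
    }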
6 | |||
7 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
8 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
9 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
10 | Message-Id: <20220922084924.201610-11-pbonzini@redhat.com> | ||
11 | [kwolf: Fixed up coding style] | ||
12 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
13 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
14 | --- | ||
15 | block/nbd.c | 11 ++++++----- | ||
16 | 1 file changed, 6 insertions(+), 5 deletions(-) | ||
17 | |||
18 | diff --git a/block/nbd.c b/block/nbd.c | ||
19 | index XXXXXXX..XXXXXXX 100644 | ||
20 | --- a/block/nbd.c | ||
21 | +++ b/block/nbd.c | ||
22 | @@ -XXX,XX +XXX,XX @@ static void nbd_iter_request_error(NBDReplyChunkIter *iter, int ret) | ||
23 | * nbd_reply_chunk_iter_receive | ||
24 | * The pointer stored in @payload requires g_free() to free it. | ||
25 | */ | ||
26 | -static bool nbd_reply_chunk_iter_receive(BDRVNBDState *s, | ||
27 | - NBDReplyChunkIter *iter, | ||
28 | - uint64_t handle, | ||
29 | - QEMUIOVector *qiov, NBDReply *reply, | ||
30 | - void **payload) | ||
31 | +static bool coroutine_fn nbd_reply_chunk_iter_receive(BDRVNBDState *s, | ||
32 | + NBDReplyChunkIter *iter, | ||
33 | + uint64_t handle, | ||
34 | + QEMUIOVector *qiov, | ||
35 | + NBDReply *reply, | ||
36 | + void **payload) | ||
37 | { | ||
38 | int ret, request_ret; | ||
39 | NBDReply local_reply; | ||
40 | -- | ||
41 | 2.37.3 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call | ||
4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to | ||
5 | functions where this holds. | ||
6 | |||
7 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
9 | Message-Id: <20220922084924.201610-12-pbonzini@redhat.com> | ||
10 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
12 | --- | ||
13 | block/nfs.c | 2 +- | ||
14 | 1 file changed, 1 insertion(+), 1 deletion(-) | ||
15 | |||
16 | diff --git a/block/nfs.c b/block/nfs.c | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/block/nfs.c | ||
19 | +++ b/block/nfs.c | ||
20 | @@ -XXX,XX +XXX,XX @@ static void nfs_process_write(void *arg) | ||
21 | qemu_mutex_unlock(&client->mutex); | ||
22 | } | ||
23 | |||
24 | -static void nfs_co_init_task(BlockDriverState *bs, NFSRPC *task) | ||
25 | +static void coroutine_fn nfs_co_init_task(BlockDriverState *bs, NFSRPC *task) | ||
26 | { | ||
27 | *task = (NFSRPC) { | ||
28 | .co = qemu_coroutine_self(), | ||
29 | -- | ||
30 | 2.37.3 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call | ||
4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to | ||
5 | functions where this holds. | ||
6 | |||
7 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
9 | Message-Id: <20220922084924.201610-13-pbonzini@redhat.com> | ||
10 | [kwolf: Fixed up coding style] | ||
11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
13 | --- | ||
14 | block/nvme.c | 6 ++++-- | ||
15 | 1 file changed, 4 insertions(+), 2 deletions(-) | ||
16 | |||
17 | diff --git a/block/nvme.c b/block/nvme.c | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/block/nvme.c | ||
20 | +++ b/block/nvme.c | ||
21 | @@ -XXX,XX +XXX,XX @@ static inline bool nvme_qiov_aligned(BlockDriverState *bs, | ||
22 | return true; | ||
23 | } | ||
24 | |||
25 | -static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes, | ||
26 | - QEMUIOVector *qiov, bool is_write, int flags) | ||
27 | +static coroutine_fn int nvme_co_prw(BlockDriverState *bs, | ||
28 | + uint64_t offset, uint64_t bytes, | ||
29 | + QEMUIOVector *qiov, bool is_write, | ||
30 | + int flags) | ||
31 | { | ||
32 | BDRVNVMeState *s = bs->opaque; | ||
33 | int r; | ||
34 | -- | ||
35 | 2.37.3 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call | ||
4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to | ||
5 | functions where this holds. | ||
6 | |||
7 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
9 | Message-Id: <20220922084924.201610-14-pbonzini@redhat.com> | ||
10 | [kwolf: Fixed up coding style] | ||
11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
13 | --- | ||
14 | block/parallels.c | 5 +++-- | ||
15 | 1 file changed, 3 insertions(+), 2 deletions(-) | ||
16 | |||
17 | diff --git a/block/parallels.c b/block/parallels.c | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/block/parallels.c | ||
20 | +++ b/block/parallels.c | ||
21 | @@ -XXX,XX +XXX,XX @@ static int64_t block_status(BDRVParallelsState *s, int64_t sector_num, | ||
22 | return start_off; | ||
23 | } | ||
24 | |||
25 | -static int64_t allocate_clusters(BlockDriverState *bs, int64_t sector_num, | ||
26 | - int nb_sectors, int *pnum) | ||
27 | +static coroutine_fn int64_t allocate_clusters(BlockDriverState *bs, | ||
28 | + int64_t sector_num, | ||
29 | + int nb_sectors, int *pnum) | ||
30 | { | ||
31 | int ret = 0; | ||
32 | BDRVParallelsState *s = bs->opaque; | ||
33 | -- | ||
34 | 2.37.3 | diff view generated by jsdifflib |
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | bdrv_replace_child() has only one caller, and the second argument is | 3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call |
4 | unused. Inline it now. This triggers deletion of some more unused | 4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to |
5 | interfaces. | 5 | functions where this holds. |
6 | 6 | ||
7 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 7 | Reviewed-by: Alberto Faria <afaria@redhat.com> |
8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
9 | Message-Id: <20220922084924.201610-15-pbonzini@redhat.com> | ||
10 | [kwolf: Fixed up coding style] | ||
8 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
9 | Message-Id: <20210428151804.439460-34-vsementsov@virtuozzo.com> | ||
10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
11 | --- | 13 | --- |
12 | block.c | 101 ++++++++++---------------------------------------------- | 14 | block/qcow2.h | 15 ++++++++------- |
13 | 1 file changed, 18 insertions(+), 83 deletions(-) | 15 | block/qcow2-cluster.c | 21 ++++++++++++--------- |
16 | block/qcow2-refcount.c | 2 +- | ||
17 | block/qcow2.c | 5 +++-- | ||
18 | 4 files changed, 24 insertions(+), 19 deletions(-) | ||
14 | 19 | ||
15 | diff --git a/block.c b/block.c | 20 | diff --git a/block/qcow2.h b/block/qcow2.h |
16 | index XXXXXXX..XXXXXXX 100644 | 21 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/block.c | 22 | --- a/block/qcow2.h |
18 | +++ b/block.c | 23 | +++ b/block/qcow2.h |
19 | @@ -XXX,XX +XXX,XX @@ static int bdrv_list_refresh_perms(GSList *list, BlockReopenQueue *q, | 24 | @@ -XXX,XX +XXX,XX @@ int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order, |
25 | void *cb_opaque, Error **errp); | ||
26 | int qcow2_shrink_reftable(BlockDriverState *bs); | ||
27 | int64_t qcow2_get_last_cluster(BlockDriverState *bs, int64_t size); | ||
28 | -int qcow2_detect_metadata_preallocation(BlockDriverState *bs); | ||
29 | +int coroutine_fn qcow2_detect_metadata_preallocation(BlockDriverState *bs); | ||
30 | |||
31 | /* qcow2-cluster.c functions */ | ||
32 | int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size, | ||
33 | @@ -XXX,XX +XXX,XX @@ int qcow2_encrypt_sectors(BDRVQcow2State *s, int64_t sector_num, | ||
34 | int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset, | ||
35 | unsigned int *bytes, uint64_t *host_offset, | ||
36 | QCow2SubclusterType *subcluster_type); | ||
37 | -int qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset, | ||
38 | - unsigned int *bytes, uint64_t *host_offset, | ||
39 | - QCowL2Meta **m); | ||
40 | +int coroutine_fn qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset, | ||
41 | + unsigned int *bytes, | ||
42 | + uint64_t *host_offset, QCowL2Meta **m); | ||
43 | int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, | ||
44 | uint64_t offset, | ||
45 | int compressed_size, | ||
46 | @@ -XXX,XX +XXX,XX @@ int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, | ||
47 | void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry, | ||
48 | uint64_t *coffset, int *csize); | ||
49 | |||
50 | -int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m); | ||
51 | +int coroutine_fn qcow2_alloc_cluster_link_l2(BlockDriverState *bs, | ||
52 | + QCowL2Meta *m); | ||
53 | void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m); | ||
54 | int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset, | ||
55 | uint64_t bytes, enum qcow2_discard_type type, | ||
56 | bool full_discard); | ||
57 | -int qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset, | ||
58 | - uint64_t bytes, int flags); | ||
59 | +int coroutine_fn qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset, | ||
60 | + uint64_t bytes, int flags); | ||
61 | |||
62 | int qcow2_expand_zero_clusters(BlockDriverState *bs, | ||
63 | BlockDriverAmendStatusCB *status_cb, | ||
64 | diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c | ||
65 | index XXXXXXX..XXXXXXX 100644 | ||
66 | --- a/block/qcow2-cluster.c | ||
67 | +++ b/block/qcow2-cluster.c | ||
68 | @@ -XXX,XX +XXX,XX @@ int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, | ||
20 | return 0; | 69 | return 0; |
21 | } | 70 | } |
22 | 71 | ||
23 | -static void bdrv_node_set_perm(BlockDriverState *bs) | 72 | -static int perform_cow(BlockDriverState *bs, QCowL2Meta *m) |
24 | -{ | 73 | +static int coroutine_fn perform_cow(BlockDriverState *bs, QCowL2Meta *m) |
25 | - BlockDriver *drv = bs->drv; | ||
26 | - BdrvChild *c; | ||
27 | - | ||
28 | - if (!drv) { | ||
29 | - return; | ||
30 | - } | ||
31 | - | ||
32 | - bdrv_drv_set_perm_commit(bs); | ||
33 | - | ||
34 | - /* Drivers that never have children can omit .bdrv_child_perm() */ | ||
35 | - if (!drv->bdrv_child_perm) { | ||
36 | - assert(QLIST_EMPTY(&bs->children)); | ||
37 | - return; | ||
38 | - } | ||
39 | - | ||
40 | - /* Update all children */ | ||
41 | - QLIST_FOREACH(c, &bs->children, next) { | ||
42 | - bdrv_child_set_perm_commit(c); | ||
43 | - } | ||
44 | -} | ||
45 | - | ||
46 | -static void bdrv_list_set_perm(GSList *list) | ||
47 | -{ | ||
48 | - for ( ; list; list = list->next) { | ||
49 | - bdrv_node_set_perm((BlockDriverState *)list->data); | ||
50 | - } | ||
51 | -} | ||
52 | - | ||
53 | -static void bdrv_set_perm(BlockDriverState *bs) | ||
54 | -{ | ||
55 | - g_autoptr(GSList) list = bdrv_topological_dfs(NULL, NULL, bs); | ||
56 | - return bdrv_list_set_perm(list); | ||
57 | -} | ||
58 | - | ||
59 | void bdrv_get_cumulative_perm(BlockDriverState *bs, uint64_t *perm, | ||
60 | uint64_t *shared_perm) | ||
61 | { | 74 | { |
62 | @@ -XXX,XX +XXX,XX @@ static void bdrv_replace_child_noperm(BdrvChild *child, | 75 | BDRVQcow2State *s = bs->opaque; |
63 | } | 76 | Qcow2COWRegion *start = &m->cow_start; |
77 | @@ -XXX,XX +XXX,XX @@ fail: | ||
78 | return ret; | ||
64 | } | 79 | } |
65 | 80 | ||
66 | -/* | 81 | -int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m) |
67 | - * Updates @child to change its reference to point to @new_bs, including | 82 | +int coroutine_fn qcow2_alloc_cluster_link_l2(BlockDriverState *bs, |
68 | - * checking and applying the necessary permission updates both to the old node | 83 | + QCowL2Meta *m) |
69 | - * and to @new_bs. | ||
70 | - * | ||
71 | - * NULL is passed as @new_bs for removing the reference before freeing @child. | ||
72 | - * | ||
73 | - * If @new_bs is not NULL, bdrv_check_perm() must be called beforehand, as this | ||
74 | - * function uses bdrv_set_perm() to update the permissions according to the new | ||
75 | - * reference that @new_bs gets. | ||
76 | - * | ||
77 | - * Callers must ensure that child->frozen is false. | ||
78 | - */ | ||
79 | -static void bdrv_replace_child(BdrvChild *child, BlockDriverState *new_bs) | ||
80 | -{ | ||
81 | - BlockDriverState *old_bs = child->bs; | ||
82 | - | ||
83 | - /* Asserts that child->frozen == false */ | ||
84 | - bdrv_replace_child_noperm(child, new_bs); | ||
85 | - | ||
86 | - /* | ||
87 | - * Start with the new node's permissions. If @new_bs is a (direct | ||
88 | - * or indirect) child of @old_bs, we must complete the permission | ||
89 | - * update on @new_bs before we loosen the restrictions on @old_bs. | ||
90 | - * Otherwise, bdrv_check_perm() on @old_bs would re-initiate | ||
91 | - * updating the permissions of @new_bs, and thus not purely loosen | ||
92 | - * restrictions. | ||
93 | - */ | ||
94 | - if (new_bs) { | ||
95 | - bdrv_set_perm(new_bs); | ||
96 | - } | ||
97 | - | ||
98 | - if (old_bs) { | ||
99 | - /* | ||
100 | - * Update permissions for old node. We're just taking a parent away, so | ||
101 | - * we're loosening restrictions. Errors of permission update are not | ||
102 | - * fatal in this case, ignore them. | ||
103 | - */ | ||
104 | - bdrv_refresh_perms(old_bs, NULL); | ||
105 | - | ||
106 | - /* When the parent requiring a non-default AioContext is removed, the | ||
107 | - * node moves back to the main AioContext */ | ||
108 | - bdrv_try_set_aio_context(old_bs, qemu_get_aio_context(), NULL); | ||
109 | - } | ||
110 | -} | ||
111 | - | ||
112 | static void bdrv_child_free(void *opaque) | ||
113 | { | 84 | { |
114 | BdrvChild *c = opaque; | 85 | BDRVQcow2State *s = bs->opaque; |
115 | @@ -XXX,XX +XXX,XX @@ static int bdrv_attach_child_noperm(BlockDriverState *parent_bs, | 86 | int i, j = 0, l2_index, ret; |
116 | 87 | @@ -XXX,XX +XXX,XX @@ static int count_single_write_clusters(BlockDriverState *bs, int nb_clusters, | |
117 | static void bdrv_detach_child(BdrvChild *child) | 88 | * information on cluster allocation may be invalid now. The caller |
89 | * must start over anyway, so consider *cur_bytes undefined. | ||
90 | */ | ||
91 | -static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset, | ||
92 | - uint64_t *cur_bytes, QCowL2Meta **m) | ||
93 | +static int coroutine_fn handle_dependencies(BlockDriverState *bs, | ||
94 | + uint64_t guest_offset, | ||
95 | + uint64_t *cur_bytes, QCowL2Meta **m) | ||
118 | { | 96 | { |
119 | - bdrv_replace_child(child, NULL); | 97 | BDRVQcow2State *s = bs->opaque; |
120 | + BlockDriverState *old_bs = child->bs; | 98 | QCowL2Meta *old_alloc; |
121 | + | 99 | @@ -XXX,XX +XXX,XX @@ out: |
122 | + bdrv_replace_child_noperm(child, NULL); | 100 | * |
123 | bdrv_remove_empty_child(child); | 101 | * Return 0 on success and -errno in error cases |
124 | + | 102 | */ |
125 | + if (old_bs) { | 103 | -int qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset, |
126 | + /* | 104 | - unsigned int *bytes, uint64_t *host_offset, |
127 | + * Update permissions for old node. We're just taking a parent away, so | 105 | - QCowL2Meta **m) |
128 | + * we're loosening restrictions. Errors of permission update are not | 106 | +int coroutine_fn qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset, |
129 | + * fatal in this case, ignore them. | 107 | + unsigned int *bytes, |
130 | + */ | 108 | + uint64_t *host_offset, |
131 | + bdrv_refresh_perms(old_bs, NULL); | 109 | + QCowL2Meta **m) |
132 | + | 110 | { |
133 | + /* | 111 | BDRVQcow2State *s = bs->opaque; |
134 | + * When the parent requiring a non-default AioContext is removed, the | 112 | uint64_t start, remaining; |
135 | + * node moves back to the main AioContext | 113 | @@ -XXX,XX +XXX,XX @@ out: |
136 | + */ | 114 | return ret; |
137 | + bdrv_try_set_aio_context(old_bs, qemu_get_aio_context(), NULL); | ||
138 | + } | ||
139 | } | 115 | } |
140 | 116 | ||
141 | /* | 117 | -int qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset, |
118 | - uint64_t bytes, int flags) | ||
119 | +int coroutine_fn qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset, | ||
120 | + uint64_t bytes, int flags) | ||
121 | { | ||
122 | BDRVQcow2State *s = bs->opaque; | ||
123 | uint64_t end_offset = offset + bytes; | ||
124 | diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c | ||
125 | index XXXXXXX..XXXXXXX 100644 | ||
126 | --- a/block/qcow2-refcount.c | ||
127 | +++ b/block/qcow2-refcount.c | ||
128 | @@ -XXX,XX +XXX,XX @@ int64_t qcow2_get_last_cluster(BlockDriverState *bs, int64_t size) | ||
129 | return -EIO; | ||
130 | } | ||
131 | |||
132 | -int qcow2_detect_metadata_preallocation(BlockDriverState *bs) | ||
133 | +int coroutine_fn qcow2_detect_metadata_preallocation(BlockDriverState *bs) | ||
134 | { | ||
135 | BDRVQcow2State *s = bs->opaque; | ||
136 | int64_t i, end_cluster, cluster_count = 0, threshold; | ||
137 | diff --git a/block/qcow2.c b/block/qcow2.c | ||
138 | index XXXXXXX..XXXXXXX 100644 | ||
139 | --- a/block/qcow2.c | ||
140 | +++ b/block/qcow2.c | ||
141 | @@ -XXX,XX +XXX,XX @@ static bool merge_cow(uint64_t offset, unsigned bytes, | ||
142 | * Return 1 if the COW regions read as zeroes, 0 if not, < 0 on error. | ||
143 | * Note that returning 0 does not guarantee non-zero data. | ||
144 | */ | ||
145 | -static int is_zero_cow(BlockDriverState *bs, QCowL2Meta *m) | ||
146 | +static int coroutine_fn is_zero_cow(BlockDriverState *bs, QCowL2Meta *m) | ||
147 | { | ||
148 | /* | ||
149 | * This check is designed for optimization shortcut so it must be | ||
150 | @@ -XXX,XX +XXX,XX @@ static int is_zero_cow(BlockDriverState *bs, QCowL2Meta *m) | ||
151 | m->cow_end.nb_bytes); | ||
152 | } | ||
153 | |||
154 | -static int handle_alloc_space(BlockDriverState *bs, QCowL2Meta *l2meta) | ||
155 | +static int coroutine_fn handle_alloc_space(BlockDriverState *bs, | ||
156 | + QCowL2Meta *l2meta) | ||
157 | { | ||
158 | BDRVQcow2State *s = bs->opaque; | ||
159 | QCowL2Meta *m; | ||
142 | -- | 160 | -- |
143 | 2.30.2 | 161 | 2.37.3 |
144 | |||
145 | diff view generated by jsdifflib |
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | We don't need this workaround anymore: bdrv_append is already smart | 3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call |
4 | enough and we can use new bdrv_drop_filter(). | 4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to |
5 | functions where this holds. | ||
5 | 6 | ||
6 | This commit efficiently reverts also recent 705dde27c6c53b73, which | 7 | Reviewed-by: Alberto Faria <afaria@redhat.com> |
7 | checked .active on io path. Still it said that the problem should be | 8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
8 | theoretical. And the logic of filter removement is changed anyway. | 9 | Message-Id: <20220922084924.201610-16-pbonzini@redhat.com> |
9 | 10 | [kwolf: Fixed up coding style] | |
10 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
12 | Message-Id: <20210428151804.439460-25-vsementsov@virtuozzo.com> | ||
13 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
14 | --- | 13 | --- |
15 | block/backup-top.c | 47 +------------------------------------- | 14 | block/copy-before-write.c | 9 +++++---- |
16 | tests/qemu-iotests/283.out | 2 +- | 15 | 1 file changed, 5 insertions(+), 4 deletions(-) |
17 | 2 files changed, 2 insertions(+), 47 deletions(-) | ||
18 | 16 | ||
19 | diff --git a/block/backup-top.c b/block/backup-top.c | 17 | diff --git a/block/copy-before-write.c b/block/copy-before-write.c |
20 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/block/backup-top.c | 19 | --- a/block/copy-before-write.c |
22 | +++ b/block/backup-top.c | 20 | +++ b/block/copy-before-write.c |
23 | @@ -XXX,XX +XXX,XX @@ | 21 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn cbw_co_flush(BlockDriverState *bs) |
24 | typedef struct BDRVBackupTopState { | 22 | * It's guaranteed that guest writes will not interact in the region until |
25 | BlockCopyState *bcs; | 23 | * cbw_snapshot_read_unlock() called. |
26 | BdrvChild *target; | 24 | */ |
27 | - bool active; | 25 | -static BlockReq *cbw_snapshot_read_lock(BlockDriverState *bs, |
28 | int64_t cluster_size; | 26 | - int64_t offset, int64_t bytes, |
29 | } BDRVBackupTopState; | 27 | - int64_t *pnum, BdrvChild **file) |
30 | 28 | +static coroutine_fn BlockReq * | |
31 | @@ -XXX,XX +XXX,XX @@ static coroutine_fn int backup_top_co_preadv( | 29 | +cbw_snapshot_read_lock(BlockDriverState *bs, int64_t offset, int64_t bytes, |
32 | BlockDriverState *bs, uint64_t offset, uint64_t bytes, | 30 | + int64_t *pnum, BdrvChild **file) |
33 | QEMUIOVector *qiov, int flags) | ||
34 | { | 31 | { |
35 | - BDRVBackupTopState *s = bs->opaque; | 32 | BDRVCopyBeforeWriteState *s = bs->opaque; |
36 | - | 33 | BlockReq *req = g_new(BlockReq, 1); |
37 | - if (!s->active) { | 34 | @@ -XXX,XX +XXX,XX @@ static BlockReq *cbw_snapshot_read_lock(BlockDriverState *bs, |
38 | - return -EIO; | 35 | return req; |
39 | - } | ||
40 | - | ||
41 | return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags); | ||
42 | } | 36 | } |
43 | 37 | ||
44 | @@ -XXX,XX +XXX,XX @@ static coroutine_fn int backup_top_cbw(BlockDriverState *bs, uint64_t offset, | 38 | -static void cbw_snapshot_read_unlock(BlockDriverState *bs, BlockReq *req) |
45 | BDRVBackupTopState *s = bs->opaque; | 39 | +static coroutine_fn void |
46 | uint64_t off, end; | 40 | +cbw_snapshot_read_unlock(BlockDriverState *bs, BlockReq *req) |
47 | |||
48 | - if (!s->active) { | ||
49 | - return -EIO; | ||
50 | - } | ||
51 | - | ||
52 | if (flags & BDRV_REQ_WRITE_UNCHANGED) { | ||
53 | return 0; | ||
54 | } | ||
55 | @@ -XXX,XX +XXX,XX @@ static void backup_top_child_perm(BlockDriverState *bs, BdrvChild *c, | ||
56 | uint64_t perm, uint64_t shared, | ||
57 | uint64_t *nperm, uint64_t *nshared) | ||
58 | { | 41 | { |
59 | - BDRVBackupTopState *s = bs->opaque; | 42 | BDRVCopyBeforeWriteState *s = bs->opaque; |
60 | - | ||
61 | - if (!s->active) { | ||
62 | - /* | ||
63 | - * The filter node may be in process of bdrv_append(), which firstly do | ||
64 | - * bdrv_set_backing_hd() and then bdrv_replace_node(). This means that | ||
65 | - * we can't unshare BLK_PERM_WRITE during bdrv_append() operation. So, | ||
66 | - * let's require nothing during bdrv_append() and refresh permissions | ||
67 | - * after it (see bdrv_backup_top_append()). | ||
68 | - */ | ||
69 | - *nperm = 0; | ||
70 | - *nshared = BLK_PERM_ALL; | ||
71 | - return; | ||
72 | - } | ||
73 | - | ||
74 | if (!(role & BDRV_CHILD_FILTERED)) { | ||
75 | /* | ||
76 | * Target child | ||
77 | @@ -XXX,XX +XXX,XX @@ BlockDriverState *bdrv_backup_top_append(BlockDriverState *source, | ||
78 | } | ||
79 | appended = true; | ||
80 | |||
81 | - /* | ||
82 | - * bdrv_append() finished successfully, now we can require permissions | ||
83 | - * we want. | ||
84 | - */ | ||
85 | - state->active = true; | ||
86 | - ret = bdrv_child_refresh_perms(top, top->backing, errp); | ||
87 | - if (ret < 0) { | ||
88 | - error_prepend(errp, "Cannot set permissions for backup-top filter: "); | ||
89 | - goto fail; | ||
90 | - } | ||
91 | - | ||
92 | state->cluster_size = cluster_size; | ||
93 | state->bcs = block_copy_state_new(top->backing, state->target, | ||
94 | cluster_size, perf->use_copy_range, | ||
95 | @@ -XXX,XX +XXX,XX @@ BlockDriverState *bdrv_backup_top_append(BlockDriverState *source, | ||
96 | |||
97 | fail: | ||
98 | if (appended) { | ||
99 | - state->active = false; | ||
100 | bdrv_backup_top_drop(top); | ||
101 | } else { | ||
102 | bdrv_unref(top); | ||
103 | @@ -XXX,XX +XXX,XX @@ void bdrv_backup_top_drop(BlockDriverState *bs) | ||
104 | { | ||
105 | BDRVBackupTopState *s = bs->opaque; | ||
106 | |||
107 | - bdrv_drained_begin(bs); | ||
108 | + bdrv_drop_filter(bs, &error_abort); | ||
109 | |||
110 | block_copy_state_free(s->bcs); | ||
111 | |||
112 | - s->active = false; | ||
113 | - bdrv_child_refresh_perms(bs, bs->backing, &error_abort); | ||
114 | - bdrv_replace_node(bs, bs->backing->bs, &error_abort); | ||
115 | - bdrv_set_backing_hd(bs, NULL, &error_abort); | ||
116 | - | ||
117 | - bdrv_drained_end(bs); | ||
118 | - | ||
119 | bdrv_unref(bs); | ||
120 | } | ||
121 | diff --git a/tests/qemu-iotests/283.out b/tests/qemu-iotests/283.out | ||
122 | index XXXXXXX..XXXXXXX 100644 | ||
123 | --- a/tests/qemu-iotests/283.out | ||
124 | +++ b/tests/qemu-iotests/283.out | ||
125 | @@ -XXX,XX +XXX,XX @@ | ||
126 | {"execute": "blockdev-add", "arguments": {"driver": "blkdebug", "image": "base", "node-name": "other", "take-child-perms": ["write"]}} | ||
127 | {"return": {}} | ||
128 | {"execute": "blockdev-backup", "arguments": {"device": "source", "sync": "full", "target": "target"}} | ||
129 | -{"error": {"class": "GenericError", "desc": "Cannot set permissions for backup-top filter: Conflicts with use by source as 'image', which does not allow 'write' on base"}} | ||
130 | +{"error": {"class": "GenericError", "desc": "Cannot append backup-top filter: Conflicts with use by source as 'image', which does not allow 'write' on base"}} | ||
131 | |||
132 | === backup-top should be gone after job-finalize === | ||
133 | 43 | ||
134 | -- | 44 | -- |
135 | 2.30.2 | 45 | 2.37.3 |
136 | |||
137 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call | ||
4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to | ||
5 | functions where this holds. | ||
6 | |||
7 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
9 | Message-Id: <20220922084924.201610-17-pbonzini@redhat.com> | ||
10 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
12 | --- | ||
13 | block/curl.c | 2 +- | ||
14 | 1 file changed, 1 insertion(+), 1 deletion(-) | ||
15 | |||
16 | diff --git a/block/curl.c b/block/curl.c | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/block/curl.c | ||
19 | +++ b/block/curl.c | ||
20 | @@ -XXX,XX +XXX,XX @@ out_noclean: | ||
21 | return -EINVAL; | ||
22 | } | ||
23 | |||
24 | -static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb) | ||
25 | +static void coroutine_fn curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb) | ||
26 | { | ||
27 | CURLState *state; | ||
28 | int running; | ||
29 | -- | ||
30 | 2.37.3 | diff view generated by jsdifflib |
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | The old interfaces have been dropped and nobody directly calls | 3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call |
4 | bdrv_child_set_perm_abort() and bdrv_child_set_perm_commit(), so we can | 4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to |
5 | use a dedicated state structure for the action and stop reusing the | 5 | functions where this holds. |
6 | BdrvChild structure. Also, drop "_safe" suffix which is redundant now. | ||
7 | 6 | ||
8 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 7 | Reviewed-by: Alberto Faria <afaria@redhat.com> |
8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
9 | Message-Id: <20220922084924.201610-18-pbonzini@redhat.com> | ||
9 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 10 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
10 | Message-Id: <20210428151804.439460-35-vsementsov@virtuozzo.com> | ||
11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
12 | --- | 12 | --- |
13 | include/block/block_int.h | 5 ---- | 13 | block/qed.c | 4 ++-- |
14 | block.c | 63 ++++++++++++++------------------------- | 14 | 1 file changed, 2 insertions(+), 2 deletions(-) |
15 | 2 files changed, 22 insertions(+), 46 deletions(-) | ||
16 | 15 | ||
17 | diff --git a/include/block/block_int.h b/include/block/block_int.h | 16 | diff --git a/block/qed.c b/block/qed.c |
18 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/include/block/block_int.h | 18 | --- a/block/qed.c |
20 | +++ b/include/block/block_int.h | 19 | +++ b/block/qed.c |
21 | @@ -XXX,XX +XXX,XX @@ struct BdrvChild { | 20 | @@ -XXX,XX +XXX,XX @@ static CachedL2Table *qed_new_l2_table(BDRVQEDState *s) |
22 | */ | 21 | return l2_table; |
23 | uint64_t shared_perm; | ||
24 | |||
25 | - /* backup of permissions during permission update procedure */ | ||
26 | - bool has_backup_perm; | ||
27 | - uint64_t backup_perm; | ||
28 | - uint64_t backup_shared_perm; | ||
29 | - | ||
30 | /* | ||
31 | * This link is frozen: the child can neither be replaced nor | ||
32 | * detached from the parent. | ||
33 | diff --git a/block.c b/block.c | ||
34 | index XXXXXXX..XXXXXXX 100644 | ||
35 | --- a/block.c | ||
36 | +++ b/block.c | ||
37 | @@ -XXX,XX +XXX,XX @@ static GSList *bdrv_topological_dfs(GSList *list, GHashTable *found, | ||
38 | return g_slist_prepend(list, bs); | ||
39 | } | 22 | } |
40 | 23 | ||
41 | -static void bdrv_child_set_perm_commit(void *opaque) | 24 | -static bool qed_plug_allocating_write_reqs(BDRVQEDState *s) |
42 | -{ | 25 | +static bool coroutine_fn qed_plug_allocating_write_reqs(BDRVQEDState *s) |
43 | - BdrvChild *c = opaque; | ||
44 | - | ||
45 | - c->has_backup_perm = false; | ||
46 | -} | ||
47 | +typedef struct BdrvChildSetPermState { | ||
48 | + BdrvChild *child; | ||
49 | + uint64_t old_perm; | ||
50 | + uint64_t old_shared_perm; | ||
51 | +} BdrvChildSetPermState; | ||
52 | |||
53 | static void bdrv_child_set_perm_abort(void *opaque) | ||
54 | { | 26 | { |
55 | - BdrvChild *c = opaque; | 27 | qemu_co_mutex_lock(&s->table_lock); |
56 | - /* | 28 | |
57 | - * We may have child->has_backup_perm unset at this point, as in case of | 29 | @@ -XXX,XX +XXX,XX @@ static bool qed_plug_allocating_write_reqs(BDRVQEDState *s) |
58 | - * _check_ stage of permission update failure we may _check_ not the whole | 30 | return true; |
59 | - * subtree. Still, _abort_ is called on the whole subtree anyway. | ||
60 | - */ | ||
61 | - if (c->has_backup_perm) { | ||
62 | - c->perm = c->backup_perm; | ||
63 | - c->shared_perm = c->backup_shared_perm; | ||
64 | - c->has_backup_perm = false; | ||
65 | - } | ||
66 | + BdrvChildSetPermState *s = opaque; | ||
67 | + | ||
68 | + s->child->perm = s->old_perm; | ||
69 | + s->child->shared_perm = s->old_shared_perm; | ||
70 | } | 31 | } |
71 | 32 | ||
72 | static TransactionActionDrv bdrv_child_set_pem_drv = { | 33 | -static void qed_unplug_allocating_write_reqs(BDRVQEDState *s) |
73 | .abort = bdrv_child_set_perm_abort, | 34 | +static void coroutine_fn qed_unplug_allocating_write_reqs(BDRVQEDState *s) |
74 | - .commit = bdrv_child_set_perm_commit, | ||
75 | + .clean = g_free, | ||
76 | }; | ||
77 | |||
78 | -/* | ||
79 | - * With tran=NULL needs to be followed by direct call to either | ||
80 | - * bdrv_child_set_perm_commit() or bdrv_child_set_perm_abort(). | ||
81 | - * | ||
82 | - * With non-NULL tran needs to be followed by tran_abort() or tran_commit() | ||
83 | - * instead. | ||
84 | - */ | ||
85 | -static void bdrv_child_set_perm_safe(BdrvChild *c, uint64_t perm, | ||
86 | - uint64_t shared, Transaction *tran) | ||
87 | +static void bdrv_child_set_perm(BdrvChild *c, uint64_t perm, | ||
88 | + uint64_t shared, Transaction *tran) | ||
89 | { | 35 | { |
90 | - if (!c->has_backup_perm) { | 36 | qemu_co_mutex_lock(&s->table_lock); |
91 | - c->has_backup_perm = true; | 37 | assert(s->allocating_write_reqs_plugged); |
92 | - c->backup_perm = c->perm; | ||
93 | - c->backup_shared_perm = c->shared_perm; | ||
94 | - } | ||
95 | - /* | ||
96 | - * Note: it's OK if c->has_backup_perm was already set, as we can find the | ||
97 | - * same c twice during check_perm procedure | ||
98 | - */ | ||
99 | + BdrvChildSetPermState *s = g_new(BdrvChildSetPermState, 1); | ||
100 | + | ||
101 | + *s = (BdrvChildSetPermState) { | ||
102 | + .child = c, | ||
103 | + .old_perm = c->perm, | ||
104 | + .old_shared_perm = c->shared_perm, | ||
105 | + }; | ||
106 | |||
107 | c->perm = perm; | ||
108 | c->shared_perm = shared; | ||
109 | |||
110 | - if (tran) { | ||
111 | - tran_add(tran, &bdrv_child_set_pem_drv, c); | ||
112 | - } | ||
113 | + tran_add(tran, &bdrv_child_set_pem_drv, s); | ||
114 | } | ||
115 | |||
116 | static void bdrv_drv_set_perm_commit(void *opaque) | ||
117 | @@ -XXX,XX +XXX,XX @@ static int bdrv_node_check_perm(BlockDriverState *bs, BlockReopenQueue *q, | ||
118 | bdrv_child_perm(bs, c->bs, c, c->role, q, | ||
119 | cumulative_perms, cumulative_shared_perms, | ||
120 | &cur_perm, &cur_shared); | ||
121 | - bdrv_child_set_perm_safe(c, cur_perm, cur_shared, tran); | ||
122 | + bdrv_child_set_perm(c, cur_perm, cur_shared, tran); | ||
123 | } | ||
124 | |||
125 | return 0; | ||
126 | @@ -XXX,XX +XXX,XX @@ int bdrv_child_try_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared, | ||
127 | Transaction *tran = tran_new(); | ||
128 | int ret; | ||
129 | |||
130 | - bdrv_child_set_perm_safe(c, perm, shared, tran); | ||
131 | + bdrv_child_set_perm(c, perm, shared, tran); | ||
132 | |||
133 | ret = bdrv_refresh_perms(c->bs, &local_err); | ||
134 | |||
135 | -- | 38 | -- |
136 | 2.30.2 | 39 | 2.37.3 |
137 | |||
138 | diff view generated by jsdifflib |
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Rewrite bdrv_check_perm(), bdrv_abort_perm_update() and bdrv_set_perm() | 3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call |
4 | to update nodes in topological sort order instead of simple DFS. With | 4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to |
5 | topologically sorted nodes, we update a node only when all its parents | 5 | functions where this holds. |
6 | already updated. With DFS it's not so. | ||
7 | 6 | ||
8 | Consider the following example: | 7 | Reviewed-by: Alberto Faria <afaria@redhat.com> |
9 | 8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | |
10 | A -+ | 9 | Message-Id: <20220922084924.201610-19-pbonzini@redhat.com> |
11 | | | | 10 | [kwolf: Fixed up coding style] |
12 | | v | ||
13 | | B | ||
14 | | | | ||
15 | v | | ||
16 | C<-+ | ||
17 | |||
18 | A is parent for B and C, B is parent for C. | ||
19 | |||
20 | Obviously, to update permissions, we should go in order A B C, so, when | ||
21 | we update C, all parent permissions already updated. But with current | ||
22 | approach (simple recursion) we can update in sequence A C B C (C is | ||
23 | updated twice). On first update of C, we consider old B permissions, so | ||
24 | doing wrong thing. If it succeed, all is OK, on second C update we will | ||
25 | finish with correct graph. But if the wrong thing failed, we break the | ||
26 | whole process for no reason (it's possible that updated B permission | ||
27 | will be less strict, but we will never check it). | ||
28 | |||
29 | Also new approach gives a way to simultaneously and correctly update | ||
30 | several nodes, we just need to run bdrv_topological_dfs() several times | ||
31 | to add all nodes and their subtrees into one topologically sorted list | ||
32 | (next patch will update bdrv_replace_node() in this manner). | ||
33 | |||
34 | Test test_parallel_perm_update() is now passing, so move it out of | ||
35 | debugging "if". | ||
36 | |||
37 | We also need to support ignore_children in | ||
38 | bdrv_parent_perms_conflict() | ||
39 | |||
40 | For test 283 order of conflicting parents check is changed. | ||
41 | |||
42 | Note also that in bdrv_check_perm() we don't check for parents conflict | ||
43 | at root bs, as we may be in the middle of permission update in | ||
44 | bdrv_reopen_multiple(). bdrv_reopen_multiple() will be updated soon. | ||
45 | |||
46 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
47 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
48 | Message-Id: <20210428151804.439460-14-vsementsov@virtuozzo.com> | ||
49 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
50 | --- | 13 | --- |
51 | block.c | 116 +++++++++++++++++++++++++------ | 14 | block/quorum.c | 36 +++++++++++++++++++----------------- |
52 | tests/unit/test-bdrv-graph-mod.c | 4 +- | 15 | 1 file changed, 19 insertions(+), 17 deletions(-) |
53 | tests/qemu-iotests/283.out | 2 +- | ||
54 | 3 files changed, 99 insertions(+), 23 deletions(-) | ||
55 | 16 | ||
56 | diff --git a/block.c b/block.c | 17 | diff --git a/block/quorum.c b/block/quorum.c |
57 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
58 | --- a/block.c | 19 | --- a/block/quorum.c |
59 | +++ b/block.c | 20 | +++ b/block/quorum.c |
60 | @@ -XXX,XX +XXX,XX @@ static bool bdrv_a_allow_b(BdrvChild *a, BdrvChild *b, Error **errp) | 21 | @@ -XXX,XX +XXX,XX @@ static bool quorum_64bits_compare(QuorumVoteValue *a, QuorumVoteValue *b) |
61 | return false; | 22 | return a->l == b->l; |
62 | } | 23 | } |
63 | 24 | ||
64 | -static bool bdrv_parent_perms_conflict(BlockDriverState *bs, Error **errp) | 25 | -static QuorumAIOCB *quorum_aio_get(BlockDriverState *bs, |
65 | +static bool bdrv_parent_perms_conflict(BlockDriverState *bs, | 26 | - QEMUIOVector *qiov, |
66 | + GSList *ignore_children, | 27 | - uint64_t offset, |
67 | + Error **errp) | 28 | - uint64_t bytes, |
29 | - int flags) | ||
30 | +static QuorumAIOCB *coroutine_fn quorum_aio_get(BlockDriverState *bs, | ||
31 | + QEMUIOVector *qiov, | ||
32 | + uint64_t offset, uint64_t bytes, | ||
33 | + int flags) | ||
68 | { | 34 | { |
69 | BdrvChild *a, *b; | 35 | BDRVQuorumState *s = bs->opaque; |
70 | 36 | QuorumAIOCB *acb = g_new(QuorumAIOCB, 1); | |
71 | @@ -XXX,XX +XXX,XX @@ static bool bdrv_parent_perms_conflict(BlockDriverState *bs, Error **errp) | 37 | @@ -XXX,XX +XXX,XX @@ static void quorum_report_bad_versions(BDRVQuorumState *s, |
72 | * directions. | ||
73 | */ | ||
74 | QLIST_FOREACH(a, &bs->parents, next_parent) { | ||
75 | + if (g_slist_find(ignore_children, a)) { | ||
76 | + continue; | ||
77 | + } | ||
78 | + | ||
79 | QLIST_FOREACH(b, &bs->parents, next_parent) { | ||
80 | - if (a == b) { | ||
81 | + if (a == b || g_slist_find(ignore_children, b)) { | ||
82 | continue; | ||
83 | } | ||
84 | |||
85 | @@ -XXX,XX +XXX,XX @@ static void bdrv_child_perm(BlockDriverState *bs, BlockDriverState *child_bs, | ||
86 | } | 38 | } |
87 | } | 39 | } |
88 | 40 | ||
89 | +/* | 41 | -static void quorum_rewrite_entry(void *opaque) |
90 | + * Adds the whole subtree of @bs (including @bs itself) to the @list (except for | 42 | +static void coroutine_fn quorum_rewrite_entry(void *opaque) |
91 | + * nodes that are already in the @list, of course) so that final list is | ||
92 | + * topologically sorted. Return the result (GSList @list object is updated, so | ||
93 | + * don't use old reference after function call). | ||
94 | + * | ||
95 | + * On function start @list must be already topologically sorted and for any node | ||
96 | + * in the @list the whole subtree of the node must be in the @list as well. The | ||
97 | + * simplest way to satisfy this criteria: use only result of | ||
98 | + * bdrv_topological_dfs() or NULL as @list parameter. | ||
99 | + */ | ||
100 | +static GSList *bdrv_topological_dfs(GSList *list, GHashTable *found, | ||
101 | + BlockDriverState *bs) | ||
102 | +{ | ||
103 | + BdrvChild *child; | ||
104 | + g_autoptr(GHashTable) local_found = NULL; | ||
105 | + | ||
106 | + if (!found) { | ||
107 | + assert(!list); | ||
108 | + found = local_found = g_hash_table_new(NULL, NULL); | ||
109 | + } | ||
110 | + | ||
111 | + if (g_hash_table_contains(found, bs)) { | ||
112 | + return list; | ||
113 | + } | ||
114 | + g_hash_table_add(found, bs); | ||
115 | + | ||
116 | + QLIST_FOREACH(child, &bs->children, next) { | ||
117 | + list = bdrv_topological_dfs(list, found, child->bs); | ||
118 | + } | ||
119 | + | ||
120 | + return g_slist_prepend(list, bs); | ||
121 | +} | ||
122 | + | ||
123 | static void bdrv_child_set_perm_commit(void *opaque) | ||
124 | { | 43 | { |
125 | BdrvChild *c = opaque; | 44 | QuorumCo *co = opaque; |
126 | @@ -XXX,XX +XXX,XX @@ static void bdrv_child_set_perm_safe(BdrvChild *c, uint64_t perm, | 45 | QuorumAIOCB *acb = co->acb; |
127 | * A call to this function must always be followed by a call to bdrv_set_perm() | 46 | @@ -XXX,XX +XXX,XX @@ free_exit: |
128 | * or bdrv_abort_perm_update(). | 47 | quorum_free_vote_list(&acb->votes); |
129 | */ | 48 | } |
130 | -static int bdrv_check_perm(BlockDriverState *bs, BlockReopenQueue *q, | 49 | |
131 | - uint64_t cumulative_perms, | 50 | -static void read_quorum_children_entry(void *opaque) |
132 | - uint64_t cumulative_shared_perms, | 51 | +static void coroutine_fn read_quorum_children_entry(void *opaque) |
133 | - GSList *ignore_children, Error **errp) | ||
134 | +static int bdrv_node_check_perm(BlockDriverState *bs, BlockReopenQueue *q, | ||
135 | + uint64_t cumulative_perms, | ||
136 | + uint64_t cumulative_shared_perms, | ||
137 | + GSList *ignore_children, Error **errp) | ||
138 | { | 52 | { |
139 | BlockDriver *drv = bs->drv; | 53 | QuorumCo *co = opaque; |
140 | BdrvChild *c; | 54 | QuorumAIOCB *acb = co->acb; |
141 | @@ -XXX,XX +XXX,XX @@ static int bdrv_check_perm(BlockDriverState *bs, BlockReopenQueue *q, | 55 | @@ -XXX,XX +XXX,XX @@ static void read_quorum_children_entry(void *opaque) |
142 | /* Check all children */ | ||
143 | QLIST_FOREACH(c, &bs->children, next) { | ||
144 | uint64_t cur_perm, cur_shared; | ||
145 | - GSList *cur_ignore_children; | ||
146 | |||
147 | bdrv_child_perm(bs, c->bs, c, c->role, q, | ||
148 | cumulative_perms, cumulative_shared_perms, | ||
149 | &cur_perm, &cur_shared); | ||
150 | + bdrv_child_set_perm_safe(c, cur_perm, cur_shared, NULL); | ||
151 | + } | ||
152 | + | ||
153 | + return 0; | ||
154 | +} | ||
155 | + | ||
156 | +static int bdrv_check_perm(BlockDriverState *bs, BlockReopenQueue *q, | ||
157 | + uint64_t cumulative_perms, | ||
158 | + uint64_t cumulative_shared_perms, | ||
159 | + GSList *ignore_children, Error **errp) | ||
160 | +{ | ||
161 | + int ret; | ||
162 | + BlockDriverState *root = bs; | ||
163 | + g_autoptr(GSList) list = bdrv_topological_dfs(NULL, NULL, root); | ||
164 | |||
165 | - cur_ignore_children = g_slist_prepend(g_slist_copy(ignore_children), c); | ||
166 | - ret = bdrv_check_update_perm(c->bs, q, cur_perm, cur_shared, | ||
167 | - cur_ignore_children, errp); | ||
168 | - g_slist_free(cur_ignore_children); | ||
169 | + for ( ; list; list = list->next) { | ||
170 | + bs = list->data; | ||
171 | + | ||
172 | + if (bs != root) { | ||
173 | + if (bdrv_parent_perms_conflict(bs, ignore_children, errp)) { | ||
174 | + return -EINVAL; | ||
175 | + } | ||
176 | + | ||
177 | + bdrv_get_cumulative_perm(bs, &cumulative_perms, | ||
178 | + &cumulative_shared_perms); | ||
179 | + } | ||
180 | + | ||
181 | + ret = bdrv_node_check_perm(bs, q, cumulative_perms, | ||
182 | + cumulative_shared_perms, | ||
183 | + ignore_children, errp); | ||
184 | if (ret < 0) { | ||
185 | return ret; | ||
186 | } | ||
187 | - | ||
188 | - bdrv_child_set_perm_safe(c, cur_perm, cur_shared, NULL); | ||
189 | } | ||
190 | |||
191 | return 0; | ||
192 | @@ -XXX,XX +XXX,XX @@ static int bdrv_check_perm(BlockDriverState *bs, BlockReopenQueue *q, | ||
193 | * Notifies drivers that after a previous bdrv_check_perm() call, the | ||
194 | * permission update is not performed and any preparations made for it (e.g. | ||
195 | * taken file locks) need to be undone. | ||
196 | - * | ||
197 | - * This function recursively notifies all child nodes. | ||
198 | */ | ||
199 | -static void bdrv_abort_perm_update(BlockDriverState *bs) | ||
200 | +static void bdrv_node_abort_perm_update(BlockDriverState *bs) | ||
201 | { | ||
202 | BlockDriver *drv = bs->drv; | ||
203 | BdrvChild *c; | ||
204 | @@ -XXX,XX +XXX,XX @@ static void bdrv_abort_perm_update(BlockDriverState *bs) | ||
205 | |||
206 | QLIST_FOREACH(c, &bs->children, next) { | ||
207 | bdrv_child_set_perm_abort(c); | ||
208 | - bdrv_abort_perm_update(c->bs); | ||
209 | } | 56 | } |
210 | } | 57 | } |
211 | 58 | ||
212 | -static void bdrv_set_perm(BlockDriverState *bs) | 59 | -static int read_quorum_children(QuorumAIOCB *acb) |
213 | +static void bdrv_abort_perm_update(BlockDriverState *bs) | 60 | +static int coroutine_fn read_quorum_children(QuorumAIOCB *acb) |
214 | +{ | ||
215 | + g_autoptr(GSList) list = bdrv_topological_dfs(NULL, NULL, bs); | ||
216 | + | ||
217 | + for ( ; list; list = list->next) { | ||
218 | + bdrv_node_abort_perm_update((BlockDriverState *)list->data); | ||
219 | + } | ||
220 | +} | ||
221 | + | ||
222 | +static void bdrv_node_set_perm(BlockDriverState *bs) | ||
223 | { | 61 | { |
224 | uint64_t cumulative_perms, cumulative_shared_perms; | 62 | BDRVQuorumState *s = acb->bs->opaque; |
225 | BlockDriver *drv = bs->drv; | 63 | int i; |
226 | @@ -XXX,XX +XXX,XX @@ static void bdrv_set_perm(BlockDriverState *bs) | 64 | @@ -XXX,XX +XXX,XX @@ static int read_quorum_children(QuorumAIOCB *acb) |
227 | /* Update all children */ | 65 | return acb->vote_ret; |
228 | QLIST_FOREACH(c, &bs->children, next) { | 66 | } |
229 | bdrv_child_set_perm_commit(c); | 67 | |
230 | - bdrv_set_perm(c->bs); | 68 | -static int read_fifo_child(QuorumAIOCB *acb) |
231 | + } | 69 | +static int coroutine_fn read_fifo_child(QuorumAIOCB *acb) |
232 | +} | 70 | { |
233 | + | 71 | BDRVQuorumState *s = acb->bs->opaque; |
234 | +static void bdrv_set_perm(BlockDriverState *bs) | 72 | int n, ret; |
235 | +{ | 73 | @@ -XXX,XX +XXX,XX @@ static int read_fifo_child(QuorumAIOCB *acb) |
236 | + g_autoptr(GSList) list = bdrv_topological_dfs(NULL, NULL, bs); | 74 | return ret; |
237 | + | 75 | } |
238 | + for ( ; list; list = list->next) { | 76 | |
239 | + bdrv_node_set_perm((BlockDriverState *)list->data); | 77 | -static int quorum_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, |
78 | - QEMUIOVector *qiov, BdrvRequestFlags flags) | ||
79 | +static int coroutine_fn quorum_co_preadv(BlockDriverState *bs, | ||
80 | + int64_t offset, int64_t bytes, | ||
81 | + QEMUIOVector *qiov, | ||
82 | + BdrvRequestFlags flags) | ||
83 | { | ||
84 | BDRVQuorumState *s = bs->opaque; | ||
85 | QuorumAIOCB *acb = quorum_aio_get(bs, qiov, offset, bytes, flags); | ||
86 | @@ -XXX,XX +XXX,XX @@ static int quorum_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, | ||
87 | return ret; | ||
88 | } | ||
89 | |||
90 | -static void write_quorum_entry(void *opaque) | ||
91 | +static void coroutine_fn write_quorum_entry(void *opaque) | ||
92 | { | ||
93 | QuorumCo *co = opaque; | ||
94 | QuorumAIOCB *acb = co->acb; | ||
95 | @@ -XXX,XX +XXX,XX @@ static void write_quorum_entry(void *opaque) | ||
240 | } | 96 | } |
241 | } | 97 | } |
242 | 98 | ||
243 | @@ -XXX,XX +XXX,XX @@ static int bdrv_refresh_perms(BlockDriverState *bs, Error **errp) | 99 | -static int quorum_co_pwritev(BlockDriverState *bs, int64_t offset, |
244 | int ret; | 100 | - int64_t bytes, QEMUIOVector *qiov, |
245 | uint64_t perm, shared_perm; | 101 | - BdrvRequestFlags flags) |
246 | 102 | +static int coroutine_fn quorum_co_pwritev(BlockDriverState *bs, int64_t offset, | |
247 | - if (bdrv_parent_perms_conflict(bs, errp)) { | 103 | + int64_t bytes, QEMUIOVector *qiov, |
248 | + if (bdrv_parent_perms_conflict(bs, NULL, errp)) { | 104 | + BdrvRequestFlags flags) |
249 | return -EPERM; | 105 | { |
250 | } | 106 | BDRVQuorumState *s = bs->opaque; |
251 | bdrv_get_cumulative_perm(bs, &perm, &shared_perm); | 107 | QuorumAIOCB *acb = quorum_aio_get(bs, qiov, offset, bytes, flags); |
252 | diff --git a/tests/unit/test-bdrv-graph-mod.c b/tests/unit/test-bdrv-graph-mod.c | 108 | @@ -XXX,XX +XXX,XX @@ static int quorum_co_pwritev(BlockDriverState *bs, int64_t offset, |
253 | index XXXXXXX..XXXXXXX 100644 | 109 | return ret; |
254 | --- a/tests/unit/test-bdrv-graph-mod.c | 110 | } |
255 | +++ b/tests/unit/test-bdrv-graph-mod.c | 111 | |
256 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char *argv[]) | 112 | -static int quorum_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, |
257 | g_test_add_func("/bdrv-graph-mod/update-perm-tree", test_update_perm_tree); | 113 | - int64_t bytes, BdrvRequestFlags flags) |
258 | g_test_add_func("/bdrv-graph-mod/should-update-child", | 114 | +static int coroutine_fn quorum_co_pwrite_zeroes(BlockDriverState *bs, |
259 | test_should_update_child); | 115 | + int64_t offset, int64_t bytes, |
260 | + g_test_add_func("/bdrv-graph-mod/parallel-perm-update", | 116 | + BdrvRequestFlags flags) |
261 | + test_parallel_perm_update); | 117 | |
262 | 118 | { | |
263 | if (debug) { | 119 | return quorum_co_pwritev(bs, offset, bytes, NULL, |
264 | g_test_add_func("/bdrv-graph-mod/parallel-exclusive-write", | ||
265 | test_parallel_exclusive_write); | ||
266 | - g_test_add_func("/bdrv-graph-mod/parallel-perm-update", | ||
267 | - test_parallel_perm_update); | ||
268 | g_test_add_func("/bdrv-graph-mod/append-greedy-filter", | ||
269 | test_append_greedy_filter); | ||
270 | } | ||
271 | diff --git a/tests/qemu-iotests/283.out b/tests/qemu-iotests/283.out | ||
272 | index XXXXXXX..XXXXXXX 100644 | ||
273 | --- a/tests/qemu-iotests/283.out | ||
274 | +++ b/tests/qemu-iotests/283.out | ||
275 | @@ -XXX,XX +XXX,XX @@ | ||
276 | {"execute": "blockdev-add", "arguments": {"driver": "blkdebug", "image": "base", "node-name": "other", "take-child-perms": ["write"]}} | ||
277 | {"return": {}} | ||
278 | {"execute": "blockdev-backup", "arguments": {"device": "source", "sync": "full", "target": "target"}} | ||
279 | -{"error": {"class": "GenericError", "desc": "Cannot set permissions for backup-top filter: Conflicts with use by other as 'image', which uses 'write' on base"}} | ||
280 | +{"error": {"class": "GenericError", "desc": "Cannot set permissions for backup-top filter: Conflicts with use by source as 'image', which does not allow 'write' on base"}} | ||
281 | |||
282 | === backup-top should be gone after job-finalize === | ||
283 | |||
284 | -- | 120 | -- |
285 | 2.30.2 | 121 | 2.37.3 |
286 | |||
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Now, bdrv_node_check_perm() is called only with fresh cumulative | 3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call |
4 | permissions, so it's actually "refresh_perm". | 4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
5 | functions where this holds. | ||
5 | 6 | ||
6 | Move permission calculation to the function. Also, drop unreachable | 7 | Reviewed-by: Alberto Faria <afaria@redhat.com> |
7 | error message and rewrite the remaining one to be more generic (as now | 8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
8 | we don't know which node is added and which was already here). | 9 | Message-Id: <20220922084924.201610-20-pbonzini@redhat.com> |
9 | |||
10 | Add also Virtuozzo copyright, as big work is done at this point. | ||
11 | |||
12 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
13 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 10 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
14 | Message-Id: <20210428151804.439460-37-vsementsov@virtuozzo.com> | ||
15 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
16 | --- | 12 | --- |
17 | block.c | 38 +++++++++++--------------------------- | 13 | block/throttle.c | 2 +- |
18 | tests/qemu-iotests/245 | 2 +- | 14 | 1 file changed, 1 insertion(+), 1 deletion(-) |
19 | 2 files changed, 12 insertions(+), 28 deletions(-) | ||
20 | 15 | ||
21 | diff --git a/block.c b/block.c | 16 | diff --git a/block/throttle.c b/block/throttle.c |
22 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
23 | --- a/block.c | 18 | --- a/block/throttle.c |
24 | +++ b/block.c | 19 | +++ b/block/throttle.c |
25 | @@ -XXX,XX +XXX,XX @@ | 20 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn throttle_co_pwritev_compressed(BlockDriverState *bs, |
26 | * QEMU System Emulator block driver | 21 | BDRV_REQ_WRITE_COMPRESSED); |
27 | * | ||
28 | * Copyright (c) 2003 Fabrice Bellard | ||
29 | + * Copyright (c) 2020 Virtuozzo International GmbH. | ||
30 | * | ||
31 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
32 | * of this software and associated documentation files (the "Software"), to deal | ||
33 | @@ -XXX,XX +XXX,XX @@ static void bdrv_replace_child(BdrvChild *child, BlockDriverState *new_bs, | ||
34 | } | 22 | } |
35 | 23 | ||
36 | /* | 24 | -static int throttle_co_flush(BlockDriverState *bs) |
37 | - * Check whether permissions on this node can be changed in a way that | 25 | +static int coroutine_fn throttle_co_flush(BlockDriverState *bs) |
38 | - * @cumulative_perms and @cumulative_shared_perms are the new cumulative | ||
39 | - * permissions of all its parents. This involves checking whether all necessary | ||
40 | - * permission changes to child nodes can be performed. | ||
41 | - * | ||
42 | - * A call to this function must always be followed by a call to bdrv_set_perm() | ||
43 | - * or bdrv_abort_perm_update(). | ||
44 | + * Refresh permissions in @bs subtree. The function is intended to be called | ||
45 | + * after some graph modification that was done without permission update. | ||
46 | */ | ||
47 | -static int bdrv_node_check_perm(BlockDriverState *bs, BlockReopenQueue *q, | ||
48 | - uint64_t cumulative_perms, | ||
49 | - uint64_t cumulative_shared_perms, | ||
50 | - Transaction *tran, Error **errp) | ||
51 | +static int bdrv_node_refresh_perm(BlockDriverState *bs, BlockReopenQueue *q, | ||
52 | + Transaction *tran, Error **errp) | ||
53 | { | 26 | { |
54 | BlockDriver *drv = bs->drv; | 27 | return bdrv_co_flush(bs->file->bs); |
55 | BdrvChild *c; | 28 | } |
56 | int ret; | ||
57 | + uint64_t cumulative_perms, cumulative_shared_perms; | ||
58 | + | ||
59 | + bdrv_get_cumulative_perm(bs, &cumulative_perms, &cumulative_shared_perms); | ||
60 | |||
61 | /* Write permissions never work with read-only images */ | ||
62 | if ((cumulative_perms & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) && | ||
63 | @@ -XXX,XX +XXX,XX @@ static int bdrv_node_check_perm(BlockDriverState *bs, BlockReopenQueue *q, | ||
64 | if (!bdrv_is_writable_after_reopen(bs, NULL)) { | ||
65 | error_setg(errp, "Block node is read-only"); | ||
66 | } else { | ||
67 | - uint64_t current_perms, current_shared; | ||
68 | - bdrv_get_cumulative_perm(bs, ¤t_perms, ¤t_shared); | ||
69 | - if (current_perms & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) { | ||
70 | - error_setg(errp, "Cannot make block node read-only, there is " | ||
71 | - "a writer on it"); | ||
72 | - } else { | ||
73 | - error_setg(errp, "Cannot make block node read-only and create " | ||
74 | - "a writer on it"); | ||
75 | - } | ||
76 | + error_setg(errp, "Read-only block node '%s' cannot support " | ||
77 | + "read-write users", bdrv_get_node_name(bs)); | ||
78 | } | ||
79 | |||
80 | return -EPERM; | ||
81 | @@ -XXX,XX +XXX,XX @@ static int bdrv_list_refresh_perms(GSList *list, BlockReopenQueue *q, | ||
82 | Transaction *tran, Error **errp) | ||
83 | { | ||
84 | int ret; | ||
85 | - uint64_t cumulative_perms, cumulative_shared_perms; | ||
86 | BlockDriverState *bs; | ||
87 | |||
88 | for ( ; list; list = list->next) { | ||
89 | @@ -XXX,XX +XXX,XX @@ static int bdrv_list_refresh_perms(GSList *list, BlockReopenQueue *q, | ||
90 | return -EINVAL; | ||
91 | } | ||
92 | |||
93 | - bdrv_get_cumulative_perm(bs, &cumulative_perms, | ||
94 | - &cumulative_shared_perms); | ||
95 | - | ||
96 | - ret = bdrv_node_check_perm(bs, q, cumulative_perms, | ||
97 | - cumulative_shared_perms, | ||
98 | - tran, errp); | ||
99 | + ret = bdrv_node_refresh_perm(bs, q, tran, errp); | ||
100 | if (ret < 0) { | ||
101 | return ret; | ||
102 | } | ||
103 | diff --git a/tests/qemu-iotests/245 b/tests/qemu-iotests/245 | ||
104 | index XXXXXXX..XXXXXXX 100755 | ||
105 | --- a/tests/qemu-iotests/245 | ||
106 | +++ b/tests/qemu-iotests/245 | ||
107 | @@ -XXX,XX +XXX,XX @@ class TestBlockdevReopen(iotests.QMPTestCase): | ||
108 | # We can't reopen hd1 to read-only, as block-stream requires it to be | ||
109 | # read-write | ||
110 | self.reopen(opts['backing'], {'read-only': True}, | ||
111 | - "Cannot make block node read-only, there is a writer on it") | ||
112 | + "Read-only block node 'hd1' cannot support read-write users") | ||
113 | |||
114 | # We can't remove hd2 while the stream job is ongoing | ||
115 | opts['backing']['backing'] = None | ||
116 | -- | 29 | -- |
117 | 2.30.2 | 30 | 2.37.3 |
118 | |||
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | bdrv_check_perm_common() has only one caller, so no more sense in | 3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call |
4 | "common". | 4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to |
5 | functions where this holds. | ||
5 | 6 | ||
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 7 | Reviewed-by: Alberto Faria <afaria@redhat.com> |
8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
9 | Message-Id: <20220922084924.201610-21-pbonzini@redhat.com> | ||
10 | [kwolf: Fixed up coding style] | ||
7 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
8 | Message-Id: <20210428151804.439460-33-vsementsov@virtuozzo.com> | ||
9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
10 | --- | 13 | --- |
11 | block.c | 32 +++----------------------------- | 14 | block/vmdk.c | 22 ++++++++++++---------- |
12 | 1 file changed, 3 insertions(+), 29 deletions(-) | 15 | 1 file changed, 12 insertions(+), 10 deletions(-) |
13 | 16 | ||
14 | diff --git a/block.c b/block.c | 17 | diff --git a/block/vmdk.c b/block/vmdk.c |
15 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/block.c | 19 | --- a/block/vmdk.c |
17 | +++ b/block.c | 20 | +++ b/block/vmdk.c |
18 | @@ -XXX,XX +XXX,XX @@ static int bdrv_node_check_perm(BlockDriverState *bs, BlockReopenQueue *q, | 21 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vmdk_co_block_status(BlockDriverState *bs, |
19 | return 0; | 22 | return ret; |
20 | } | 23 | } |
21 | 24 | ||
22 | -/* | 25 | -static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset, |
23 | - * If use_cumulative_perms is true, use cumulative_perms and | 26 | - int64_t offset_in_cluster, QEMUIOVector *qiov, |
24 | - * cumulative_shared_perms for first element of the list. Otherwise just refresh | 27 | - uint64_t qiov_offset, uint64_t n_bytes, |
25 | - * all permissions. | 28 | - uint64_t offset) |
26 | - */ | 29 | +static int coroutine_fn |
27 | -static int bdrv_check_perm_common(GSList *list, BlockReopenQueue *q, | 30 | +vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset, |
28 | - bool use_cumulative_perms, | 31 | + int64_t offset_in_cluster, QEMUIOVector *qiov, |
29 | - uint64_t cumulative_perms, | 32 | + uint64_t qiov_offset, uint64_t n_bytes, |
30 | - uint64_t cumulative_shared_perms, | 33 | + uint64_t offset) |
31 | - Transaction *tran, Error **errp) | ||
32 | +static int bdrv_list_refresh_perms(GSList *list, BlockReopenQueue *q, | ||
33 | + Transaction *tran, Error **errp) | ||
34 | { | 34 | { |
35 | int ret; | 35 | int ret; |
36 | + uint64_t cumulative_perms, cumulative_shared_perms; | 36 | VmdkGrainMarker *data = NULL; |
37 | BlockDriverState *bs; | 37 | @@ -XXX,XX +XXX,XX @@ static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset, |
38 | 38 | return ret; | |
39 | - if (use_cumulative_perms) { | ||
40 | - bs = list->data; | ||
41 | - | ||
42 | - ret = bdrv_node_check_perm(bs, q, cumulative_perms, | ||
43 | - cumulative_shared_perms, | ||
44 | - tran, errp); | ||
45 | - if (ret < 0) { | ||
46 | - return ret; | ||
47 | - } | ||
48 | - | ||
49 | - list = list->next; | ||
50 | - } | ||
51 | - | ||
52 | for ( ; list; list = list->next) { | ||
53 | bs = list->data; | ||
54 | |||
55 | @@ -XXX,XX +XXX,XX @@ static int bdrv_check_perm_common(GSList *list, BlockReopenQueue *q, | ||
56 | return 0; | ||
57 | } | 39 | } |
58 | 40 | ||
59 | -static int bdrv_list_refresh_perms(GSList *list, BlockReopenQueue *q, | 41 | -static int vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset, |
60 | - Transaction *tran, Error **errp) | 42 | - int64_t offset_in_cluster, QEMUIOVector *qiov, |
61 | -{ | 43 | - int bytes) |
62 | - return bdrv_check_perm_common(list, q, false, 0, 0, tran, errp); | 44 | +static int coroutine_fn |
63 | -} | 45 | +vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset, |
64 | - | 46 | + int64_t offset_in_cluster, QEMUIOVector *qiov, |
65 | static void bdrv_node_set_perm(BlockDriverState *bs) | 47 | + int bytes) |
66 | { | 48 | { |
67 | BlockDriver *drv = bs->drv; | 49 | int ret; |
50 | int cluster_bytes, buf_bytes; | ||
51 | @@ -XXX,XX +XXX,XX @@ fail: | ||
52 | * | ||
53 | * Returns: error code with 0 for success. | ||
54 | */ | ||
55 | -static int vmdk_pwritev(BlockDriverState *bs, uint64_t offset, | ||
56 | - uint64_t bytes, QEMUIOVector *qiov, | ||
57 | - bool zeroed, bool zero_dry_run) | ||
58 | +static int coroutine_fn vmdk_pwritev(BlockDriverState *bs, uint64_t offset, | ||
59 | + uint64_t bytes, QEMUIOVector *qiov, | ||
60 | + bool zeroed, bool zero_dry_run) | ||
61 | { | ||
62 | BDRVVmdkState *s = bs->opaque; | ||
63 | VmdkExtent *extent = NULL; | ||
68 | -- | 64 | -- |
69 | 2.30.2 | 65 | 2.37.3 |
70 | |||
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | To be used in a later commit. | 3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call
4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to | ||
5 | functions where this holds. | ||
4 | 6 | ||
5 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 7 | Reviewed-by: Alberto Faria <afaria@redhat.com> |
8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
9 | Message-Id: <20220922084924.201610-22-pbonzini@redhat.com> | ||
6 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 10 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
7 | Message-Id: <20210428151804.439460-27-vsementsov@virtuozzo.com> | ||
8 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
9 | --- | 12 | --- |
10 | block.c | 46 ++++++++++++++++++++++++++++++++++++++++++---- | 13 | include/qemu/job.h | 2 +- |
11 | 1 file changed, 42 insertions(+), 4 deletions(-) | 14 | job.c | 2 +- |
15 | 2 files changed, 2 insertions(+), 2 deletions(-) | ||
12 | 16 | ||
13 | diff --git a/block.c b/block.c | 17 | diff --git a/include/qemu/job.h b/include/qemu/job.h |
14 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
15 | --- a/block.c | 19 | --- a/include/qemu/job.h |
16 | +++ b/block.c | 20 | +++ b/include/qemu/job.h |
17 | @@ -XXX,XX +XXX,XX @@ void bdrv_root_unref_child(BdrvChild *child) | 21 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn job_pause_point(Job *job); |
18 | bdrv_unref(child_bs); | 22 | * |
19 | } | 23 | * Yield the job coroutine. |
20 | 24 | */ | |
21 | +typedef struct BdrvSetInheritsFrom { | 25 | -void job_yield(Job *job); |
22 | + BlockDriverState *bs; | 26 | +void coroutine_fn job_yield(Job *job); |
23 | + BlockDriverState *old_inherits_from; | 27 | |
24 | +} BdrvSetInheritsFrom; | ||
25 | + | ||
26 | +static void bdrv_set_inherits_from_abort(void *opaque) | ||
27 | +{ | ||
28 | + BdrvSetInheritsFrom *s = opaque; | ||
29 | + | ||
30 | + s->bs->inherits_from = s->old_inherits_from; | ||
31 | +} | ||
32 | + | ||
33 | +static TransactionActionDrv bdrv_set_inherits_from_drv = { | ||
34 | + .abort = bdrv_set_inherits_from_abort, | ||
35 | + .clean = g_free, | ||
36 | +}; | ||
37 | + | ||
38 | +/* @tran is allowed to be NULL. In this case no rollback is possible */ | ||
39 | +static void bdrv_set_inherits_from(BlockDriverState *bs, | ||
40 | + BlockDriverState *new_inherits_from, | ||
41 | + Transaction *tran) | ||
42 | +{ | ||
43 | + if (tran) { | ||
44 | + BdrvSetInheritsFrom *s = g_new(BdrvSetInheritsFrom, 1); | ||
45 | + | ||
46 | + *s = (BdrvSetInheritsFrom) { | ||
47 | + .bs = bs, | ||
48 | + .old_inherits_from = bs->inherits_from, | ||
49 | + }; | ||
50 | + | ||
51 | + tran_add(tran, &bdrv_set_inherits_from_drv, s); | ||
52 | + } | ||
53 | + | ||
54 | + bs->inherits_from = new_inherits_from; | ||
55 | +} | ||
56 | + | ||
57 | /** | 28 | /** |
58 | * Clear all inherits_from pointers from children and grandchildren of | 29 | * @job: The job that calls the function. |
59 | * @root that point to @root, where necessary. | 30 | diff --git a/job.c b/job.c |
60 | + * @tran is allowed to be NULL. In this case no rollback is possible | 31 | index XXXXXXX..XXXXXXX 100644 |
61 | */ | 32 | --- a/job.c |
62 | -static void bdrv_unset_inherits_from(BlockDriverState *root, BdrvChild *child) | 33 | +++ b/job.c |
63 | +static void bdrv_unset_inherits_from(BlockDriverState *root, BdrvChild *child, | 34 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn job_pause_point(Job *job) |
64 | + Transaction *tran) | ||
65 | { | ||
66 | BdrvChild *c; | ||
67 | |||
68 | @@ -XXX,XX +XXX,XX @@ static void bdrv_unset_inherits_from(BlockDriverState *root, BdrvChild *child) | ||
69 | } | ||
70 | } | ||
71 | if (c == NULL) { | ||
72 | - child->bs->inherits_from = NULL; | ||
73 | + bdrv_set_inherits_from(child->bs, NULL, tran); | ||
74 | } | ||
75 | } | ||
76 | |||
77 | QLIST_FOREACH(c, &child->bs->children, next) { | ||
78 | - bdrv_unset_inherits_from(root, c); | ||
79 | + bdrv_unset_inherits_from(root, c, tran); | ||
80 | } | 35 | } |
81 | } | 36 | } |
82 | 37 | ||
83 | @@ -XXX,XX +XXX,XX @@ void bdrv_unref_child(BlockDriverState *parent, BdrvChild *child) | 38 | -void job_yield(Job *job) |
84 | return; | 39 | +void coroutine_fn job_yield(Job *job) |
85 | } | 40 | { |
86 | 41 | assert(job->busy); | |
87 | - bdrv_unset_inherits_from(parent, child); | ||
88 | + bdrv_unset_inherits_from(parent, child, NULL); | ||
89 | bdrv_root_unref_child(child); | ||
90 | } | ||
91 | 42 | ||
92 | -- | 43 | -- |
93 | 2.30.2 | 44 | 2.37.3 |
94 | |||
95 | diff view generated by jsdifflib |
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Split out non-recursive parts, and refactor as block graph transaction | 3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call |
4 | action. | 4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to |
5 | functions where this holds. | ||
5 | 6 | ||
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 7 | Reviewed-by: Alberto Faria <afaria@redhat.com> |
8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
9 | Message-Id: <20220922084924.201610-23-pbonzini@redhat.com> | ||
7 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 10 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
8 | Message-Id: <20210428151804.439460-11-vsementsov@virtuozzo.com> | ||
9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
10 | --- | 12 | --- |
11 | block.c | 79 ++++++++++++++++++++++++++++++++++++++++++--------------- | 13 | util/qemu-coroutine-lock.c | 14 +++++++------- |
12 | 1 file changed, 59 insertions(+), 20 deletions(-) | 14 | 1 file changed, 7 insertions(+), 7 deletions(-) |
13 | 15 | ||
14 | diff --git a/block.c b/block.c | 16 | diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c |
15 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/block.c | 18 | --- a/util/qemu-coroutine-lock.c |
17 | +++ b/block.c | 19 | +++ b/util/qemu-coroutine-lock.c |
18 | @@ -XXX,XX +XXX,XX @@ | 20 | @@ -XXX,XX +XXX,XX @@ typedef struct CoWaitRecord { |
19 | #include "qemu/timer.h" | 21 | QSLIST_ENTRY(CoWaitRecord) next; |
20 | #include "qemu/cutils.h" | 22 | } CoWaitRecord; |
21 | #include "qemu/id.h" | 23 | |
22 | +#include "qemu/transactions.h" | 24 | -static void push_waiter(CoMutex *mutex, CoWaitRecord *w) |
23 | #include "block/coroutines.h" | 25 | +static void coroutine_fn push_waiter(CoMutex *mutex, CoWaitRecord *w) |
24 | 26 | { | |
25 | #ifdef CONFIG_BSD | 27 | w->co = qemu_coroutine_self(); |
26 | @@ -XXX,XX +XXX,XX @@ static void bdrv_child_perm(BlockDriverState *bs, BlockDriverState *child_bs, | 28 | QSLIST_INSERT_HEAD_ATOMIC(&mutex->from_push, w, next); |
29 | @@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_init(CoRwlock *lock) | ||
30 | } | ||
31 | |||
32 | /* Releases the internal CoMutex. */ | ||
33 | -static void qemu_co_rwlock_maybe_wake_one(CoRwlock *lock) | ||
34 | +static void coroutine_fn qemu_co_rwlock_maybe_wake_one(CoRwlock *lock) | ||
35 | { | ||
36 | CoRwTicket *tkt = QSIMPLEQ_FIRST(&lock->tickets); | ||
37 | Coroutine *co = NULL; | ||
38 | @@ -XXX,XX +XXX,XX @@ static void qemu_co_rwlock_maybe_wake_one(CoRwlock *lock) | ||
27 | } | 39 | } |
28 | } | 40 | } |
29 | 41 | ||
30 | +static void bdrv_child_set_perm_commit(void *opaque) | 42 | -void qemu_co_rwlock_rdlock(CoRwlock *lock) |
31 | +{ | 43 | +void coroutine_fn qemu_co_rwlock_rdlock(CoRwlock *lock) |
32 | + BdrvChild *c = opaque; | 44 | { |
33 | + | 45 | Coroutine *self = qemu_coroutine_self(); |
34 | + c->has_backup_perm = false; | 46 | |
35 | +} | 47 | @@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_rdlock(CoRwlock *lock) |
36 | + | 48 | self->locks_held++; |
37 | +static void bdrv_child_set_perm_abort(void *opaque) | ||
38 | +{ | ||
39 | + BdrvChild *c = opaque; | ||
40 | + /* | ||
41 | + * We may have child->has_backup_perm unset at this point, as in case of | ||
42 | + * _check_ stage of permission update failure we may _check_ not the whole | ||
43 | + * subtree. Still, _abort_ is called on the whole subtree anyway. | ||
44 | + */ | ||
45 | + if (c->has_backup_perm) { | ||
46 | + c->perm = c->backup_perm; | ||
47 | + c->shared_perm = c->backup_shared_perm; | ||
48 | + c->has_backup_perm = false; | ||
49 | + } | ||
50 | +} | ||
51 | + | ||
52 | +static TransactionActionDrv bdrv_child_set_pem_drv = { | ||
53 | + .abort = bdrv_child_set_perm_abort, | ||
54 | + .commit = bdrv_child_set_perm_commit, | ||
55 | +}; | ||
56 | + | ||
57 | +/* | ||
58 | + * With tran=NULL needs to be followed by direct call to either | ||
59 | + * bdrv_child_set_perm_commit() or bdrv_child_set_perm_abort(). | ||
60 | + * | ||
61 | + * With non-NULL tran needs to be followed by tran_abort() or tran_commit() | ||
62 | + * instead. | ||
63 | + */ | ||
64 | +static void bdrv_child_set_perm_safe(BdrvChild *c, uint64_t perm, | ||
65 | + uint64_t shared, Transaction *tran) | ||
66 | +{ | ||
67 | + if (!c->has_backup_perm) { | ||
68 | + c->has_backup_perm = true; | ||
69 | + c->backup_perm = c->perm; | ||
70 | + c->backup_shared_perm = c->shared_perm; | ||
71 | + } | ||
72 | + /* | ||
73 | + * Note: it's OK if c->has_backup_perm was already set, as we can find the | ||
74 | + * same c twice during check_perm procedure | ||
75 | + */ | ||
76 | + | ||
77 | + c->perm = perm; | ||
78 | + c->shared_perm = shared; | ||
79 | + | ||
80 | + if (tran) { | ||
81 | + tran_add(tran, &bdrv_child_set_pem_drv, c); | ||
82 | + } | ||
83 | +} | ||
84 | + | ||
85 | /* | ||
86 | * Check whether permissions on this node can be changed in a way that | ||
87 | * @cumulative_perms and @cumulative_shared_perms are the new cumulative | ||
88 | @@ -XXX,XX +XXX,XX @@ static int bdrv_child_check_perm(BdrvChild *c, BlockReopenQueue *q, | ||
89 | return ret; | ||
90 | } | ||
91 | |||
92 | - if (!c->has_backup_perm) { | ||
93 | - c->has_backup_perm = true; | ||
94 | - c->backup_perm = c->perm; | ||
95 | - c->backup_shared_perm = c->shared_perm; | ||
96 | - } | ||
97 | - /* | ||
98 | - * Note: it's OK if c->has_backup_perm was already set, as we can find the | ||
99 | - * same child twice during check_perm procedure | ||
100 | - */ | ||
101 | - | ||
102 | - c->perm = perm; | ||
103 | - c->shared_perm = shared; | ||
104 | + bdrv_child_set_perm_safe(c, perm, shared, NULL); | ||
105 | |||
106 | return 0; | ||
107 | } | 49 | } |
108 | 50 | ||
109 | static void bdrv_child_set_perm(BdrvChild *c) | 51 | -void qemu_co_rwlock_unlock(CoRwlock *lock) |
52 | +void coroutine_fn qemu_co_rwlock_unlock(CoRwlock *lock) | ||
110 | { | 53 | { |
111 | - c->has_backup_perm = false; | 54 | Coroutine *self = qemu_coroutine_self(); |
112 | - | 55 | |
113 | + bdrv_child_set_perm_commit(c); | 56 | @@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_unlock(CoRwlock *lock) |
114 | bdrv_set_perm(c->bs); | 57 | qemu_co_rwlock_maybe_wake_one(lock); |
115 | } | 58 | } |
116 | 59 | ||
117 | static void bdrv_child_abort_perm_update(BdrvChild *c) | 60 | -void qemu_co_rwlock_downgrade(CoRwlock *lock) |
61 | +void coroutine_fn qemu_co_rwlock_downgrade(CoRwlock *lock) | ||
118 | { | 62 | { |
119 | - if (c->has_backup_perm) { | 63 | qemu_co_mutex_lock(&lock->mutex); |
120 | - c->perm = c->backup_perm; | 64 | assert(lock->owners == -1); |
121 | - c->shared_perm = c->backup_shared_perm; | 65 | @@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_downgrade(CoRwlock *lock) |
122 | - c->has_backup_perm = false; | 66 | qemu_co_rwlock_maybe_wake_one(lock); |
123 | - } | ||
124 | - | ||
125 | + bdrv_child_set_perm_abort(c); | ||
126 | bdrv_abort_perm_update(c->bs); | ||
127 | } | 67 | } |
128 | 68 | ||
69 | -void qemu_co_rwlock_wrlock(CoRwlock *lock) | ||
70 | +void coroutine_fn qemu_co_rwlock_wrlock(CoRwlock *lock) | ||
71 | { | ||
72 | Coroutine *self = qemu_coroutine_self(); | ||
73 | |||
74 | @@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_wrlock(CoRwlock *lock) | ||
75 | self->locks_held++; | ||
76 | } | ||
77 | |||
78 | -void qemu_co_rwlock_upgrade(CoRwlock *lock) | ||
79 | +void coroutine_fn qemu_co_rwlock_upgrade(CoRwlock *lock) | ||
80 | { | ||
81 | qemu_co_mutex_lock(&lock->mutex); | ||
82 | assert(lock->owners > 0); | ||
129 | -- | 83 | -- |
130 | 2.30.2 | 84 | 2.37.3 |
131 | |||
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call | ||
4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to | ||
5 | functions where this holds. | ||
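(Illustrative sketch of the rule only; my_co_flush is an invented name, the
pattern mirrors throttle_co_flush() elsewhere in this series:)

    /* May yield, so it must only ever run in coroutine context. */
    static int coroutine_fn my_co_flush(BlockDriverState *bs)
    {
        return bdrv_co_flush(bs->file->bs);
    }

    /* A caller that is not itself coroutine_fn has to guard the call: */
    if (qemu_in_coroutine()) {
        ret = my_co_flush(bs);
    }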
6 | |||
7 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
9 | Message-Id: <20220922084924.201610-24-pbonzini@redhat.com> | ||
10 | [kwolf: Fixed up coding style] | ||
11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
13 | --- | ||
14 | block/raw-format.c | 3 ++- | ||
15 | 1 file changed, 2 insertions(+), 1 deletion(-) | ||
16 | |||
17 | diff --git a/block/raw-format.c b/block/raw-format.c | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/block/raw-format.c | ||
20 | +++ b/block/raw-format.c | ||
21 | @@ -XXX,XX +XXX,XX @@ static void raw_lock_medium(BlockDriverState *bs, bool locked) | ||
22 | bdrv_lock_medium(bs->file->bs, locked); | ||
23 | } | ||
24 | |||
25 | -static int raw_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) | ||
26 | +static int coroutine_fn raw_co_ioctl(BlockDriverState *bs, | ||
27 | + unsigned long int req, void *buf) | ||
28 | { | ||
29 | BDRVRawState *s = bs->opaque; | ||
30 | if (s->offset || s->has_size) { | ||
31 | -- | ||
32 | 2.37.3
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Marc-André Lureau <marcandre.lureau@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Each of them has only one caller. Open-coding simplifies further | 3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call |
4 | permission-update system changes. | 4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
5 | functions where this holds. | ||
5 | 6 | ||
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 7 | Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com> |
7 | Reviewed-by: Alberto Garcia <berto@igalia.com> | 8 | Acked-by: Greg Kurz <groug@kaod.org> |
9 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
10 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
11 | Message-Id: <20220922084924.201610-25-pbonzini@redhat.com> | ||
8 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 12 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
9 | Message-Id: <20210428151804.439460-13-vsementsov@virtuozzo.com> | ||
10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 13 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
11 | --- | 14 | --- |
12 | block.c | 59 +++++++++++++++++---------------------------------------- | 15 | hw/9pfs/9p.h | 9 ++++++--- |
13 | 1 file changed, 17 insertions(+), 42 deletions(-) | 16 | 1 file changed, 6 insertions(+), 3 deletions(-) |
14 | 17 | ||
15 | diff --git a/block.c b/block.c | 18 | diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h |
16 | index XXXXXXX..XXXXXXX 100644 | 19 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/block.c | 20 | --- a/hw/9pfs/9p.h |
18 | +++ b/block.c | 21 | +++ b/hw/9pfs/9p.h |
19 | @@ -XXX,XX +XXX,XX @@ static int bdrv_fill_options(QDict **options, const char *filename, | 22 | @@ -XXX,XX +XXX,XX @@ typedef struct V9fsGetlock |
20 | return 0; | 23 | extern int open_fd_hw; |
21 | } | 24 | extern int total_open_fd; |
22 | 25 | ||
23 | -static int bdrv_child_check_perm(BdrvChild *c, BlockReopenQueue *q, | 26 | -static inline void v9fs_path_write_lock(V9fsState *s) |
24 | - uint64_t perm, uint64_t shared, | 27 | +static inline void coroutine_fn |
25 | - GSList *ignore_children, Error **errp); | 28 | +v9fs_path_write_lock(V9fsState *s) |
26 | -static void bdrv_child_abort_perm_update(BdrvChild *c); | 29 | { |
27 | -static void bdrv_child_set_perm(BdrvChild *c); | 30 | if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) { |
28 | +static int bdrv_check_update_perm(BlockDriverState *bs, BlockReopenQueue *q, | 31 | qemu_co_rwlock_wrlock(&s->rename_lock); |
29 | + uint64_t new_used_perm, | ||
30 | + uint64_t new_shared_perm, | ||
31 | + GSList *ignore_children, | ||
32 | + Error **errp); | ||
33 | |||
34 | typedef struct BlockReopenQueueEntry { | ||
35 | bool prepared; | ||
36 | @@ -XXX,XX +XXX,XX @@ static int bdrv_check_perm(BlockDriverState *bs, BlockReopenQueue *q, | ||
37 | /* Check all children */ | ||
38 | QLIST_FOREACH(c, &bs->children, next) { | ||
39 | uint64_t cur_perm, cur_shared; | ||
40 | + GSList *cur_ignore_children; | ||
41 | |||
42 | bdrv_child_perm(bs, c->bs, c, c->role, q, | ||
43 | cumulative_perms, cumulative_shared_perms, | ||
44 | &cur_perm, &cur_shared); | ||
45 | - ret = bdrv_child_check_perm(c, q, cur_perm, cur_shared, ignore_children, | ||
46 | - errp); | ||
47 | + | ||
48 | + cur_ignore_children = g_slist_prepend(g_slist_copy(ignore_children), c); | ||
49 | + ret = bdrv_check_update_perm(c->bs, q, cur_perm, cur_shared, | ||
50 | + cur_ignore_children, errp); | ||
51 | + g_slist_free(cur_ignore_children); | ||
52 | if (ret < 0) { | ||
53 | return ret; | ||
54 | } | ||
55 | + | ||
56 | + bdrv_child_set_perm_safe(c, cur_perm, cur_shared, NULL); | ||
57 | } | ||
58 | |||
59 | return 0; | ||
60 | @@ -XXX,XX +XXX,XX @@ static void bdrv_abort_perm_update(BlockDriverState *bs) | ||
61 | } | ||
62 | |||
63 | QLIST_FOREACH(c, &bs->children, next) { | ||
64 | - bdrv_child_abort_perm_update(c); | ||
65 | + bdrv_child_set_perm_abort(c); | ||
66 | + bdrv_abort_perm_update(c->bs); | ||
67 | } | 32 | } |
68 | } | 33 | } |
69 | 34 | ||
70 | @@ -XXX,XX +XXX,XX @@ static void bdrv_set_perm(BlockDriverState *bs) | 35 | -static inline void v9fs_path_read_lock(V9fsState *s) |
71 | 36 | +static inline void coroutine_fn | |
72 | /* Update all children */ | 37 | +v9fs_path_read_lock(V9fsState *s) |
73 | QLIST_FOREACH(c, &bs->children, next) { | 38 | { |
74 | - bdrv_child_set_perm(c); | 39 | if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) { |
75 | + bdrv_child_set_perm_commit(c); | 40 | qemu_co_rwlock_rdlock(&s->rename_lock); |
76 | + bdrv_set_perm(c->bs); | ||
77 | } | 41 | } |
78 | } | 42 | } |
79 | 43 | ||
80 | @@ -XXX,XX +XXX,XX @@ static int bdrv_check_update_perm(BlockDriverState *bs, BlockReopenQueue *q, | 44 | -static inline void v9fs_path_unlock(V9fsState *s) |
81 | ignore_children, errp); | 45 | +static inline void coroutine_fn |
82 | } | 46 | +v9fs_path_unlock(V9fsState *s) |
83 | |||
84 | -/* Needs to be followed by a call to either bdrv_child_set_perm() or | ||
85 | - * bdrv_child_abort_perm_update(). */ | ||
86 | -static int bdrv_child_check_perm(BdrvChild *c, BlockReopenQueue *q, | ||
87 | - uint64_t perm, uint64_t shared, | ||
88 | - GSList *ignore_children, Error **errp) | ||
89 | -{ | ||
90 | - int ret; | ||
91 | - | ||
92 | - ignore_children = g_slist_prepend(g_slist_copy(ignore_children), c); | ||
93 | - ret = bdrv_check_update_perm(c->bs, q, perm, shared, ignore_children, errp); | ||
94 | - g_slist_free(ignore_children); | ||
95 | - | ||
96 | - if (ret < 0) { | ||
97 | - return ret; | ||
98 | - } | ||
99 | - | ||
100 | - bdrv_child_set_perm_safe(c, perm, shared, NULL); | ||
101 | - | ||
102 | - return 0; | ||
103 | -} | ||
104 | - | ||
105 | -static void bdrv_child_set_perm(BdrvChild *c) | ||
106 | -{ | ||
107 | - bdrv_child_set_perm_commit(c); | ||
108 | - bdrv_set_perm(c->bs); | ||
109 | -} | ||
110 | - | ||
111 | -static void bdrv_child_abort_perm_update(BdrvChild *c) | ||
112 | -{ | ||
113 | - bdrv_child_set_perm_abort(c); | ||
114 | - bdrv_abort_perm_update(c->bs); | ||
115 | -} | ||
116 | - | ||
117 | static int bdrv_refresh_perms(BlockDriverState *bs, Error **errp) | ||
118 | { | 47 | { |
119 | int ret; | 48 | if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) { |
49 | qemu_co_rwlock_unlock(&s->rename_lock); | ||
120 | -- | 50 | -- |
121 | 2.30.2 | 51 | 2.37.3 |
122 | 52 | ||
123 | 53 | diff view generated by jsdifflib |
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Marc-André Lureau <marcandre.lureau@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | During reopen we may add backing bs from other aio context, which may | 3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call |
4 | lead to changing original context of top bs. | 4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to |
5 | functions where this holds. | ||
5 | 6 | ||
6 | We are going to move graph modification to prepare stage. So, it will | 7 | Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com> |
7 | be possible that bdrv_flush() in bdrv_reopen_prepare called on bs in | 8 | Reviewed-by: Juan Quintela <quintela@redhat.com> |
8 | non-original aio context, which we didn't aquire which leads to crash. | 9 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> |
9 | 10 | Reviewed-by: Alberto Faria <afaria@redhat.com> | |
10 | To avoid this problem move bdrv_flush() to be a separate reopen stage | 11 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
11 | before bdrv_reopen_prepare(). | 12 | Message-Id: <20220922084924.201610-26-pbonzini@redhat.com> |
12 | |||
13 | This doesn't seem correct to acquire only one aio context and not all | ||
14 | contexts participating in reopen. But it's not obvious how to do it | ||
15 | correctly, keeping in mind: | ||
16 | |||
17 | 1. rules of bdrv_set_aio_context_ignore() that requires new_context | ||
18 | lock not being held | ||
19 | |||
20 | 2. possible deadlocks because of holding all (or several?) AioContext | ||
21 | locks | ||
22 | |||
23 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
24 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 13 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
25 | Message-Id: <20210428151804.439460-30-vsementsov@virtuozzo.com> | ||
26 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 14 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
27 | --- | 15 | --- |
28 | block.c | 14 ++++++++------ | 16 | migration/migration.c | 3 ++- |
29 | 1 file changed, 8 insertions(+), 6 deletions(-) | 17 | 1 file changed, 2 insertions(+), 1 deletion(-) |
30 | 18 | ||
31 | diff --git a/block.c b/block.c | 19 | diff --git a/migration/migration.c b/migration/migration.c |
32 | index XXXXXXX..XXXXXXX 100644 | 20 | index XXXXXXX..XXXXXXX 100644 |
33 | --- a/block.c | 21 | --- a/migration/migration.c |
34 | +++ b/block.c | 22 | +++ b/migration/migration.c |
35 | @@ -XXX,XX +XXX,XX @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp) | 23 | @@ -XXX,XX +XXX,XX @@ static void process_incoming_migration_bh(void *opaque) |
36 | 24 | migration_incoming_state_destroy(); | |
37 | assert(bs_queue != NULL); | 25 | } |
38 | 26 | ||
39 | + QTAILQ_FOREACH(bs_entry, bs_queue, entry) { | 27 | -static void process_incoming_migration_co(void *opaque) |
40 | + ret = bdrv_flush(bs_entry->state.bs); | 28 | +static void coroutine_fn |
41 | + if (ret < 0) { | 29 | +process_incoming_migration_co(void *opaque) |
42 | + error_setg_errno(errp, -ret, "Error flushing drive"); | 30 | { |
43 | + goto cleanup; | 31 | MigrationIncomingState *mis = migration_incoming_get_current(); |
44 | + } | 32 | PostcopyState ps; |
45 | + } | ||
46 | + | ||
47 | QTAILQ_FOREACH(bs_entry, bs_queue, entry) { | ||
48 | assert(bs_entry->state.bs->quiesce_counter > 0); | ||
49 | if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, errp)) { | ||
50 | @@ -XXX,XX +XXX,XX @@ static int bdrv_reopen_prepare(BDRVReopenState *reopen_state, | ||
51 | bdrv_reopen_perm(queue, reopen_state->bs, | ||
52 | &reopen_state->perm, &reopen_state->shared_perm); | ||
53 | |||
54 | - ret = bdrv_flush(reopen_state->bs); | ||
55 | - if (ret) { | ||
56 | - error_setg_errno(errp, -ret, "Error flushing drive"); | ||
57 | - goto error; | ||
58 | - } | ||
59 | - | ||
60 | if (drv->bdrv_reopen_prepare) { | ||
61 | /* | ||
62 | * If a driver-specific option is missing, it means that we | ||
63 | -- | 33 | -- |
64 | 2.30.2 | 34 | 2.37.3 |
65 | 35 | ||
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Marc-André Lureau <marcandre.lureau@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | bdrv_append is not very good for inserting filters: it does extra | 3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call |
4 | permission update as part of bdrv_set_backing_hd(). During this update | 4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to |
5 | filter may conflict with other parents of top_bs. | 5 | functions where this holds. |
6 | 6 | ||
7 | Instead, let's first do all graph modifications and after it update | 7 | Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com> |
8 | permissions. | 8 | Reviewed-by: Alberto Faria <afaria@redhat.com> |
9 | 9 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | |
10 | append-greedy-filter test-case in test-bdrv-graph-mod is now works, so | 10 | Message-Id: <20220922084924.201610-27-pbonzini@redhat.com> |
11 | move it out of debug option. | ||
12 | |||
13 | Note: bdrv_append() is still only works for backing-child based | ||
14 | filters. It's something to improve later. | ||
15 | |||
16 | Note2: we use the fact that bdrv_append() is used to append new nodes, | ||
17 | without backing child, so we don't need frozen check and inherits_from | ||
18 | logic from bdrv_set_backing_hd(). | ||
19 | |||
20 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
21 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
22 | Message-Id: <20210428151804.439460-22-vsementsov@virtuozzo.com> | ||
23 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
24 | --- | 13 | --- |
25 | block.c | 27 ++++++++++++++++++++------- | 14 | tests/unit/test-coroutine.c | 2 +- |
26 | tests/unit/test-bdrv-graph-mod.c | 17 ++--------------- | 15 | 1 file changed, 1 insertion(+), 1 deletion(-) |
27 | 2 files changed, 22 insertions(+), 22 deletions(-) | ||
28 | 16 | ||
29 | diff --git a/block.c b/block.c | 17 | diff --git a/tests/unit/test-coroutine.c b/tests/unit/test-coroutine.c |
30 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
31 | --- a/block.c | 19 | --- a/tests/unit/test-coroutine.c |
32 | +++ b/block.c | 20 | +++ b/tests/unit/test-coroutine.c |
33 | @@ -XXX,XX +XXX,XX @@ int bdrv_replace_node(BlockDriverState *from, BlockDriverState *to, | 21 | @@ -XXX,XX +XXX,XX @@ static void perf_baseline(void) |
34 | * This will modify the BlockDriverState fields, and swap contents | 22 | g_test_message("Function call %u iterations: %f s", maxcycles, duration); |
35 | * between bs_new and bs_top. Both bs_new and bs_top are modified. | 23 | } |
36 | * | 24 | |
37 | - * bs_new must not be attached to a BlockBackend. | 25 | -static __attribute__((noinline)) void perf_cost_func(void *opaque) |
38 | + * bs_new must not be attached to a BlockBackend and must not have backing | 26 | +static __attribute__((noinline)) void coroutine_fn perf_cost_func(void *opaque) |
39 | + * child. | ||
40 | * | ||
41 | * This function does not create any image files. | ||
42 | */ | ||
43 | int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top, | ||
44 | Error **errp) | ||
45 | { | 27 | { |
46 | - int ret = bdrv_set_backing_hd(bs_new, bs_top, errp); | 28 | qemu_coroutine_yield(); |
47 | + int ret; | ||
48 | + Transaction *tran = tran_new(); | ||
49 | + | ||
50 | + assert(!bs_new->backing); | ||
51 | + | ||
52 | + ret = bdrv_attach_child_noperm(bs_new, bs_top, "backing", | ||
53 | + &child_of_bds, bdrv_backing_role(bs_new), | ||
54 | + &bs_new->backing, tran, errp); | ||
55 | if (ret < 0) { | ||
56 | - return ret; | ||
57 | + goto out; | ||
58 | } | ||
59 | |||
60 | - ret = bdrv_replace_node(bs_top, bs_new, errp); | ||
61 | + ret = bdrv_replace_node_noperm(bs_top, bs_new, true, tran, errp); | ||
62 | if (ret < 0) { | ||
63 | - bdrv_set_backing_hd(bs_new, NULL, &error_abort); | ||
64 | - return ret; | ||
65 | + goto out; | ||
66 | } | ||
67 | |||
68 | - return 0; | ||
69 | + ret = bdrv_refresh_perms(bs_new, errp); | ||
70 | +out: | ||
71 | + tran_finalize(tran, ret); | ||
72 | + | ||
73 | + bdrv_refresh_limits(bs_top, NULL); | ||
74 | + | ||
75 | + return ret; | ||
76 | } | ||
77 | |||
78 | static void bdrv_delete(BlockDriverState *bs) | ||
79 | diff --git a/tests/unit/test-bdrv-graph-mod.c b/tests/unit/test-bdrv-graph-mod.c | ||
80 | index XXXXXXX..XXXXXXX 100644 | ||
81 | --- a/tests/unit/test-bdrv-graph-mod.c | ||
82 | +++ b/tests/unit/test-bdrv-graph-mod.c | ||
83 | @@ -XXX,XX +XXX,XX @@ static void test_append_greedy_filter(void) | ||
84 | |||
85 | int main(int argc, char *argv[]) | ||
86 | { | ||
87 | - int i; | ||
88 | - bool debug = false; | ||
89 | - | ||
90 | - for (i = 1; i < argc; i++) { | ||
91 | - if (!strcmp(argv[i], "-d")) { | ||
92 | - debug = true; | ||
93 | - break; | ||
94 | - } | ||
95 | - } | ||
96 | - | ||
97 | bdrv_init(); | ||
98 | qemu_init_main_loop(&error_abort); | ||
99 | |||
100 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char *argv[]) | ||
101 | test_parallel_perm_update); | ||
102 | g_test_add_func("/bdrv-graph-mod/parallel-exclusive-write", | ||
103 | test_parallel_exclusive_write); | ||
104 | - | ||
105 | - if (debug) { | ||
106 | - g_test_add_func("/bdrv-graph-mod/append-greedy-filter", | ||
107 | - test_append_greedy_filter); | ||
108 | - } | ||
109 | + g_test_add_func("/bdrv-graph-mod/append-greedy-filter", | ||
110 | + test_append_greedy_filter); | ||
111 | |||
112 | return g_test_run(); | ||
113 | } | 29 | } |
114 | -- | 30 | -- |
115 | 2.30.2 | 31 | 2.37.3 |
116 | 32 | ||
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
---|---|---|---|
2 | 2 | Message-Id: <20221006122607.162769-1-kwolf@redhat.com> | |
3 | Add test to show that simple DFS recursion order is not correct for | 3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
4 | permission update. Correct order is topological-sort order, which will | 4 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> |
5 | be introduced later. | ||
6 | |||
7 | Consider the block driver which has two filter children: one active | ||
8 | with exclusive write access and one inactive with no specific | ||
9 | permissions. | ||
10 | |||
11 | And these two children have a common base child, like this: | ||
12 | |||
13 | ┌─────┐ ┌──────┐ | ||
14 | │ fl2 │ ◀── │ top │ | ||
15 | └─────┘ └──────┘ | ||
16 | │ │ | ||
17 | │ │ w | ||
18 | │ ▼ | ||
19 | │ ┌──────┐ | ||
20 | │ │ fl1 │ | ||
21 | │ └──────┘ | ||
22 | │ │ | ||
23 | │ │ w | ||
24 | │ ▼ | ||
25 | │ ┌──────┐ | ||
26 | └───────▶ │ base │ | ||
27 | └──────┘ | ||
28 | |||
29 | So, exclusive write is propagated. | ||
30 | |||
31 | Assume, we want to make fl2 active instead of fl1. | ||
32 | So, we set some option for top driver and do permission update. | ||
33 | |||
34 | If permission update (remember, it's DFS) goes first through | ||
35 | top->fl1->base branch it will succeed: it first drops the exclusive write | ||
36 | permissions and then applies them for the other BdrvChild. | ||
37 | But if permission update goes first through top->fl2->base branch it | ||
38 | will fail, as when we try to update fl2->base child, old not yet | ||
39 | updated fl1->base child will be in conflict. | ||
40 | |||
41 | The test fails for now, so it runs only with the -d flag. To run it do | ||
42 | |||
43 | ./test-bdrv-graph-mod -d -p /bdrv-graph-mod/parallel-perm-update | ||
44 | |||
45 | from <build-directory>/tests. | ||
46 | |||
47 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
48 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
49 | Message-Id: <20210428151804.439460-3-vsementsov@virtuozzo.com> | ||
50 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 5 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
51 | --- | 6 | --- |
52 | tests/unit/test-bdrv-graph-mod.c | 116 +++++++++++++++++++++++++++++++ | 7 | block/quorum.c | 2 -- |
53 | 1 file changed, 116 insertions(+) | 8 | 1 file changed, 2 deletions(-) |
54 | 9 | ||
55 | diff --git a/tests/unit/test-bdrv-graph-mod.c b/tests/unit/test-bdrv-graph-mod.c | 10 | diff --git a/block/quorum.c b/block/quorum.c |
56 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
57 | --- a/tests/unit/test-bdrv-graph-mod.c | 12 | --- a/block/quorum.c |
58 | +++ b/tests/unit/test-bdrv-graph-mod.c | 13 | +++ b/block/quorum.c |
59 | @@ -XXX,XX +XXX,XX @@ static void test_parallel_exclusive_write(void) | 14 | @@ -XXX,XX +XXX,XX @@ static bool quorum_has_too_much_io_failed(QuorumAIOCB *acb) |
60 | bdrv_unref(top); | 15 | return false; |
61 | } | 16 | } |
62 | 17 | ||
63 | +static void write_to_file_perms(BlockDriverState *bs, BdrvChild *c, | 18 | -static int read_fifo_child(QuorumAIOCB *acb); |
64 | + BdrvChildRole role, | 19 | - |
65 | + BlockReopenQueue *reopen_queue, | 20 | static void quorum_copy_qiov(QEMUIOVector *dest, QEMUIOVector *source) |
66 | + uint64_t perm, uint64_t shared, | ||
67 | + uint64_t *nperm, uint64_t *nshared) | ||
68 | +{ | ||
69 | + if (bs->file && c == bs->file) { | ||
70 | + *nperm = BLK_PERM_WRITE; | ||
71 | + *nshared = BLK_PERM_ALL & ~BLK_PERM_WRITE; | ||
72 | + } else { | ||
73 | + *nperm = 0; | ||
74 | + *nshared = BLK_PERM_ALL; | ||
75 | + } | ||
76 | +} | ||
77 | + | ||
78 | +static BlockDriver bdrv_write_to_file = { | ||
79 | + .format_name = "tricky-perm", | ||
80 | + .bdrv_child_perm = write_to_file_perms, | ||
81 | +}; | ||
82 | + | ||
83 | + | ||
84 | +/* | ||
85 | + * The following test shows that topological-sort order is required for | ||
86 | + * permission updates; simple DFS is not enough. | ||
87 | + * | ||
88 | + * Consider a block driver which has two filter children: one active | ||
89 | + * with exclusive write access and one inactive with no specific | ||
90 | + * permissions. | ||
91 | + * | ||
92 | + * And these two children have a common base child, like this: | ||
93 | + * | ||
94 | + * ┌─────┐ ┌──────┐ | ||
95 | + * │ fl2 │ ◀── │ top │ | ||
96 | + * └─────┘ └──────┘ | ||
97 | + * │ │ | ||
98 | + * │ │ w | ||
99 | + * │ ▼ | ||
100 | + * │ ┌──────┐ | ||
101 | + * │ │ fl1 │ | ||
102 | + * │ └──────┘ | ||
103 | + * │ │ | ||
104 | + * │ │ w | ||
105 | + * │ ▼ | ||
106 | + * │ ┌──────┐ | ||
107 | + * └───────▶ │ base │ | ||
108 | + * └──────┘ | ||
109 | + * | ||
110 | + * So, exclusive write is propagated. | ||
111 | + * | ||
112 | + * Assume we want to make fl2 active instead of fl1. | ||
113 | + * So we set some option for the top driver and do a permission update. | ||
114 | + * | ||
115 | + * With simple DFS, if the permission update goes through the | ||
116 | + * top->fl1->base branch first, it will succeed: it first drops the exclusive | ||
117 | + * write permission and then applies it to the other BdrvChild. | ||
118 | + * But if the permission update goes through the top->fl2->base branch first, | ||
119 | + * it will fail, because when we try to update the fl2->base child, the old, | ||
120 | + * not yet updated fl1->base child will conflict with it. | ||
121 | + * | ||
122 | + * With topological-sort order we always update parents before children, so fl1 | ||
123 | + * and fl2 are both updated when we update base and there is no conflict. | ||
124 | + */ | ||
125 | +static void test_parallel_perm_update(void) | ||
126 | +{ | ||
127 | + BlockDriverState *top = no_perm_node("top"); | ||
128 | + BlockDriverState *tricky = | ||
129 | + bdrv_new_open_driver(&bdrv_write_to_file, "tricky", BDRV_O_RDWR, | ||
130 | + &error_abort); | ||
131 | + BlockDriverState *base = no_perm_node("base"); | ||
132 | + BlockDriverState *fl1 = pass_through_node("fl1"); | ||
133 | + BlockDriverState *fl2 = pass_through_node("fl2"); | ||
134 | + BdrvChild *c_fl1, *c_fl2; | ||
135 | + | ||
136 | + /* | ||
137 | + * bdrv_attach_child() eats child bs reference, so we need two @base | ||
138 | + * references for two filters: | ||
139 | + */ | ||
140 | + bdrv_ref(base); | ||
141 | + | ||
142 | + bdrv_attach_child(top, tricky, "file", &child_of_bds, BDRV_CHILD_DATA, | ||
143 | + &error_abort); | ||
144 | + c_fl1 = bdrv_attach_child(tricky, fl1, "first", &child_of_bds, | ||
145 | + BDRV_CHILD_FILTERED, &error_abort); | ||
146 | + c_fl2 = bdrv_attach_child(tricky, fl2, "second", &child_of_bds, | ||
147 | + BDRV_CHILD_FILTERED, &error_abort); | ||
148 | + bdrv_attach_child(fl1, base, "backing", &child_of_bds, BDRV_CHILD_FILTERED, | ||
149 | + &error_abort); | ||
150 | + bdrv_attach_child(fl2, base, "backing", &child_of_bds, BDRV_CHILD_FILTERED, | ||
151 | + &error_abort); | ||
152 | + | ||
153 | + /* Select fl1 as first child to be active */ | ||
154 | + tricky->file = c_fl1; | ||
155 | + bdrv_child_refresh_perms(top, top->children.lh_first, &error_abort); | ||
156 | + | ||
157 | + assert(c_fl1->perm & BLK_PERM_WRITE); | ||
158 | + assert(!(c_fl2->perm & BLK_PERM_WRITE)); | ||
159 | + | ||
160 | + /* Now, try to switch active child and update permissions */ | ||
161 | + tricky->file = c_fl2; | ||
162 | + bdrv_child_refresh_perms(top, top->children.lh_first, &error_abort); | ||
163 | + | ||
164 | + assert(c_fl2->perm & BLK_PERM_WRITE); | ||
165 | + assert(!(c_fl1->perm & BLK_PERM_WRITE)); | ||
166 | + | ||
167 | + /* Switch once more, to not care about real child order in the list */ | ||
168 | + tricky->file = c_fl1; | ||
169 | + bdrv_child_refresh_perms(top, top->children.lh_first, &error_abort); | ||
170 | + | ||
171 | + assert(c_fl1->perm & BLK_PERM_WRITE); | ||
172 | + assert(!(c_fl2->perm & BLK_PERM_WRITE)); | ||
173 | + | ||
174 | + bdrv_unref(top); | ||
175 | +} | ||
176 | + | ||
177 | int main(int argc, char *argv[]) | ||
178 | { | 21 | { |
179 | int i; | 22 | int i; |
180 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char *argv[]) | ||
181 | if (debug) { | ||
182 | g_test_add_func("/bdrv-graph-mod/parallel-exclusive-write", | ||
183 | test_parallel_exclusive_write); | ||
184 | + g_test_add_func("/bdrv-graph-mod/parallel-perm-update", | ||
185 | + test_parallel_perm_update); | ||
186 | } | ||
187 | |||
188 | return g_test_run(); | ||
189 | -- | 23 | -- |
190 | 2.30.2 | 24 | 2.37.3 |
191 | 25 | ||
192 | 26 | diff view generated by jsdifflib |
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Refactor calling driver callbacks to a separate transaction action to | 3 | The job mutex will be used to protect the job struct elements and list, |
4 | be used later. | 4 | replacing AioContext locks. |
5 | 5 | ||
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 6 | Right now we use a shared lock for all jobs, in order to keep things |
7 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 7 | simple. Once the AioContext lock is gone, we can introduce per-job |
8 | Message-Id: <20210428151804.439460-15-vsementsov@virtuozzo.com> | 8 | locks. |
9 | |||
10 | To simplify the switch from the AioContext lock to the job lock, introduce | ||
11 | *nop* lock/unlock functions and macros. | ||
12 | We want to always call job_lock/unlock outside the AioContext locks, | ||
13 | and not vice-versa, otherwise we might get a deadlock. This is not | ||
14 | straightforward to do, and that's why we start with nop functions. | ||
15 | Once everything is protected by job_lock/unlock, we can change the nop into | ||
16 | an actual mutex and remove the AioContext lock. | ||
17 | |||
18 | Since job_mutex is already being used, add static | ||
19 | real_job_{lock/unlock} for the existing usage. | ||
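
For reference, a sketch of the end state this is building towards (not part
of this patch): once every caller has been converted, the nop bodies
introduced below become real QemuMutex operations and the temporary
real_job_{lock/unlock} helpers go away again, roughly:

    void job_lock(void)
    {
        qemu_mutex_lock(&job_mutex);
    }

    void job_unlock(void)
    {
        qemu_mutex_unlock(&job_mutex);
    }

and JOB_LOCK_GUARD()/WITH_JOB_LOCK_GUARD() get their commented-out
QEMU_LOCK_GUARD/WITH_QEMU_LOCK_GUARD bodies back.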
20 | |||
21 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> | ||
22 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
23 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
24 | Message-Id: <20220926093214.506243-2-eesposit@redhat.com> | ||
9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 25 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
10 | --- | 26 | --- |
11 | block.c | 70 ++++++++++++++++++++++++++++++++++++++++++++------------- | 27 | include/qemu/job.h | 24 ++++++++++++++++++++++++ |
12 | 1 file changed, 54 insertions(+), 16 deletions(-) | 28 | job.c | 35 +++++++++++++++++++++++------------ |
29 | 2 files changed, 47 insertions(+), 12 deletions(-) | ||
13 | 30 | ||
14 | diff --git a/block.c b/block.c | 31 | diff --git a/include/qemu/job.h b/include/qemu/job.h |
15 | index XXXXXXX..XXXXXXX 100644 | 32 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/block.c | 33 | --- a/include/qemu/job.h |
17 | +++ b/block.c | 34 | +++ b/include/qemu/job.h |
18 | @@ -XXX,XX +XXX,XX @@ static void bdrv_child_set_perm_safe(BdrvChild *c, uint64_t perm, | 35 | @@ -XXX,XX +XXX,XX @@ typedef enum JobCreateFlags { |
19 | } | 36 | JOB_MANUAL_DISMISS = 0x04, |
20 | } | 37 | } JobCreateFlags; |
21 | 38 | ||
22 | +static void bdrv_drv_set_perm_commit(void *opaque) | 39 | +extern QemuMutex job_mutex; |
40 | + | ||
41 | +#define JOB_LOCK_GUARD() /* QEMU_LOCK_GUARD(&job_mutex) */ | ||
42 | + | ||
43 | +#define WITH_JOB_LOCK_GUARD() /* WITH_QEMU_LOCK_GUARD(&job_mutex) */ | ||
44 | + | ||
45 | +/** | ||
46 | + * job_lock: | ||
47 | + * | ||
48 | + * Take the mutex protecting the list of jobs and their status. | ||
49 | + * Most functions called by the monitor need to call job_lock | ||
52 | + * and job_unlock manually. On the other hand, functions called | ||
51 | + * by the block jobs themselves and by the block layer will take the | ||
52 | + * lock for you. | ||
53 | + */ | ||
54 | +void job_lock(void); | ||
55 | + | ||
56 | +/** | ||
57 | + * job_unlock: | ||
58 | + * | ||
59 | + * Release the mutex protecting the list of jobs and their status. | ||
60 | + */ | ||
61 | +void job_unlock(void); | ||
62 | + | ||
63 | /** | ||
64 | * Allocate and return a new job transaction. Jobs can be added to the | ||
65 | * transaction using job_txn_add_job(). | ||
66 | diff --git a/job.c b/job.c | ||
67 | index XXXXXXX..XXXXXXX 100644 | ||
68 | --- a/job.c | ||
69 | +++ b/job.c | ||
70 | @@ -XXX,XX +XXX,XX @@ | ||
71 | #include "trace/trace-root.h" | ||
72 | #include "qapi/qapi-events-job.h" | ||
73 | |||
74 | +/* | ||
75 | + * job_mutex protects the jobs list, but also makes the | ||
76 | + * struct job fields thread-safe. | ||
77 | + */ | ||
78 | +QemuMutex job_mutex; | ||
79 | + | ||
80 | static QLIST_HEAD(, Job) jobs = QLIST_HEAD_INITIALIZER(jobs); | ||
81 | |||
82 | /* Job State Transition Table */ | ||
83 | @@ -XXX,XX +XXX,XX @@ struct JobTxn { | ||
84 | int refcnt; | ||
85 | }; | ||
86 | |||
87 | -/* Right now, this mutex is only needed to synchronize accesses to job->busy | ||
88 | - * and job->sleep_timer, such as concurrent calls to job_do_yield and | ||
89 | - * job_enter. */ | ||
90 | -static QemuMutex job_mutex; | ||
91 | +void job_lock(void) | ||
23 | +{ | 92 | +{ |
24 | + BlockDriverState *bs = opaque; | 93 | + /* nop */ |
25 | + uint64_t cumulative_perms, cumulative_shared_perms; | ||
26 | + | ||
27 | + if (bs->drv->bdrv_set_perm) { | ||
28 | + bdrv_get_cumulative_perm(bs, &cumulative_perms, | ||
29 | + &cumulative_shared_perms); | ||
30 | + bs->drv->bdrv_set_perm(bs, cumulative_perms, cumulative_shared_perms); | ||
31 | + } | ||
32 | +} | 94 | +} |
33 | + | 95 | + |
34 | +static void bdrv_drv_set_perm_abort(void *opaque) | 96 | +void job_unlock(void) |
35 | +{ | 97 | +{ |
36 | + BlockDriverState *bs = opaque; | 98 | + /* nop */ |
37 | + | ||
38 | + if (bs->drv->bdrv_abort_perm_update) { | ||
39 | + bs->drv->bdrv_abort_perm_update(bs); | ||
40 | + } | ||
41 | +} | 99 | +} |
42 | + | 100 | |
43 | +TransactionActionDrv bdrv_drv_set_perm_drv = { | 101 | -static void job_lock(void) |
44 | + .abort = bdrv_drv_set_perm_abort, | 102 | +static void real_job_lock(void) |
45 | + .commit = bdrv_drv_set_perm_commit, | 103 | { |
46 | +}; | 104 | qemu_mutex_lock(&job_mutex); |
47 | + | 105 | } |
48 | +static int bdrv_drv_set_perm(BlockDriverState *bs, uint64_t perm, | 106 | |
49 | + uint64_t shared_perm, Transaction *tran, | 107 | -static void job_unlock(void) |
50 | + Error **errp) | 108 | +static void real_job_unlock(void) |
51 | +{ | 109 | { |
52 | + if (!bs->drv) { | 110 | qemu_mutex_unlock(&job_mutex); |
53 | + return 0; | 111 | } |
54 | + } | 112 | @@ -XXX,XX +XXX,XX @@ void job_enter_cond(Job *job, bool(*fn)(Job *job)) |
55 | + | ||
56 | + if (bs->drv->bdrv_check_perm) { | ||
57 | + int ret = bs->drv->bdrv_check_perm(bs, perm, shared_perm, errp); | ||
58 | + if (ret < 0) { | ||
59 | + return ret; | ||
60 | + } | ||
61 | + } | ||
62 | + | ||
63 | + if (tran) { | ||
64 | + tran_add(tran, &bdrv_drv_set_perm_drv, bs); | ||
65 | + } | ||
66 | + | ||
67 | + return 0; | ||
68 | +} | ||
69 | + | ||
70 | /* | ||
71 | * Check whether permissions on this node can be changed in a way that | ||
72 | * @cumulative_perms and @cumulative_shared_perms are the new cumulative | ||
73 | @@ -XXX,XX +XXX,XX @@ static int bdrv_node_check_perm(BlockDriverState *bs, BlockReopenQueue *q, | ||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | - if (drv->bdrv_check_perm) { | ||
78 | - ret = drv->bdrv_check_perm(bs, cumulative_perms, | ||
79 | - cumulative_shared_perms, errp); | ||
80 | - if (ret < 0) { | ||
81 | - return ret; | ||
82 | - } | ||
83 | + ret = bdrv_drv_set_perm(bs, cumulative_perms, cumulative_shared_perms, NULL, | ||
84 | + errp); | ||
85 | + if (ret < 0) { | ||
86 | + return ret; | ||
87 | } | ||
88 | |||
89 | /* Drivers that never have children can omit .bdrv_child_perm() */ | ||
90 | @@ -XXX,XX +XXX,XX @@ static void bdrv_node_abort_perm_update(BlockDriverState *bs) | ||
91 | return; | 113 | return; |
92 | } | 114 | } |
93 | 115 | ||
94 | - if (drv->bdrv_abort_perm_update) { | 116 | - job_lock(); |
95 | - drv->bdrv_abort_perm_update(bs); | 117 | + real_job_lock(); |
96 | - } | 118 | if (job->busy) { |
97 | + bdrv_drv_set_perm_abort(bs); | 119 | - job_unlock(); |
98 | 120 | + real_job_unlock(); | |
99 | QLIST_FOREACH(c, &bs->children, next) { | ||
100 | bdrv_child_set_perm_abort(c); | ||
101 | @@ -XXX,XX +XXX,XX @@ static void bdrv_abort_perm_update(BlockDriverState *bs) | ||
102 | |||
103 | static void bdrv_node_set_perm(BlockDriverState *bs) | ||
104 | { | ||
105 | - uint64_t cumulative_perms, cumulative_shared_perms; | ||
106 | BlockDriver *drv = bs->drv; | ||
107 | BdrvChild *c; | ||
108 | |||
109 | @@ -XXX,XX +XXX,XX @@ static void bdrv_node_set_perm(BlockDriverState *bs) | ||
110 | return; | 121 | return; |
111 | } | 122 | } |
112 | 123 | ||
113 | - bdrv_get_cumulative_perm(bs, &cumulative_perms, &cumulative_shared_perms); | 124 | if (fn && !fn(job)) { |
114 | - | 125 | - job_unlock(); |
115 | - /* Update this node */ | 126 | + real_job_unlock(); |
116 | - if (drv->bdrv_set_perm) { | 127 | return; |
117 | - drv->bdrv_set_perm(bs, cumulative_perms, cumulative_shared_perms); | 128 | } |
118 | - } | 129 | |
119 | + bdrv_drv_set_perm_commit(bs); | 130 | assert(!job->deferred_to_main_loop); |
120 | 131 | timer_del(&job->sleep_timer); | |
121 | /* Drivers that never have children can omit .bdrv_child_perm() */ | 132 | job->busy = true; |
122 | if (!drv->bdrv_child_perm) { | 133 | - job_unlock(); |
134 | + real_job_unlock(); | ||
135 | aio_co_enter(job->aio_context, job->co); | ||
136 | } | ||
137 | |||
138 | @@ -XXX,XX +XXX,XX @@ void job_enter(Job *job) | ||
139 | * called explicitly. */ | ||
140 | static void coroutine_fn job_do_yield(Job *job, uint64_t ns) | ||
141 | { | ||
142 | - job_lock(); | ||
143 | + real_job_lock(); | ||
144 | if (ns != -1) { | ||
145 | timer_mod(&job->sleep_timer, ns); | ||
146 | } | ||
147 | job->busy = false; | ||
148 | job_event_idle(job); | ||
149 | - job_unlock(); | ||
150 | + real_job_unlock(); | ||
151 | qemu_coroutine_yield(); | ||
152 | |||
153 | /* Set by job_enter_cond() before re-entering the coroutine. */ | ||
123 | -- | 154 | -- |
124 | 2.30.2 | 155 | 2.37.3 |
125 | |||
126 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> | ||
1 | 2 | ||
3 | Categorize the fields in struct Job to understand which ones | ||
4 | need to be protected by the job mutex and which don't. | ||
5 | |||
6 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> | ||
7 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
8 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
9 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
10 | Message-Id: <20220926093214.506243-3-eesposit@redhat.com> | ||
11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
12 | --- | ||
13 | include/qemu/job.h | 61 +++++++++++++++++++++++++++------------------- | ||
14 | 1 file changed, 36 insertions(+), 25 deletions(-) | ||
15 | |||
16 | diff --git a/include/qemu/job.h b/include/qemu/job.h | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/include/qemu/job.h | ||
19 | +++ b/include/qemu/job.h | ||
20 | @@ -XXX,XX +XXX,XX @@ typedef struct JobTxn JobTxn; | ||
21 | * Long-running operation. | ||
22 | */ | ||
23 | typedef struct Job { | ||
24 | + | ||
25 | + /* Fields set at initialization (job_create), and never modified */ | ||
26 | + | ||
27 | /** The ID of the job. May be NULL for internal jobs. */ | ||
28 | char *id; | ||
29 | |||
30 | - /** The type of this job. */ | ||
31 | + /** | ||
32 | + * The type of this job. | ||
33 | + * All callbacks are called with job_mutex *not* held. | ||
34 | + */ | ||
35 | const JobDriver *driver; | ||
36 | |||
37 | - /** Reference count of the block job */ | ||
38 | - int refcnt; | ||
39 | - | ||
40 | - /** Current state; See @JobStatus for details. */ | ||
41 | - JobStatus status; | ||
42 | - | ||
43 | - /** AioContext to run the job coroutine in */ | ||
44 | - AioContext *aio_context; | ||
45 | - | ||
46 | /** | ||
47 | * The coroutine that executes the job. If not NULL, it is reentered when | ||
48 | * busy is false and the job is cancelled. | ||
49 | + * Initialized in job_start() | ||
50 | */ | ||
51 | Coroutine *co; | ||
52 | |||
53 | + /** True if this job should automatically finalize itself */ | ||
54 | + bool auto_finalize; | ||
55 | + | ||
56 | + /** True if this job should automatically dismiss itself */ | ||
57 | + bool auto_dismiss; | ||
58 | + | ||
59 | + /** The completion function that will be called when the job completes. */ | ||
60 | + BlockCompletionFunc *cb; | ||
61 | + | ||
62 | + /** The opaque value that is passed to the completion function. */ | ||
63 | + void *opaque; | ||
64 | + | ||
65 | + /* ProgressMeter API is thread-safe */ | ||
66 | + ProgressMeter progress; | ||
67 | + | ||
68 | + | ||
69 | + /** Protected by AioContext lock */ | ||
70 | + | ||
71 | + /** AioContext to run the job coroutine in */ | ||
72 | + AioContext *aio_context; | ||
73 | + | ||
74 | + /** Reference count of the block job */ | ||
75 | + int refcnt; | ||
76 | + | ||
77 | + /** Current state; See @JobStatus for details. */ | ||
78 | + JobStatus status; | ||
79 | + | ||
80 | /** | ||
81 | * Timer that is used by @job_sleep_ns. Accessed under job_mutex (in | ||
82 | * job.c). | ||
83 | @@ -XXX,XX +XXX,XX @@ typedef struct Job { | ||
84 | /** Set to true when the job has deferred work to the main loop. */ | ||
85 | bool deferred_to_main_loop; | ||
86 | |||
87 | - /** True if this job should automatically finalize itself */ | ||
88 | - bool auto_finalize; | ||
89 | - | ||
90 | - /** True if this job should automatically dismiss itself */ | ||
91 | - bool auto_dismiss; | ||
92 | - | ||
93 | - ProgressMeter progress; | ||
94 | - | ||
95 | /** | ||
96 | * Return code from @run and/or @prepare callback(s). | ||
97 | * Not final until the job has reached the CONCLUDED status. | ||
98 | @@ -XXX,XX +XXX,XX @@ typedef struct Job { | ||
99 | */ | ||
100 | Error *err; | ||
101 | |||
102 | - /** The completion function that will be called when the job completes. */ | ||
103 | - BlockCompletionFunc *cb; | ||
104 | - | ||
105 | - /** The opaque value that is passed to the completion function. */ | ||
106 | - void *opaque; | ||
107 | - | ||
108 | /** Notifiers called when a cancelled job is finalised */ | ||
109 | NotifierList on_finalize_cancelled; | ||
110 | |||
111 | @@ -XXX,XX +XXX,XX @@ typedef struct Job { | ||
112 | |||
113 | /** | ||
114 | * Callbacks and other information about a Job driver. | ||
115 | + * All callbacks are invoked with job_mutex *not* held. | ||
116 | */ | ||
117 | struct JobDriver { | ||
118 | |||
119 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn job_yield(Job *job); | ||
120 | */ | ||
121 | void coroutine_fn job_sleep_ns(Job *job, int64_t ns); | ||
122 | |||
123 | - | ||
124 | /** Returns the JobType of a given Job. */ | ||
125 | JobType job_type(const Job *job); | ||
126 | |||
127 | -- | ||
128 | 2.37.3 | diff view generated by jsdifflib |
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 3 | job_event_* functions can all be static, as they are not used |
4 | outside job.c. | ||
5 | |||
6 | Same applies for job_txn_add_job(). | ||
7 | |||
8 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> | ||
9 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
10 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
4 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
5 | Message-Id: <20210428151804.439460-23-vsementsov@virtuozzo.com> | 12 | Message-Id: <20220926093214.506243-4-eesposit@redhat.com> |
6 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 13 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
7 | --- | 14 | --- |
8 | block.c | 84 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-- | 15 | include/qemu/job.h | 18 ------------------ |
9 | 1 file changed, 82 insertions(+), 2 deletions(-) | 16 | job.c | 22 +++++++++++++++++++--- |
17 | 2 files changed, 19 insertions(+), 21 deletions(-) | ||
10 | 18 | ||
11 | diff --git a/block.c b/block.c | 19 | diff --git a/include/qemu/job.h b/include/qemu/job.h |
12 | index XXXXXXX..XXXXXXX 100644 | 20 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/block.c | 21 | --- a/include/qemu/job.h |
14 | +++ b/block.c | 22 | +++ b/include/qemu/job.h |
15 | @@ -XXX,XX +XXX,XX @@ static void bdrv_replace_child(BdrvChild *child, BlockDriverState *new_bs) | 23 | @@ -XXX,XX +XXX,XX @@ JobTxn *job_txn_new(void); |
24 | */ | ||
25 | void job_txn_unref(JobTxn *txn); | ||
26 | |||
27 | -/** | ||
28 | - * @txn: The transaction (may be NULL) | ||
29 | - * @job: Job to add to the transaction | ||
30 | - * | ||
31 | - * Add @job to the transaction. The @job must not already be in a transaction. | ||
32 | - * The caller must call either job_txn_unref() or job_completed() to release | ||
33 | - * the reference that is automatically grabbed here. | ||
34 | - * | ||
35 | - * If @txn is NULL, the function does nothing. | ||
36 | - */ | ||
37 | -void job_txn_add_job(JobTxn *txn, Job *job); | ||
38 | - | ||
39 | /** | ||
40 | * Create a new long-running job and return it. | ||
41 | * | ||
42 | @@ -XXX,XX +XXX,XX @@ void job_progress_set_remaining(Job *job, uint64_t remaining); | ||
43 | */ | ||
44 | void job_progress_increase_remaining(Job *job, uint64_t delta); | ||
45 | |||
46 | -/** To be called when a cancelled job is finalised. */ | ||
47 | -void job_event_cancelled(Job *job); | ||
48 | - | ||
49 | -/** To be called when a successfully completed job is finalised. */ | ||
50 | -void job_event_completed(Job *job); | ||
51 | - | ||
52 | /** | ||
53 | * Conditionally enter the job coroutine if the job is ready to run, not | ||
54 | * already busy and fn() returns true. fn() is called while under the job_lock | ||
55 | diff --git a/job.c b/job.c | ||
56 | index XXXXXXX..XXXXXXX 100644 | ||
57 | --- a/job.c | ||
58 | +++ b/job.c | ||
59 | @@ -XXX,XX +XXX,XX @@ void job_txn_unref(JobTxn *txn) | ||
16 | } | 60 | } |
17 | } | 61 | } |
18 | 62 | ||
19 | +static void bdrv_child_free(void *opaque) | 63 | -void job_txn_add_job(JobTxn *txn, Job *job) |
20 | +{ | 64 | +/** |
21 | + BdrvChild *c = opaque; | 65 | + * @txn: The transaction (may be NULL) |
22 | + | 66 | + * @job: Job to add to the transaction |
23 | + g_free(c->name); | 67 | + * |
24 | + g_free(c); | 68 | + * Add @job to the transaction. The @job must not already be in a transaction. |
25 | +} | 69 | + * The caller must call either job_txn_unref() or job_completed() to release |
26 | + | 70 | + * the reference that is automatically grabbed here. |
27 | static void bdrv_remove_empty_child(BdrvChild *child) | 71 | + * |
72 | + * If @txn is NULL, the function does nothing. | ||
73 | + */ | ||
74 | +static void job_txn_add_job(JobTxn *txn, Job *job) | ||
28 | { | 75 | { |
29 | assert(!child->bs); | 76 | if (!txn) { |
30 | QLIST_SAFE_REMOVE(child, next); | 77 | return; |
31 | - g_free(child->name); | 78 | @@ -XXX,XX +XXX,XX @@ void job_progress_increase_remaining(Job *job, uint64_t delta) |
32 | - g_free(child); | 79 | progress_increase_remaining(&job->progress, delta); |
33 | + bdrv_child_free(child); | ||
34 | } | 80 | } |
35 | 81 | ||
36 | typedef struct BdrvAttachChildCommonState { | 82 | -void job_event_cancelled(Job *job) |
37 | @@ -XXX,XX +XXX,XX @@ static bool should_update_child(BdrvChild *c, BlockDriverState *to) | 83 | +/** |
38 | return ret; | 84 | + * To be called when a cancelled job is finalised. |
85 | + */ | ||
86 | +static void job_event_cancelled(Job *job) | ||
87 | { | ||
88 | notifier_list_notify(&job->on_finalize_cancelled, job); | ||
39 | } | 89 | } |
40 | 90 | ||
41 | +typedef struct BdrvRemoveFilterOrCowChild { | 91 | -void job_event_completed(Job *job) |
42 | + BdrvChild *child; | 92 | +/** |
43 | + bool is_backing; | 93 | + * To be called when a successfully completed job is finalised. |
44 | +} BdrvRemoveFilterOrCowChild; | ||
45 | + | ||
46 | +static void bdrv_remove_filter_or_cow_child_abort(void *opaque) | ||
47 | +{ | ||
48 | + BdrvRemoveFilterOrCowChild *s = opaque; | ||
49 | + BlockDriverState *parent_bs = s->child->opaque; | ||
50 | + | ||
51 | + QLIST_INSERT_HEAD(&parent_bs->children, s->child, next); | ||
52 | + if (s->is_backing) { | ||
53 | + parent_bs->backing = s->child; | ||
54 | + } else { | ||
55 | + parent_bs->file = s->child; | ||
56 | + } | ||
57 | + | ||
58 | + /* | ||
59 | + * We don't have to restore child->bs here to undo bdrv_replace_child() | ||
60 | + * because that function is transactionable and it registered its own completion | ||
61 | + * entries in @tran, so .abort() for bdrv_replace_child_safe() will be | ||
62 | + * called automatically. | ||
63 | + */ | ||
64 | +} | ||
65 | + | ||
66 | +static void bdrv_remove_filter_or_cow_child_commit(void *opaque) | ||
67 | +{ | ||
68 | + BdrvRemoveFilterOrCowChild *s = opaque; | ||
69 | + | ||
70 | + bdrv_child_free(s->child); | ||
71 | +} | ||
72 | + | ||
73 | +static TransactionActionDrv bdrv_remove_filter_or_cow_child_drv = { | ||
74 | + .abort = bdrv_remove_filter_or_cow_child_abort, | ||
75 | + .commit = bdrv_remove_filter_or_cow_child_commit, | ||
76 | + .clean = g_free, | ||
77 | +}; | ||
78 | + | ||
79 | +/* | ||
80 | + * A function to remove backing-chain child of @bs if exists: cow child for | ||
81 | + * format nodes (always .backing) and filter child for filters (may be .file or | ||
82 | + * .backing) | ||
83 | + */ | 94 | + */ |
84 | +__attribute__((unused)) | 95 | +static void job_event_completed(Job *job) |
85 | +static void bdrv_remove_filter_or_cow_child(BlockDriverState *bs, | 96 | { |
86 | + Transaction *tran) | 97 | notifier_list_notify(&job->on_finalize_completed, job); |
87 | +{ | 98 | } |
88 | + BdrvRemoveFilterOrCowChild *s; | ||
89 | + BdrvChild *child = bdrv_filter_or_cow_child(bs); | ||
90 | + | ||
91 | + if (!child) { | ||
92 | + return; | ||
93 | + } | ||
94 | + | ||
95 | + if (child->bs) { | ||
96 | + bdrv_replace_child_safe(child, NULL, tran); | ||
97 | + } | ||
98 | + | ||
99 | + s = g_new(BdrvRemoveFilterOrCowChild, 1); | ||
100 | + *s = (BdrvRemoveFilterOrCowChild) { | ||
101 | + .child = child, | ||
102 | + .is_backing = (child == bs->backing), | ||
103 | + }; | ||
104 | + tran_add(tran, &bdrv_remove_filter_or_cow_child_drv, s); | ||
105 | + | ||
106 | + QLIST_SAFE_REMOVE(child, next); | ||
107 | + if (s->is_backing) { | ||
108 | + bs->backing = NULL; | ||
109 | + } else { | ||
110 | + bs->file = NULL; | ||
111 | + } | ||
112 | +} | ||
113 | + | ||
114 | static int bdrv_replace_node_noperm(BlockDriverState *from, | ||
115 | BlockDriverState *to, | ||
116 | bool auto_skip, Transaction *tran, | ||
117 | -- | 99 | -- |
118 | 2.30.2 | 100 | 2.37.3 |
119 | |||
120 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> | ||
1 | 2 | ||
3 | Same as the AIO_WAIT_WHILE macro, but if we are in the main loop, | ||
4 | do not release and then re-acquire ctx_'s AioContext lock. | ||
5 | |||
6 | Once all AioContext locks go away, this macro will replace | ||
7 | AIO_WAIT_WHILE. | ||
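
As an illustration (hypothetical caller, not part of this patch), the
unlocked variant is meant for code paths that hold no AioContext lock at
all, for example polling a completion flag from the main loop:

    bool done = false;

    /* start_work_elsewhere() is an assumed helper, not a real QEMU API:
     * it kicks off work in some iothread that eventually sets 'done'. */
    start_work_elsewhere(&done);

    /* No AioContext lock is held here, so nothing has to be released and
     * re-acquired while polling. */
    AIO_WAIT_WHILE_UNLOCKED(NULL, !qatomic_read(&done));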
8 | |||
9 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> | ||
10 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
11 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
12 | Message-Id: <20220926093214.506243-5-eesposit@redhat.com> | ||
13 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
14 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
15 | --- | ||
16 | include/block/aio-wait.h | 17 +++++++++++++---- | ||
17 | 1 file changed, 13 insertions(+), 4 deletions(-) | ||
18 | |||
19 | diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h | ||
20 | index XXXXXXX..XXXXXXX 100644 | ||
21 | --- a/include/block/aio-wait.h | ||
22 | +++ b/include/block/aio-wait.h | ||
23 | @@ -XXX,XX +XXX,XX @@ typedef struct { | ||
24 | extern AioWait global_aio_wait; | ||
25 | |||
26 | /** | ||
27 | - * AIO_WAIT_WHILE: | ||
28 | + * AIO_WAIT_WHILE_INTERNAL: | ||
29 | * @ctx: the aio context, or NULL if multiple aio contexts (for which the | ||
30 | * caller does not hold a lock) are involved in the polling condition. | ||
31 | * @cond: wait while this conditional expression is true | ||
32 | + * @unlock: whether to unlock and then lock again @ctx. This applies | ||
33 | + * only when waiting for another AioContext from the main loop. | ||
34 | + * Otherwise it's ignored. | ||
35 | * | ||
36 | * Wait while a condition is true. Use this to implement synchronous | ||
37 | * operations that require event loop activity. | ||
38 | @@ -XXX,XX +XXX,XX @@ extern AioWait global_aio_wait; | ||
39 | * wait on conditions between two IOThreads since that could lead to deadlock, | ||
40 | * go via the main loop instead. | ||
41 | */ | ||
42 | -#define AIO_WAIT_WHILE(ctx, cond) ({ \ | ||
43 | +#define AIO_WAIT_WHILE_INTERNAL(ctx, cond, unlock) ({ \ | ||
44 | bool waited_ = false; \ | ||
45 | AioWait *wait_ = &global_aio_wait; \ | ||
46 | AioContext *ctx_ = (ctx); \ | ||
47 | @@ -XXX,XX +XXX,XX @@ extern AioWait global_aio_wait; | ||
48 | assert(qemu_get_current_aio_context() == \ | ||
49 | qemu_get_aio_context()); \ | ||
50 | while ((cond)) { \ | ||
51 | - if (ctx_) { \ | ||
52 | + if (unlock && ctx_) { \ | ||
53 | aio_context_release(ctx_); \ | ||
54 | } \ | ||
55 | aio_poll(qemu_get_aio_context(), true); \ | ||
56 | - if (ctx_) { \ | ||
57 | + if (unlock && ctx_) { \ | ||
58 | aio_context_acquire(ctx_); \ | ||
59 | } \ | ||
60 | waited_ = true; \ | ||
61 | @@ -XXX,XX +XXX,XX @@ extern AioWait global_aio_wait; | ||
62 | qatomic_dec(&wait_->num_waiters); \ | ||
63 | waited_; }) | ||
64 | |||
65 | +#define AIO_WAIT_WHILE(ctx, cond) \ | ||
66 | + AIO_WAIT_WHILE_INTERNAL(ctx, cond, true) | ||
67 | + | ||
68 | +#define AIO_WAIT_WHILE_UNLOCKED(ctx, cond) \ | ||
69 | + AIO_WAIT_WHILE_INTERNAL(ctx, cond, false) | ||
70 | + | ||
71 | /** | ||
72 | * aio_wait_kick: | ||
73 | * Wake up the main thread if it is waiting on AIO_WAIT_WHILE(). During | ||
74 | -- | ||
75 | 2.37.3 | diff view generated by jsdifflib |
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | We are going to drop recursive bdrv_child_* functions, so stop using them | 3 | With "intact" we mean that all job.h functions implicitly |
4 | in bdrv_child_try_set_perm() as a first step. | 4 | take the lock. Therefore API callers are unmodified. |
5 | 5 | ||
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 6 | This means that: |
7 | - many static functions that will always be called with the job lock held | ||
8 | become _locked, and call _locked functions | ||
9 | - all public functions take the lock internally if needed, and call _locked | ||
10 | functions | ||
11 | - all public functions called internally by other functions in job.c will have a | ||
12 | _locked counterpart (sometimes public), to avoid deadlocks (job lock already taken). | ||
13 | These functions are not used for now. | ||
14 | - some public functions called only from external files (not job.c) do not | ||
15 | have a _locked() counterpart and take the lock inside. Others won't need | ||
16 | the lock at all because they use fields that are only set at initialization | ||
17 | and never modified. | ||
18 | |||
19 | job_{lock/unlock} is independent of real_job_{lock/unlock}. | ||
20 | |||
21 | Note: at this stage, job_{lock/unlock} and the job lock guard macros | ||
22 | are *nops*. | ||
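
The conversion pattern itself is mechanical; a self-contained toy sketch of
it (toy names and a pthread mutex instead of the real Job API, purely for
illustration):

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int state;                      /* stands in for Job fields */

    /* the _locked variant does the work; the caller must hold 'lock' */
    static int state_add_locked(int delta)
    {
        state += delta;
        return state;
    }

    /* the public name keeps its old signature and only adds the locking */
    int state_add(int delta)
    {
        int ret;

        pthread_mutex_lock(&lock);
        ret = state_add_locked(delta);
        pthread_mutex_unlock(&lock);
        return ret;
    }

In job.c the wrapper uses JOB_LOCK_GUARD() instead of explicit lock/unlock
calls, as the hunks below show.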
23 | |||
24 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> | ||
7 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 25 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
8 | Message-Id: <20210428151804.439460-12-vsementsov@virtuozzo.com> | 26 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> |
27 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
28 | Message-Id: <20220926093214.506243-6-eesposit@redhat.com> | ||
9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 29 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
10 | --- | 30 | --- |
11 | block.c | 14 ++++++++------ | 31 | include/qemu/job.h | 138 +++++++++- |
12 | 1 file changed, 8 insertions(+), 6 deletions(-) | 32 | job.c | 610 ++++++++++++++++++++++++++++++++------------- |
33 | 2 files changed, 561 insertions(+), 187 deletions(-) | ||
13 | 34 | ||
14 | diff --git a/block.c b/block.c | 35 | diff --git a/include/qemu/job.h b/include/qemu/job.h |
15 | index XXXXXXX..XXXXXXX 100644 | 36 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/block.c | 37 | --- a/include/qemu/job.h |
17 | +++ b/block.c | 38 | +++ b/include/qemu/job.h |
18 | @@ -XXX,XX +XXX,XX @@ int bdrv_child_try_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared, | 39 | @@ -XXX,XX +XXX,XX @@ JobTxn *job_txn_new(void); |
19 | Error **errp) | 40 | */ |
41 | void job_txn_unref(JobTxn *txn); | ||
42 | |||
43 | +/* | ||
44 | + * Same as job_txn_unref(), but called with job lock held. | ||
45 | + * Might release the lock temporarily. | ||
46 | + */ | ||
47 | +void job_txn_unref_locked(JobTxn *txn); | ||
48 | + | ||
49 | /** | ||
50 | * Create a new long-running job and return it. | ||
51 | + * Called with job_mutex *not* held. | ||
52 | * | ||
53 | * @job_id: The id of the newly-created job, or %NULL for internal jobs | ||
54 | * @driver: The class object for the newly-created job. | ||
55 | @@ -XXX,XX +XXX,XX @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn, | ||
56 | */ | ||
57 | void job_ref(Job *job); | ||
58 | |||
59 | +/* Same as job_ref(), but called with job lock held. */ | ||
60 | +void job_ref_locked(Job *job); | ||
61 | + | ||
62 | /** | ||
63 | * Release a reference that was previously acquired with job_ref() or | ||
64 | * job_create(). If it's the last reference to the object, it will be freed. | ||
65 | */ | ||
66 | void job_unref(Job *job); | ||
67 | |||
68 | +/* Same as job_unref(), but called with job lock held. */ | ||
69 | +void job_unref_locked(Job *job); | ||
70 | + | ||
71 | /** | ||
72 | * @job: The job that has made progress | ||
73 | * @done: How much progress the job made since the last call | ||
74 | * | ||
75 | * Updates the progress counter of the job. | ||
76 | + * | ||
77 | + * May be called with mutex held or not held. | ||
78 | */ | ||
79 | void job_progress_update(Job *job, uint64_t done); | ||
80 | |||
81 | @@ -XXX,XX +XXX,XX @@ void job_progress_update(Job *job, uint64_t done); | ||
82 | * | ||
83 | * Sets the expected end value of the progress counter of a job so that a | ||
84 | * completion percentage can be calculated when the progress is updated. | ||
85 | + * | ||
86 | + * May be called with mutex held or not held. | ||
87 | */ | ||
88 | void job_progress_set_remaining(Job *job, uint64_t remaining); | ||
89 | |||
90 | @@ -XXX,XX +XXX,XX @@ void job_progress_set_remaining(Job *job, uint64_t remaining); | ||
91 | * length before, and job_progress_update() afterwards. | ||
92 | * (So the operation acts as a parenthesis in regards to the main job | ||
93 | * operation running in background.) | ||
94 | + * | ||
95 | + * May be called with mutex held or not held. | ||
96 | */ | ||
97 | void job_progress_increase_remaining(Job *job, uint64_t delta); | ||
98 | |||
99 | @@ -XXX,XX +XXX,XX @@ void job_progress_increase_remaining(Job *job, uint64_t delta); | ||
100 | */ | ||
101 | void job_enter_cond(Job *job, bool(*fn)(Job *job)); | ||
102 | |||
103 | +/* | ||
104 | + * Same as job_enter_cond(), but called with job lock held. | ||
105 | + * Might release the lock temporarily. | ||
106 | + */ | ||
107 | +void job_enter_cond_locked(Job *job, bool(*fn)(Job *job)); | ||
108 | + | ||
109 | /** | ||
110 | * @job: A job that has not yet been started. | ||
111 | * | ||
112 | * Begins execution of a job. | ||
113 | * Takes ownership of one reference to the job object. | ||
114 | + * | ||
115 | + * Called with job_mutex *not* held. | ||
116 | */ | ||
117 | void job_start(Job *job); | ||
118 | |||
119 | @@ -XXX,XX +XXX,XX @@ void job_start(Job *job); | ||
120 | * @job: The job to enter. | ||
121 | * | ||
122 | * Continue the specified job by entering the coroutine. | ||
123 | + * Called with job_mutex *not* held. | ||
124 | */ | ||
125 | void job_enter(Job *job); | ||
126 | |||
127 | @@ -XXX,XX +XXX,XX @@ void job_enter(Job *job); | ||
128 | * | ||
129 | * Pause now if job_pause() has been called. Jobs that perform lots of I/O | ||
130 | * must call this between requests so that the job can be paused. | ||
131 | + * | ||
132 | + * Called with job_mutex *not* held. | ||
133 | */ | ||
134 | void coroutine_fn job_pause_point(Job *job); | ||
135 | |||
136 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn job_pause_point(Job *job); | ||
137 | * @job: The job that calls the function. | ||
138 | * | ||
139 | * Yield the job coroutine. | ||
140 | + * Called with job_mutex *not* held. | ||
141 | */ | ||
142 | void coroutine_fn job_yield(Job *job); | ||
143 | |||
144 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn job_yield(Job *job); | ||
145 | * Put the job to sleep (assuming that it wasn't canceled) for @ns | ||
146 | * %QEMU_CLOCK_REALTIME nanoseconds. Canceling the job will immediately | ||
147 | * interrupt the wait. | ||
148 | + * | ||
149 | + * Called with job_mutex *not* held. | ||
150 | */ | ||
151 | void coroutine_fn job_sleep_ns(Job *job, int64_t ns); | ||
152 | |||
153 | @@ -XXX,XX +XXX,XX @@ const char *job_type_str(const Job *job); | ||
154 | /** Returns true if the job should not be visible to the management layer. */ | ||
155 | bool job_is_internal(Job *job); | ||
156 | |||
157 | -/** Returns whether the job is being cancelled. */ | ||
158 | +/** | ||
159 | + * Returns whether the job is being cancelled. | ||
160 | + * Called with job_mutex *not* held. | ||
161 | + */ | ||
162 | bool job_is_cancelled(Job *job); | ||
163 | |||
164 | +/* Same as job_is_cancelled(), but called with job lock held. */ | ||
165 | +bool job_is_cancelled_locked(Job *job); | ||
166 | + | ||
167 | /** | ||
168 | * Returns whether the job is scheduled for cancellation (at an | ||
169 | * indefinite point). | ||
170 | + * Called with job_mutex *not* held. | ||
171 | */ | ||
172 | bool job_cancel_requested(Job *job); | ||
173 | |||
174 | -/** Returns whether the job is in a completed state. */ | ||
175 | +/** | ||
176 | + * Returns whether the job is in a completed state. | ||
177 | + * Called with job_mutex *not* held. | ||
178 | + */ | ||
179 | bool job_is_completed(Job *job); | ||
180 | |||
181 | -/** Returns whether the job is ready to be completed. */ | ||
182 | +/* Same as job_is_completed(), but called with job lock held. */ | ||
183 | +bool job_is_completed_locked(Job *job); | ||
184 | + | ||
185 | +/** | ||
186 | + * Returns whether the job is ready to be completed. | ||
187 | + * Called with job_mutex *not* held. | ||
188 | + */ | ||
189 | bool job_is_ready(Job *job); | ||
190 | |||
191 | +/* Same as job_is_ready(), but called with job lock held. */ | ||
192 | +bool job_is_ready_locked(Job *job); | ||
193 | + | ||
194 | /** | ||
195 | * Request @job to pause at the next pause point. Must be paired with | ||
196 | * job_resume(). If the job is supposed to be resumed by user action, call | ||
197 | @@ -XXX,XX +XXX,XX @@ bool job_is_ready(Job *job); | ||
198 | */ | ||
199 | void job_pause(Job *job); | ||
200 | |||
201 | +/* Same as job_pause(), but called with job lock held. */ | ||
202 | +void job_pause_locked(Job *job); | ||
203 | + | ||
204 | /** Resumes a @job paused with job_pause. */ | ||
205 | void job_resume(Job *job); | ||
206 | |||
207 | +/* | ||
208 | + * Same as job_resume(), but called with job lock held. | ||
209 | + * Might release the lock temporarily. | ||
210 | + */ | ||
211 | +void job_resume_locked(Job *job); | ||
212 | + | ||
213 | /** | ||
214 | * Asynchronously pause the specified @job. | ||
215 | * Do not allow a resume until a matching call to job_user_resume. | ||
216 | */ | ||
217 | void job_user_pause(Job *job, Error **errp); | ||
218 | |||
219 | +/* Same as job_user_pause(), but called with job lock held. */ | ||
220 | +void job_user_pause_locked(Job *job, Error **errp); | ||
221 | + | ||
222 | /** Returns true if the job is user-paused. */ | ||
223 | bool job_user_paused(Job *job); | ||
224 | |||
225 | +/* Same as job_user_paused(), but called with job lock held. */ | ||
226 | +bool job_user_paused_locked(Job *job); | ||
227 | + | ||
228 | /** | ||
229 | * Resume the specified @job. | ||
230 | * Must be paired with a preceding job_user_pause. | ||
231 | */ | ||
232 | void job_user_resume(Job *job, Error **errp); | ||
233 | |||
234 | +/* | ||
235 | + * Same as job_user_resume(), but called with job lock held. | ||
236 | + * Might release the lock temporarily. | ||
237 | + */ | ||
238 | +void job_user_resume_locked(Job *job, Error **errp); | ||
239 | + | ||
240 | /** | ||
241 | * Get the next element from the list of block jobs after @job, or the | ||
242 | * first one if @job is %NULL. | ||
243 | @@ -XXX,XX +XXX,XX @@ void job_user_resume(Job *job, Error **errp); | ||
244 | */ | ||
245 | Job *job_next(Job *job); | ||
246 | |||
247 | +/* Same as job_next(), but called with job lock held. */ | ||
248 | +Job *job_next_locked(Job *job); | ||
249 | + | ||
250 | /** | ||
251 | * Get the job identified by @id (which must not be %NULL). | ||
252 | * | ||
253 | @@ -XXX,XX +XXX,XX @@ Job *job_next(Job *job); | ||
254 | */ | ||
255 | Job *job_get(const char *id); | ||
256 | |||
257 | +/* Same as job_get(), but called with job lock held. */ | ||
258 | +Job *job_get_locked(const char *id); | ||
259 | + | ||
260 | /** | ||
261 | * Check whether the verb @verb can be applied to @job in its current state. | ||
262 | * Returns 0 if the verb can be applied; otherwise errp is set and -EPERM | ||
263 | @@ -XXX,XX +XXX,XX @@ Job *job_get(const char *id); | ||
264 | */ | ||
265 | int job_apply_verb(Job *job, JobVerb verb, Error **errp); | ||
266 | |||
267 | -/** The @job could not be started, free it. */ | ||
268 | +/* Same as job_apply_verb, but called with job lock held. */ | ||
269 | +int job_apply_verb_locked(Job *job, JobVerb verb, Error **errp); | ||
270 | + | ||
271 | +/** | ||
272 | + * The @job could not be started, free it. | ||
273 | + * Called with job_mutex *not* held. | ||
274 | + */ | ||
275 | void job_early_fail(Job *job); | ||
276 | |||
277 | -/** Moves the @job from RUNNING to READY */ | ||
278 | +/** | ||
279 | + * Moves the @job from RUNNING to READY. | ||
280 | + * Called with job_mutex *not* held. | ||
281 | + */ | ||
282 | void job_transition_to_ready(Job *job); | ||
283 | |||
284 | /** Asynchronously complete the specified @job. */ | ||
285 | void job_complete(Job *job, Error **errp); | ||
286 | |||
287 | +/* | ||
288 | + * Same as job_complete(), but called with job lock held. | ||
289 | + * Might release the lock temporarily. | ||
290 | + */ | ||
291 | +void job_complete_locked(Job *job, Error **errp); | ||
292 | + | ||
293 | /** | ||
294 | * Asynchronously cancel the specified @job. If @force is true, the job should | ||
295 | * be cancelled immediately without waiting for a consistent state. | ||
296 | */ | ||
297 | void job_cancel(Job *job, bool force); | ||
298 | |||
299 | +/* Same as job_cancel(), but called with job lock held. */ | ||
300 | +void job_cancel_locked(Job *job, bool force); | ||
301 | + | ||
302 | /** | ||
303 | * Cancels the specified job like job_cancel(), but may refuse to do so if the | ||
304 | * operation isn't meaningful in the current state of the job. | ||
305 | */ | ||
306 | void job_user_cancel(Job *job, bool force, Error **errp); | ||
307 | |||
308 | +/* Same as job_user_cancel(), but called with job lock held. */ | ||
309 | +void job_user_cancel_locked(Job *job, bool force, Error **errp); | ||
310 | + | ||
311 | /** | ||
312 | * Synchronously cancel the @job. The completion callback is called | ||
313 | * before the function returns. If @force is false, the job may | ||
314 | @@ -XXX,XX +XXX,XX @@ void job_user_cancel(Job *job, bool force, Error **errp); | ||
315 | */ | ||
316 | int job_cancel_sync(Job *job, bool force); | ||
317 | |||
318 | -/** Synchronously force-cancels all jobs using job_cancel_sync(). */ | ||
319 | +/* Same as job_cancel_sync, but called with job lock held. */ | ||
320 | +int job_cancel_sync_locked(Job *job, bool force); | ||
321 | + | ||
322 | +/** | ||
323 | + * Synchronously force-cancels all jobs using job_cancel_sync_locked(). | ||
324 | + * | ||
325 | + * Called with job_lock *not* held. | ||
326 | + */ | ||
327 | void job_cancel_sync_all(void); | ||
328 | |||
329 | /** | ||
330 | @@ -XXX,XX +XXX,XX @@ void job_cancel_sync_all(void); | ||
331 | */ | ||
332 | int job_complete_sync(Job *job, Error **errp); | ||
333 | |||
334 | +/* Same as job_complete_sync, but called with job lock held. */ | ||
335 | +int job_complete_sync_locked(Job *job, Error **errp); | ||
336 | + | ||
337 | /** | ||
338 | * For a @job that has finished its work and is pending awaiting explicit | ||
339 | * acknowledgement to commit its work, this will commit that work. | ||
340 | @@ -XXX,XX +XXX,XX @@ int job_complete_sync(Job *job, Error **errp); | ||
341 | */ | ||
342 | void job_finalize(Job *job, Error **errp); | ||
343 | |||
344 | +/* Same as job_finalize(), but called with job lock held. */ | ||
345 | +void job_finalize_locked(Job *job, Error **errp); | ||
346 | + | ||
347 | /** | ||
348 | * Remove the concluded @job from the query list and resets the passed pointer | ||
349 | * to %NULL. Returns an error if the job is not actually concluded. | ||
350 | */ | ||
351 | void job_dismiss(Job **job, Error **errp); | ||
352 | |||
353 | +/* Same as job_dismiss(), but called with job lock held. */ | ||
354 | +void job_dismiss_locked(Job **job, Error **errp); | ||
355 | + | ||
356 | /** | ||
357 | * Synchronously finishes the given @job. If @finish is given, it is called to | ||
358 | * trigger completion or cancellation of the job. | ||
359 | @@ -XXX,XX +XXX,XX @@ void job_dismiss(Job **job, Error **errp); | ||
360 | * | ||
361 | * Callers must hold the AioContext lock of job->aio_context. | ||
362 | */ | ||
363 | -int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp); | ||
364 | +int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), | ||
365 | + Error **errp); | ||
366 | + | ||
367 | +/* | ||
368 | + * Same as job_finish_sync(), but called with job lock held. | ||
369 | + * Might release the lock temporarily. | ||
370 | + */ | ||
371 | +int job_finish_sync_locked(Job *job, void (*finish)(Job *, Error **errp), | ||
372 | + Error **errp); | ||
373 | |||
374 | #endif | ||
375 | diff --git a/job.c b/job.c | ||
376 | index XXXXXXX..XXXXXXX 100644 | ||
377 | --- a/job.c | ||
378 | +++ b/job.c | ||
379 | @@ -XXX,XX +XXX,XX @@ | ||
380 | */ | ||
381 | QemuMutex job_mutex; | ||
382 | |||
383 | +/* Protected by job_mutex */ | ||
384 | static QLIST_HEAD(, Job) jobs = QLIST_HEAD_INITIALIZER(jobs); | ||
385 | |||
386 | /* Job State Transition Table */ | ||
387 | @@ -XXX,XX +XXX,XX @@ JobTxn *job_txn_new(void) | ||
388 | return txn; | ||
389 | } | ||
390 | |||
391 | -static void job_txn_ref(JobTxn *txn) | ||
392 | +/* Called with job_mutex held. */ | ||
393 | +static void job_txn_ref_locked(JobTxn *txn) | ||
394 | { | ||
395 | txn->refcnt++; | ||
396 | } | ||
397 | |||
398 | -void job_txn_unref(JobTxn *txn) | ||
399 | +void job_txn_unref_locked(JobTxn *txn) | ||
400 | { | ||
401 | if (txn && --txn->refcnt == 0) { | ||
402 | g_free(txn); | ||
403 | } | ||
404 | } | ||
405 | |||
406 | +void job_txn_unref(JobTxn *txn) | ||
407 | +{ | ||
408 | + JOB_LOCK_GUARD(); | ||
409 | + job_txn_unref_locked(txn); | ||
410 | +} | ||
411 | + | ||
412 | /** | ||
413 | * @txn: The transaction (may be NULL) | ||
414 | * @job: Job to add to the transaction | ||
415 | @@ -XXX,XX +XXX,XX @@ void job_txn_unref(JobTxn *txn) | ||
416 | * the reference that is automatically grabbed here. | ||
417 | * | ||
418 | * If @txn is NULL, the function does nothing. | ||
419 | + * | ||
420 | + * Called with job_mutex held. | ||
421 | */ | ||
422 | -static void job_txn_add_job(JobTxn *txn, Job *job) | ||
423 | +static void job_txn_add_job_locked(JobTxn *txn, Job *job) | ||
424 | { | ||
425 | if (!txn) { | ||
426 | return; | ||
427 | @@ -XXX,XX +XXX,XX @@ static void job_txn_add_job(JobTxn *txn, Job *job) | ||
428 | job->txn = txn; | ||
429 | |||
430 | QLIST_INSERT_HEAD(&txn->jobs, job, txn_list); | ||
431 | - job_txn_ref(txn); | ||
432 | + job_txn_ref_locked(txn); | ||
433 | } | ||
434 | |||
435 | -static void job_txn_del_job(Job *job) | ||
436 | +/* Called with job_mutex held. */ | ||
437 | +static void job_txn_del_job_locked(Job *job) | ||
438 | { | ||
439 | if (job->txn) { | ||
440 | QLIST_REMOVE(job, txn_list); | ||
441 | - job_txn_unref(job->txn); | ||
442 | + job_txn_unref_locked(job->txn); | ||
443 | job->txn = NULL; | ||
444 | } | ||
445 | } | ||
446 | |||
447 | -static int job_txn_apply(Job *job, int fn(Job *)) | ||
448 | +/* Called with job_mutex held, but releases it temporarily. */ | ||
449 | +static int job_txn_apply_locked(Job *job, int fn(Job *)) | ||
450 | { | ||
451 | AioContext *inner_ctx; | ||
452 | Job *other_job, *next; | ||
453 | @@ -XXX,XX +XXX,XX @@ static int job_txn_apply(Job *job, int fn(Job *)) | ||
454 | * we need to release it here to avoid holding the lock twice - which would | ||
455 | * break AIO_WAIT_WHILE from within fn. | ||
456 | */ | ||
457 | - job_ref(job); | ||
458 | + job_ref_locked(job); | ||
459 | aio_context_release(job->aio_context); | ||
460 | |||
461 | QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) { | ||
462 | @@ -XXX,XX +XXX,XX @@ static int job_txn_apply(Job *job, int fn(Job *)) | ||
463 | * can't use a local variable to cache it. | ||
464 | */ | ||
465 | aio_context_acquire(job->aio_context); | ||
466 | - job_unref(job); | ||
467 | + job_unref_locked(job); | ||
468 | return rc; | ||
469 | } | ||
470 | |||
471 | @@ -XXX,XX +XXX,XX @@ bool job_is_internal(Job *job) | ||
472 | return (job->id == NULL); | ||
473 | } | ||
474 | |||
475 | -static void job_state_transition(Job *job, JobStatus s1) | ||
476 | +/* Called with job_mutex held. */ | ||
477 | +static void job_state_transition_locked(Job *job, JobStatus s1) | ||
478 | { | ||
479 | JobStatus s0 = job->status; | ||
480 | assert(s1 >= 0 && s1 < JOB_STATUS__MAX); | ||
481 | @@ -XXX,XX +XXX,XX @@ static void job_state_transition(Job *job, JobStatus s1) | ||
482 | } | ||
483 | } | ||
484 | |||
485 | -int job_apply_verb(Job *job, JobVerb verb, Error **errp) | ||
486 | +int job_apply_verb_locked(Job *job, JobVerb verb, Error **errp) | ||
487 | { | ||
488 | JobStatus s0 = job->status; | ||
489 | assert(verb >= 0 && verb < JOB_VERB__MAX); | ||
490 | @@ -XXX,XX +XXX,XX @@ int job_apply_verb(Job *job, JobVerb verb, Error **errp) | ||
491 | return -EPERM; | ||
492 | } | ||
493 | |||
494 | +int job_apply_verb(Job *job, JobVerb verb, Error **errp) | ||
495 | +{ | ||
496 | + JOB_LOCK_GUARD(); | ||
497 | + return job_apply_verb_locked(job, verb, errp); | ||
498 | +} | ||
499 | + | ||
500 | JobType job_type(const Job *job) | ||
501 | { | ||
502 | return job->driver->job_type; | ||
503 | @@ -XXX,XX +XXX,XX @@ const char *job_type_str(const Job *job) | ||
504 | return JobType_str(job_type(job)); | ||
505 | } | ||
506 | |||
507 | -bool job_is_cancelled(Job *job) | ||
508 | +bool job_is_cancelled_locked(Job *job) | ||
509 | { | ||
510 | /* force_cancel may be true only if cancelled is true, too */ | ||
511 | assert(job->cancelled || !job->force_cancel); | ||
512 | return job->force_cancel; | ||
513 | } | ||
514 | |||
515 | -bool job_cancel_requested(Job *job) | ||
516 | +bool job_is_cancelled(Job *job) | ||
517 | +{ | ||
518 | + JOB_LOCK_GUARD(); | ||
519 | + return job_is_cancelled_locked(job); | ||
520 | +} | ||
521 | + | ||
522 | +/* Called with job_mutex held. */ | ||
523 | +static bool job_cancel_requested_locked(Job *job) | ||
524 | { | ||
525 | return job->cancelled; | ||
526 | } | ||
527 | |||
528 | -bool job_is_ready(Job *job) | ||
529 | +bool job_cancel_requested(Job *job) | ||
530 | +{ | ||
531 | + JOB_LOCK_GUARD(); | ||
532 | + return job_cancel_requested_locked(job); | ||
533 | +} | ||
534 | + | ||
535 | +bool job_is_ready_locked(Job *job) | ||
536 | { | ||
537 | switch (job->status) { | ||
538 | case JOB_STATUS_UNDEFINED: | ||
539 | @@ -XXX,XX +XXX,XX @@ bool job_is_ready(Job *job) | ||
540 | return false; | ||
541 | } | ||
542 | |||
543 | -bool job_is_completed(Job *job) | ||
544 | +bool job_is_ready(Job *job) | ||
545 | +{ | ||
546 | + JOB_LOCK_GUARD(); | ||
547 | + return job_is_ready_locked(job); | ||
548 | +} | ||
549 | + | ||
550 | +bool job_is_completed_locked(Job *job) | ||
551 | { | ||
552 | switch (job->status) { | ||
553 | case JOB_STATUS_UNDEFINED: | ||
554 | @@ -XXX,XX +XXX,XX @@ bool job_is_completed(Job *job) | ||
555 | return false; | ||
556 | } | ||
557 | |||
558 | -static bool job_started(Job *job) | ||
559 | +bool job_is_completed(Job *job) | ||
560 | +{ | ||
561 | + JOB_LOCK_GUARD(); | ||
562 | + return job_is_completed_locked(job); | ||
563 | +} | ||
564 | + | ||
565 | +static bool job_started_locked(Job *job) | ||
566 | { | ||
567 | return job->co; | ||
568 | } | ||
569 | |||
570 | -static bool job_should_pause(Job *job) | ||
571 | +/* Called with job_mutex held. */ | ||
572 | +static bool job_should_pause_locked(Job *job) | ||
573 | { | ||
574 | return job->pause_count > 0; | ||
575 | } | ||
576 | |||
577 | -Job *job_next(Job *job) | ||
578 | +Job *job_next_locked(Job *job) | ||
579 | { | ||
580 | if (!job) { | ||
581 | return QLIST_FIRST(&jobs); | ||
582 | @@ -XXX,XX +XXX,XX @@ Job *job_next(Job *job) | ||
583 | return QLIST_NEXT(job, job_list); | ||
584 | } | ||
585 | |||
586 | -Job *job_get(const char *id) | ||
587 | +Job *job_next(Job *job) | ||
588 | +{ | ||
589 | + JOB_LOCK_GUARD(); | ||
590 | + return job_next_locked(job); | ||
591 | +} | ||
592 | + | ||
593 | +Job *job_get_locked(const char *id) | ||
594 | { | ||
595 | Job *job; | ||
596 | |||
597 | @@ -XXX,XX +XXX,XX @@ Job *job_get(const char *id) | ||
598 | return NULL; | ||
599 | } | ||
600 | |||
601 | +Job *job_get(const char *id) | ||
602 | +{ | ||
603 | + JOB_LOCK_GUARD(); | ||
604 | + return job_get_locked(id); | ||
605 | +} | ||
606 | + | ||
607 | +/* Called with job_mutex *not* held. */ | ||
608 | static void job_sleep_timer_cb(void *opaque) | ||
609 | { | ||
610 | Job *job = opaque; | ||
611 | @@ -XXX,XX +XXX,XX @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn, | ||
612 | { | ||
613 | Job *job; | ||
614 | |||
615 | + JOB_LOCK_GUARD(); | ||
616 | + | ||
617 | if (job_id) { | ||
618 | if (flags & JOB_INTERNAL) { | ||
619 | error_setg(errp, "Cannot specify job ID for internal job"); | ||
620 | @@ -XXX,XX +XXX,XX @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn, | ||
621 | error_setg(errp, "Invalid job ID '%s'", job_id); | ||
622 | return NULL; | ||
623 | } | ||
624 | - if (job_get(job_id)) { | ||
625 | + if (job_get_locked(job_id)) { | ||
626 | error_setg(errp, "Job ID '%s' already in use", job_id); | ||
627 | return NULL; | ||
628 | } | ||
629 | @@ -XXX,XX +XXX,XX @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn, | ||
630 | notifier_list_init(&job->on_ready); | ||
631 | notifier_list_init(&job->on_idle); | ||
632 | |||
633 | - job_state_transition(job, JOB_STATUS_CREATED); | ||
634 | + job_state_transition_locked(job, JOB_STATUS_CREATED); | ||
635 | aio_timer_init(qemu_get_aio_context(), &job->sleep_timer, | ||
636 | QEMU_CLOCK_REALTIME, SCALE_NS, | ||
637 | job_sleep_timer_cb, job); | ||
638 | @@ -XXX,XX +XXX,XX @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn, | ||
639 | * consolidating the job management logic */ | ||
640 | if (!txn) { | ||
641 | txn = job_txn_new(); | ||
642 | - job_txn_add_job(txn, job); | ||
643 | - job_txn_unref(txn); | ||
644 | + job_txn_add_job_locked(txn, job); | ||
645 | + job_txn_unref_locked(txn); | ||
646 | } else { | ||
647 | - job_txn_add_job(txn, job); | ||
648 | + job_txn_add_job_locked(txn, job); | ||
649 | } | ||
650 | |||
651 | return job; | ||
652 | } | ||
653 | |||
654 | -void job_ref(Job *job) | ||
655 | +void job_ref_locked(Job *job) | ||
656 | { | ||
657 | ++job->refcnt; | ||
658 | } | ||
659 | |||
660 | -void job_unref(Job *job) | ||
661 | +void job_ref(Job *job) | ||
662 | +{ | ||
663 | + JOB_LOCK_GUARD(); | ||
664 | + job_ref_locked(job); | ||
665 | +} | ||
666 | + | ||
667 | +void job_unref_locked(Job *job) | ||
668 | { | ||
669 | GLOBAL_STATE_CODE(); | ||
670 | |||
671 | @@ -XXX,XX +XXX,XX @@ void job_unref(Job *job) | ||
672 | assert(!job->txn); | ||
673 | |||
674 | if (job->driver->free) { | ||
675 | + job_unlock(); | ||
676 | job->driver->free(job); | ||
677 | + job_lock(); | ||
678 | } | ||
679 | |||
680 | QLIST_REMOVE(job, job_list); | ||
681 | @@ -XXX,XX +XXX,XX @@ void job_unref(Job *job) | ||
682 | } | ||
683 | } | ||
684 | |||
685 | +void job_unref(Job *job) | ||
686 | +{ | ||
687 | + JOB_LOCK_GUARD(); | ||
688 | + job_unref_locked(job); | ||
689 | +} | ||
690 | + | ||
691 | void job_progress_update(Job *job, uint64_t done) | ||
692 | { | ||
693 | progress_work_done(&job->progress, done); | ||
694 | @@ -XXX,XX +XXX,XX @@ void job_progress_increase_remaining(Job *job, uint64_t delta) | ||
695 | |||
696 | /** | ||
697 | * To be called when a cancelled job is finalised. | ||
698 | + * Called with job_mutex held. | ||
699 | */ | ||
700 | -static void job_event_cancelled(Job *job) | ||
701 | +static void job_event_cancelled_locked(Job *job) | ||
702 | { | ||
703 | notifier_list_notify(&job->on_finalize_cancelled, job); | ||
704 | } | ||
705 | |||
706 | /** | ||
707 | * To be called when a successfully completed job is finalised. | ||
708 | + * Called with job_mutex held. | ||
709 | */ | ||
710 | -static void job_event_completed(Job *job) | ||
711 | +static void job_event_completed_locked(Job *job) | ||
712 | { | ||
713 | notifier_list_notify(&job->on_finalize_completed, job); | ||
714 | } | ||
715 | |||
716 | -static void job_event_pending(Job *job) | ||
717 | +/* Called with job_mutex held. */ | ||
718 | +static void job_event_pending_locked(Job *job) | ||
719 | { | ||
720 | notifier_list_notify(&job->on_pending, job); | ||
721 | } | ||
722 | |||
723 | -static void job_event_ready(Job *job) | ||
724 | +/* Called with job_mutex held. */ | ||
725 | +static void job_event_ready_locked(Job *job) | ||
726 | { | ||
727 | notifier_list_notify(&job->on_ready, job); | ||
728 | } | ||
729 | |||
730 | -static void job_event_idle(Job *job) | ||
731 | +/* Called with job_mutex held. */ | ||
732 | +static void job_event_idle_locked(Job *job) | ||
733 | { | ||
734 | notifier_list_notify(&job->on_idle, job); | ||
735 | } | ||
736 | |||
737 | -void job_enter_cond(Job *job, bool(*fn)(Job *job)) | ||
738 | +void job_enter_cond_locked(Job *job, bool(*fn)(Job *job)) | ||
739 | { | ||
740 | - if (!job_started(job)) { | ||
741 | + if (!job_started_locked(job)) { | ||
742 | return; | ||
743 | } | ||
744 | if (job->deferred_to_main_loop) { | ||
745 | @@ -XXX,XX +XXX,XX @@ void job_enter_cond(Job *job, bool(*fn)(Job *job)) | ||
746 | timer_del(&job->sleep_timer); | ||
747 | job->busy = true; | ||
748 | real_job_unlock(); | ||
749 | + job_unlock(); | ||
750 | aio_co_enter(job->aio_context, job->co); | ||
751 | + job_lock(); | ||
752 | +} | ||
753 | + | ||
754 | +void job_enter_cond(Job *job, bool(*fn)(Job *job)) | ||
755 | +{ | ||
756 | + JOB_LOCK_GUARD(); | ||
757 | + job_enter_cond_locked(job, fn); | ||
758 | } | ||
759 | |||
760 | void job_enter(Job *job) | ||
761 | { | ||
762 | - job_enter_cond(job, NULL); | ||
763 | + JOB_LOCK_GUARD(); | ||
764 | + job_enter_cond_locked(job, NULL); | ||
765 | } | ||
766 | |||
767 | /* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds. | ||
768 | @@ -XXX,XX +XXX,XX @@ void job_enter(Job *job) | ||
769 | * is allowed and cancels the timer. | ||
770 | * | ||
771 | * If @ns is (uint64_t) -1, no timer is scheduled and job_enter() must be | ||
772 | - * called explicitly. */ | ||
773 | -static void coroutine_fn job_do_yield(Job *job, uint64_t ns) | ||
774 | + * called explicitly. | ||
775 | + * | ||
776 | + * Called with job_mutex held, but releases it temporarily. | ||
777 | + */ | ||
778 | +static void coroutine_fn job_do_yield_locked(Job *job, uint64_t ns) | ||
779 | { | ||
780 | real_job_lock(); | ||
781 | if (ns != -1) { | ||
782 | timer_mod(&job->sleep_timer, ns); | ||
783 | } | ||
784 | job->busy = false; | ||
785 | - job_event_idle(job); | ||
786 | + job_event_idle_locked(job); | ||
787 | real_job_unlock(); | ||
788 | + job_unlock(); | ||
789 | qemu_coroutine_yield(); | ||
790 | + job_lock(); | ||
791 | |||
792 | /* Set by job_enter_cond() before re-entering the coroutine. */ | ||
793 | assert(job->busy); | ||
794 | } | ||
795 | |||
796 | -void coroutine_fn job_pause_point(Job *job) | ||
797 | +/* Called with job_mutex held, but releases it temporarily. */ | ||
798 | +static void coroutine_fn job_pause_point_locked(Job *job) | ||
799 | { | ||
800 | - assert(job && job_started(job)); | ||
801 | + assert(job && job_started_locked(job)); | ||
802 | |||
803 | - if (!job_should_pause(job)) { | ||
804 | + if (!job_should_pause_locked(job)) { | ||
805 | return; | ||
806 | } | ||
807 | - if (job_is_cancelled(job)) { | ||
808 | + if (job_is_cancelled_locked(job)) { | ||
809 | return; | ||
810 | } | ||
811 | |||
812 | if (job->driver->pause) { | ||
813 | + job_unlock(); | ||
814 | job->driver->pause(job); | ||
815 | + job_lock(); | ||
816 | } | ||
817 | |||
818 | - if (job_should_pause(job) && !job_is_cancelled(job)) { | ||
819 | + if (job_should_pause_locked(job) && !job_is_cancelled_locked(job)) { | ||
820 | JobStatus status = job->status; | ||
821 | - job_state_transition(job, status == JOB_STATUS_READY | ||
822 | - ? JOB_STATUS_STANDBY | ||
823 | - : JOB_STATUS_PAUSED); | ||
824 | + job_state_transition_locked(job, status == JOB_STATUS_READY | ||
825 | + ? JOB_STATUS_STANDBY | ||
826 | + : JOB_STATUS_PAUSED); | ||
827 | job->paused = true; | ||
828 | - job_do_yield(job, -1); | ||
829 | + job_do_yield_locked(job, -1); | ||
830 | job->paused = false; | ||
831 | - job_state_transition(job, status); | ||
832 | + job_state_transition_locked(job, status); | ||
833 | } | ||
834 | |||
835 | if (job->driver->resume) { | ||
836 | + job_unlock(); | ||
837 | job->driver->resume(job); | ||
838 | + job_lock(); | ||
839 | } | ||
840 | } | ||
841 | |||
842 | -void coroutine_fn job_yield(Job *job) | ||
843 | +void coroutine_fn job_pause_point(Job *job) | ||
844 | +{ | ||
845 | + JOB_LOCK_GUARD(); | ||
846 | + job_pause_point_locked(job); | ||
847 | +} | ||
848 | + | ||
849 | +static void coroutine_fn job_yield_locked(Job *job) | ||
850 | { | ||
851 | assert(job->busy); | ||
852 | |||
853 | /* Check cancellation *before* setting busy = false, too! */ | ||
854 | - if (job_is_cancelled(job)) { | ||
855 | + if (job_is_cancelled_locked(job)) { | ||
856 | return; | ||
857 | } | ||
858 | |||
859 | - if (!job_should_pause(job)) { | ||
860 | - job_do_yield(job, -1); | ||
861 | + if (!job_should_pause_locked(job)) { | ||
862 | + job_do_yield_locked(job, -1); | ||
863 | } | ||
864 | |||
865 | - job_pause_point(job); | ||
866 | + job_pause_point_locked(job); | ||
867 | +} | ||
868 | + | ||
869 | +void coroutine_fn job_yield(Job *job) | ||
870 | +{ | ||
871 | + JOB_LOCK_GUARD(); | ||
872 | + job_yield_locked(job); | ||
873 | } | ||
874 | |||
875 | void coroutine_fn job_sleep_ns(Job *job, int64_t ns) | ||
876 | { | ||
877 | + JOB_LOCK_GUARD(); | ||
878 | assert(job->busy); | ||
879 | |||
880 | /* Check cancellation *before* setting busy = false, too! */ | ||
881 | - if (job_is_cancelled(job)) { | ||
882 | + if (job_is_cancelled_locked(job)) { | ||
883 | return; | ||
884 | } | ||
885 | |||
886 | - if (!job_should_pause(job)) { | ||
887 | - job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns); | ||
888 | + if (!job_should_pause_locked(job)) { | ||
889 | + job_do_yield_locked(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns); | ||
890 | } | ||
891 | |||
892 | - job_pause_point(job); | ||
893 | + job_pause_point_locked(job); | ||
894 | } | ||
895 | |||
896 | -/* Assumes the block_job_mutex is held */ | ||
897 | -static bool job_timer_not_pending(Job *job) | ||
898 | +/* Assumes the job_mutex is held */ | ||
899 | +static bool job_timer_not_pending_locked(Job *job) | ||
900 | { | ||
901 | return !timer_pending(&job->sleep_timer); | ||
902 | } | ||
903 | |||
904 | -void job_pause(Job *job) | ||
905 | +void job_pause_locked(Job *job) | ||
906 | { | ||
907 | job->pause_count++; | ||
908 | if (!job->paused) { | ||
909 | - job_enter(job); | ||
910 | + job_enter_cond_locked(job, NULL); | ||
911 | } | ||
912 | } | ||
913 | |||
914 | -void job_resume(Job *job) | ||
915 | +void job_pause(Job *job) | ||
916 | +{ | ||
917 | + JOB_LOCK_GUARD(); | ||
918 | + job_pause_locked(job); | ||
919 | +} | ||
920 | + | ||
921 | +void job_resume_locked(Job *job) | ||
922 | { | ||
923 | assert(job->pause_count > 0); | ||
924 | job->pause_count--; | ||
925 | @@ -XXX,XX +XXX,XX @@ void job_resume(Job *job) | ||
926 | } | ||
927 | |||
928 | /* kick only if no timer is pending */ | ||
929 | - job_enter_cond(job, job_timer_not_pending); | ||
930 | + job_enter_cond_locked(job, job_timer_not_pending_locked); | ||
931 | } | ||
932 | |||
933 | -void job_user_pause(Job *job, Error **errp) | ||
934 | +void job_resume(Job *job) | ||
935 | { | ||
936 | - if (job_apply_verb(job, JOB_VERB_PAUSE, errp)) { | ||
937 | + JOB_LOCK_GUARD(); | ||
938 | + job_resume_locked(job); | ||
939 | +} | ||
940 | + | ||
941 | +void job_user_pause_locked(Job *job, Error **errp) | ||
942 | +{ | ||
943 | + if (job_apply_verb_locked(job, JOB_VERB_PAUSE, errp)) { | ||
944 | return; | ||
945 | } | ||
946 | if (job->user_paused) { | ||
947 | @@ -XXX,XX +XXX,XX @@ void job_user_pause(Job *job, Error **errp) | ||
948 | return; | ||
949 | } | ||
950 | job->user_paused = true; | ||
951 | - job_pause(job); | ||
952 | + job_pause_locked(job); | ||
953 | } | ||
954 | |||
955 | -bool job_user_paused(Job *job) | ||
956 | +void job_user_pause(Job *job, Error **errp) | ||
957 | +{ | ||
958 | + JOB_LOCK_GUARD(); | ||
959 | + job_user_pause_locked(job, errp); | ||
960 | +} | ||
961 | + | ||
962 | +bool job_user_paused_locked(Job *job) | ||
963 | { | ||
964 | return job->user_paused; | ||
965 | } | ||
966 | |||
967 | -void job_user_resume(Job *job, Error **errp) | ||
968 | +bool job_user_paused(Job *job) | ||
969 | +{ | ||
970 | + JOB_LOCK_GUARD(); | ||
971 | + return job_user_paused_locked(job); | ||
972 | +} | ||
973 | + | ||
974 | +void job_user_resume_locked(Job *job, Error **errp) | ||
975 | { | ||
976 | assert(job); | ||
977 | GLOBAL_STATE_CODE(); | ||
978 | @@ -XXX,XX +XXX,XX @@ void job_user_resume(Job *job, Error **errp) | ||
979 | error_setg(errp, "Can't resume a job that was not paused"); | ||
980 | return; | ||
981 | } | ||
982 | - if (job_apply_verb(job, JOB_VERB_RESUME, errp)) { | ||
983 | + if (job_apply_verb_locked(job, JOB_VERB_RESUME, errp)) { | ||
984 | return; | ||
985 | } | ||
986 | if (job->driver->user_resume) { | ||
987 | + job_unlock(); | ||
988 | job->driver->user_resume(job); | ||
989 | + job_lock(); | ||
990 | } | ||
991 | job->user_paused = false; | ||
992 | - job_resume(job); | ||
993 | + job_resume_locked(job); | ||
994 | } | ||
995 | |||
996 | -static void job_do_dismiss(Job *job) | ||
997 | +void job_user_resume(Job *job, Error **errp) | ||
998 | +{ | ||
999 | + JOB_LOCK_GUARD(); | ||
1000 | + job_user_resume_locked(job, errp); | ||
1001 | +} | ||
1002 | + | ||
1003 | +/* Called with job_mutex held, but releases it temporarily. */ | ||
1004 | +static void job_do_dismiss_locked(Job *job) | ||
1005 | { | ||
1006 | assert(job); | ||
1007 | job->busy = false; | ||
1008 | job->paused = false; | ||
1009 | job->deferred_to_main_loop = true; | ||
1010 | |||
1011 | - job_txn_del_job(job); | ||
1012 | + job_txn_del_job_locked(job); | ||
1013 | |||
1014 | - job_state_transition(job, JOB_STATUS_NULL); | ||
1015 | - job_unref(job); | ||
1016 | + job_state_transition_locked(job, JOB_STATUS_NULL); | ||
1017 | + job_unref_locked(job); | ||
1018 | } | ||
1019 | |||
1020 | -void job_dismiss(Job **jobptr, Error **errp) | ||
1021 | +void job_dismiss_locked(Job **jobptr, Error **errp) | ||
1022 | { | ||
1023 | Job *job = *jobptr; | ||
1024 | /* similarly to _complete, this is QMP-interface only. */ | ||
1025 | assert(job->id); | ||
1026 | - if (job_apply_verb(job, JOB_VERB_DISMISS, errp)) { | ||
1027 | + if (job_apply_verb_locked(job, JOB_VERB_DISMISS, errp)) { | ||
1028 | return; | ||
1029 | } | ||
1030 | |||
1031 | - job_do_dismiss(job); | ||
1032 | + job_do_dismiss_locked(job); | ||
1033 | *jobptr = NULL; | ||
1034 | } | ||
1035 | |||
1036 | +void job_dismiss(Job **jobptr, Error **errp) | ||
1037 | +{ | ||
1038 | + JOB_LOCK_GUARD(); | ||
1039 | + job_dismiss_locked(jobptr, errp); | ||
1040 | +} | ||
1041 | + | ||
1042 | void job_early_fail(Job *job) | ||
1043 | { | ||
1044 | + JOB_LOCK_GUARD(); | ||
1045 | assert(job->status == JOB_STATUS_CREATED); | ||
1046 | - job_do_dismiss(job); | ||
1047 | + job_do_dismiss_locked(job); | ||
1048 | } | ||
1049 | |||
1050 | -static void job_conclude(Job *job) | ||
1051 | +/* Called with job_mutex held. */ | ||
1052 | +static void job_conclude_locked(Job *job) | ||
1053 | { | ||
1054 | - job_state_transition(job, JOB_STATUS_CONCLUDED); | ||
1055 | - if (job->auto_dismiss || !job_started(job)) { | ||
1056 | - job_do_dismiss(job); | ||
1057 | + job_state_transition_locked(job, JOB_STATUS_CONCLUDED); | ||
1058 | + if (job->auto_dismiss || !job_started_locked(job)) { | ||
1059 | + job_do_dismiss_locked(job); | ||
1060 | } | ||
1061 | } | ||
1062 | |||
1063 | -static void job_update_rc(Job *job) | ||
1064 | +/* Called with job_mutex held. */ | ||
1065 | +static void job_update_rc_locked(Job *job) | ||
1066 | { | ||
1067 | - if (!job->ret && job_is_cancelled(job)) { | ||
1068 | + if (!job->ret && job_is_cancelled_locked(job)) { | ||
1069 | job->ret = -ECANCELED; | ||
1070 | } | ||
1071 | if (job->ret) { | ||
1072 | if (!job->err) { | ||
1073 | error_setg(&job->err, "%s", strerror(-job->ret)); | ||
1074 | } | ||
1075 | - job_state_transition(job, JOB_STATUS_ABORTING); | ||
1076 | + job_state_transition_locked(job, JOB_STATUS_ABORTING); | ||
1077 | } | ||
1078 | } | ||
1079 | |||
1080 | @@ -XXX,XX +XXX,XX @@ static void job_clean(Job *job) | ||
1081 | } | ||
1082 | } | ||
1083 | |||
1084 | -static int job_finalize_single(Job *job) | ||
1085 | +/* Called with job_mutex held, but releases it temporarily */ | ||
1086 | +static int job_finalize_single_locked(Job *job) | ||
1087 | { | ||
1088 | - assert(job_is_completed(job)); | ||
1089 | + int job_ret; | ||
1090 | + | ||
1091 | + assert(job_is_completed_locked(job)); | ||
1092 | |||
1093 | /* Ensure abort is called for late-transactional failures */ | ||
1094 | - job_update_rc(job); | ||
1095 | + job_update_rc_locked(job); | ||
1096 | + | ||
1097 | + job_ret = job->ret; | ||
1098 | + job_unlock(); | ||
1099 | |||
1100 | - if (!job->ret) { | ||
1101 | + if (!job_ret) { | ||
1102 | job_commit(job); | ||
1103 | } else { | ||
1104 | job_abort(job); | ||
1105 | } | ||
1106 | job_clean(job); | ||
1107 | |||
1108 | + job_lock(); | ||
1109 | + | ||
1110 | if (job->cb) { | ||
1111 | - job->cb(job->opaque, job->ret); | ||
1112 | + job_ret = job->ret; | ||
1113 | + job_unlock(); | ||
1114 | + job->cb(job->opaque, job_ret); | ||
1115 | + job_lock(); | ||
1116 | } | ||
1117 | |||
1118 | /* Emit events only if we actually started */ | ||
1119 | - if (job_started(job)) { | ||
1120 | - if (job_is_cancelled(job)) { | ||
1121 | - job_event_cancelled(job); | ||
1122 | + if (job_started_locked(job)) { | ||
1123 | + if (job_is_cancelled_locked(job)) { | ||
1124 | + job_event_cancelled_locked(job); | ||
1125 | } else { | ||
1126 | - job_event_completed(job); | ||
1127 | + job_event_completed_locked(job); | ||
1128 | } | ||
1129 | } | ||
1130 | |||
1131 | - job_txn_del_job(job); | ||
1132 | - job_conclude(job); | ||
1133 | + job_txn_del_job_locked(job); | ||
1134 | + job_conclude_locked(job); | ||
1135 | return 0; | ||
1136 | } | ||
1137 | |||
1138 | -static void job_cancel_async(Job *job, bool force) | ||
1139 | +/* Called with job_mutex held, but releases it temporarily */ | ||
1140 | +static void job_cancel_async_locked(Job *job, bool force) | ||
1141 | { | ||
1142 | GLOBAL_STATE_CODE(); | ||
1143 | if (job->driver->cancel) { | ||
1144 | + job_unlock(); | ||
1145 | force = job->driver->cancel(job, force); | ||
1146 | + job_lock(); | ||
1147 | } else { | ||
1148 | /* No .cancel() means the job will behave as if force-cancelled */ | ||
1149 | force = true; | ||
1150 | @@ -XXX,XX +XXX,XX @@ static void job_cancel_async(Job *job, bool force) | ||
1151 | if (job->user_paused) { | ||
1152 | /* Do not call job_enter here, the caller will handle it. */ | ||
1153 | if (job->driver->user_resume) { | ||
1154 | + job_unlock(); | ||
1155 | job->driver->user_resume(job); | ||
1156 | + job_lock(); | ||
1157 | } | ||
1158 | job->user_paused = false; | ||
1159 | assert(job->pause_count > 0); | ||
1160 | @@ -XXX,XX +XXX,XX @@ static void job_cancel_async(Job *job, bool force) | ||
1161 | } | ||
1162 | } | ||
1163 | |||
1164 | -static void job_completed_txn_abort(Job *job) | ||
1165 | +/* Called with job_mutex held, but releases it temporarily. */ | ||
1166 | +static void job_completed_txn_abort_locked(Job *job) | ||
1167 | { | ||
1168 | AioContext *ctx; | ||
1169 | JobTxn *txn = job->txn; | ||
1170 | @@ -XXX,XX +XXX,XX @@ static void job_completed_txn_abort(Job *job) | ||
1171 | return; | ||
1172 | } | ||
1173 | txn->aborting = true; | ||
1174 | - job_txn_ref(txn); | ||
1175 | + job_txn_ref_locked(txn); | ||
1176 | |||
1177 | /* | ||
1178 | * We can only hold the single job's AioContext lock while calling | ||
1179 | @@ -XXX,XX +XXX,XX @@ static void job_completed_txn_abort(Job *job) | ||
1180 | * calls of AIO_WAIT_WHILE(), which could deadlock otherwise. | ||
1181 | * Note that the job's AioContext may change when it is finalized. | ||
1182 | */ | ||
1183 | - job_ref(job); | ||
1184 | + job_ref_locked(job); | ||
1185 | aio_context_release(job->aio_context); | ||
1186 | |||
1187 | /* Other jobs are effectively cancelled by us, set the status for | ||
1188 | @@ -XXX,XX +XXX,XX @@ static void job_completed_txn_abort(Job *job) | ||
1189 | * Therefore, pass force=true to terminate all other jobs as quickly | ||
1190 | * as possible. | ||
1191 | */ | ||
1192 | - job_cancel_async(other_job, true); | ||
1193 | + job_cancel_async_locked(other_job, true); | ||
1194 | aio_context_release(ctx); | ||
1195 | } | ||
1196 | } | ||
1197 | @@ -XXX,XX +XXX,XX @@ static void job_completed_txn_abort(Job *job) | ||
1198 | */ | ||
1199 | ctx = other_job->aio_context; | ||
1200 | aio_context_acquire(ctx); | ||
1201 | - if (!job_is_completed(other_job)) { | ||
1202 | - assert(job_cancel_requested(other_job)); | ||
1203 | - job_finish_sync(other_job, NULL, NULL); | ||
1204 | + if (!job_is_completed_locked(other_job)) { | ||
1205 | + assert(job_cancel_requested_locked(other_job)); | ||
1206 | + job_finish_sync_locked(other_job, NULL, NULL); | ||
1207 | } | ||
1208 | - job_finalize_single(other_job); | ||
1209 | + job_finalize_single_locked(other_job); | ||
1210 | aio_context_release(ctx); | ||
1211 | } | ||
1212 | |||
1213 | @@ -XXX,XX +XXX,XX @@ static void job_completed_txn_abort(Job *job) | ||
1214 | * even if the job went away during job_finalize_single(). | ||
1215 | */ | ||
1216 | aio_context_acquire(job->aio_context); | ||
1217 | - job_unref(job); | ||
1218 | + job_unref_locked(job); | ||
1219 | |||
1220 | - job_txn_unref(txn); | ||
1221 | + job_txn_unref_locked(txn); | ||
1222 | } | ||
1223 | |||
1224 | -static int job_prepare(Job *job) | ||
1225 | +/* Called with job_mutex held, but releases it temporarily */ | ||
1226 | +static int job_prepare_locked(Job *job) | ||
1227 | { | ||
1228 | + int ret; | ||
1229 | + | ||
1230 | GLOBAL_STATE_CODE(); | ||
1231 | if (job->ret == 0 && job->driver->prepare) { | ||
1232 | - job->ret = job->driver->prepare(job); | ||
1233 | - job_update_rc(job); | ||
1234 | + job_unlock(); | ||
1235 | + ret = job->driver->prepare(job); | ||
1236 | + job_lock(); | ||
1237 | + job->ret = ret; | ||
1238 | + job_update_rc_locked(job); | ||
1239 | } | ||
1240 | return job->ret; | ||
1241 | } | ||
1242 | |||
1243 | -static int job_needs_finalize(Job *job) | ||
1244 | +/* Called with job_mutex held */ | ||
1245 | +static int job_needs_finalize_locked(Job *job) | ||
1246 | { | ||
1247 | return !job->auto_finalize; | ||
1248 | } | ||
1249 | |||
1250 | -static void job_do_finalize(Job *job) | ||
1251 | +/* Called with job_mutex held */ | ||
1252 | +static void job_do_finalize_locked(Job *job) | ||
1253 | { | ||
1254 | int rc; | ||
1255 | assert(job && job->txn); | ||
1256 | |||
1257 | /* prepare the transaction to complete */ | ||
1258 | - rc = job_txn_apply(job, job_prepare); | ||
1259 | + rc = job_txn_apply_locked(job, job_prepare_locked); | ||
1260 | if (rc) { | ||
1261 | - job_completed_txn_abort(job); | ||
1262 | + job_completed_txn_abort_locked(job); | ||
1263 | } else { | ||
1264 | - job_txn_apply(job, job_finalize_single); | ||
1265 | + job_txn_apply_locked(job, job_finalize_single_locked); | ||
1266 | } | ||
1267 | } | ||
1268 | |||
1269 | -void job_finalize(Job *job, Error **errp) | ||
1270 | +void job_finalize_locked(Job *job, Error **errp) | ||
1271 | { | ||
1272 | assert(job && job->id); | ||
1273 | - if (job_apply_verb(job, JOB_VERB_FINALIZE, errp)) { | ||
1274 | + if (job_apply_verb_locked(job, JOB_VERB_FINALIZE, errp)) { | ||
1275 | return; | ||
1276 | } | ||
1277 | - job_do_finalize(job); | ||
1278 | + job_do_finalize_locked(job); | ||
1279 | } | ||
1280 | |||
1281 | -static int job_transition_to_pending(Job *job) | ||
1282 | +void job_finalize(Job *job, Error **errp) | ||
1283 | { | ||
1284 | - job_state_transition(job, JOB_STATUS_PENDING); | ||
1285 | + JOB_LOCK_GUARD(); | ||
1286 | + job_finalize_locked(job, errp); | ||
1287 | +} | ||
1288 | + | ||
1289 | +/* Called with job_mutex held. */ | ||
1290 | +static int job_transition_to_pending_locked(Job *job) | ||
1291 | +{ | ||
1292 | + job_state_transition_locked(job, JOB_STATUS_PENDING); | ||
1293 | if (!job->auto_finalize) { | ||
1294 | - job_event_pending(job); | ||
1295 | + job_event_pending_locked(job); | ||
1296 | } | ||
1297 | return 0; | ||
1298 | } | ||
1299 | |||
1300 | void job_transition_to_ready(Job *job) | ||
1301 | { | ||
1302 | - job_state_transition(job, JOB_STATUS_READY); | ||
1303 | - job_event_ready(job); | ||
1304 | + JOB_LOCK_GUARD(); | ||
1305 | + job_state_transition_locked(job, JOB_STATUS_READY); | ||
1306 | + job_event_ready_locked(job); | ||
1307 | } | ||
1308 | |||
1309 | -static void job_completed_txn_success(Job *job) | ||
1310 | +/* Called with job_mutex held. */ | ||
1311 | +static void job_completed_txn_success_locked(Job *job) | ||
1312 | { | ||
1313 | JobTxn *txn = job->txn; | ||
1314 | Job *other_job; | ||
1315 | |||
1316 | - job_state_transition(job, JOB_STATUS_WAITING); | ||
1317 | + job_state_transition_locked(job, JOB_STATUS_WAITING); | ||
1318 | |||
1319 | /* | ||
1320 | * Successful completion, see if there are other running jobs in this | ||
1321 | * txn. | ||
1322 | */ | ||
1323 | QLIST_FOREACH(other_job, &txn->jobs, txn_list) { | ||
1324 | - if (!job_is_completed(other_job)) { | ||
1325 | + if (!job_is_completed_locked(other_job)) { | ||
1326 | return; | ||
1327 | } | ||
1328 | assert(other_job->ret == 0); | ||
1329 | } | ||
1330 | |||
1331 | - job_txn_apply(job, job_transition_to_pending); | ||
1332 | + job_txn_apply_locked(job, job_transition_to_pending_locked); | ||
1333 | |||
1334 | /* If no jobs need manual finalization, automatically do so */ | ||
1335 | - if (job_txn_apply(job, job_needs_finalize) == 0) { | ||
1336 | - job_do_finalize(job); | ||
1337 | + if (job_txn_apply_locked(job, job_needs_finalize_locked) == 0) { | ||
1338 | + job_do_finalize_locked(job); | ||
1339 | } | ||
1340 | } | ||
1341 | |||
1342 | -static void job_completed(Job *job) | ||
1343 | +/* Called with job_mutex held. */ | ||
1344 | +static void job_completed_locked(Job *job) | ||
1345 | { | ||
1346 | - assert(job && job->txn && !job_is_completed(job)); | ||
1347 | + assert(job && job->txn && !job_is_completed_locked(job)); | ||
1348 | |||
1349 | - job_update_rc(job); | ||
1350 | + job_update_rc_locked(job); | ||
1351 | trace_job_completed(job, job->ret); | ||
1352 | if (job->ret) { | ||
1353 | - job_completed_txn_abort(job); | ||
1354 | + job_completed_txn_abort_locked(job); | ||
1355 | } else { | ||
1356 | - job_completed_txn_success(job); | ||
1357 | + job_completed_txn_success_locked(job); | ||
1358 | } | ||
1359 | } | ||
1360 | |||
1361 | -/** Useful only as a type shim for aio_bh_schedule_oneshot. */ | ||
1362 | +/** | ||
1363 | + * Useful only as a type shim for aio_bh_schedule_oneshot. | ||
1364 | + * Called with job_mutex *not* held. | ||
1365 | + */ | ||
1366 | static void job_exit(void *opaque) | ||
1367 | { | ||
1368 | Job *job = (Job *)opaque; | ||
1369 | AioContext *ctx; | ||
1370 | + JOB_LOCK_GUARD(); | ||
1371 | |||
1372 | - job_ref(job); | ||
1373 | + job_ref_locked(job); | ||
1374 | aio_context_acquire(job->aio_context); | ||
1375 | |||
1376 | /* This is a lie, we're not quiescent, but still doing the completion | ||
1377 | @@ -XXX,XX +XXX,XX @@ static void job_exit(void *opaque) | ||
1378 | * drain block nodes, and if .drained_poll still returned true, we would | ||
1379 | * deadlock. */ | ||
1380 | job->busy = false; | ||
1381 | - job_event_idle(job); | ||
1382 | + job_event_idle_locked(job); | ||
1383 | |||
1384 | - job_completed(job); | ||
1385 | + job_completed_locked(job); | ||
1386 | |||
1387 | /* | ||
1388 | * Note that calling job_completed can move the job to a different | ||
1389 | @@ -XXX,XX +XXX,XX @@ static void job_exit(void *opaque) | ||
1390 | * the job underneath us. | ||
1391 | */ | ||
1392 | ctx = job->aio_context; | ||
1393 | - job_unref(job); | ||
1394 | + job_unref_locked(job); | ||
1395 | aio_context_release(ctx); | ||
1396 | } | ||
1397 | |||
1398 | @@ -XXX,XX +XXX,XX @@ static void job_exit(void *opaque) | ||
1399 | static void coroutine_fn job_co_entry(void *opaque) | ||
1400 | { | ||
1401 | Job *job = opaque; | ||
1402 | + int ret; | ||
1403 | |||
1404 | assert(job && job->driver && job->driver->run); | ||
1405 | - assert(job->aio_context == qemu_get_current_aio_context()); | ||
1406 | - job_pause_point(job); | ||
1407 | - job->ret = job->driver->run(job, &job->err); | ||
1408 | - job->deferred_to_main_loop = true; | ||
1409 | - job->busy = true; | ||
1410 | + WITH_JOB_LOCK_GUARD() { | ||
1411 | + assert(job->aio_context == qemu_get_current_aio_context()); | ||
1412 | + job_pause_point_locked(job); | ||
1413 | + } | ||
1414 | + ret = job->driver->run(job, &job->err); | ||
1415 | + WITH_JOB_LOCK_GUARD() { | ||
1416 | + job->ret = ret; | ||
1417 | + job->deferred_to_main_loop = true; | ||
1418 | + job->busy = true; | ||
1419 | + } | ||
1420 | aio_bh_schedule_oneshot(qemu_get_aio_context(), job_exit, job); | ||
1421 | } | ||
1422 | |||
1423 | void job_start(Job *job) | ||
1424 | { | ||
1425 | - assert(job && !job_started(job) && job->paused && | ||
1426 | - job->driver && job->driver->run); | ||
1427 | - job->co = qemu_coroutine_create(job_co_entry, job); | ||
1428 | - job->pause_count--; | ||
1429 | - job->busy = true; | ||
1430 | - job->paused = false; | ||
1431 | - job_state_transition(job, JOB_STATUS_RUNNING); | ||
1432 | + assert(qemu_in_main_thread()); | ||
1433 | + | ||
1434 | + WITH_JOB_LOCK_GUARD() { | ||
1435 | + assert(job && !job_started_locked(job) && job->paused && | ||
1436 | + job->driver && job->driver->run); | ||
1437 | + job->co = qemu_coroutine_create(job_co_entry, job); | ||
1438 | + job->pause_count--; | ||
1439 | + job->busy = true; | ||
1440 | + job->paused = false; | ||
1441 | + job_state_transition_locked(job, JOB_STATUS_RUNNING); | ||
1442 | + } | ||
1443 | aio_co_enter(job->aio_context, job->co); | ||
1444 | } | ||
1445 | |||
1446 | -void job_cancel(Job *job, bool force) | ||
1447 | +void job_cancel_locked(Job *job, bool force) | ||
1448 | { | ||
1449 | if (job->status == JOB_STATUS_CONCLUDED) { | ||
1450 | - job_do_dismiss(job); | ||
1451 | + job_do_dismiss_locked(job); | ||
1452 | return; | ||
1453 | } | ||
1454 | - job_cancel_async(job, force); | ||
1455 | - if (!job_started(job)) { | ||
1456 | - job_completed(job); | ||
1457 | + job_cancel_async_locked(job, force); | ||
1458 | + if (!job_started_locked(job)) { | ||
1459 | + job_completed_locked(job); | ||
1460 | } else if (job->deferred_to_main_loop) { | ||
1461 | /* | ||
1462 | * job_cancel_async() ignores soft-cancel requests for jobs | ||
1463 | @@ -XXX,XX +XXX,XX @@ void job_cancel(Job *job, bool force) | ||
1464 | * choose to call job_is_cancelled() to show that we invoke | ||
1465 | * job_completed_txn_abort() only for force-cancelled jobs.) | ||
1466 | */ | ||
1467 | - if (job_is_cancelled(job)) { | ||
1468 | - job_completed_txn_abort(job); | ||
1469 | + if (job_is_cancelled_locked(job)) { | ||
1470 | + job_completed_txn_abort_locked(job); | ||
1471 | } | ||
1472 | } else { | ||
1473 | - job_enter(job); | ||
1474 | + job_enter_cond_locked(job, NULL); | ||
1475 | } | ||
1476 | } | ||
1477 | |||
1478 | -void job_user_cancel(Job *job, bool force, Error **errp) | ||
1479 | +void job_cancel(Job *job, bool force) | ||
1480 | { | ||
1481 | - if (job_apply_verb(job, JOB_VERB_CANCEL, errp)) { | ||
1482 | + JOB_LOCK_GUARD(); | ||
1483 | + job_cancel_locked(job, force); | ||
1484 | +} | ||
1485 | + | ||
1486 | +void job_user_cancel_locked(Job *job, bool force, Error **errp) | ||
1487 | +{ | ||
1488 | + if (job_apply_verb_locked(job, JOB_VERB_CANCEL, errp)) { | ||
1489 | return; | ||
1490 | } | ||
1491 | - job_cancel(job, force); | ||
1492 | + job_cancel_locked(job, force); | ||
1493 | +} | ||
1494 | + | ||
1495 | +void job_user_cancel(Job *job, bool force, Error **errp) | ||
1496 | +{ | ||
1497 | + JOB_LOCK_GUARD(); | ||
1498 | + job_user_cancel_locked(job, force, errp); | ||
1499 | } | ||
1500 | |||
1501 | /* A wrapper around job_cancel() taking an Error ** parameter so it may be | ||
1502 | * used with job_finish_sync() without the need for (rather nasty) function | ||
1503 | - * pointer casts there. */ | ||
1504 | -static void job_cancel_err(Job *job, Error **errp) | ||
1505 | + * pointer casts there. | ||
1506 | + * | ||
1507 | + * Called with job_mutex held. | ||
1508 | + */ | ||
1509 | +static void job_cancel_err_locked(Job *job, Error **errp) | ||
1510 | { | ||
1511 | - job_cancel(job, false); | ||
1512 | + job_cancel_locked(job, false); | ||
1513 | } | ||
1514 | |||
1515 | /** | ||
1516 | * Same as job_cancel_err(), but force-cancel. | ||
1517 | + * Called with job_mutex held. | ||
1518 | */ | ||
1519 | -static void job_force_cancel_err(Job *job, Error **errp) | ||
1520 | +static void job_force_cancel_err_locked(Job *job, Error **errp) | ||
1521 | { | ||
1522 | - job_cancel(job, true); | ||
1523 | + job_cancel_locked(job, true); | ||
1524 | } | ||
1525 | |||
1526 | -int job_cancel_sync(Job *job, bool force) | ||
1527 | +int job_cancel_sync_locked(Job *job, bool force) | ||
1528 | { | ||
1529 | if (force) { | ||
1530 | - return job_finish_sync(job, &job_force_cancel_err, NULL); | ||
1531 | + return job_finish_sync_locked(job, &job_force_cancel_err_locked, NULL); | ||
1532 | } else { | ||
1533 | - return job_finish_sync(job, &job_cancel_err, NULL); | ||
1534 | + return job_finish_sync_locked(job, &job_cancel_err_locked, NULL); | ||
1535 | } | ||
1536 | } | ||
1537 | |||
1538 | +int job_cancel_sync(Job *job, bool force) | ||
1539 | +{ | ||
1540 | + JOB_LOCK_GUARD(); | ||
1541 | + return job_cancel_sync_locked(job, force); | ||
1542 | +} | ||
1543 | + | ||
1544 | void job_cancel_sync_all(void) | ||
1545 | { | ||
1546 | Job *job; | ||
1547 | AioContext *aio_context; | ||
1548 | + JOB_LOCK_GUARD(); | ||
1549 | |||
1550 | - while ((job = job_next(NULL))) { | ||
1551 | + while ((job = job_next_locked(NULL))) { | ||
1552 | aio_context = job->aio_context; | ||
1553 | aio_context_acquire(aio_context); | ||
1554 | - job_cancel_sync(job, true); | ||
1555 | + job_cancel_sync_locked(job, true); | ||
1556 | aio_context_release(aio_context); | ||
1557 | } | ||
1558 | } | ||
1559 | |||
1560 | +int job_complete_sync_locked(Job *job, Error **errp) | ||
1561 | +{ | ||
1562 | + return job_finish_sync_locked(job, job_complete_locked, errp); | ||
1563 | +} | ||
1564 | + | ||
1565 | int job_complete_sync(Job *job, Error **errp) | ||
1566 | { | ||
1567 | - return job_finish_sync(job, job_complete, errp); | ||
1568 | + JOB_LOCK_GUARD(); | ||
1569 | + return job_complete_sync_locked(job, errp); | ||
1570 | } | ||
1571 | |||
1572 | -void job_complete(Job *job, Error **errp) | ||
1573 | +void job_complete_locked(Job *job, Error **errp) | ||
1574 | { | ||
1575 | /* Should not be reachable via external interface for internal jobs */ | ||
1576 | assert(job->id); | ||
1577 | GLOBAL_STATE_CODE(); | ||
1578 | - if (job_apply_verb(job, JOB_VERB_COMPLETE, errp)) { | ||
1579 | + if (job_apply_verb_locked(job, JOB_VERB_COMPLETE, errp)) { | ||
1580 | return; | ||
1581 | } | ||
1582 | - if (job_cancel_requested(job) || !job->driver->complete) { | ||
1583 | + if (job_cancel_requested_locked(job) || !job->driver->complete) { | ||
1584 | error_setg(errp, "The active block job '%s' cannot be completed", | ||
1585 | job->id); | ||
1586 | return; | ||
1587 | } | ||
1588 | |||
1589 | + job_unlock(); | ||
1590 | job->driver->complete(job, errp); | ||
1591 | + job_lock(); | ||
1592 | } | ||
1593 | |||
1594 | -int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp) | ||
1595 | +void job_complete(Job *job, Error **errp) | ||
1596 | +{ | ||
1597 | + JOB_LOCK_GUARD(); | ||
1598 | + job_complete_locked(job, errp); | ||
1599 | +} | ||
1600 | + | ||
1601 | +int job_finish_sync_locked(Job *job, | ||
1602 | + void (*finish)(Job *, Error **errp), | ||
1603 | + Error **errp) | ||
20 | { | 1604 | { |
21 | Error *local_err = NULL; | 1605 | Error *local_err = NULL; |
22 | + Transaction *tran = tran_new(); | ||
23 | int ret; | 1606 | int ret; |
24 | 1607 | ||
25 | - ret = bdrv_child_check_perm(c, NULL, perm, shared, NULL, &local_err); | 1608 | - job_ref(job); |
26 | + bdrv_child_set_perm_safe(c, perm, shared, tran); | 1609 | + job_ref_locked(job); |
27 | + | 1610 | |
28 | + ret = bdrv_refresh_perms(c->bs, &local_err); | 1611 | if (finish) { |
29 | + | 1612 | finish(job, &local_err); |
30 | + tran_finalize(tran, ret); | 1613 | } |
31 | + | 1614 | if (local_err) { |
32 | if (ret < 0) { | 1615 | error_propagate(errp, local_err); |
33 | - bdrv_child_abort_perm_update(c); | 1616 | - job_unref(job); |
34 | if ((perm & ~c->perm) || (c->shared_perm & ~shared)) { | 1617 | + job_unref_locked(job); |
35 | /* tighten permissions */ | 1618 | return -EBUSY; |
36 | error_propagate(errp, local_err); | 1619 | } |
37 | @@ -XXX,XX +XXX,XX @@ int bdrv_child_try_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared, | 1620 | |
38 | error_free(local_err); | 1621 | + job_unlock(); |
39 | ret = 0; | 1622 | AIO_WAIT_WHILE(job->aio_context, |
40 | } | 1623 | (job_enter(job), !job_is_completed(job))); |
41 | - return ret; | 1624 | + job_lock(); |
42 | } | 1625 | |
43 | 1626 | - ret = (job_is_cancelled(job) && job->ret == 0) ? -ECANCELED : job->ret; | |
44 | - bdrv_child_set_perm(c); | 1627 | - job_unref(job); |
45 | - | 1628 | + ret = (job_is_cancelled_locked(job) && job->ret == 0) |
46 | - return 0; | 1629 | + ? -ECANCELED : job->ret; |
47 | + return ret; | 1630 | + job_unref_locked(job); |
48 | } | 1631 | return ret; |
49 | 1632 | } | |
50 | int bdrv_child_refresh_perms(BlockDriverState *bs, BdrvChild *c, Error **errp) | 1633 | + |
1634 | +int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp) | ||
1635 | +{ | ||
1636 | + JOB_LOCK_GUARD(); | ||
1637 | + return job_finish_sync_locked(job, finish, errp); | ||
1638 | +} | ||
51 | -- | 1639 | -- |
52 | 2.30.2 | 1640 | 2.37.3 |
53 | |||
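The job.c hunks above all apply one conversion pattern: each public job_*() function becomes a thin wrapper that takes job_mutex and delegates to a *_locked() variant, and driver callbacks run with the mutex temporarily dropped. A minimal sketch of that shape, assuming the Job type and the JOB_LOCK_GUARD()/job_lock()/job_unlock() primitives used in this series; the job_example_*() names are invented for illustration:

    #include "qemu/osdep.h"
    #include "qemu/job.h"

    /* Variant for callers that already hold job_mutex. */
    static bool job_example_is_busy_locked(Job *job)
    {
        return job->busy;
    }

    /* Public variant: takes job_mutex for the duration of the call. */
    bool job_example_is_busy(Job *job)
    {
        JOB_LOCK_GUARD();
        return job_example_is_busy_locked(job);
    }

    /*
     * Driver callbacks are not run under job_mutex, so the lock is dropped
     * around them and re-acquired afterwards, as in the job_unref_locked()
     * hunk above.
     */
    static void job_example_call_free_locked(Job *job)
    {
        if (job->driver->free) {
            job_unlock();
            job->driver->free(job);
            job_lock();
        }
    }

The same split is what the later blockjob patches build on, so that callers which already hold the lock can use the *_locked() entry points directly.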
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Add a simple transaction API to use in further updates of block graph | 3 | This comment applies more to job; it was left in blockjob because in the past |
4 | operations. | 4 | the whole job logic was implemented there. |
5 | 5 | ||
6 | Supposed usage is: | 6 | Note: at this stage, job_{lock/unlock} and job lock guard macros |
7 | are *nop*. | ||
7 | 8 | ||
8 | - "prepare" is main function of the action and it should make the main | 9 | No functional change intended. |
9 | effect of the action to be visible for the following actions, keeping | ||
10 | possibility of roll-back, saving necessary things in action state, | ||
11 | which is prepended to the action list (to do that, prepare func | ||
12 | should call tran_add()). So, driver struct doesn't include "prepare" | ||
13 | field, as it is supposed to be called directly. | ||
14 | 10 | ||
15 | - commit/rollback is supposed to be called for the list of action | 11 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
16 | states, to commit/rollback all the actions in reverse order | 12 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> |
17 | 13 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | |
18 | - When possible "commit" should not make visible effect for other | 14 | Message-Id: <20220926093214.506243-7-eesposit@redhat.com> |
19 | actions, which make possible transparent logical interaction between | ||
20 | actions. | ||
21 | |||
22 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
23 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 15 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
24 | Message-Id: <20210428151804.439460-9-vsementsov@virtuozzo.com> | ||
25 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 16 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
26 | --- | 17 | --- |
27 | include/qemu/transactions.h | 63 ++++++++++++++++++++++++ | 18 | blockjob.c | 20 -------------------- |
28 | util/transactions.c | 96 +++++++++++++++++++++++++++++++++++++ | 19 | job.c | 16 ++++++++++++++++ |
29 | MAINTAINERS | 6 +++ | 20 | 2 files changed, 16 insertions(+), 20 deletions(-) |
30 | util/meson.build | 1 + | ||
31 | 4 files changed, 166 insertions(+) | ||
32 | create mode 100644 include/qemu/transactions.h | ||
33 | create mode 100644 util/transactions.c | ||
34 | 21 | ||
35 | diff --git a/include/qemu/transactions.h b/include/qemu/transactions.h | 22 | diff --git a/blockjob.c b/blockjob.c |
36 | new file mode 100644 | 23 | index XXXXXXX..XXXXXXX 100644 |
37 | index XXXXXXX..XXXXXXX | 24 | --- a/blockjob.c |
38 | --- /dev/null | 25 | +++ b/blockjob.c |
39 | +++ b/include/qemu/transactions.h | ||
40 | @@ -XXX,XX +XXX,XX @@ | 26 | @@ -XXX,XX +XXX,XX @@ |
27 | #include "qemu/main-loop.h" | ||
28 | #include "qemu/timer.h" | ||
29 | |||
30 | -/* | ||
31 | - * The block job API is composed of two categories of functions. | ||
32 | - * | ||
33 | - * The first includes functions used by the monitor. The monitor is | ||
34 | - * peculiar in that it accesses the block job list with block_job_get, and | ||
35 | - * therefore needs consistency across block_job_get and the actual operation | ||
36 | - * (e.g. block_job_set_speed). The consistency is achieved with | ||
37 | - * aio_context_acquire/release. These functions are declared in blockjob.h. | ||
38 | - * | ||
39 | - * The second includes functions used by the block job drivers and sometimes | ||
40 | - * by the core block layer. These do not care about locking, because the | ||
41 | - * whole coroutine runs under the AioContext lock, and are declared in | ||
42 | - * blockjob_int.h. | ||
43 | - */ | ||
44 | - | ||
45 | static bool is_block_job(Job *job) | ||
46 | { | ||
47 | return job_type(job) == JOB_TYPE_BACKUP || | ||
48 | @@ -XXX,XX +XXX,XX @@ static void block_job_event_ready(Notifier *n, void *opaque) | ||
49 | } | ||
50 | |||
51 | |||
52 | -/* | ||
53 | - * API for block job drivers and the block layer. These functions are | ||
54 | - * declared in blockjob_int.h. | ||
55 | - */ | ||
56 | - | ||
57 | void *block_job_create(const char *job_id, const BlockJobDriver *driver, | ||
58 | JobTxn *txn, BlockDriverState *bs, uint64_t perm, | ||
59 | uint64_t shared_perm, int64_t speed, int flags, | ||
60 | diff --git a/job.c b/job.c | ||
61 | index XXXXXXX..XXXXXXX 100644 | ||
62 | --- a/job.c | ||
63 | +++ b/job.c | ||
64 | @@ -XXX,XX +XXX,XX @@ | ||
65 | #include "trace/trace-root.h" | ||
66 | #include "qapi/qapi-events-job.h" | ||
67 | |||
41 | +/* | 68 | +/* |
42 | + * Simple transactions API | 69 | + * The job API is composed of two categories of functions. |
43 | + * | 70 | + * |
44 | + * Copyright (c) 2021 Virtuozzo International GmbH. | 71 | + * The first includes functions used by the monitor. The monitor is |
45 | + * | 72 | + * peculiar in that it accesses the job list with job_get, and |
46 | + * Author: | 73 | + * therefore needs consistency across job_get and the actual operation |
47 | + * Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 74 | + * (e.g. job_user_cancel). To achieve this consistency, the caller |
48 | + * | 75 | + * calls job_lock/job_unlock itself around the whole operation. |
49 | + * This program is free software; you can redistribute it and/or modify | ||
50 | + * it under the terms of the GNU General Public License as published by | ||
51 | + * the Free Software Foundation; either version 2 of the License, or | ||
52 | + * (at your option) any later version. | ||
53 | + * | ||
54 | + * This program is distributed in the hope that it will be useful, | ||
55 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
56 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
57 | + * GNU General Public License for more details. | ||
58 | + * | ||
59 | + * You should have received a copy of the GNU General Public License | ||
60 | + * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
61 | + * | 76 | + * |
62 | + * | 77 | + * |
63 | + * = Generic transaction API = | 78 | + * The second includes functions used by the job drivers and sometimes |
79 | + * by the core block layer. These delegate the locking to the callee instead. | ||
64 | + * | 80 | + * |
65 | + * The intended usage is the following: you create "prepare" functions, which | 81 | + * TODO Actually make this true |
66 | + * represents the actions. They will usually have Transaction* argument, and | ||
67 | + * call tran_add() to register finalization callbacks. For finalization | ||
68 | + * callbacks, prepare corresponding TransactionActionDrv structures. | ||
69 | + * | ||
70 | + * Then, when you need to make a transaction, create an empty Transaction by | ||
71 | + * tran_create(), call your "prepare" functions on it, and finally call | ||
72 | + * tran_abort() or tran_commit() to finalize the transaction by corresponding | ||
73 | + * finalization actions in reverse order. | ||
74 | + */ | 82 | + */ |
75 | + | 83 | + |
76 | +#ifndef QEMU_TRANSACTIONS_H | 84 | /* |
77 | +#define QEMU_TRANSACTIONS_H | 85 | * job_mutex protects the jobs list, but also makes the |
78 | + | 86 | * struct job fields thread-safe. |
79 | +#include <gmodule.h> | ||
80 | + | ||
81 | +typedef struct TransactionActionDrv { | ||
82 | + void (*abort)(void *opaque); | ||
83 | + void (*commit)(void *opaque); | ||
84 | + void (*clean)(void *opaque); | ||
85 | +} TransactionActionDrv; | ||
86 | + | ||
87 | +typedef struct Transaction Transaction; | ||
88 | + | ||
89 | +Transaction *tran_new(void); | ||
90 | +void tran_add(Transaction *tran, TransactionActionDrv *drv, void *opaque); | ||
91 | +void tran_abort(Transaction *tran); | ||
92 | +void tran_commit(Transaction *tran); | ||
93 | + | ||
94 | +static inline void tran_finalize(Transaction *tran, int ret) | ||
95 | +{ | ||
96 | + if (ret < 0) { | ||
97 | + tran_abort(tran); | ||
98 | + } else { | ||
99 | + tran_commit(tran); | ||
100 | + } | ||
101 | +} | ||
102 | + | ||
103 | +#endif /* QEMU_TRANSACTIONS_H */ | ||
104 | diff --git a/util/transactions.c b/util/transactions.c | ||
105 | new file mode 100644 | ||
106 | index XXXXXXX..XXXXXXX | ||
107 | --- /dev/null | ||
108 | +++ b/util/transactions.c | ||
109 | @@ -XXX,XX +XXX,XX @@ | ||
110 | +/* | ||
111 | + * Simple transactions API | ||
112 | + * | ||
113 | + * Copyright (c) 2021 Virtuozzo International GmbH. | ||
114 | + * | ||
115 | + * Author: | ||
116 | + * Sementsov-Ogievskiy Vladimir <vsementsov@virtuozzo.com> | ||
117 | + * | ||
118 | + * This program is free software; you can redistribute it and/or modify | ||
119 | + * it under the terms of the GNU General Public License as published by | ||
120 | + * the Free Software Foundation; either version 2 of the License, or | ||
121 | + * (at your option) any later version. | ||
122 | + * | ||
123 | + * This program is distributed in the hope that it will be useful, | ||
124 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
125 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
126 | + * GNU General Public License for more details. | ||
127 | + * | ||
128 | + * You should have received a copy of the GNU General Public License | ||
129 | + * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
130 | + */ | ||
131 | + | ||
132 | +#include "qemu/osdep.h" | ||
133 | + | ||
134 | +#include "qemu/transactions.h" | ||
135 | +#include "qemu/queue.h" | ||
136 | + | ||
137 | +typedef struct TransactionAction { | ||
138 | + TransactionActionDrv *drv; | ||
139 | + void *opaque; | ||
140 | + QSLIST_ENTRY(TransactionAction) entry; | ||
141 | +} TransactionAction; | ||
142 | + | ||
143 | +struct Transaction { | ||
144 | + QSLIST_HEAD(, TransactionAction) actions; | ||
145 | +}; | ||
146 | + | ||
147 | +Transaction *tran_new(void) | ||
148 | +{ | ||
149 | + Transaction *tran = g_new(Transaction, 1); | ||
150 | + | ||
151 | + QSLIST_INIT(&tran->actions); | ||
152 | + | ||
153 | + return tran; | ||
154 | +} | ||
155 | + | ||
156 | +void tran_add(Transaction *tran, TransactionActionDrv *drv, void *opaque) | ||
157 | +{ | ||
158 | + TransactionAction *act; | ||
159 | + | ||
160 | + act = g_new(TransactionAction, 1); | ||
161 | + *act = (TransactionAction) { | ||
162 | + .drv = drv, | ||
163 | + .opaque = opaque | ||
164 | + }; | ||
165 | + | ||
166 | + QSLIST_INSERT_HEAD(&tran->actions, act, entry); | ||
167 | +} | ||
168 | + | ||
169 | +void tran_abort(Transaction *tran) | ||
170 | +{ | ||
171 | + TransactionAction *act, *next; | ||
172 | + | ||
173 | + QSLIST_FOREACH_SAFE(act, &tran->actions, entry, next) { | ||
174 | + if (act->drv->abort) { | ||
175 | + act->drv->abort(act->opaque); | ||
176 | + } | ||
177 | + | ||
178 | + if (act->drv->clean) { | ||
179 | + act->drv->clean(act->opaque); | ||
180 | + } | ||
181 | + | ||
182 | + g_free(act); | ||
183 | + } | ||
184 | + | ||
185 | + g_free(tran); | ||
186 | +} | ||
187 | + | ||
188 | +void tran_commit(Transaction *tran) | ||
189 | +{ | ||
190 | + TransactionAction *act, *next; | ||
191 | + | ||
192 | + QSLIST_FOREACH_SAFE(act, &tran->actions, entry, next) { | ||
193 | + if (act->drv->commit) { | ||
194 | + act->drv->commit(act->opaque); | ||
195 | + } | ||
196 | + | ||
197 | + if (act->drv->clean) { | ||
198 | + act->drv->clean(act->opaque); | ||
199 | + } | ||
200 | + | ||
201 | + g_free(act); | ||
202 | + } | ||
203 | + | ||
204 | + g_free(tran); | ||
205 | +} | ||
206 | diff --git a/MAINTAINERS b/MAINTAINERS | ||
207 | index XXXXXXX..XXXXXXX 100644 | ||
208 | --- a/MAINTAINERS | ||
209 | +++ b/MAINTAINERS | ||
210 | @@ -XXX,XX +XXX,XX @@ M: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
211 | S: Maintained | ||
212 | F: scripts/simplebench/ | ||
213 | |||
214 | +Transactions helper | ||
215 | +M: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
216 | +S: Maintained | ||
217 | +F: include/qemu/transactions.h | ||
218 | +F: util/transactions.c | ||
219 | + | ||
220 | QAPI | ||
221 | M: Markus Armbruster <armbru@redhat.com> | ||
222 | M: Michael Roth <michael.roth@amd.com> | ||
223 | diff --git a/util/meson.build b/util/meson.build | ||
224 | index XXXXXXX..XXXXXXX 100644 | ||
225 | --- a/util/meson.build | ||
226 | +++ b/util/meson.build | ||
227 | @@ -XXX,XX +XXX,XX @@ util_ss.add(files('qsp.c')) | ||
228 | util_ss.add(files('range.c')) | ||
229 | util_ss.add(files('stats64.c')) | ||
230 | util_ss.add(files('systemd.c')) | ||
231 | +util_ss.add(files('transactions.c')) | ||
232 | util_ss.add(when: 'CONFIG_POSIX', if_true: files('drm.c')) | ||
233 | util_ss.add(files('guest-random.c')) | ||
234 | util_ss.add(files('yank.c')) | ||
235 | -- | 87 | -- |
236 | 2.30.2 | 88 | 2.37.3 |
237 | |||
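The transactions patch above (left column) describes the intended usage only in prose. Below is a minimal sketch of a "prepare" action built on the tran_new()/tran_add()/tran_finalize() helpers it adds; SetFlagState, its callbacks and set_two_flags() are invented for illustration, while the Transaction API and glib's g_new()/g_free() are real:

    #include "qemu/osdep.h"
    #include "qemu/transactions.h"

    /* Hypothetical action state: set *flag and remember how to undo it. */
    typedef struct SetFlagState {
        bool *flag;
        bool old_value;
    } SetFlagState;

    static void set_flag_abort(void *opaque)
    {
        SetFlagState *s = opaque;

        *s->flag = s->old_value;    /* roll back the visible effect */
    }

    static TransactionActionDrv set_flag_drv = {
        .abort = set_flag_abort,
        .clean = g_free,            /* state is freed on commit and abort */
    };

    /* "prepare": make the effect visible and register rollback/cleanup. */
    static void set_flag_prepare(bool *flag, Transaction *tran)
    {
        SetFlagState *s = g_new(SetFlagState, 1);

        *s = (SetFlagState) { .flag = flag, .old_value = *flag };
        *flag = true;
        tran_add(tran, &set_flag_drv, s);
    }

    /* The caller groups several prepare steps and finalizes them together. */
    static int set_two_flags(bool *a, bool *b)
    {
        Transaction *tran = tran_new();
        int ret = 0;

        set_flag_prepare(a, tran);
        set_flag_prepare(b, tran);
        /* ... further prepare steps may set ret < 0 on failure ... */

        tran_finalize(tran, ret);   /* commit if ret >= 0, abort otherwise */
        return ret;
    }

Since tran_add() prepends to the action list, tran_commit() and tran_abort() invoke the callbacks in reverse order of preparation, which is the ordering the commit message above relies on.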
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Split part of bdrv_replace_node_common() to be used separately. | 3 | Just as done with job.h, create _locked() functions in blockjob.h |
4 | 4 | ||
5 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 5 | These functions will be later useful when caller has already taken |
6 | the lock. All blockjob _locked functions call job _locked functions. | ||
7 | |||
8 | Note: at this stage, job_{lock/unlock} and job lock guard macros | ||
9 | are *nop*. | ||
10 | |||
11 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> | ||
12 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
6 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 13 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
7 | Message-Id: <20210428151804.439460-21-vsementsov@virtuozzo.com> | 14 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> |
15 | Message-Id: <20220926093214.506243-8-eesposit@redhat.com> | ||
8 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 16 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
9 | --- | 17 | --- |
10 | block.c | 50 +++++++++++++++++++++++++++++++------------------- | 18 | include/block/blockjob.h | 18 ++++++++++++++ |
11 | 1 file changed, 31 insertions(+), 19 deletions(-) | 19 | blockjob.c | 52 ++++++++++++++++++++++++++++++++-------- |
12 | 20 | 2 files changed, 60 insertions(+), 10 deletions(-) | |
13 | diff --git a/block.c b/block.c | 21 | |
22 | diff --git a/include/block/blockjob.h b/include/block/blockjob.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | 23 | index XXXXXXX..XXXXXXX 100644 |
15 | --- a/block.c | 24 | --- a/include/block/blockjob.h |
16 | +++ b/block.c | 25 | +++ b/include/block/blockjob.h |
17 | @@ -XXX,XX +XXX,XX @@ static bool should_update_child(BdrvChild *c, BlockDriverState *to) | 26 | @@ -XXX,XX +XXX,XX @@ typedef struct BlockJob { |
18 | return ret; | 27 | */ |
19 | } | 28 | BlockJob *block_job_next(BlockJob *job); |
20 | 29 | ||
21 | +static int bdrv_replace_node_noperm(BlockDriverState *from, | 30 | +/* Same as block_job_next(), but called with job lock held. */ |
22 | + BlockDriverState *to, | 31 | +BlockJob *block_job_next_locked(BlockJob *job); |
23 | + bool auto_skip, Transaction *tran, | 32 | + |
24 | + Error **errp) | 33 | /** |
25 | +{ | 34 | * block_job_get: |
26 | + BdrvChild *c, *next; | 35 | * @id: The id of the block job. |
27 | + | 36 | @@ -XXX,XX +XXX,XX @@ BlockJob *block_job_next(BlockJob *job); |
28 | + QLIST_FOREACH_SAFE(c, &from->parents, next_parent, next) { | 37 | */ |
29 | + assert(c->bs == from); | 38 | BlockJob *block_job_get(const char *id); |
30 | + if (!should_update_child(c, to)) { | 39 | |
31 | + if (auto_skip) { | 40 | +/* Same as block_job_get(), but called with job lock held. */ |
32 | + continue; | 41 | +BlockJob *block_job_get_locked(const char *id); |
33 | + } | 42 | + |
34 | + error_setg(errp, "Should not change '%s' link to '%s'", | 43 | /** |
35 | + c->name, from->node_name); | 44 | * block_job_add_bdrv: |
36 | + return -EINVAL; | 45 | * @job: A block job |
37 | + } | 46 | @@ -XXX,XX +XXX,XX @@ bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs); |
38 | + if (c->frozen) { | 47 | */ |
39 | + error_setg(errp, "Cannot change '%s' link to '%s'", | 48 | bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp); |
40 | + c->name, from->node_name); | 49 | |
41 | + return -EPERM; | 50 | +/* |
42 | + } | 51 | + * Same as block_job_set_speed(), but called with job lock held. |
43 | + bdrv_replace_child_safe(c, to, tran); | 52 | + * Might release the lock temporarily. |
44 | + } | 53 | + */ |
45 | + | 54 | +bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp); |
46 | + return 0; | 55 | + |
47 | +} | 56 | /** |
57 | * block_job_query: | ||
58 | * @job: The job to get information about. | ||
59 | @@ -XXX,XX +XXX,XX @@ bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp); | ||
60 | */ | ||
61 | BlockJobInfo *block_job_query(BlockJob *job, Error **errp); | ||
62 | |||
63 | +/* Same as block_job_query(), but called with job lock held. */ | ||
64 | +BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp); | ||
65 | + | ||
66 | /** | ||
67 | * block_job_iostatus_reset: | ||
68 | * @job: The job whose I/O status should be reset. | ||
69 | @@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp); | ||
70 | */ | ||
71 | void block_job_iostatus_reset(BlockJob *job); | ||
72 | |||
73 | +/* Same as block_job_iostatus_reset(), but called with job lock held. */ | ||
74 | +void block_job_iostatus_reset_locked(BlockJob *job); | ||
48 | + | 75 | + |
49 | /* | 76 | /* |
50 | * With auto_skip=true bdrv_replace_node_common skips updating from parents | 77 | * block_job_get_aio_context: |
51 | * if it creates a parent-child relation loop or if parent is block-job. | 78 | * |
52 | @@ -XXX,XX +XXX,XX @@ static int bdrv_replace_node_common(BlockDriverState *from, | 79 | diff --git a/blockjob.c b/blockjob.c |
53 | BlockDriverState *to, | 80 | index XXXXXXX..XXXXXXX 100644 |
54 | bool auto_skip, Error **errp) | 81 | --- a/blockjob.c |
55 | { | 82 | +++ b/blockjob.c |
56 | - BdrvChild *c, *next; | 83 | @@ -XXX,XX +XXX,XX @@ static bool is_block_job(Job *job) |
57 | Transaction *tran = tran_new(); | 84 | job_type(job) == JOB_TYPE_STREAM; |
58 | g_autoptr(GHashTable) found = NULL; | 85 | } |
59 | g_autoptr(GSList) refresh_list = NULL; | 86 | |
60 | @@ -XXX,XX +XXX,XX @@ static int bdrv_replace_node_common(BlockDriverState *from, | 87 | -BlockJob *block_job_next(BlockJob *bjob) |
61 | * permissions based on new graph. If we fail, we'll roll-back the | 88 | +BlockJob *block_job_next_locked(BlockJob *bjob) |
62 | * replacement. | 89 | { |
63 | */ | 90 | Job *job = bjob ? &bjob->job : NULL; |
64 | - QLIST_FOREACH_SAFE(c, &from->parents, next_parent, next) { | 91 | GLOBAL_STATE_CODE(); |
65 | - assert(c->bs == from); | 92 | |
66 | - if (!should_update_child(c, to)) { | 93 | do { |
67 | - if (auto_skip) { | 94 | - job = job_next(job); |
68 | - continue; | 95 | + job = job_next_locked(job); |
69 | - } | 96 | } while (job && !is_block_job(job)); |
70 | - ret = -EINVAL; | 97 | |
71 | - error_setg(errp, "Should not change '%s' link to '%s'", | 98 | return job ? container_of(job, BlockJob, job) : NULL; |
72 | - c->name, from->node_name); | 99 | } |
73 | - goto out; | 100 | |
74 | - } | 101 | -BlockJob *block_job_get(const char *id) |
75 | - if (c->frozen) { | 102 | +BlockJob *block_job_next(BlockJob *bjob) |
76 | - ret = -EPERM; | 103 | { |
77 | - error_setg(errp, "Cannot change '%s' link to '%s'", | 104 | - Job *job = job_get(id); |
78 | - c->name, from->node_name); | 105 | + JOB_LOCK_GUARD(); |
79 | - goto out; | 106 | + return block_job_next_locked(bjob); |
80 | - } | 107 | +} |
81 | - bdrv_replace_child_safe(c, to, tran); | 108 | + |
82 | + ret = bdrv_replace_node_noperm(from, to, auto_skip, tran, errp); | 109 | +BlockJob *block_job_get_locked(const char *id) |
83 | + if (ret < 0) { | 110 | +{ |
84 | + goto out; | 111 | + Job *job = job_get_locked(id); |
85 | } | 112 | GLOBAL_STATE_CODE(); |
86 | 113 | ||
87 | found = g_hash_table_new(NULL, NULL); | 114 | if (job && is_block_job(job)) { |
115 | @@ -XXX,XX +XXX,XX @@ BlockJob *block_job_get(const char *id) | ||
116 | } | ||
117 | } | ||
118 | |||
119 | +BlockJob *block_job_get(const char *id) | ||
120 | +{ | ||
121 | + JOB_LOCK_GUARD(); | ||
122 | + return block_job_get_locked(id); | ||
123 | +} | ||
124 | + | ||
125 | void block_job_free(Job *job) | ||
126 | { | ||
127 | BlockJob *bjob = container_of(job, BlockJob, job); | ||
128 | @@ -XXX,XX +XXX,XX @@ static bool job_timer_pending(Job *job) | ||
129 | return timer_pending(&job->sleep_timer); | ||
130 | } | ||
131 | |||
132 | -bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) | ||
133 | +bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp) | ||
134 | { | ||
135 | const BlockJobDriver *drv = block_job_driver(job); | ||
136 | int64_t old_speed = job->speed; | ||
137 | |||
138 | GLOBAL_STATE_CODE(); | ||
139 | |||
140 | - if (job_apply_verb(&job->job, JOB_VERB_SET_SPEED, errp) < 0) { | ||
141 | + if (job_apply_verb_locked(&job->job, JOB_VERB_SET_SPEED, errp) < 0) { | ||
142 | return false; | ||
143 | } | ||
144 | if (speed < 0) { | ||
145 | @@ -XXX,XX +XXX,XX @@ bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) | ||
146 | job->speed = speed; | ||
147 | |||
148 | if (drv->set_speed) { | ||
149 | + job_unlock(); | ||
150 | drv->set_speed(job, speed); | ||
151 | + job_lock(); | ||
152 | } | ||
153 | |||
154 | if (speed && speed <= old_speed) { | ||
155 | @@ -XXX,XX +XXX,XX @@ bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) | ||
156 | } | ||
157 | |||
158 | /* kick only if a timer is pending */ | ||
159 | - job_enter_cond(&job->job, job_timer_pending); | ||
160 | + job_enter_cond_locked(&job->job, job_timer_pending); | ||
161 | |||
162 | return true; | ||
163 | } | ||
164 | |||
165 | +bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) | ||
166 | +{ | ||
167 | + JOB_LOCK_GUARD(); | ||
168 | + return block_job_set_speed_locked(job, speed, errp); | ||
169 | +} | ||
170 | + | ||
171 | int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n) | ||
172 | { | ||
173 | IO_CODE(); | ||
174 | return ratelimit_calculate_delay(&job->limit, n); | ||
175 | } | ||
176 | |||
177 | -BlockJobInfo *block_job_query(BlockJob *job, Error **errp) | ||
178 | +BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp) | ||
179 | { | ||
180 | BlockJobInfo *info; | ||
181 | uint64_t progress_current, progress_total; | ||
182 | @@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp) | ||
183 | info->len = progress_total; | ||
184 | info->speed = job->speed; | ||
185 | info->io_status = job->iostatus; | ||
186 | - info->ready = job_is_ready(&job->job), | ||
187 | + info->ready = job_is_ready_locked(&job->job), | ||
188 | info->status = job->job.status; | ||
189 | info->auto_finalize = job->job.auto_finalize; | ||
190 | info->auto_dismiss = job->job.auto_dismiss; | ||
191 | @@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp) | ||
192 | return info; | ||
193 | } | ||
194 | |||
195 | +BlockJobInfo *block_job_query(BlockJob *job, Error **errp) | ||
196 | +{ | ||
197 | + JOB_LOCK_GUARD(); | ||
198 | + return block_job_query_locked(job, errp); | ||
199 | +} | ||
200 | + | ||
201 | static void block_job_iostatus_set_err(BlockJob *job, int error) | ||
202 | { | ||
203 | if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) { | ||
204 | @@ -XXX,XX +XXX,XX @@ fail: | ||
205 | return NULL; | ||
206 | } | ||
207 | |||
208 | -void block_job_iostatus_reset(BlockJob *job) | ||
209 | +void block_job_iostatus_reset_locked(BlockJob *job) | ||
210 | { | ||
211 | GLOBAL_STATE_CODE(); | ||
212 | if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) { | ||
213 | @@ -XXX,XX +XXX,XX @@ void block_job_iostatus_reset(BlockJob *job) | ||
214 | job->iostatus = BLOCK_DEVICE_IO_STATUS_OK; | ||
215 | } | ||
216 | |||
217 | +void block_job_iostatus_reset(BlockJob *job) | ||
218 | +{ | ||
219 | + JOB_LOCK_GUARD(); | ||
220 | + block_job_iostatus_reset_locked(job); | ||
221 | +} | ||
222 | + | ||
223 | void block_job_user_resume(Job *job) | ||
224 | { | ||
225 | BlockJob *bjob = container_of(job, BlockJob, job); | ||
88 | -- | 226 | -- |
89 | 2.30.2 | 227 | 2.37.3 |
90 | |||
91 | diff view generated by jsdifflib |
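The blockjob hunks above all follow one shape: the real work moves into a `*_locked()` variant that expects the caller to hold the job mutex, and the old public name becomes a thin wrapper that takes the lock with `JOB_LOCK_GUARD()` and delegates. Where a driver callback must run without the lock (as `block_job_set_speed_locked()` does around `drv->set_speed`), the lock is dropped and re-taken around the call. Below is a minimal standalone sketch of that shape; the pthread mutex, the `Job` struct and `set_speed_cb` are invented stand-ins for the example, not QEMU APIs.

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t job_mutex = PTHREAD_MUTEX_INITIALIZER;

typedef struct Job {
    long speed;
    void (*set_speed_cb)(struct Job *job, long speed);   /* must run unlocked */
} Job;

/* Caller must hold job_mutex; drops it temporarily around the callback. */
static bool job_set_speed_locked(Job *job, long speed)
{
    if (speed < 0) {
        return false;
    }
    job->speed = speed;
    if (job->set_speed_cb) {
        pthread_mutex_unlock(&job_mutex);   /* callback runs without the lock */
        job->set_speed_cb(job, speed);
        pthread_mutex_lock(&job_mutex);
    }
    return true;
}

/* Public wrapper: take the lock, delegate to the _locked variant. */
static bool job_set_speed(Job *job, long speed)
{
    bool ok;

    pthread_mutex_lock(&job_mutex);
    ok = job_set_speed_locked(job, speed);
    pthread_mutex_unlock(&job_mutex);
    return ok;
}

static void print_speed(struct Job *job, long speed)
{
    (void)job;
    printf("speed set to %ld\n", speed);
}

int main(void)
{
    Job job = { .speed = 0, .set_speed_cb = print_speed };
    return job_set_speed(&job, 1024) ? 0 : 1;
}
```

The point of the split is that callers which already hold the lock (QMP handlers, other `_locked()` helpers) can call the `_locked()` variant directly without double-locking, while external callers keep the old one-call interface.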
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | We have too many comments for this feature. It seems better just not to | 3 | Both blockdev.c and job-qmp.c have TOC/TOU conditions, because |
4 | do it. Most real users (tests don't count) have to create an additional | 4 | they first search for the job and then perform an action on it. |
5 | reference. | 5 | Therefore, we need to do the search + action under the same |
6 | job mutex critical section. | ||
6 | 7 | ||
7 | Drop also comment in external_snapshot_prepare: | 8 | Note: at this stage, job_{lock/unlock} and job lock guard macros |
8 | - bdrv_append doesn't "remove" old bs in common sense, it sounds | 9 | are *nop*. |
9 | strange | ||
10 | - the fact that bdrv_append can fail is obvious from the context | ||
11 | - the fact that we must rollback all changes in transaction abort is | ||
12 | known (it's the direct role of abort) | ||
13 | 10 | ||
14 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 11 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
12 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
13 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
15 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 14 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
16 | Message-Id: <20210428151804.439460-5-vsementsov@virtuozzo.com> | 15 | Message-Id: <20220926093214.506243-9-eesposit@redhat.com> |
17 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 16 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
18 | --- | 17 | --- |
19 | block.c | 25 +++---------------------- | 18 | blockdev.c | 67 +++++++++++++++++++++++++++++++++++++----------------- |
20 | block/backup-top.c | 1 - | 19 | job-qmp.c | 57 ++++++++++++++++++++++++++++++++-------------- |
21 | block/commit.c | 1 + | 20 | 2 files changed, 86 insertions(+), 38 deletions(-) |
22 | block/mirror.c | 3 --- | ||
23 | blockdev.c | 4 ---- | ||
24 | tests/unit/test-bdrv-drain.c | 2 +- | ||
25 | tests/unit/test-bdrv-graph-mod.c | 3 +++ | ||
26 | 7 files changed, 8 insertions(+), 31 deletions(-) | ||
27 | 21 | ||
28 | diff --git a/block.c b/block.c | ||
29 | index XXXXXXX..XXXXXXX 100644 | ||
30 | --- a/block.c | ||
31 | +++ b/block.c | ||
32 | @@ -XXX,XX +XXX,XX @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs, | ||
33 | goto out; | ||
34 | } | ||
35 | |||
36 | - /* bdrv_append() consumes a strong reference to bs_snapshot | ||
37 | - * (i.e. it will call bdrv_unref() on it) even on error, so in | ||
38 | - * order to be able to return one, we have to increase | ||
39 | - * bs_snapshot's refcount here */ | ||
40 | - bdrv_ref(bs_snapshot); | ||
41 | ret = bdrv_append(bs_snapshot, bs, errp); | ||
42 | if (ret < 0) { | ||
43 | bs_snapshot = NULL; | ||
44 | @@ -XXX,XX +XXX,XX @@ int bdrv_replace_node(BlockDriverState *from, BlockDriverState *to, | ||
45 | * bs_new must not be attached to a BlockBackend. | ||
46 | * | ||
47 | * This function does not create any image files. | ||
48 | - * | ||
49 | - * bdrv_append() takes ownership of a bs_new reference and unrefs it because | ||
50 | - * that's what the callers commonly need. bs_new will be referenced by the old | ||
51 | - * parents of bs_top after bdrv_append() returns. If the caller needs to keep a | ||
52 | - * reference of its own, it must call bdrv_ref(). | ||
53 | */ | ||
54 | int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top, | ||
55 | Error **errp) | ||
56 | { | ||
57 | int ret = bdrv_set_backing_hd(bs_new, bs_top, errp); | ||
58 | if (ret < 0) { | ||
59 | - goto out; | ||
60 | + return ret; | ||
61 | } | ||
62 | |||
63 | ret = bdrv_replace_node(bs_top, bs_new, errp); | ||
64 | if (ret < 0) { | ||
65 | bdrv_set_backing_hd(bs_new, NULL, &error_abort); | ||
66 | - goto out; | ||
67 | + return ret; | ||
68 | } | ||
69 | |||
70 | - ret = 0; | ||
71 | - | ||
72 | -out: | ||
73 | - /* | ||
74 | - * bs_new is now referenced by its new parents, we don't need the | ||
75 | - * additional reference any more. | ||
76 | - */ | ||
77 | - bdrv_unref(bs_new); | ||
78 | - | ||
79 | - return ret; | ||
80 | + return 0; | ||
81 | } | ||
82 | |||
83 | static void bdrv_delete(BlockDriverState *bs) | ||
84 | diff --git a/block/backup-top.c b/block/backup-top.c | ||
85 | index XXXXXXX..XXXXXXX 100644 | ||
86 | --- a/block/backup-top.c | ||
87 | +++ b/block/backup-top.c | ||
88 | @@ -XXX,XX +XXX,XX @@ BlockDriverState *bdrv_backup_top_append(BlockDriverState *source, | ||
89 | |||
90 | bdrv_drained_begin(source); | ||
91 | |||
92 | - bdrv_ref(top); | ||
93 | ret = bdrv_append(top, source, errp); | ||
94 | if (ret < 0) { | ||
95 | error_prepend(errp, "Cannot append backup-top filter: "); | ||
96 | diff --git a/block/commit.c b/block/commit.c | ||
97 | index XXXXXXX..XXXXXXX 100644 | ||
98 | --- a/block/commit.c | ||
99 | +++ b/block/commit.c | ||
100 | @@ -XXX,XX +XXX,XX @@ void commit_start(const char *job_id, BlockDriverState *bs, | ||
101 | commit_top_bs->total_sectors = top->total_sectors; | ||
102 | |||
103 | ret = bdrv_append(commit_top_bs, top, errp); | ||
104 | + bdrv_unref(commit_top_bs); /* referenced by new parents or failed */ | ||
105 | if (ret < 0) { | ||
106 | commit_top_bs = NULL; | ||
107 | goto fail; | ||
108 | diff --git a/block/mirror.c b/block/mirror.c | ||
109 | index XXXXXXX..XXXXXXX 100644 | ||
110 | --- a/block/mirror.c | ||
111 | +++ b/block/mirror.c | ||
112 | @@ -XXX,XX +XXX,XX @@ static BlockJob *mirror_start_job( | ||
113 | |||
114 | bs_opaque->is_commit = target_is_backing; | ||
115 | |||
116 | - /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep | ||
117 | - * it alive until block_job_create() succeeds even if bs has no parent. */ | ||
118 | - bdrv_ref(mirror_top_bs); | ||
119 | bdrv_drained_begin(bs); | ||
120 | ret = bdrv_append(mirror_top_bs, bs, errp); | ||
121 | bdrv_drained_end(bs); | ||
122 | diff --git a/blockdev.c b/blockdev.c | 22 | diff --git a/blockdev.c b/blockdev.c |
123 | index XXXXXXX..XXXXXXX 100644 | 23 | index XXXXXXX..XXXXXXX 100644 |
124 | --- a/blockdev.c | 24 | --- a/blockdev.c |
125 | +++ b/blockdev.c | 25 | +++ b/blockdev.c |
126 | @@ -XXX,XX +XXX,XX @@ static void external_snapshot_prepare(BlkActionState *common, | 26 | @@ -XXX,XX +XXX,XX @@ out: |
27 | aio_context_release(aio_context); | ||
28 | } | ||
29 | |||
30 | -/* Get a block job using its ID and acquire its AioContext */ | ||
31 | -static BlockJob *find_block_job(const char *id, AioContext **aio_context, | ||
32 | - Error **errp) | ||
33 | +/* | ||
34 | + * Get a block job using its ID and acquire its AioContext. | ||
35 | + * Called with job_mutex held. | ||
36 | + */ | ||
37 | +static BlockJob *find_block_job_locked(const char *id, | ||
38 | + AioContext **aio_context, | ||
39 | + Error **errp) | ||
40 | { | ||
41 | BlockJob *job; | ||
42 | |||
43 | @@ -XXX,XX +XXX,XX @@ static BlockJob *find_block_job(const char *id, AioContext **aio_context, | ||
44 | |||
45 | *aio_context = NULL; | ||
46 | |||
47 | - job = block_job_get(id); | ||
48 | + job = block_job_get_locked(id); | ||
49 | |||
50 | if (!job) { | ||
51 | error_set(errp, ERROR_CLASS_DEVICE_NOT_ACTIVE, | ||
52 | @@ -XXX,XX +XXX,XX @@ static BlockJob *find_block_job(const char *id, AioContext **aio_context, | ||
53 | void qmp_block_job_set_speed(const char *device, int64_t speed, Error **errp) | ||
54 | { | ||
55 | AioContext *aio_context; | ||
56 | - BlockJob *job = find_block_job(device, &aio_context, errp); | ||
57 | + BlockJob *job; | ||
58 | + | ||
59 | + JOB_LOCK_GUARD(); | ||
60 | + job = find_block_job_locked(device, &aio_context, errp); | ||
61 | |||
62 | if (!job) { | ||
63 | return; | ||
64 | } | ||
65 | |||
66 | - block_job_set_speed(job, speed, errp); | ||
67 | + block_job_set_speed_locked(job, speed, errp); | ||
68 | aio_context_release(aio_context); | ||
69 | } | ||
70 | |||
71 | @@ -XXX,XX +XXX,XX @@ void qmp_block_job_cancel(const char *device, | ||
72 | bool has_force, bool force, Error **errp) | ||
73 | { | ||
74 | AioContext *aio_context; | ||
75 | - BlockJob *job = find_block_job(device, &aio_context, errp); | ||
76 | + BlockJob *job; | ||
77 | + | ||
78 | + JOB_LOCK_GUARD(); | ||
79 | + job = find_block_job_locked(device, &aio_context, errp); | ||
80 | |||
81 | if (!job) { | ||
82 | return; | ||
83 | @@ -XXX,XX +XXX,XX @@ void qmp_block_job_cancel(const char *device, | ||
84 | force = false; | ||
85 | } | ||
86 | |||
87 | - if (job_user_paused(&job->job) && !force) { | ||
88 | + if (job_user_paused_locked(&job->job) && !force) { | ||
89 | error_setg(errp, "The block job for device '%s' is currently paused", | ||
90 | device); | ||
127 | goto out; | 91 | goto out; |
128 | } | 92 | } |
129 | 93 | ||
130 | - /* This removes our old bs and adds the new bs. This is an operation that | 94 | trace_qmp_block_job_cancel(job); |
131 | - * can fail, so we need to do it in .prepare; undoing it for abort is | 95 | - job_user_cancel(&job->job, force, errp); |
132 | - * always possible. */ | 96 | + job_user_cancel_locked(&job->job, force, errp); |
133 | - bdrv_ref(state->new_bs); | 97 | out: |
134 | ret = bdrv_append(state->new_bs, state->old_bs, errp); | 98 | aio_context_release(aio_context); |
135 | if (ret < 0) { | 99 | } |
136 | goto out; | 100 | @@ -XXX,XX +XXX,XX @@ out: |
137 | diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c | 101 | void qmp_block_job_pause(const char *device, Error **errp) |
102 | { | ||
103 | AioContext *aio_context; | ||
104 | - BlockJob *job = find_block_job(device, &aio_context, errp); | ||
105 | + BlockJob *job; | ||
106 | + | ||
107 | + JOB_LOCK_GUARD(); | ||
108 | + job = find_block_job_locked(device, &aio_context, errp); | ||
109 | |||
110 | if (!job) { | ||
111 | return; | ||
112 | } | ||
113 | |||
114 | trace_qmp_block_job_pause(job); | ||
115 | - job_user_pause(&job->job, errp); | ||
116 | + job_user_pause_locked(&job->job, errp); | ||
117 | aio_context_release(aio_context); | ||
118 | } | ||
119 | |||
120 | void qmp_block_job_resume(const char *device, Error **errp) | ||
121 | { | ||
122 | AioContext *aio_context; | ||
123 | - BlockJob *job = find_block_job(device, &aio_context, errp); | ||
124 | + BlockJob *job; | ||
125 | + | ||
126 | + JOB_LOCK_GUARD(); | ||
127 | + job = find_block_job_locked(device, &aio_context, errp); | ||
128 | |||
129 | if (!job) { | ||
130 | return; | ||
131 | } | ||
132 | |||
133 | trace_qmp_block_job_resume(job); | ||
134 | - job_user_resume(&job->job, errp); | ||
135 | + job_user_resume_locked(&job->job, errp); | ||
136 | aio_context_release(aio_context); | ||
137 | } | ||
138 | |||
139 | void qmp_block_job_complete(const char *device, Error **errp) | ||
140 | { | ||
141 | AioContext *aio_context; | ||
142 | - BlockJob *job = find_block_job(device, &aio_context, errp); | ||
143 | + BlockJob *job; | ||
144 | + | ||
145 | + JOB_LOCK_GUARD(); | ||
146 | + job = find_block_job_locked(device, &aio_context, errp); | ||
147 | |||
148 | if (!job) { | ||
149 | return; | ||
150 | } | ||
151 | |||
152 | trace_qmp_block_job_complete(job); | ||
153 | - job_complete(&job->job, errp); | ||
154 | + job_complete_locked(&job->job, errp); | ||
155 | aio_context_release(aio_context); | ||
156 | } | ||
157 | |||
158 | void qmp_block_job_finalize(const char *id, Error **errp) | ||
159 | { | ||
160 | AioContext *aio_context; | ||
161 | - BlockJob *job = find_block_job(id, &aio_context, errp); | ||
162 | + BlockJob *job; | ||
163 | + | ||
164 | + JOB_LOCK_GUARD(); | ||
165 | + job = find_block_job_locked(id, &aio_context, errp); | ||
166 | |||
167 | if (!job) { | ||
168 | return; | ||
169 | } | ||
170 | |||
171 | trace_qmp_block_job_finalize(job); | ||
172 | - job_ref(&job->job); | ||
173 | - job_finalize(&job->job, errp); | ||
174 | + job_ref_locked(&job->job); | ||
175 | + job_finalize_locked(&job->job, errp); | ||
176 | |||
177 | /* | ||
178 | * Job's context might have changed via job_finalize (and job_txn_apply | ||
179 | @@ -XXX,XX +XXX,XX @@ void qmp_block_job_finalize(const char *id, Error **errp) | ||
180 | * one. | ||
181 | */ | ||
182 | aio_context = block_job_get_aio_context(job); | ||
183 | - job_unref(&job->job); | ||
184 | + job_unref_locked(&job->job); | ||
185 | aio_context_release(aio_context); | ||
186 | } | ||
187 | |||
188 | void qmp_block_job_dismiss(const char *id, Error **errp) | ||
189 | { | ||
190 | AioContext *aio_context; | ||
191 | - BlockJob *bjob = find_block_job(id, &aio_context, errp); | ||
192 | + BlockJob *bjob; | ||
193 | Job *job; | ||
194 | |||
195 | + JOB_LOCK_GUARD(); | ||
196 | + bjob = find_block_job_locked(id, &aio_context, errp); | ||
197 | + | ||
198 | if (!bjob) { | ||
199 | return; | ||
200 | } | ||
201 | |||
202 | trace_qmp_block_job_dismiss(bjob); | ||
203 | job = &bjob->job; | ||
204 | - job_dismiss(&job, errp); | ||
205 | + job_dismiss_locked(&job, errp); | ||
206 | aio_context_release(aio_context); | ||
207 | } | ||
208 | |||
209 | diff --git a/job-qmp.c b/job-qmp.c | ||
138 | index XXXXXXX..XXXXXXX 100644 | 210 | index XXXXXXX..XXXXXXX 100644 |
139 | --- a/tests/unit/test-bdrv-drain.c | 211 | --- a/job-qmp.c |
140 | +++ b/tests/unit/test-bdrv-drain.c | 212 | +++ b/job-qmp.c |
141 | @@ -XXX,XX +XXX,XX @@ static void test_append_to_drained(void) | 213 | @@ -XXX,XX +XXX,XX @@ |
142 | g_assert_cmpint(base_s->drain_count, ==, 1); | 214 | #include "qapi/error.h" |
143 | g_assert_cmpint(base->in_flight, ==, 0); | 215 | #include "trace/trace-root.h" |
144 | 216 | ||
145 | - /* Takes ownership of overlay, so we don't have to unref it later */ | 217 | -/* Get a job using its ID and acquire its AioContext */ |
146 | bdrv_append(overlay, base, &error_abort); | 218 | -static Job *find_job(const char *id, AioContext **aio_context, Error **errp) |
147 | g_assert_cmpint(base->in_flight, ==, 0); | 219 | +/* |
148 | g_assert_cmpint(overlay->in_flight, ==, 0); | 220 | + * Get a job using its ID and acquire its AioContext. |
149 | @@ -XXX,XX +XXX,XX @@ static void test_append_to_drained(void) | 221 | + * Called with job_mutex held. |
150 | g_assert_cmpint(overlay->quiesce_counter, ==, 0); | 222 | + */ |
151 | g_assert_cmpint(overlay_s->drain_count, ==, 0); | 223 | +static Job *find_job_locked(const char *id, |
152 | 224 | + AioContext **aio_context, | |
153 | + bdrv_unref(overlay); | 225 | + Error **errp) |
154 | bdrv_unref(base); | 226 | { |
155 | blk_unref(blk); | 227 | Job *job; |
156 | } | 228 | |
157 | diff --git a/tests/unit/test-bdrv-graph-mod.c b/tests/unit/test-bdrv-graph-mod.c | 229 | *aio_context = NULL; |
158 | index XXXXXXX..XXXXXXX 100644 | 230 | |
159 | --- a/tests/unit/test-bdrv-graph-mod.c | 231 | - job = job_get(id); |
160 | +++ b/tests/unit/test-bdrv-graph-mod.c | 232 | + job = job_get_locked(id); |
161 | @@ -XXX,XX +XXX,XX @@ static void test_update_perm_tree(void) | 233 | if (!job) { |
162 | ret = bdrv_append(filter, bs, NULL); | 234 | error_setg(errp, "Job not found"); |
163 | g_assert_cmpint(ret, <, 0); | 235 | return NULL; |
164 | 236 | @@ -XXX,XX +XXX,XX @@ static Job *find_job(const char *id, AioContext **aio_context, Error **errp) | |
165 | + bdrv_unref(filter); | 237 | void qmp_job_cancel(const char *id, Error **errp) |
166 | blk_unref(root); | 238 | { |
167 | } | 239 | AioContext *aio_context; |
168 | 240 | - Job *job = find_job(id, &aio_context, errp); | |
169 | @@ -XXX,XX +XXX,XX @@ static void test_should_update_child(void) | 241 | + Job *job; |
170 | bdrv_append(filter, bs, &error_abort); | 242 | + |
171 | g_assert(target->backing->bs == bs); | 243 | + JOB_LOCK_GUARD(); |
172 | 244 | + job = find_job_locked(id, &aio_context, errp); | |
173 | + bdrv_unref(filter); | 245 | |
174 | bdrv_unref(bs); | 246 | if (!job) { |
175 | blk_unref(root); | 247 | return; |
176 | } | 248 | } |
177 | @@ -XXX,XX +XXX,XX @@ static void test_append_greedy_filter(void) | 249 | |
178 | &error_abort); | 250 | trace_qmp_job_cancel(job); |
179 | 251 | - job_user_cancel(job, true, errp); | |
180 | bdrv_append(fl, base, &error_abort); | 252 | + job_user_cancel_locked(job, true, errp); |
181 | + bdrv_unref(fl); | 253 | aio_context_release(aio_context); |
182 | bdrv_unref(top); | 254 | } |
183 | } | 255 | |
256 | void qmp_job_pause(const char *id, Error **errp) | ||
257 | { | ||
258 | AioContext *aio_context; | ||
259 | - Job *job = find_job(id, &aio_context, errp); | ||
260 | + Job *job; | ||
261 | + | ||
262 | + JOB_LOCK_GUARD(); | ||
263 | + job = find_job_locked(id, &aio_context, errp); | ||
264 | |||
265 | if (!job) { | ||
266 | return; | ||
267 | } | ||
268 | |||
269 | trace_qmp_job_pause(job); | ||
270 | - job_user_pause(job, errp); | ||
271 | + job_user_pause_locked(job, errp); | ||
272 | aio_context_release(aio_context); | ||
273 | } | ||
274 | |||
275 | void qmp_job_resume(const char *id, Error **errp) | ||
276 | { | ||
277 | AioContext *aio_context; | ||
278 | - Job *job = find_job(id, &aio_context, errp); | ||
279 | + Job *job; | ||
280 | + | ||
281 | + JOB_LOCK_GUARD(); | ||
282 | + job = find_job_locked(id, &aio_context, errp); | ||
283 | |||
284 | if (!job) { | ||
285 | return; | ||
286 | } | ||
287 | |||
288 | trace_qmp_job_resume(job); | ||
289 | - job_user_resume(job, errp); | ||
290 | + job_user_resume_locked(job, errp); | ||
291 | aio_context_release(aio_context); | ||
292 | } | ||
293 | |||
294 | void qmp_job_complete(const char *id, Error **errp) | ||
295 | { | ||
296 | AioContext *aio_context; | ||
297 | - Job *job = find_job(id, &aio_context, errp); | ||
298 | + Job *job; | ||
299 | + | ||
300 | + JOB_LOCK_GUARD(); | ||
301 | + job = find_job_locked(id, &aio_context, errp); | ||
302 | |||
303 | if (!job) { | ||
304 | return; | ||
305 | } | ||
306 | |||
307 | trace_qmp_job_complete(job); | ||
308 | - job_complete(job, errp); | ||
309 | + job_complete_locked(job, errp); | ||
310 | aio_context_release(aio_context); | ||
311 | } | ||
312 | |||
313 | void qmp_job_finalize(const char *id, Error **errp) | ||
314 | { | ||
315 | AioContext *aio_context; | ||
316 | - Job *job = find_job(id, &aio_context, errp); | ||
317 | + Job *job; | ||
318 | + | ||
319 | + JOB_LOCK_GUARD(); | ||
320 | + job = find_job_locked(id, &aio_context, errp); | ||
321 | |||
322 | if (!job) { | ||
323 | return; | ||
324 | } | ||
325 | |||
326 | trace_qmp_job_finalize(job); | ||
327 | - job_ref(job); | ||
328 | - job_finalize(job, errp); | ||
329 | + job_ref_locked(job); | ||
330 | + job_finalize_locked(job, errp); | ||
331 | |||
332 | /* | ||
333 | * Job's context might have changed via job_finalize (and job_txn_apply | ||
334 | @@ -XXX,XX +XXX,XX @@ void qmp_job_finalize(const char *id, Error **errp) | ||
335 | * one. | ||
336 | */ | ||
337 | aio_context = job->aio_context; | ||
338 | - job_unref(job); | ||
339 | + job_unref_locked(job); | ||
340 | aio_context_release(aio_context); | ||
341 | } | ||
342 | |||
343 | void qmp_job_dismiss(const char *id, Error **errp) | ||
344 | { | ||
345 | AioContext *aio_context; | ||
346 | - Job *job = find_job(id, &aio_context, errp); | ||
347 | + Job *job; | ||
348 | + | ||
349 | + JOB_LOCK_GUARD(); | ||
350 | + job = find_job_locked(id, &aio_context, errp); | ||
351 | |||
352 | if (!job) { | ||
353 | return; | ||
354 | } | ||
355 | |||
356 | trace_qmp_job_dismiss(job); | ||
357 | - job_dismiss(&job, errp); | ||
358 | + job_dismiss_locked(&job, errp); | ||
359 | aio_context_release(aio_context); | ||
360 | } | ||
184 | 361 | ||
185 | -- | 362 | -- |
186 | 2.30.2 | 363 | 2.37.3 |
187 | |||
188 | diff view generated by jsdifflib |
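The TOC/TOU fix above comes down to this: the lookup (`find_block_job_locked()` / `find_job_locked()`) and the action on the job (`job_user_cancel_locked()` and friends) must sit inside one critical section, otherwise the job can be dismissed or freed between the two. Here is a small self-contained sketch of the fixed shape, using plain pthreads and an invented in-memory job list rather than the QEMU types:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t job_mutex = PTHREAD_MUTEX_INITIALIZER;

typedef struct Job {
    const char *id;
    bool cancelled;
    struct Job *next;
} Job;

static Job *job_list;   /* protected by job_mutex */

/* Caller must hold job_mutex. */
static Job *find_job_locked(const char *id)
{
    for (Job *j = job_list; j; j = j->next) {
        if (strcmp(j->id, id) == 0) {
            return j;
        }
    }
    return NULL;
}

/*
 * Racy shape (what the patch removes): lookup under the lock, action after
 * dropping it -- the job may be dismissed and freed in between.
 *
 * Fixed shape below: lookup and action share one critical section.
 */
static bool qmp_job_cancel(const char *id)
{
    bool found = false;

    pthread_mutex_lock(&job_mutex);
    Job *job = find_job_locked(id);
    if (job) {
        job->cancelled = true;   /* action applied while still holding the lock */
        found = true;
    }
    pthread_mutex_unlock(&job_mutex);
    return found;
}

int main(void)
{
    Job j = { .id = "job0", .cancelled = false, .next = NULL };

    job_list = &j;
    printf("cancelled: %s\n", qmp_job_cancel("job0") && j.cancelled ? "yes" : "no");
    return 0;
}
```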
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Using bdrv_replace_node() for removing filter is not good enough: it | 3 | Add missing job synchronization in the unit tests, with |
4 | keeps a child reference to the filter, which may conflict with the original | 4 | explicit locks. |
5 | top node during permission update. | 5 | |
6 | 6 | We are deliberately using _locked functions wrapped by a guard | |
7 | Instead, let's create a new interface, which will do all graph | 7 | instead of a normal call because the normal call will be removed |
8 | modifications first and then update permissions. | 8 | in future, as the only usage is limited to the tests. |
9 | 9 | ||
10 | Let's modify bdrv_replace_node_common(), allowing it to additionally drop | 10 | In other words, if a function like job_pause() is/will be only used |
11 | backing chain child link pointing to new node. This is quite | 11 | in tests to avoid: |
12 | appropriate for bdrv_drop_intermediate() and makes possible to add | 12 | |
13 | new bdrv_drop_filter() as a simple wrapper. | 13 | WITH_JOB_LOCK_GUARD(){ |
14 | 14 | job_pause_locked(); | |
15 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 15 | } |
16 | |||
17 | then it is not worth keeping job_pause(), and just use the guard. | ||
18 | |||
19 | Note: at this stage, job_{lock/unlock} and job lock guard macros | ||
20 | are *nop*. | ||
21 | |||
22 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> | ||
23 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
16 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 24 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
17 | Message-Id: <20210428151804.439460-24-vsementsov@virtuozzo.com> | 25 | Message-Id: <20220926093214.506243-10-eesposit@redhat.com> |
18 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 26 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
19 | --- | 27 | --- |
20 | include/block/block.h | 1 + | 28 | tests/unit/test-bdrv-drain.c | 76 ++++++++++++-------- |
21 | block.c | 43 +++++++++++++++++++++++++++++++++++++++---- | 29 | tests/unit/test-block-iothread.c | 8 ++- |
22 | 2 files changed, 40 insertions(+), 4 deletions(-) | 30 | tests/unit/test-blockjob-txn.c | 24 ++++--- |
23 | 31 | tests/unit/test-blockjob.c | 115 +++++++++++++++++++------------ | |
24 | diff --git a/include/block/block.h b/include/block/block.h | 32 | 4 files changed, 140 insertions(+), 83 deletions(-) |
33 | |||
34 | diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c | ||
25 | index XXXXXXX..XXXXXXX 100644 | 35 | index XXXXXXX..XXXXXXX 100644 |
26 | --- a/include/block/block.h | 36 | --- a/tests/unit/test-bdrv-drain.c |
27 | +++ b/include/block/block.h | 37 | +++ b/tests/unit/test-bdrv-drain.c |
28 | @@ -XXX,XX +XXX,XX @@ int bdrv_replace_node(BlockDriverState *from, BlockDriverState *to, | 38 | @@ -XXX,XX +XXX,XX @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, |
29 | Error **errp); | 39 | } |
30 | BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *node_options, | 40 | } |
31 | int flags, Error **errp); | 41 | |
32 | +int bdrv_drop_filter(BlockDriverState *bs, Error **errp); | 42 | - g_assert_cmpint(job->job.pause_count, ==, 0); |
33 | 43 | - g_assert_false(job->job.paused); | |
34 | int bdrv_parse_aio(const char *mode, int *flags); | 44 | - g_assert_true(tjob->running); |
35 | int bdrv_parse_cache_mode(const char *mode, int *flags, bool *writethrough); | 45 | - g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */ |
36 | diff --git a/block.c b/block.c | 46 | + WITH_JOB_LOCK_GUARD() { |
47 | + g_assert_cmpint(job->job.pause_count, ==, 0); | ||
48 | + g_assert_false(job->job.paused); | ||
49 | + g_assert_true(tjob->running); | ||
50 | + g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */ | ||
51 | + } | ||
52 | |||
53 | do_drain_begin_unlocked(drain_type, drain_bs); | ||
54 | |||
55 | - if (drain_type == BDRV_DRAIN_ALL) { | ||
56 | - /* bdrv_drain_all() drains both src and target */ | ||
57 | - g_assert_cmpint(job->job.pause_count, ==, 2); | ||
58 | - } else { | ||
59 | - g_assert_cmpint(job->job.pause_count, ==, 1); | ||
60 | + WITH_JOB_LOCK_GUARD() { | ||
61 | + if (drain_type == BDRV_DRAIN_ALL) { | ||
62 | + /* bdrv_drain_all() drains both src and target */ | ||
63 | + g_assert_cmpint(job->job.pause_count, ==, 2); | ||
64 | + } else { | ||
65 | + g_assert_cmpint(job->job.pause_count, ==, 1); | ||
66 | + } | ||
67 | + g_assert_true(job->job.paused); | ||
68 | + g_assert_false(job->job.busy); /* The job is paused */ | ||
69 | } | ||
70 | - g_assert_true(job->job.paused); | ||
71 | - g_assert_false(job->job.busy); /* The job is paused */ | ||
72 | |||
73 | do_drain_end_unlocked(drain_type, drain_bs); | ||
74 | |||
75 | if (use_iothread) { | ||
76 | - /* paused is reset in the I/O thread, wait for it */ | ||
77 | + /* | ||
78 | + * Here we are waiting for the paused status to change, | ||
79 | + * so don't bother protecting the read every time. | ||
80 | + * | ||
81 | + * paused is reset in the I/O thread, wait for it | ||
82 | + */ | ||
83 | while (job->job.paused) { | ||
84 | aio_poll(qemu_get_aio_context(), false); | ||
85 | } | ||
86 | } | ||
87 | |||
88 | - g_assert_cmpint(job->job.pause_count, ==, 0); | ||
89 | - g_assert_false(job->job.paused); | ||
90 | - g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */ | ||
91 | + WITH_JOB_LOCK_GUARD() { | ||
92 | + g_assert_cmpint(job->job.pause_count, ==, 0); | ||
93 | + g_assert_false(job->job.paused); | ||
94 | + g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */ | ||
95 | + } | ||
96 | |||
97 | do_drain_begin_unlocked(drain_type, target); | ||
98 | |||
99 | - if (drain_type == BDRV_DRAIN_ALL) { | ||
100 | - /* bdrv_drain_all() drains both src and target */ | ||
101 | - g_assert_cmpint(job->job.pause_count, ==, 2); | ||
102 | - } else { | ||
103 | - g_assert_cmpint(job->job.pause_count, ==, 1); | ||
104 | + WITH_JOB_LOCK_GUARD() { | ||
105 | + if (drain_type == BDRV_DRAIN_ALL) { | ||
106 | + /* bdrv_drain_all() drains both src and target */ | ||
107 | + g_assert_cmpint(job->job.pause_count, ==, 2); | ||
108 | + } else { | ||
109 | + g_assert_cmpint(job->job.pause_count, ==, 1); | ||
110 | + } | ||
111 | + g_assert_true(job->job.paused); | ||
112 | + g_assert_false(job->job.busy); /* The job is paused */ | ||
113 | } | ||
114 | - g_assert_true(job->job.paused); | ||
115 | - g_assert_false(job->job.busy); /* The job is paused */ | ||
116 | |||
117 | do_drain_end_unlocked(drain_type, target); | ||
118 | |||
119 | if (use_iothread) { | ||
120 | - /* paused is reset in the I/O thread, wait for it */ | ||
121 | + /* | ||
122 | + * Here we are waiting for the paused status to change, | ||
123 | + * so don't bother protecting the read every time. | ||
124 | + * | ||
125 | + * paused is reset in the I/O thread, wait for it | ||
126 | + */ | ||
127 | while (job->job.paused) { | ||
128 | aio_poll(qemu_get_aio_context(), false); | ||
129 | } | ||
130 | } | ||
131 | |||
132 | - g_assert_cmpint(job->job.pause_count, ==, 0); | ||
133 | - g_assert_false(job->job.paused); | ||
134 | - g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */ | ||
135 | + WITH_JOB_LOCK_GUARD() { | ||
136 | + g_assert_cmpint(job->job.pause_count, ==, 0); | ||
137 | + g_assert_false(job->job.paused); | ||
138 | + g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */ | ||
139 | + } | ||
140 | |||
141 | aio_context_acquire(ctx); | ||
142 | - ret = job_complete_sync(&job->job, &error_abort); | ||
143 | + WITH_JOB_LOCK_GUARD() { | ||
144 | + ret = job_complete_sync_locked(&job->job, &error_abort); | ||
145 | + } | ||
146 | g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO)); | ||
147 | |||
148 | if (use_iothread) { | ||
149 | diff --git a/tests/unit/test-block-iothread.c b/tests/unit/test-block-iothread.c | ||
37 | index XXXXXXX..XXXXXXX 100644 | 150 | index XXXXXXX..XXXXXXX 100644 |
38 | --- a/block.c | 151 | --- a/tests/unit/test-block-iothread.c |
39 | +++ b/block.c | 152 | +++ b/tests/unit/test-block-iothread.c |
40 | @@ -XXX,XX +XXX,XX @@ static TransactionActionDrv bdrv_remove_filter_or_cow_child_drv = { | 153 | @@ -XXX,XX +XXX,XX @@ static void test_attach_blockjob(void) |
41 | * format nodes (always .backing) and filter child for filters (may be .file or | 154 | } |
42 | * .backing) | 155 | |
43 | */ | 156 | aio_context_acquire(ctx); |
44 | -__attribute__((unused)) | 157 | - job_complete_sync(&tjob->common.job, &error_abort); |
45 | static void bdrv_remove_filter_or_cow_child(BlockDriverState *bs, | 158 | + WITH_JOB_LOCK_GUARD() { |
46 | Transaction *tran) | 159 | + job_complete_sync_locked(&tjob->common.job, &error_abort); |
47 | { | 160 | + } |
48 | @@ -XXX,XX +XXX,XX @@ static int bdrv_replace_node_noperm(BlockDriverState *from, | 161 | blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort); |
49 | * | 162 | aio_context_release(ctx); |
50 | * With auto_skip=false the error is returned if from has a parent which should | 163 | |
51 | * not be updated. | 164 | @@ -XXX,XX +XXX,XX @@ static void test_propagate_mirror(void) |
52 | + * | 165 | BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT, |
53 | + * With @detach_subchain=true @to must be in a backing chain of @from. In this | 166 | false, "filter_node", MIRROR_COPY_MODE_BACKGROUND, |
54 | + * case backing link of the cow-parent of @to is removed. | 167 | &error_abort); |
55 | */ | 168 | - job = job_get("job0"); |
56 | static int bdrv_replace_node_common(BlockDriverState *from, | 169 | + WITH_JOB_LOCK_GUARD() { |
57 | BlockDriverState *to, | 170 | + job = job_get_locked("job0"); |
58 | - bool auto_skip, Error **errp) | 171 | + } |
59 | + bool auto_skip, bool detach_subchain, | 172 | filter = bdrv_find_node("filter_node"); |
60 | + Error **errp) | 173 | |
61 | { | 174 | /* Change the AioContext of src */ |
62 | Transaction *tran = tran_new(); | 175 | diff --git a/tests/unit/test-blockjob-txn.c b/tests/unit/test-blockjob-txn.c |
63 | g_autoptr(GHashTable) found = NULL; | 176 | index XXXXXXX..XXXXXXX 100644 |
64 | g_autoptr(GSList) refresh_list = NULL; | 177 | --- a/tests/unit/test-blockjob-txn.c |
65 | + BlockDriverState *to_cow_parent; | 178 | +++ b/tests/unit/test-blockjob-txn.c |
66 | int ret; | 179 | @@ -XXX,XX +XXX,XX @@ static void test_single_job(int expected) |
67 | 180 | job = test_block_job_start(1, true, expected, &result, txn); | |
68 | + if (detach_subchain) { | 181 | job_start(&job->job); |
69 | + assert(bdrv_chain_contains(from, to)); | 182 | |
70 | + assert(from != to); | 183 | - if (expected == -ECANCELED) { |
71 | + for (to_cow_parent = from; | 184 | - job_cancel(&job->job, false); |
72 | + bdrv_filter_or_cow_bs(to_cow_parent) != to; | 185 | + WITH_JOB_LOCK_GUARD() { |
73 | + to_cow_parent = bdrv_filter_or_cow_bs(to_cow_parent)) | 186 | + if (expected == -ECANCELED) { |
74 | + { | 187 | + job_cancel_locked(&job->job, false); |
75 | + ; | 188 | + } |
76 | + } | 189 | } |
190 | |||
191 | while (result == -EINPROGRESS) { | ||
192 | @@ -XXX,XX +XXX,XX @@ static void test_pair_jobs(int expected1, int expected2) | ||
193 | /* Release our reference now to trigger as many nice | ||
194 | * use-after-free bugs as possible. | ||
195 | */ | ||
196 | - job_txn_unref(txn); | ||
197 | + WITH_JOB_LOCK_GUARD() { | ||
198 | + job_txn_unref_locked(txn); | ||
199 | |||
200 | - if (expected1 == -ECANCELED) { | ||
201 | - job_cancel(&job1->job, false); | ||
202 | - } | ||
203 | - if (expected2 == -ECANCELED) { | ||
204 | - job_cancel(&job2->job, false); | ||
205 | + if (expected1 == -ECANCELED) { | ||
206 | + job_cancel_locked(&job1->job, false); | ||
207 | + } | ||
208 | + if (expected2 == -ECANCELED) { | ||
209 | + job_cancel_locked(&job2->job, false); | ||
210 | + } | ||
211 | } | ||
212 | |||
213 | while (result1 == -EINPROGRESS || result2 == -EINPROGRESS) { | ||
214 | @@ -XXX,XX +XXX,XX @@ static void test_pair_jobs_fail_cancel_race(void) | ||
215 | job_start(&job1->job); | ||
216 | job_start(&job2->job); | ||
217 | |||
218 | - job_cancel(&job1->job, false); | ||
219 | + WITH_JOB_LOCK_GUARD() { | ||
220 | + job_cancel_locked(&job1->job, false); | ||
221 | + } | ||
222 | |||
223 | /* Now make job2 finish before the main loop kicks jobs. This simulates | ||
224 | * the race between a pending kick and another job completing. | ||
225 | diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c | ||
226 | index XXXXXXX..XXXXXXX 100644 | ||
227 | --- a/tests/unit/test-blockjob.c | ||
228 | +++ b/tests/unit/test-blockjob.c | ||
229 | @@ -XXX,XX +XXX,XX @@ static CancelJob *create_common(Job **pjob) | ||
230 | bjob = mk_job(blk, "Steve", &test_cancel_driver, true, | ||
231 | JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS); | ||
232 | job = &bjob->job; | ||
233 | - job_ref(job); | ||
234 | - assert(job->status == JOB_STATUS_CREATED); | ||
235 | + WITH_JOB_LOCK_GUARD() { | ||
236 | + job_ref_locked(job); | ||
237 | + assert(job->status == JOB_STATUS_CREATED); | ||
77 | + } | 238 | + } |
78 | + | 239 | + |
79 | /* Make sure that @from doesn't go away until we have successfully attached | 240 | s = container_of(bjob, CancelJob, common); |
80 | * all of its parents to @to. */ | 241 | s->blk = blk; |
81 | bdrv_ref(from); | 242 | |
82 | @@ -XXX,XX +XXX,XX @@ static int bdrv_replace_node_common(BlockDriverState *from, | 243 | @@ -XXX,XX +XXX,XX @@ static void cancel_common(CancelJob *s) |
83 | goto out; | 244 | aio_context_acquire(ctx); |
84 | } | 245 | |
85 | 246 | job_cancel_sync(&job->job, true); | |
86 | + if (detach_subchain) { | 247 | - if (sts != JOB_STATUS_CREATED && sts != JOB_STATUS_CONCLUDED) { |
87 | + bdrv_remove_filter_or_cow_child(to_cow_parent, tran); | 248 | - Job *dummy = &job->job; |
88 | + } | 249 | - job_dismiss(&dummy, &error_abort); |
89 | + | 250 | + WITH_JOB_LOCK_GUARD() { |
90 | found = g_hash_table_new(NULL, NULL); | 251 | + if (sts != JOB_STATUS_CREATED && sts != JOB_STATUS_CONCLUDED) { |
91 | 252 | + Job *dummy = &job->job; | |
92 | refresh_list = bdrv_topological_dfs(refresh_list, found, to); | 253 | + job_dismiss_locked(&dummy, &error_abort); |
93 | @@ -XXX,XX +XXX,XX @@ out: | 254 | + } |
94 | int bdrv_replace_node(BlockDriverState *from, BlockDriverState *to, | 255 | + assert(job->job.status == JOB_STATUS_NULL); |
95 | Error **errp) | 256 | + job_unref_locked(&job->job); |
96 | { | 257 | } |
97 | - return bdrv_replace_node_common(from, to, true, errp); | 258 | - assert(job->job.status == JOB_STATUS_NULL); |
98 | + return bdrv_replace_node_common(from, to, true, false, errp); | 259 | - job_unref(&job->job); |
260 | destroy_blk(blk); | ||
261 | |||
262 | aio_context_release(ctx); | ||
263 | @@ -XXX,XX +XXX,XX @@ static void test_cancel_created(void) | ||
264 | cancel_common(s); | ||
265 | } | ||
266 | |||
267 | +static void assert_job_status_is(Job *job, int status) | ||
268 | +{ | ||
269 | + WITH_JOB_LOCK_GUARD() { | ||
270 | + assert(job->status == status); | ||
271 | + } | ||
99 | +} | 272 | +} |
100 | + | 273 | + |
101 | +int bdrv_drop_filter(BlockDriverState *bs, Error **errp) | 274 | static void test_cancel_running(void) |
102 | +{ | 275 | { |
103 | + return bdrv_replace_node_common(bs, bdrv_filter_or_cow_bs(bs), true, true, | 276 | Job *job; |
104 | + errp); | 277 | @@ -XXX,XX +XXX,XX @@ static void test_cancel_running(void) |
105 | } | 278 | s = create_common(&job); |
106 | 279 | ||
107 | /* | 280 | job_start(job); |
108 | @@ -XXX,XX +XXX,XX @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base, | 281 | - assert(job->status == JOB_STATUS_RUNNING); |
109 | updated_children = g_slist_prepend(updated_children, c); | 282 | + assert_job_status_is(job, JOB_STATUS_RUNNING); |
110 | } | 283 | |
111 | 284 | cancel_common(s); | |
112 | - bdrv_replace_node_common(top, base, false, &local_err); | 285 | } |
286 | @@ -XXX,XX +XXX,XX @@ static void test_cancel_paused(void) | ||
287 | s = create_common(&job); | ||
288 | |||
289 | job_start(job); | ||
290 | - assert(job->status == JOB_STATUS_RUNNING); | ||
291 | - | ||
292 | - job_user_pause(job, &error_abort); | ||
293 | + WITH_JOB_LOCK_GUARD() { | ||
294 | + assert(job->status == JOB_STATUS_RUNNING); | ||
295 | + job_user_pause_locked(job, &error_abort); | ||
296 | + } | ||
297 | job_enter(job); | ||
298 | - assert(job->status == JOB_STATUS_PAUSED); | ||
299 | + assert_job_status_is(job, JOB_STATUS_PAUSED); | ||
300 | |||
301 | cancel_common(s); | ||
302 | } | ||
303 | @@ -XXX,XX +XXX,XX @@ static void test_cancel_ready(void) | ||
304 | s = create_common(&job); | ||
305 | |||
306 | job_start(job); | ||
307 | - assert(job->status == JOB_STATUS_RUNNING); | ||
308 | + assert_job_status_is(job, JOB_STATUS_RUNNING); | ||
309 | |||
310 | s->should_converge = true; | ||
311 | job_enter(job); | ||
312 | - assert(job->status == JOB_STATUS_READY); | ||
313 | + assert_job_status_is(job, JOB_STATUS_READY); | ||
314 | |||
315 | cancel_common(s); | ||
316 | } | ||
317 | @@ -XXX,XX +XXX,XX @@ static void test_cancel_standby(void) | ||
318 | s = create_common(&job); | ||
319 | |||
320 | job_start(job); | ||
321 | - assert(job->status == JOB_STATUS_RUNNING); | ||
322 | + assert_job_status_is(job, JOB_STATUS_RUNNING); | ||
323 | |||
324 | s->should_converge = true; | ||
325 | job_enter(job); | ||
326 | - assert(job->status == JOB_STATUS_READY); | ||
327 | - | ||
328 | - job_user_pause(job, &error_abort); | ||
329 | + WITH_JOB_LOCK_GUARD() { | ||
330 | + assert(job->status == JOB_STATUS_READY); | ||
331 | + job_user_pause_locked(job, &error_abort); | ||
332 | + } | ||
333 | job_enter(job); | ||
334 | - assert(job->status == JOB_STATUS_STANDBY); | ||
335 | + assert_job_status_is(job, JOB_STATUS_STANDBY); | ||
336 | |||
337 | cancel_common(s); | ||
338 | } | ||
339 | @@ -XXX,XX +XXX,XX @@ static void test_cancel_pending(void) | ||
340 | s = create_common(&job); | ||
341 | |||
342 | job_start(job); | ||
343 | - assert(job->status == JOB_STATUS_RUNNING); | ||
344 | + assert_job_status_is(job, JOB_STATUS_RUNNING); | ||
345 | |||
346 | s->should_converge = true; | ||
347 | job_enter(job); | ||
348 | - assert(job->status == JOB_STATUS_READY); | ||
349 | - | ||
350 | - job_complete(job, &error_abort); | ||
351 | + WITH_JOB_LOCK_GUARD() { | ||
352 | + assert(job->status == JOB_STATUS_READY); | ||
353 | + job_complete_locked(job, &error_abort); | ||
354 | + } | ||
355 | job_enter(job); | ||
356 | while (!job->deferred_to_main_loop) { | ||
357 | aio_poll(qemu_get_aio_context(), true); | ||
358 | } | ||
359 | - assert(job->status == JOB_STATUS_READY); | ||
360 | + assert_job_status_is(job, JOB_STATUS_READY); | ||
361 | aio_poll(qemu_get_aio_context(), true); | ||
362 | - assert(job->status == JOB_STATUS_PENDING); | ||
363 | + assert_job_status_is(job, JOB_STATUS_PENDING); | ||
364 | |||
365 | cancel_common(s); | ||
366 | } | ||
367 | @@ -XXX,XX +XXX,XX @@ static void test_cancel_concluded(void) | ||
368 | s = create_common(&job); | ||
369 | |||
370 | job_start(job); | ||
371 | - assert(job->status == JOB_STATUS_RUNNING); | ||
372 | + assert_job_status_is(job, JOB_STATUS_RUNNING); | ||
373 | |||
374 | s->should_converge = true; | ||
375 | job_enter(job); | ||
376 | - assert(job->status == JOB_STATUS_READY); | ||
377 | - | ||
378 | - job_complete(job, &error_abort); | ||
379 | + WITH_JOB_LOCK_GUARD() { | ||
380 | + assert(job->status == JOB_STATUS_READY); | ||
381 | + job_complete_locked(job, &error_abort); | ||
382 | + } | ||
383 | job_enter(job); | ||
384 | while (!job->deferred_to_main_loop) { | ||
385 | aio_poll(qemu_get_aio_context(), true); | ||
386 | } | ||
387 | - assert(job->status == JOB_STATUS_READY); | ||
388 | + assert_job_status_is(job, JOB_STATUS_READY); | ||
389 | aio_poll(qemu_get_aio_context(), true); | ||
390 | - assert(job->status == JOB_STATUS_PENDING); | ||
391 | + assert_job_status_is(job, JOB_STATUS_PENDING); | ||
392 | |||
393 | aio_context_acquire(job->aio_context); | ||
394 | - job_finalize(job, &error_abort); | ||
395 | + WITH_JOB_LOCK_GUARD() { | ||
396 | + job_finalize_locked(job, &error_abort); | ||
397 | + } | ||
398 | aio_context_release(job->aio_context); | ||
399 | - assert(job->status == JOB_STATUS_CONCLUDED); | ||
400 | + assert_job_status_is(job, JOB_STATUS_CONCLUDED); | ||
401 | |||
402 | cancel_common(s); | ||
403 | } | ||
404 | @@ -XXX,XX +XXX,XX @@ static void test_complete_in_standby(void) | ||
405 | bjob = mk_job(blk, "job", &test_yielding_driver, true, | ||
406 | JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS); | ||
407 | job = &bjob->job; | ||
408 | - assert(job->status == JOB_STATUS_CREATED); | ||
409 | + assert_job_status_is(job, JOB_STATUS_CREATED); | ||
410 | |||
411 | /* Wait for the job to become READY */ | ||
412 | job_start(job); | ||
413 | aio_context_acquire(ctx); | ||
113 | + /* | 414 | + /* |
114 | + * It seems correct to pass detach_subchain=true here, but it triggers | 415 | + * Here we are waiting for the status to change, so don't bother |
115 | + * one more yet not fixed bug, when due to nested aio_poll loop we switch to | 416 | + * protecting the read every time. |
116 | + * another drained section, which modify the graph (for example, removing | ||
117 | + * the child, which we keep in updated_children list). So, it's a TODO. | ||
118 | + * | ||
119 | + * Note, bug triggered if pass detach_subchain=true here and run | ||
120 | + * test-bdrv-drain. test_drop_intermediate_poll() test-case will crash. | ||
121 | + * That's a FIXME. | ||
122 | + */ | 417 | + */ |
123 | + bdrv_replace_node_common(top, base, false, false, &local_err); | 418 | AIO_WAIT_WHILE(ctx, job->status != JOB_STATUS_READY); |
124 | if (local_err) { | 419 | aio_context_release(ctx); |
125 | error_report_err(local_err); | 420 | |
126 | goto exit; | 421 | /* Begin the drained section, pausing the job */ |
422 | bdrv_drain_all_begin(); | ||
423 | - assert(job->status == JOB_STATUS_STANDBY); | ||
424 | + assert_job_status_is(job, JOB_STATUS_STANDBY); | ||
425 | + | ||
426 | /* Lock the IO thread to prevent the job from being run */ | ||
427 | aio_context_acquire(ctx); | ||
428 | /* This will schedule the job to resume it */ | ||
429 | bdrv_drain_all_end(); | ||
430 | |||
431 | - /* But the job cannot run, so it will remain on standby */ | ||
432 | - assert(job->status == JOB_STATUS_STANDBY); | ||
433 | + WITH_JOB_LOCK_GUARD() { | ||
434 | + /* But the job cannot run, so it will remain on standby */ | ||
435 | + assert(job->status == JOB_STATUS_STANDBY); | ||
436 | |||
437 | - /* Even though the job is on standby, this should work */ | ||
438 | - job_complete(job, &error_abort); | ||
439 | + /* Even though the job is on standby, this should work */ | ||
440 | + job_complete_locked(job, &error_abort); | ||
441 | |||
442 | - /* The test is done now, clean up. */ | ||
443 | - job_finish_sync(job, NULL, &error_abort); | ||
444 | - assert(job->status == JOB_STATUS_PENDING); | ||
445 | + /* The test is done now, clean up. */ | ||
446 | + job_finish_sync_locked(job, NULL, &error_abort); | ||
447 | + assert(job->status == JOB_STATUS_PENDING); | ||
448 | |||
449 | - job_finalize(job, &error_abort); | ||
450 | - assert(job->status == JOB_STATUS_CONCLUDED); | ||
451 | + job_finalize_locked(job, &error_abort); | ||
452 | + assert(job->status == JOB_STATUS_CONCLUDED); | ||
453 | |||
454 | - job_dismiss(&job, &error_abort); | ||
455 | + job_dismiss_locked(&job, &error_abort); | ||
456 | + } | ||
457 | |||
458 | destroy_blk(blk); | ||
459 | aio_context_release(ctx); | ||
127 | -- | 460 | -- |
128 | 2.30.2 | 461 | 2.37.3 |
129 | |||
130 | diff view generated by jsdifflib |
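The test changes above rely on `WITH_JOB_LOCK_GUARD()`, a scope guard that holds the job mutex for exactly one block and releases it on every exit path, so several `_locked()` calls and assertions happen atomically. As a rough standalone illustration of how such a guard can be built on the GCC/Clang cleanup attribute (QEMU's own lock guards use the same mechanism), with invented names and a plain pthread mutex standing in for job_mutex:

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t job_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Unlock automatically when the guard variable goes out of scope. */
static void lock_guard_cleanup(pthread_mutex_t **m)
{
    pthread_mutex_unlock(*m);
}

/* One-iteration for loop: lock on entry, unlock via the cleanup attribute. */
#define WITH_LOCK_GUARD(m)                                                    \
    for (pthread_mutex_t *guard_ __attribute__((cleanup(lock_guard_cleanup))) \
             = (pthread_mutex_lock(m), (m));                                  \
         guard_; guard_ = NULL)

static int job_status;   /* protected by job_mutex */

/* Caller must hold job_mutex. */
static void job_pause_locked(void)
{
    job_status = 1;
}

int main(void)
{
    WITH_LOCK_GUARD(&job_mutex) {
        job_pause_locked();                 /* only _locked helpers in here */
        printf("status: %d\n", job_status);
    }   /* lock released here, on any exit path out of the block */
    return 0;
}
```

Because the unlock is tied to the guard variable's scope, an early return or goto out of the block still releases the mutex, which is what makes the pattern convenient in test code.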
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Split out the no-perm part of bdrv_root_attach_child() into a separate | 3 | Once job lock is used and aiocontext is removed, mirror has |
4 | transaction action. bdrv_root_attach_child() now moves to the new | 4 | to perform job operations under the same critical section. |
5 | permission update paradigm: first update graph relations then update | 5 | Note: at this stage, job_{lock/unlock} and job lock guard macros |
6 | permissions. | 6 | are *nop*. |
7 | 7 | ||
8 | qsd-jobs test output updated. Seems now permission update goes in | 8 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
9 | another order. Still, the test comment says that we only want to check | 9 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> |
10 | that command doesn't crash, and it's still so. | 10 | Message-Id: <20220926093214.506243-11-eesposit@redhat.com> |
11 | 11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | |
12 | Error message is a bit misleading as it looks like job was added first. | ||
13 | But actually in new paradigm of graph update we can't distinguish such | ||
14 | things. We should update the error message, but let's not do it now. | ||
15 | |||
16 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
17 | Message-Id: <20210428151804.439460-19-vsementsov@virtuozzo.com> | ||
18 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
19 | --- | 13 | --- |
20 | block.c | 190 ++++++++++++++++++-------- | 14 | block/mirror.c | 13 +++++++++---- |
21 | tests/qemu-iotests/tests/qsd-jobs.out | 2 +- | 15 | 1 file changed, 9 insertions(+), 4 deletions(-) |
22 | 2 files changed, 137 insertions(+), 55 deletions(-) | ||
23 | 16 | ||
24 | diff --git a/block.c b/block.c | 17 | diff --git a/block/mirror.c b/block/mirror.c |
25 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
26 | --- a/block.c | 19 | --- a/block/mirror.c |
27 | +++ b/block.c | 20 | +++ b/block/mirror.c |
28 | @@ -XXX,XX +XXX,XX @@ static void bdrv_replace_child(BdrvChild *child, BlockDriverState *new_bs) | 21 | @@ -XXX,XX +XXX,XX @@ static void mirror_complete(Job *job, Error **errp) |
22 | s->should_complete = true; | ||
23 | |||
24 | /* If the job is paused, it will be re-entered when it is resumed */ | ||
25 | - if (!job->paused) { | ||
26 | - job_enter(job); | ||
27 | + WITH_JOB_LOCK_GUARD() { | ||
28 | + if (!job->paused) { | ||
29 | + job_enter_cond_locked(job, NULL); | ||
30 | + } | ||
29 | } | 31 | } |
30 | } | 32 | } |
31 | 33 | ||
32 | -/* | 34 | @@ -XXX,XX +XXX,XX @@ static bool mirror_drained_poll(BlockJob *job) |
33 | - * This function steals the reference to child_bs from the caller. | 35 | * from one of our own drain sections, to avoid a deadlock waiting for |
34 | - * That reference is later dropped by bdrv_root_unref_child(). | 36 | * ourselves. |
35 | - * | 37 | */ |
36 | - * On failure NULL is returned, errp is set and the reference to | 38 | - if (!s->common.job.paused && !job_is_cancelled(&job->job) && !s->in_drain) { |
37 | - * child_bs is also dropped. | 39 | - return true; |
38 | - * | 40 | + WITH_JOB_LOCK_GUARD() { |
39 | - * The caller must hold the AioContext lock @child_bs, but not that of @ctx | 41 | + if (!s->common.job.paused && !job_is_cancelled_locked(&job->job) |
40 | - * (unless @child_bs is already in @ctx). | 42 | + && !s->in_drain) { |
41 | - */ | 43 | + return true; |
42 | -BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs, | 44 | + } |
43 | - const char *child_name, | ||
44 | - const BdrvChildClass *child_class, | ||
45 | - BdrvChildRole child_role, | ||
46 | - uint64_t perm, uint64_t shared_perm, | ||
47 | - void *opaque, Error **errp) | ||
48 | +static void bdrv_remove_empty_child(BdrvChild *child) | ||
49 | { | ||
50 | - BdrvChild *child; | ||
51 | - Error *local_err = NULL; | ||
52 | - int ret; | ||
53 | - AioContext *ctx; | ||
54 | + assert(!child->bs); | ||
55 | + QLIST_SAFE_REMOVE(child, next); | ||
56 | + g_free(child->name); | ||
57 | + g_free(child); | ||
58 | +} | ||
59 | |||
60 | - ret = bdrv_check_update_perm(child_bs, NULL, perm, shared_perm, NULL, errp); | ||
61 | - if (ret < 0) { | ||
62 | - bdrv_abort_perm_update(child_bs); | ||
63 | - bdrv_unref(child_bs); | ||
64 | - return NULL; | ||
65 | +typedef struct BdrvAttachChildCommonState { | ||
66 | + BdrvChild **child; | ||
67 | + AioContext *old_parent_ctx; | ||
68 | + AioContext *old_child_ctx; | ||
69 | +} BdrvAttachChildCommonState; | ||
70 | + | ||
71 | +static void bdrv_attach_child_common_abort(void *opaque) | ||
72 | +{ | ||
73 | + BdrvAttachChildCommonState *s = opaque; | ||
74 | + BdrvChild *child = *s->child; | ||
75 | + BlockDriverState *bs = child->bs; | ||
76 | + | ||
77 | + bdrv_replace_child_noperm(child, NULL); | ||
78 | + | ||
79 | + if (bdrv_get_aio_context(bs) != s->old_child_ctx) { | ||
80 | + bdrv_try_set_aio_context(bs, s->old_child_ctx, &error_abort); | ||
81 | } | 45 | } |
82 | 46 | ||
83 | - child = g_new(BdrvChild, 1); | 47 | return !!s->in_flight; |
84 | - *child = (BdrvChild) { | ||
85 | + if (bdrv_child_get_parent_aio_context(child) != s->old_parent_ctx) { | ||
86 | + GSList *ignore = g_slist_prepend(NULL, child); | ||
87 | + | ||
88 | + child->klass->can_set_aio_ctx(child, s->old_parent_ctx, &ignore, | ||
89 | + &error_abort); | ||
90 | + g_slist_free(ignore); | ||
91 | + ignore = g_slist_prepend(NULL, child); | ||
92 | + child->klass->set_aio_ctx(child, s->old_parent_ctx, &ignore); | ||
93 | + | ||
94 | + g_slist_free(ignore); | ||
95 | + } | ||
96 | + | ||
97 | + bdrv_unref(bs); | ||
98 | + bdrv_remove_empty_child(child); | ||
99 | + *s->child = NULL; | ||
100 | +} | ||
101 | + | ||
102 | +static TransactionActionDrv bdrv_attach_child_common_drv = { | ||
103 | + .abort = bdrv_attach_child_common_abort, | ||
104 | + .clean = g_free, | ||
105 | +}; | ||
106 | + | ||
107 | +/* | ||
108 | + * Common part of attaching bdrv child to bs or to blk or to job | ||
109 | + */ | ||
110 | +static int bdrv_attach_child_common(BlockDriverState *child_bs, | ||
111 | + const char *child_name, | ||
112 | + const BdrvChildClass *child_class, | ||
113 | + BdrvChildRole child_role, | ||
114 | + uint64_t perm, uint64_t shared_perm, | ||
115 | + void *opaque, BdrvChild **child, | ||
116 | + Transaction *tran, Error **errp) | ||
117 | +{ | ||
118 | + BdrvChild *new_child; | ||
119 | + AioContext *parent_ctx; | ||
120 | + AioContext *child_ctx = bdrv_get_aio_context(child_bs); | ||
121 | + | ||
122 | + assert(child); | ||
123 | + assert(*child == NULL); | ||
124 | + | ||
125 | + new_child = g_new(BdrvChild, 1); | ||
126 | + *new_child = (BdrvChild) { | ||
127 | .bs = NULL, | ||
128 | .name = g_strdup(child_name), | ||
129 | .klass = child_class, | ||
130 | @@ -XXX,XX +XXX,XX @@ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs, | ||
131 | .opaque = opaque, | ||
132 | }; | ||
133 | |||
134 | - ctx = bdrv_child_get_parent_aio_context(child); | ||
135 | - | ||
136 | - /* If the AioContexts don't match, first try to move the subtree of | ||
137 | + /* | ||
138 | + * If the AioContexts don't match, first try to move the subtree of | ||
139 | * child_bs into the AioContext of the new parent. If this doesn't work, | ||
140 | - * try moving the parent into the AioContext of child_bs instead. */ | ||
141 | - if (bdrv_get_aio_context(child_bs) != ctx) { | ||
142 | - ret = bdrv_try_set_aio_context(child_bs, ctx, &local_err); | ||
143 | + * try moving the parent into the AioContext of child_bs instead. | ||
144 | + */ | ||
145 | + parent_ctx = bdrv_child_get_parent_aio_context(new_child); | ||
146 | + if (child_ctx != parent_ctx) { | ||
147 | + Error *local_err = NULL; | ||
148 | + int ret = bdrv_try_set_aio_context(child_bs, parent_ctx, &local_err); | ||
149 | + | ||
150 | if (ret < 0 && child_class->can_set_aio_ctx) { | ||
151 | - GSList *ignore = g_slist_prepend(NULL, child); | ||
152 | - ctx = bdrv_get_aio_context(child_bs); | ||
153 | - if (child_class->can_set_aio_ctx(child, ctx, &ignore, NULL)) { | ||
154 | + GSList *ignore = g_slist_prepend(NULL, new_child); | ||
155 | + if (child_class->can_set_aio_ctx(new_child, child_ctx, &ignore, | ||
156 | + NULL)) | ||
157 | + { | ||
158 | error_free(local_err); | ||
159 | ret = 0; | ||
160 | g_slist_free(ignore); | ||
161 | - ignore = g_slist_prepend(NULL, child); | ||
162 | - child_class->set_aio_ctx(child, ctx, &ignore); | ||
163 | + ignore = g_slist_prepend(NULL, new_child); | ||
164 | + child_class->set_aio_ctx(new_child, child_ctx, &ignore); | ||
165 | } | ||
166 | g_slist_free(ignore); | ||
167 | } | ||
168 | + | ||
169 | if (ret < 0) { | ||
170 | error_propagate(errp, local_err); | ||
171 | - g_free(child); | ||
172 | - bdrv_abort_perm_update(child_bs); | ||
173 | - bdrv_unref(child_bs); | ||
174 | - return NULL; | ||
175 | + bdrv_remove_empty_child(new_child); | ||
176 | + return ret; | ||
177 | } | ||
178 | } | ||
179 | |||
180 | - /* This performs the matching bdrv_set_perm() for the above check. */ | ||
181 | - bdrv_replace_child(child, child_bs); | ||
182 | + bdrv_ref(child_bs); | ||
183 | + bdrv_replace_child_noperm(new_child, child_bs); | ||
184 | |||
185 | + *child = new_child; | ||
186 | + | ||
187 | + BdrvAttachChildCommonState *s = g_new(BdrvAttachChildCommonState, 1); | ||
188 | + *s = (BdrvAttachChildCommonState) { | ||
189 | + .child = child, | ||
190 | + .old_parent_ctx = parent_ctx, | ||
191 | + .old_child_ctx = child_ctx, | ||
192 | + }; | ||
193 | + tran_add(tran, &bdrv_attach_child_common_drv, s); | ||
194 | + | ||
195 | + return 0; | ||
196 | +} | ||
197 | + | ||
198 | +static void bdrv_detach_child(BdrvChild *child) | ||
199 | +{ | ||
200 | + bdrv_replace_child(child, NULL); | ||
201 | + bdrv_remove_empty_child(child); | ||
202 | +} | ||
203 | + | ||
204 | +/* | ||
205 | + * This function steals the reference to child_bs from the caller. | ||
206 | + * That reference is later dropped by bdrv_root_unref_child(). | ||
207 | + * | ||
208 | + * On failure NULL is returned, errp is set and the reference to | ||
209 | + * child_bs is also dropped. | ||
210 | + * | ||
211 | + * The caller must hold the AioContext lock @child_bs, but not that of @ctx | ||
212 | + * (unless @child_bs is already in @ctx). | ||
213 | + */ | ||
214 | +BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs, | ||
215 | + const char *child_name, | ||
216 | + const BdrvChildClass *child_class, | ||
217 | + BdrvChildRole child_role, | ||
218 | + uint64_t perm, uint64_t shared_perm, | ||
219 | + void *opaque, Error **errp) | ||
220 | +{ | ||
221 | + int ret; | ||
222 | + BdrvChild *child = NULL; | ||
223 | + Transaction *tran = tran_new(); | ||
224 | + | ||
225 | + ret = bdrv_attach_child_common(child_bs, child_name, child_class, | ||
226 | + child_role, perm, shared_perm, opaque, | ||
227 | + &child, tran, errp); | ||
228 | + if (ret < 0) { | ||
229 | + bdrv_unref(child_bs); | ||
230 | + return NULL; | ||
231 | + } | ||
232 | + | ||
233 | + ret = bdrv_refresh_perms(child_bs, errp); | ||
234 | + tran_finalize(tran, ret); | ||
235 | + | ||
236 | + bdrv_unref(child_bs); | ||
237 | return child; | ||
238 | } | ||
239 | |||
240 | @@ -XXX,XX +XXX,XX @@ BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs, | ||
241 | return child; | ||
242 | } | ||
243 | |||
244 | -static void bdrv_detach_child(BdrvChild *child) | ||
245 | -{ | ||
246 | - QLIST_SAFE_REMOVE(child, next); | ||
247 | - | ||
248 | - bdrv_replace_child(child, NULL); | ||
249 | - | ||
250 | - g_free(child->name); | ||
251 | - g_free(child); | ||
252 | -} | ||
253 | - | ||
254 | /* Callers must ensure that child->frozen is false. */ | ||
255 | void bdrv_root_unref_child(BdrvChild *child) | ||
256 | { | ||
257 | diff --git a/tests/qemu-iotests/tests/qsd-jobs.out b/tests/qemu-iotests/tests/qsd-jobs.out | ||
258 | index XXXXXXX..XXXXXXX 100644 | ||
259 | --- a/tests/qemu-iotests/tests/qsd-jobs.out | ||
260 | +++ b/tests/qemu-iotests/tests/qsd-jobs.out | ||
261 | @@ -XXX,XX +XXX,XX @@ QMP_VERSION | ||
262 | {"return": {}} | ||
263 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}} | ||
264 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job0"}} | ||
265 | -{"error": {"class": "GenericError", "desc": "Conflicts with use by a block device as 'root', which uses 'write' on fmt_base"}} | ||
266 | +{"error": {"class": "GenericError", "desc": "Conflicts with use by stream job 'job0' as 'intermediate node', which does not allow 'write' on fmt_base"}} | ||
267 | {"return": {}} | ||
268 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "export1"}} | ||
269 | *** done | ||
270 | -- | 48 | -- |
271 | 2.30.2 | 49 | 2.37.3 |
272 | |||
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | To be used in the following commit. | 3 | Now that the API also offers _locked() functions, take advantage |
4 | 4 | of them and give the caller control to take the lock and call the |
5 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 5 | _locked() functions. |
6 | |||
7 | This is especially useful for loops, where it makes no sense |
8 | to have: |
9 | |||
10 | for(job = job_next(); ...) | ||
11 | |||
12 | where each job_next() takes the lock internally. | ||
13 | Instead we want | ||
14 | |||
15 | JOB_LOCK_GUARD(); | ||
16 | for(job = job_next_locked(); ...) | ||
17 | |||
18 | In addition, also protect direct field accesses, either by creating a |
19 | new critical section or by widening the existing ones. |
20 | |||
21 | Note: at this stage, job_{lock/unlock} and job lock guard macros | ||
22 | are *nop*. | ||
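As a rough sketch of the intended caller pattern (illustrative only, not part of the patch; the per-job AioContext acquire/release that some callers also need is omitted, and bs stands for whatever BlockDriverState the caller cares about):

    BlockJob *job;

    JOB_LOCK_GUARD();
    for (job = block_job_next_locked(NULL); job;
         job = block_job_next_locked(job)) {
        /* job_mutex is held here, so _locked() helpers and direct
         * reads of fields such as job->job.id are safe. */
        if (block_job_has_bdrv(job, bs)) {
            job_cancel_locked(&job->job, false);
        }
    }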
23 | |||
24 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> | ||
25 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
26 | Message-Id: <20220926093214.506243-12-eesposit@redhat.com> | ||
6 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 27 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
7 | Message-Id: <20210428151804.439460-17-vsementsov@virtuozzo.com> | ||
8 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 28 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
9 | --- | 29 | --- |
10 | block.c | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ | 30 | block.c | 17 ++++++++++------- |
11 | 1 file changed, 54 insertions(+) | 31 | blockdev.c | 14 ++++++++++---- |
32 | blockjob.c | 35 ++++++++++++++++++++++------------- | ||
33 | job-qmp.c | 9 ++++++--- | ||
34 | monitor/qmp-cmds.c | 7 +++++-- | ||
35 | qemu-img.c | 15 ++++++++++----- | ||
36 | 6 files changed, 63 insertions(+), 34 deletions(-) | ||
12 | 37 | ||
13 | diff --git a/block.c b/block.c | 38 | diff --git a/block.c b/block.c |
14 | index XXXXXXX..XXXXXXX 100644 | 39 | index XXXXXXX..XXXXXXX 100644 |
15 | --- a/block.c | 40 | --- a/block.c |
16 | +++ b/block.c | 41 | +++ b/block.c |
17 | @@ -XXX,XX +XXX,XX @@ static BlockDriverState *bdrv_open_inherit(const char *filename, | 42 | @@ -XXX,XX +XXX,XX @@ static void bdrv_close(BlockDriverState *bs) |
18 | BdrvChildRole child_role, | 43 | |
19 | Error **errp); | 44 | void bdrv_close_all(void) |
20 | 45 | { | |
21 | +static void bdrv_replace_child_noperm(BdrvChild *child, | 46 | - assert(job_next(NULL) == NULL); |
22 | + BlockDriverState *new_bs); | 47 | GLOBAL_STATE_CODE(); |
48 | + assert(job_next(NULL) == NULL); | ||
49 | |||
50 | /* Drop references from requests still in flight, such as canceled block | ||
51 | * jobs whose AIO context has not been polled yet */ | ||
52 | @@ -XXX,XX +XXX,XX @@ XDbgBlockGraph *bdrv_get_xdbg_block_graph(Error **errp) | ||
53 | } | ||
54 | } | ||
55 | |||
56 | - for (job = block_job_next(NULL); job; job = block_job_next(job)) { | ||
57 | - GSList *el; | ||
58 | + WITH_JOB_LOCK_GUARD() { | ||
59 | + for (job = block_job_next_locked(NULL); job; | ||
60 | + job = block_job_next_locked(job)) { | ||
61 | + GSList *el; | ||
62 | |||
63 | - xdbg_graph_add_node(gr, job, X_DBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_JOB, | ||
64 | - job->job.id); | ||
65 | - for (el = job->nodes; el; el = el->next) { | ||
66 | - xdbg_graph_add_edge(gr, job, (BdrvChild *)el->data); | ||
67 | + xdbg_graph_add_node(gr, job, X_DBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_JOB, | ||
68 | + job->job.id); | ||
69 | + for (el = job->nodes; el; el = el->next) { | ||
70 | + xdbg_graph_add_edge(gr, job, (BdrvChild *)el->data); | ||
71 | + } | ||
72 | } | ||
73 | } | ||
74 | |||
75 | diff --git a/blockdev.c b/blockdev.c | ||
76 | index XXXXXXX..XXXXXXX 100644 | ||
77 | --- a/blockdev.c | ||
78 | +++ b/blockdev.c | ||
79 | @@ -XXX,XX +XXX,XX @@ void blockdev_mark_auto_del(BlockBackend *blk) | ||
80 | return; | ||
81 | } | ||
82 | |||
83 | - for (job = block_job_next(NULL); job; job = block_job_next(job)) { | ||
84 | + JOB_LOCK_GUARD(); | ||
23 | + | 85 | + |
24 | static int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue | 86 | + for (job = block_job_next_locked(NULL); job; |
25 | *queue, Error **errp); | 87 | + job = block_job_next_locked(job)) { |
26 | static void bdrv_reopen_commit(BDRVReopenState *reopen_state); | 88 | if (block_job_has_bdrv(job, blk_bs(blk))) { |
27 | @@ -XXX,XX +XXX,XX @@ static int bdrv_drv_set_perm(BlockDriverState *bs, uint64_t perm, | 89 | AioContext *aio_context = job->job.aio_context; |
28 | return 0; | 90 | aio_context_acquire(aio_context); |
91 | |||
92 | - job_cancel(&job->job, false); | ||
93 | + job_cancel_locked(&job->job, false); | ||
94 | |||
95 | aio_context_release(aio_context); | ||
96 | } | ||
97 | @@ -XXX,XX +XXX,XX @@ BlockJobInfoList *qmp_query_block_jobs(Error **errp) | ||
98 | BlockJobInfoList *head = NULL, **tail = &head; | ||
99 | BlockJob *job; | ||
100 | |||
101 | - for (job = block_job_next(NULL); job; job = block_job_next(job)) { | ||
102 | + JOB_LOCK_GUARD(); | ||
103 | + | ||
104 | + for (job = block_job_next_locked(NULL); job; | ||
105 | + job = block_job_next_locked(job)) { | ||
106 | BlockJobInfo *value; | ||
107 | AioContext *aio_context; | ||
108 | |||
109 | @@ -XXX,XX +XXX,XX @@ BlockJobInfoList *qmp_query_block_jobs(Error **errp) | ||
110 | } | ||
111 | aio_context = block_job_get_aio_context(job); | ||
112 | aio_context_acquire(aio_context); | ||
113 | - value = block_job_query(job, errp); | ||
114 | + value = block_job_query_locked(job, errp); | ||
115 | aio_context_release(aio_context); | ||
116 | if (!value) { | ||
117 | qapi_free_BlockJobInfoList(head); | ||
118 | diff --git a/blockjob.c b/blockjob.c | ||
119 | index XXXXXXX..XXXXXXX 100644 | ||
120 | --- a/blockjob.c | ||
121 | +++ b/blockjob.c | ||
122 | @@ -XXX,XX +XXX,XX @@ static bool child_job_drained_poll(BdrvChild *c) | ||
123 | /* An inactive or completed job doesn't have any pending requests. Jobs | ||
124 | * with !job->busy are either already paused or have a pause point after | ||
125 | * being reentered, so no job driver code will run before they pause. */ | ||
126 | - if (!job->busy || job_is_completed(job)) { | ||
127 | - return false; | ||
128 | + WITH_JOB_LOCK_GUARD() { | ||
129 | + if (!job->busy || job_is_completed_locked(job)) { | ||
130 | + return false; | ||
131 | + } | ||
132 | } | ||
133 | |||
134 | /* Otherwise, assume that it isn't fully stopped yet, but allow the job to | ||
135 | @@ -XXX,XX +XXX,XX @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver, | ||
136 | job->ready_notifier.notify = block_job_event_ready; | ||
137 | job->idle_notifier.notify = block_job_on_idle; | ||
138 | |||
139 | - notifier_list_add(&job->job.on_finalize_cancelled, | ||
140 | - &job->finalize_cancelled_notifier); | ||
141 | - notifier_list_add(&job->job.on_finalize_completed, | ||
142 | - &job->finalize_completed_notifier); | ||
143 | - notifier_list_add(&job->job.on_pending, &job->pending_notifier); | ||
144 | - notifier_list_add(&job->job.on_ready, &job->ready_notifier); | ||
145 | - notifier_list_add(&job->job.on_idle, &job->idle_notifier); | ||
146 | + WITH_JOB_LOCK_GUARD() { | ||
147 | + notifier_list_add(&job->job.on_finalize_cancelled, | ||
148 | + &job->finalize_cancelled_notifier); | ||
149 | + notifier_list_add(&job->job.on_finalize_completed, | ||
150 | + &job->finalize_completed_notifier); | ||
151 | + notifier_list_add(&job->job.on_pending, &job->pending_notifier); | ||
152 | + notifier_list_add(&job->job.on_ready, &job->ready_notifier); | ||
153 | + notifier_list_add(&job->job.on_idle, &job->idle_notifier); | ||
154 | + } | ||
155 | |||
156 | error_setg(&job->blocker, "block device is in use by block job: %s", | ||
157 | job_type_str(&job->job)); | ||
158 | @@ -XXX,XX +XXX,XX @@ BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err, | ||
159 | action); | ||
160 | } | ||
161 | if (action == BLOCK_ERROR_ACTION_STOP) { | ||
162 | - if (!job->job.user_paused) { | ||
163 | - job_pause(&job->job); | ||
164 | - /* make the pause user visible, which will be resumed from QMP. */ | ||
165 | - job->job.user_paused = true; | ||
166 | + WITH_JOB_LOCK_GUARD() { | ||
167 | + if (!job->job.user_paused) { | ||
168 | + job_pause_locked(&job->job); | ||
169 | + /* | ||
170 | + * make the pause user visible, which will be | ||
171 | + * resumed from QMP. | ||
172 | + */ | ||
173 | + job->job.user_paused = true; | ||
174 | + } | ||
175 | } | ||
176 | block_job_iostatus_set_err(job, error); | ||
177 | } | ||
178 | diff --git a/job-qmp.c b/job-qmp.c | ||
179 | index XXXXXXX..XXXXXXX 100644 | ||
180 | --- a/job-qmp.c | ||
181 | +++ b/job-qmp.c | ||
182 | @@ -XXX,XX +XXX,XX @@ void qmp_job_dismiss(const char *id, Error **errp) | ||
183 | aio_context_release(aio_context); | ||
29 | } | 184 | } |
30 | 185 | ||
31 | +typedef struct BdrvReplaceChildState { | 186 | -static JobInfo *job_query_single(Job *job, Error **errp) |
32 | + BdrvChild *child; | 187 | +/* Called with job_mutex held. */ |
33 | + BlockDriverState *old_bs; | 188 | +static JobInfo *job_query_single_locked(Job *job, Error **errp) |
34 | +} BdrvReplaceChildState; | 189 | { |
190 | JobInfo *info; | ||
191 | uint64_t progress_current; | ||
192 | @@ -XXX,XX +XXX,XX @@ JobInfoList *qmp_query_jobs(Error **errp) | ||
193 | JobInfoList *head = NULL, **tail = &head; | ||
194 | Job *job; | ||
195 | |||
196 | - for (job = job_next(NULL); job; job = job_next(job)) { | ||
197 | + JOB_LOCK_GUARD(); | ||
35 | + | 198 | + |
36 | +static void bdrv_replace_child_commit(void *opaque) | 199 | + for (job = job_next_locked(NULL); job; job = job_next_locked(job)) { |
37 | +{ | 200 | JobInfo *value; |
38 | + BdrvReplaceChildState *s = opaque; | 201 | AioContext *aio_context; |
39 | + | 202 | |
40 | + bdrv_unref(s->old_bs); | 203 | @@ -XXX,XX +XXX,XX @@ JobInfoList *qmp_query_jobs(Error **errp) |
41 | +} | 204 | } |
42 | + | 205 | aio_context = job->aio_context; |
43 | +static void bdrv_replace_child_abort(void *opaque) | 206 | aio_context_acquire(aio_context); |
44 | +{ | 207 | - value = job_query_single(job, errp); |
45 | + BdrvReplaceChildState *s = opaque; | 208 | + value = job_query_single_locked(job, errp); |
46 | + BlockDriverState *new_bs = s->child->bs; | 209 | aio_context_release(aio_context); |
47 | + | 210 | if (!value) { |
48 | + /* old_bs reference is transparently moved from @s to @s->child */ | 211 | qapi_free_JobInfoList(head); |
49 | + bdrv_replace_child_noperm(s->child, s->old_bs); | 212 | diff --git a/monitor/qmp-cmds.c b/monitor/qmp-cmds.c |
50 | + bdrv_unref(new_bs); | 213 | index XXXXXXX..XXXXXXX 100644 |
51 | +} | 214 | --- a/monitor/qmp-cmds.c |
52 | + | 215 | +++ b/monitor/qmp-cmds.c |
53 | +static TransactionActionDrv bdrv_replace_child_drv = { | 216 | @@ -XXX,XX +XXX,XX @@ void qmp_cont(Error **errp) |
54 | + .commit = bdrv_replace_child_commit, | 217 | blk_iostatus_reset(blk); |
55 | + .abort = bdrv_replace_child_abort, | 218 | } |
56 | + .clean = g_free, | 219 | |
57 | +}; | 220 | - for (job = block_job_next(NULL); job; job = block_job_next(job)) { |
58 | + | 221 | - block_job_iostatus_reset(job); |
59 | +/* | 222 | + WITH_JOB_LOCK_GUARD() { |
60 | + * bdrv_replace_child_safe | 223 | + for (job = block_job_next_locked(NULL); job; |
61 | + * | 224 | + job = block_job_next_locked(job)) { |
62 | + * Note: real unref of old_bs is done only on commit. | 225 | + block_job_iostatus_reset_locked(job); |
63 | + */ | 226 | + } |
64 | +__attribute__((unused)) | 227 | } |
65 | +static void bdrv_replace_child_safe(BdrvChild *child, BlockDriverState *new_bs, | 228 | |
66 | + Transaction *tran) | 229 | /* Continuing after completed migration. Images have been inactivated to |
67 | +{ | 230 | diff --git a/qemu-img.c b/qemu-img.c |
68 | + BdrvReplaceChildState *s = g_new(BdrvReplaceChildState, 1); | 231 | index XXXXXXX..XXXXXXX 100644 |
69 | + *s = (BdrvReplaceChildState) { | 232 | --- a/qemu-img.c |
70 | + .child = child, | 233 | +++ b/qemu-img.c |
71 | + .old_bs = child->bs, | 234 | @@ -XXX,XX +XXX,XX @@ static void run_block_job(BlockJob *job, Error **errp) |
72 | + }; | 235 | int ret = 0; |
73 | + tran_add(tran, &bdrv_replace_child_drv, s); | 236 | |
74 | + | 237 | aio_context_acquire(aio_context); |
75 | + if (new_bs) { | 238 | - job_ref(&job->job); |
76 | + bdrv_ref(new_bs); | 239 | + job_lock(); |
77 | + } | 240 | + job_ref_locked(&job->job); |
78 | + bdrv_replace_child_noperm(child, new_bs); | 241 | do { |
79 | + /* old_bs reference is transparently moved from @child to @s */ | 242 | float progress = 0.0f; |
80 | +} | 243 | + job_unlock(); |
81 | + | 244 | aio_poll(aio_context, true); |
82 | /* | 245 | |
83 | * Check whether permissions on this node can be changed in a way that | 246 | progress_get_snapshot(&job->job.progress, &progress_current, |
84 | * @cumulative_perms and @cumulative_shared_perms are the new cumulative | 247 | @@ -XXX,XX +XXX,XX @@ static void run_block_job(BlockJob *job, Error **errp) |
248 | progress = (float)progress_current / progress_total * 100.f; | ||
249 | } | ||
250 | qemu_progress_print(progress, 0); | ||
251 | - } while (!job_is_ready(&job->job) && !job_is_completed(&job->job)); | ||
252 | + job_lock(); | ||
253 | + } while (!job_is_ready_locked(&job->job) && | ||
254 | + !job_is_completed_locked(&job->job)); | ||
255 | |||
256 | - if (!job_is_completed(&job->job)) { | ||
257 | - ret = job_complete_sync(&job->job, errp); | ||
258 | + if (!job_is_completed_locked(&job->job)) { | ||
259 | + ret = job_complete_sync_locked(&job->job, errp); | ||
260 | } else { | ||
261 | ret = job->job.ret; | ||
262 | } | ||
263 | - job_unref(&job->job); | ||
264 | + job_unref_locked(&job->job); | ||
265 | + job_unlock(); | ||
266 | aio_context_release(aio_context); | ||
267 | |||
268 | /* publish completion progress only when success */ | ||
85 | -- | 269 | -- |
86 | 2.30.2 | 270 | 2.37.3 |
87 | |||
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Split no-perm part of bdrv_attach_child as separate transaction action. | 3 | We want to make sure access of job->aio_context is always done |
4 | It will be used in later commits. | 4 | under either BQL or job_mutex. The problem is that using |
5 | 5 | aio_co_enter(job->aio_context, job->co) in job_start and job_enter_cond |
6 | makes the coroutine immediately resume, so we can't hold the job lock. | ||
7 | And caching it is not safe either, as it might change. | ||
5 | 8 | ||
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 9 | job_start is under BQL, so it can freely read job->aiocontext, but |
10 | job_enter_cond is not. | ||
11 | We want to avoid reading job->aio_context in job_enter_cond, therefore: | ||
12 | 1) use aio_co_wake(), since it doesn't take an AioContext argument |
13 | but uses job->co->ctx | ||
14 | 2) detect possible discrepancy between job->co->ctx and job->aio_context | ||
15 | by checking right after the coroutine resumes back from yielding if | ||
16 | job->aio_context has changed. If so, reschedule the coroutine to the | ||
17 | new context. | ||
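In terms of code, point 1) is the following change in job_enter_cond_locked() (taken from the hunk below):

    /* before: reads job->aio_context right after job_unlock(), unprotected */
    aio_co_enter(job->aio_context, job->co);

    /* after: resumes the coroutine in job->co->ctx, no job->aio_context read */
    aio_co_wake(job->co);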
18 | |||
19 | Calling bdrv_try_set_aio_context() will issue the following calls | ||
20 | (simplified): | ||
21 | * in terms of bdrv callbacks: | ||
22 | .drained_begin -> .set_aio_context -> .drained_end | ||
23 | * in terms of child_job functions: | ||
24 | child_job_drained_begin -> child_job_set_aio_context -> child_job_drained_end | ||
25 | * in terms of job functions: | ||
26 | job_pause_locked -> job_set_aio_context -> job_resume_locked | ||
27 | |||
28 | We can see that after setting the new aio_context, job_resume_locked | ||
29 | calls job_enter_cond again, which then invokes aio_co_wake(). But |
30 | while job->aio_context has been set in job_set_aio_context, |
31 | job->co->ctx has not changed, so the coroutine would be entered in |
32 | the wrong AioContext. |
33 | |||
34 | Using aio_co_schedule in job_resume_locked() might seem like a valid |
35 | alternative, but the problem is that the BH resuming the coroutine |
36 | is not scheduled immediately, and if in the meantime another |
37 | bdrv_try_set_aio_context() is run (see test_propagate_mirror() in |
38 | test-block-iothread.c), we would have the first schedule in the |
39 | wrong AioContext, and the second set of drains won't even manage |
40 | to schedule the coroutine, as job->busy would still be true from | ||
41 | the previous job_resume_locked(). | ||
42 | |||
43 | The solution is to stick with aio_co_wake() and detect every time | ||
44 | the coroutine resumes back from yielding if job->aio_context | ||
45 | has changed. If so, we can reschedule it to the new context. | ||
46 | |||
47 | Check for the AioContext change in job_do_yield_locked because: |
48 | 1) aio_co_reschedule_self must be called from the running coroutine |
49 | 2) since child_job_set_aio_context allows changing the AioContext only |
50 | while the job is paused, this is the exact place where the coroutine | ||
51 | resumes, before running JobDriver's code. | ||
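Condensed, the check added to job_do_yield_locked() (full hunk below) is:

    next_aio_context = job->aio_context;
    /*
     * The coroutine has just resumed; if the job's AioContext was changed
     * by bdrv_try_set_aio_context() in the meantime, move ourselves to the
     * new context before any JobDriver code runs.
     */
    while (qemu_get_current_aio_context() != next_aio_context) {
        job_unlock();
        aio_co_reschedule_self(next_aio_context);
        job_lock();
        next_aio_context = job->aio_context;
    }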
52 | |||
53 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
54 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
55 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
56 | Message-Id: <20220926093214.506243-13-eesposit@redhat.com> | ||
7 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 57 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
8 | Message-Id: <20210428151804.439460-20-vsementsov@virtuozzo.com> | ||
9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 58 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
10 | --- | 59 | --- |
11 | block.c | 71 ++++++++++++++++++++++++++++++++++++++++++++++----------- | 60 | job.c | 19 +++++++++++++++++-- |
12 | 1 file changed, 58 insertions(+), 13 deletions(-) | 61 | 1 file changed, 17 insertions(+), 2 deletions(-) |
13 | 62 | ||
14 | diff --git a/block.c b/block.c | 63 | diff --git a/job.c b/job.c |
15 | index XXXXXXX..XXXXXXX 100644 | 64 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/block.c | 65 | --- a/job.c |
17 | +++ b/block.c | 66 | +++ b/job.c |
18 | @@ -XXX,XX +XXX,XX @@ static BlockDriverState *bdrv_open_inherit(const char *filename, | 67 | @@ -XXX,XX +XXX,XX @@ void job_enter_cond_locked(Job *job, bool(*fn)(Job *job)) |
19 | 68 | job->busy = true; | |
20 | static void bdrv_replace_child_noperm(BdrvChild *child, | 69 | real_job_unlock(); |
21 | BlockDriverState *new_bs); | 70 | job_unlock(); |
22 | +static int bdrv_attach_child_noperm(BlockDriverState *parent_bs, | 71 | - aio_co_enter(job->aio_context, job->co); |
23 | + BlockDriverState *child_bs, | 72 | + aio_co_wake(job->co); |
24 | + const char *child_name, | 73 | job_lock(); |
25 | + const BdrvChildClass *child_class, | ||
26 | + BdrvChildRole child_role, | ||
27 | + BdrvChild **child, | ||
28 | + Transaction *tran, | ||
29 | + Error **errp); | ||
30 | |||
31 | static int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue | ||
32 | *queue, Error **errp); | ||
33 | @@ -XXX,XX +XXX,XX @@ static int bdrv_attach_child_common(BlockDriverState *child_bs, | ||
34 | return 0; | ||
35 | } | 74 | } |
36 | 75 | ||
37 | +static int bdrv_attach_child_noperm(BlockDriverState *parent_bs, | 76 | @@ -XXX,XX +XXX,XX @@ void job_enter(Job *job) |
38 | + BlockDriverState *child_bs, | 77 | */ |
39 | + const char *child_name, | 78 | static void coroutine_fn job_do_yield_locked(Job *job, uint64_t ns) |
40 | + const BdrvChildClass *child_class, | 79 | { |
41 | + BdrvChildRole child_role, | 80 | + AioContext *next_aio_context; |
42 | + BdrvChild **child, | ||
43 | + Transaction *tran, | ||
44 | + Error **errp) | ||
45 | +{ | ||
46 | + int ret; | ||
47 | + uint64_t perm, shared_perm; | ||
48 | + | 81 | + |
49 | + assert(parent_bs->drv); | 82 | real_job_lock(); |
50 | + | 83 | if (ns != -1) { |
51 | + bdrv_get_cumulative_perm(parent_bs, &perm, &shared_perm); | 84 | timer_mod(&job->sleep_timer, ns); |
52 | + bdrv_child_perm(parent_bs, child_bs, NULL, child_role, NULL, | 85 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn job_do_yield_locked(Job *job, uint64_t ns) |
53 | + perm, shared_perm, &perm, &shared_perm); | 86 | qemu_coroutine_yield(); |
54 | + | 87 | job_lock(); |
55 | + ret = bdrv_attach_child_common(child_bs, child_name, child_class, | 88 | |
56 | + child_role, perm, shared_perm, parent_bs, | 89 | - /* Set by job_enter_cond() before re-entering the coroutine. */ |
57 | + child, tran, errp); | 90 | + next_aio_context = job->aio_context; |
58 | + if (ret < 0) { | 91 | + /* |
59 | + return ret; | 92 | + * Coroutine has resumed, but in the meanwhile the job AioContext |
93 | + * might have changed via bdrv_try_set_aio_context(), so we need to move | ||
94 | + * the coroutine too in the new aiocontext. | ||
95 | + */ | ||
96 | + while (qemu_get_current_aio_context() != next_aio_context) { | ||
97 | + job_unlock(); | ||
98 | + aio_co_reschedule_self(next_aio_context); | ||
99 | + job_lock(); | ||
100 | + next_aio_context = job->aio_context; | ||
60 | + } | 101 | + } |
61 | + | 102 | + |
62 | + QLIST_INSERT_HEAD(&parent_bs->children, *child, next); | 103 | + /* Set by job_enter_cond_locked() before re-entering the coroutine. */ |
63 | + /* | 104 | assert(job->busy); |
64 | + * child is removed in bdrv_attach_child_common_abort(), so don't care to | ||
65 | + * abort this change separately. | ||
66 | + */ | ||
67 | + | ||
68 | + return 0; | ||
69 | +} | ||
70 | + | ||
71 | static void bdrv_detach_child(BdrvChild *child) | ||
72 | { | ||
73 | bdrv_replace_child(child, NULL); | ||
74 | @@ -XXX,XX +XXX,XX @@ BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs, | ||
75 | BdrvChildRole child_role, | ||
76 | Error **errp) | ||
77 | { | ||
78 | - BdrvChild *child; | ||
79 | - uint64_t perm, shared_perm; | ||
80 | - | ||
81 | - bdrv_get_cumulative_perm(parent_bs, &perm, &shared_perm); | ||
82 | + int ret; | ||
83 | + BdrvChild *child = NULL; | ||
84 | + Transaction *tran = tran_new(); | ||
85 | |||
86 | - assert(parent_bs->drv); | ||
87 | - bdrv_child_perm(parent_bs, child_bs, NULL, child_role, NULL, | ||
88 | - perm, shared_perm, &perm, &shared_perm); | ||
89 | + ret = bdrv_attach_child_noperm(parent_bs, child_bs, child_name, child_class, | ||
90 | + child_role, &child, tran, errp); | ||
91 | + if (ret < 0) { | ||
92 | + goto out; | ||
93 | + } | ||
94 | |||
95 | - child = bdrv_root_attach_child(child_bs, child_name, child_class, | ||
96 | - child_role, perm, shared_perm, parent_bs, | ||
97 | - errp); | ||
98 | - if (child == NULL) { | ||
99 | - return NULL; | ||
100 | + ret = bdrv_refresh_perms(parent_bs, errp); | ||
101 | + if (ret < 0) { | ||
102 | + goto out; | ||
103 | } | ||
104 | |||
105 | - QLIST_INSERT_HEAD(&parent_bs->children, child, next); | ||
106 | +out: | ||
107 | + tran_finalize(tran, ret); | ||
108 | + | ||
109 | + bdrv_unref(child_bs); | ||
110 | + | ||
111 | return child; | ||
112 | } | 105 | } |
113 | 106 | ||
114 | -- | 107 | -- |
115 | 2.30.2 | 108 | 2.37.3 |
116 | |||
117 | diff view generated by jsdifflib |
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Add new handler to get aio context and implement it in all child | 3 | In order to make access to job->aio_context thread safe, implement a "fake rwlock", |
4 | classes. Add corresponding public interface to be used soon. | 4 | where we allow reads under BQL *or* job_mutex held, but |
5 | writes only under BQL *and* job_mutex. | ||
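A sketch of what that discipline means for callers (illustrative only; the helper names below are made up for this example and are not part of the patch):

    /* reader on the BQL side, e.g. a graph-change callback */
    static AioContext *job_ctx_read_bql(Job *job)
    {
        GLOBAL_STATE_CODE();        /* asserts we run under the BQL */
        return job->aio_context;
    }

    /* reader on the job_mutex side */
    static AioContext *job_ctx_read_locked(Job *job)
    {
        JOB_LOCK_GUARD();
        return job->aio_context;
    }

    /* The single writer runs in the main thread with the job quiescent and
     * both locks taken; that is what the new job_set_aio_context() below
     * implements. */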
5 | 6 | ||
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 7 | The only write we have is in child_job_set_aio_ctx, which always |
7 | Reviewed-by: Alberto Garcia <berto@igalia.com> | 8 | happens under drain (so the job is paused). |
9 | For this reason, introduce job_set_aio_context and make sure that | ||
10 | the context is set under BQL, job_mutex and drain. | ||
11 | Also make sure all other places where the aiocontext is read | ||
12 | are protected. | ||
13 | |||
14 | The reads in commit.c and mirror.c are actually safe, because always | ||
15 | done under BQL. | ||
16 | |||
17 | Note: at this stage, job_{lock/unlock} and job lock guard macros | ||
18 | are *nop*. | ||
19 | |||
20 | Suggested-by: Paolo Bonzini <pbonzini@redhat.com> | ||
21 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> | ||
22 | Message-Id: <20220926093214.506243-14-eesposit@redhat.com> | ||
8 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 23 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
9 | Message-Id: <20210428151804.439460-6-vsementsov@virtuozzo.com> | 24 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> |
10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 25 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
11 | --- | 26 | --- |
12 | include/block/block.h | 2 ++ | 27 | include/qemu/job.h | 23 ++++++++++++++++++++--- |
13 | include/block/block_int.h | 2 ++ | 28 | block/replication.c | 1 + |
14 | block.c | 13 +++++++++++++ | 29 | blockjob.c | 3 ++- |
15 | block/block-backend.c | 9 +++++++++ | 30 | job.c | 12 ++++++++++++ |
16 | blockjob.c | 8 ++++++++ | 31 | 4 files changed, 35 insertions(+), 4 deletions(-) |
17 | 5 files changed, 34 insertions(+) | ||
18 | 32 | ||
19 | diff --git a/include/block/block.h b/include/block/block.h | 33 | diff --git a/include/qemu/job.h b/include/qemu/job.h |
20 | index XXXXXXX..XXXXXXX 100644 | 34 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/include/block/block.h | 35 | --- a/include/qemu/job.h |
22 | +++ b/include/block/block.h | 36 | +++ b/include/qemu/job.h |
23 | @@ -XXX,XX +XXX,XX @@ bool bdrv_child_can_set_aio_context(BdrvChild *c, AioContext *ctx, | 37 | @@ -XXX,XX +XXX,XX @@ typedef struct Job { |
24 | GSList **ignore, Error **errp); | 38 | /* ProgressMeter API is thread-safe */ |
25 | bool bdrv_can_set_aio_context(BlockDriverState *bs, AioContext *ctx, | 39 | ProgressMeter progress; |
26 | GSList **ignore, Error **errp); | 40 | |
27 | +AioContext *bdrv_child_get_parent_aio_context(BdrvChild *c); | 41 | + /** |
42 | + * AioContext to run the job coroutine in. | ||
43 | + * The job Aiocontext can be read when holding *either* | ||
44 | + * the BQL (so we are in the main loop) or the job_mutex. | ||
45 | + * It can only be written when we hold *both* BQL | ||
46 | + * and the job_mutex. | ||
47 | + */ | ||
48 | + AioContext *aio_context; | ||
49 | |||
50 | - /** Protected by AioContext lock */ | ||
51 | |||
52 | - /** AioContext to run the job coroutine in */ | ||
53 | - AioContext *aio_context; | ||
54 | + /** Protected by AioContext lock */ | ||
55 | |||
56 | /** Reference count of the block job */ | ||
57 | int refcnt; | ||
58 | @@ -XXX,XX +XXX,XX @@ int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), | ||
59 | int job_finish_sync_locked(Job *job, void (*finish)(Job *, Error **errp), | ||
60 | Error **errp); | ||
61 | |||
62 | +/** | ||
63 | + * Sets the @job->aio_context. | ||
64 | + * Called with job_mutex *not* held. | ||
65 | + * | ||
66 | + * This function must run in the main thread to protect against | ||
67 | + * concurrent read in job_finish_sync_locked(), takes the job_mutex | ||
68 | + * lock to protect against the read in job_do_yield_locked(), and must | ||
69 | + * be called when the job is quiescent. | ||
70 | + */ | ||
71 | +void job_set_aio_context(Job *job, AioContext *ctx); | ||
28 | + | 72 | + |
29 | int bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz); | 73 | #endif |
30 | int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo); | 74 | diff --git a/block/replication.c b/block/replication.c |
31 | |||
32 | diff --git a/include/block/block_int.h b/include/block/block_int.h | ||
33 | index XXXXXXX..XXXXXXX 100644 | 75 | index XXXXXXX..XXXXXXX 100644 |
34 | --- a/include/block/block_int.h | 76 | --- a/block/replication.c |
35 | +++ b/include/block/block_int.h | 77 | +++ b/block/replication.c |
36 | @@ -XXX,XX +XXX,XX @@ struct BdrvChildClass { | 78 | @@ -XXX,XX +XXX,XX @@ static void replication_close(BlockDriverState *bs) |
37 | bool (*can_set_aio_ctx)(BdrvChild *child, AioContext *ctx, | ||
38 | GSList **ignore, Error **errp); | ||
39 | void (*set_aio_ctx)(BdrvChild *child, AioContext *ctx, GSList **ignore); | ||
40 | + | ||
41 | + AioContext *(*get_parent_aio_context)(BdrvChild *child); | ||
42 | }; | ||
43 | |||
44 | extern const BdrvChildClass child_of_bds; | ||
45 | diff --git a/block.c b/block.c | ||
46 | index XXXXXXX..XXXXXXX 100644 | ||
47 | --- a/block.c | ||
48 | +++ b/block.c | ||
49 | @@ -XXX,XX +XXX,XX @@ static int bdrv_child_cb_update_filename(BdrvChild *c, BlockDriverState *base, | ||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | +static AioContext *bdrv_child_cb_get_parent_aio_context(BdrvChild *c) | ||
54 | +{ | ||
55 | + BlockDriverState *bs = c->opaque; | ||
56 | + | ||
57 | + return bdrv_get_aio_context(bs); | ||
58 | +} | ||
59 | + | ||
60 | const BdrvChildClass child_of_bds = { | ||
61 | .parent_is_bds = true, | ||
62 | .get_parent_desc = bdrv_child_get_parent_desc, | ||
63 | @@ -XXX,XX +XXX,XX @@ const BdrvChildClass child_of_bds = { | ||
64 | .can_set_aio_ctx = bdrv_child_cb_can_set_aio_ctx, | ||
65 | .set_aio_ctx = bdrv_child_cb_set_aio_ctx, | ||
66 | .update_filename = bdrv_child_cb_update_filename, | ||
67 | + .get_parent_aio_context = bdrv_child_cb_get_parent_aio_context, | ||
68 | }; | ||
69 | |||
70 | +AioContext *bdrv_child_get_parent_aio_context(BdrvChild *c) | ||
71 | +{ | ||
72 | + return c->klass->get_parent_aio_context(c); | ||
73 | +} | ||
74 | + | ||
75 | static int bdrv_open_flags(BlockDriverState *bs, int flags) | ||
76 | { | 79 | { |
77 | int open_flags = flags; | 80 | BDRVReplicationState *s = bs->opaque; |
78 | diff --git a/block/block-backend.c b/block/block-backend.c | 81 | Job *commit_job; |
79 | index XXXXXXX..XXXXXXX 100644 | 82 | + GLOBAL_STATE_CODE(); |
80 | --- a/block/block-backend.c | 83 | |
81 | +++ b/block/block-backend.c | 84 | if (s->stage == BLOCK_REPLICATION_RUNNING) { |
82 | @@ -XXX,XX +XXX,XX @@ static void blk_root_detach(BdrvChild *child) | 85 | replication_stop(s->rs, false, NULL); |
83 | } | ||
84 | } | ||
85 | |||
86 | +static AioContext *blk_root_get_parent_aio_context(BdrvChild *c) | ||
87 | +{ | ||
88 | + BlockBackend *blk = c->opaque; | ||
89 | + | ||
90 | + return blk_get_aio_context(blk); | ||
91 | +} | ||
92 | + | ||
93 | static const BdrvChildClass child_root = { | ||
94 | .inherit_options = blk_root_inherit_options, | ||
95 | |||
96 | @@ -XXX,XX +XXX,XX @@ static const BdrvChildClass child_root = { | ||
97 | |||
98 | .can_set_aio_ctx = blk_root_can_set_aio_ctx, | ||
99 | .set_aio_ctx = blk_root_set_aio_ctx, | ||
100 | + | ||
101 | + .get_parent_aio_context = blk_root_get_parent_aio_context, | ||
102 | }; | ||
103 | |||
104 | /* | ||
105 | diff --git a/blockjob.c b/blockjob.c | 86 | diff --git a/blockjob.c b/blockjob.c |
106 | index XXXXXXX..XXXXXXX 100644 | 87 | index XXXXXXX..XXXXXXX 100644 |
107 | --- a/blockjob.c | 88 | --- a/blockjob.c |
108 | +++ b/blockjob.c | 89 | +++ b/blockjob.c |
109 | @@ -XXX,XX +XXX,XX @@ static void child_job_set_aio_ctx(BdrvChild *c, AioContext *ctx, | 90 | @@ -XXX,XX +XXX,XX @@ static void child_job_set_aio_ctx(BdrvChild *c, AioContext *ctx, |
110 | job->job.aio_context = ctx; | 91 | bdrv_set_aio_context_ignore(sibling->bs, ctx, ignore); |
92 | } | ||
93 | |||
94 | - job->job.aio_context = ctx; | ||
95 | + job_set_aio_context(&job->job, ctx); | ||
111 | } | 96 | } |
112 | 97 | ||
113 | +static AioContext *child_job_get_parent_aio_context(BdrvChild *c) | 98 | static AioContext *child_job_get_parent_aio_context(BdrvChild *c) |
99 | { | ||
100 | BlockJob *job = c->opaque; | ||
101 | + GLOBAL_STATE_CODE(); | ||
102 | |||
103 | return job->job.aio_context; | ||
104 | } | ||
105 | diff --git a/job.c b/job.c | ||
106 | index XXXXXXX..XXXXXXX 100644 | ||
107 | --- a/job.c | ||
108 | +++ b/job.c | ||
109 | @@ -XXX,XX +XXX,XX @@ Job *job_get(const char *id) | ||
110 | return job_get_locked(id); | ||
111 | } | ||
112 | |||
113 | +void job_set_aio_context(Job *job, AioContext *ctx) | ||
114 | +{ | 114 | +{ |
115 | + BlockJob *job = c->opaque; | 115 | + /* protect against read in job_finish_sync_locked and job_start */ |
116 | + | 116 | + GLOBAL_STATE_CODE(); |
117 | + return job->job.aio_context; | 117 | + /* protect against read in job_do_yield_locked */ |
118 | + JOB_LOCK_GUARD(); | ||
119 | + /* ensure the job is quiescent while the AioContext is changed */ | ||
120 | + assert(job->paused || job_is_completed_locked(job)); | ||
121 | + job->aio_context = ctx; | ||
118 | +} | 122 | +} |
119 | + | 123 | + |
120 | static const BdrvChildClass child_job = { | 124 | /* Called with job_mutex *not* held. */ |
121 | .get_parent_desc = child_job_get_parent_desc, | 125 | static void job_sleep_timer_cb(void *opaque) |
122 | .drained_begin = child_job_drained_begin, | 126 | { |
123 | @@ -XXX,XX +XXX,XX @@ static const BdrvChildClass child_job = { | 127 | @@ -XXX,XX +XXX,XX @@ int job_finish_sync_locked(Job *job, |
124 | .can_set_aio_ctx = child_job_can_set_aio_ctx, | 128 | { |
125 | .set_aio_ctx = child_job_set_aio_ctx, | 129 | Error *local_err = NULL; |
126 | .stay_at_node = true, | 130 | int ret; |
127 | + .get_parent_aio_context = child_job_get_parent_aio_context, | 131 | + GLOBAL_STATE_CODE(); |
128 | }; | 132 | |
129 | 133 | job_ref_locked(job); | |
130 | void block_job_remove_all_bdrv(BlockJob *job) | 134 | |
131 | -- | 135 | -- |
132 | 2.30.2 | 136 | 2.37.3 |
133 | |||
134 | diff view generated by jsdifflib |
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | These functions are called only from bdrv_reopen_multiple() in block.c. | 3 | The same job lock is also being used to protect some of the BlockJob fields. |
4 | No reason to publish them. | 4 | Categorize them just as done in job.h. |
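The resulting split, summarized from the header hunk below: job and iostatus are protected by job_mutex; speed, blocker and nodes are only touched under the BQL (GLOBAL_STATE_CODE); limit relies on the thread-safe RateLimit API; the notifiers are set once in block_job_create() and never modified afterwards. For example, a reader of the job_mutex-protected category would look like this (sketch, not from the patch):

    WITH_JOB_LOCK_GUARD() {
        iostatus = job->iostatus;
    }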
5 | 5 | ||
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 6 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> |
7 | Reviewed-by: Alberto Garcia <berto@igalia.com> | 7 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
8 | Message-Id: <20220926093214.506243-15-eesposit@redhat.com> | ||
8 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 9 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
9 | Message-Id: <20210428151804.439460-8-vsementsov@virtuozzo.com> | ||
10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
11 | --- | 11 | --- |
12 | include/block/block.h | 4 ---- | 12 | include/block/blockjob.h | 32 ++++++++++++++++++++++++++------ |
13 | block.c | 13 +++++++++---- | 13 | 1 file changed, 26 insertions(+), 6 deletions(-) |
14 | 2 files changed, 9 insertions(+), 8 deletions(-) | ||
15 | 14 | ||
16 | diff --git a/include/block/block.h b/include/block/block.h | 15 | diff --git a/include/block/blockjob.h b/include/block/blockjob.h |
17 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/include/block/block.h | 17 | --- a/include/block/blockjob.h |
19 | +++ b/include/block/block.h | 18 | +++ b/include/block/blockjob.h |
20 | @@ -XXX,XX +XXX,XX @@ BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue, | 19 | @@ -XXX,XX +XXX,XX @@ typedef struct BlockJobDriver BlockJobDriver; |
21 | int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp); | 20 | * Long-running operation on a BlockDriverState. |
22 | int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only, | 21 | */ |
23 | Error **errp); | 22 | typedef struct BlockJob { |
24 | -int bdrv_reopen_prepare(BDRVReopenState *reopen_state, | 23 | - /** Data belonging to the generic Job infrastructure */ |
25 | - BlockReopenQueue *queue, Error **errp); | 24 | + /** |
26 | -void bdrv_reopen_commit(BDRVReopenState *reopen_state); | 25 | + * Data belonging to the generic Job infrastructure. |
27 | -void bdrv_reopen_abort(BDRVReopenState *reopen_state); | 26 | + * Protected by job mutex. |
28 | int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset, | 27 | + */ |
29 | int64_t bytes, BdrvRequestFlags flags); | 28 | Job job; |
30 | int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags); | 29 | |
31 | diff --git a/block.c b/block.c | 30 | - /** Status that is published by the query-block-jobs QMP API */ |
32 | index XXXXXXX..XXXXXXX 100644 | 31 | + /** |
33 | --- a/block.c | 32 | + * Status that is published by the query-block-jobs QMP API. |
34 | +++ b/block.c | 33 | + * Protected by job mutex. |
35 | @@ -XXX,XX +XXX,XX @@ static BlockDriverState *bdrv_open_inherit(const char *filename, | 34 | + */ |
36 | BdrvChildRole child_role, | 35 | BlockDeviceIoStatus iostatus; |
37 | Error **errp); | 36 | |
38 | 37 | - /** Speed that was set with @block_job_set_speed. */ | |
39 | +static int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue | 38 | + /** |
40 | + *queue, Error **errp); | 39 | + * Speed that was set with @block_job_set_speed. |
41 | +static void bdrv_reopen_commit(BDRVReopenState *reopen_state); | 40 | + * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE). |
42 | +static void bdrv_reopen_abort(BDRVReopenState *reopen_state); | 41 | + */ |
42 | int64_t speed; | ||
43 | |||
44 | - /** Rate limiting data structure for implementing @speed. */ | ||
45 | + /** | ||
46 | + * Rate limiting data structure for implementing @speed. | ||
47 | + * RateLimit API is thread-safe. | ||
48 | + */ | ||
49 | RateLimit limit; | ||
50 | |||
51 | - /** Block other operations when block job is running */ | ||
52 | + /** | ||
53 | + * Block other operations when block job is running. | ||
54 | + * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE). | ||
55 | + */ | ||
56 | Error *blocker; | ||
57 | |||
58 | + /** All notifiers are set once in block_job_create() and never modified. */ | ||
43 | + | 59 | + |
44 | /* If non-zero, use only whitelisted block drivers */ | 60 | /** Called when a cancelled job is finalised. */ |
45 | static int use_bdrv_whitelist; | 61 | Notifier finalize_cancelled_notifier; |
46 | 62 | ||
47 | @@ -XXX,XX +XXX,XX @@ static int bdrv_reopen_parse_backing(BDRVReopenState *reopen_state, | 63 | @@ -XXX,XX +XXX,XX @@ typedef struct BlockJob { |
48 | * commit() for any other BDS that have been left in a prepare() state | 64 | /** Called when the job coroutine yields or terminates */ |
49 | * | 65 | Notifier idle_notifier; |
50 | */ | 66 | |
51 | -int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue, | 67 | - /** BlockDriverStates that are involved in this block job */ |
52 | - Error **errp) | 68 | + /** |
53 | +static int bdrv_reopen_prepare(BDRVReopenState *reopen_state, | 69 | + * BlockDriverStates that are involved in this block job. |
54 | + BlockReopenQueue *queue, Error **errp) | 70 | + * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE). |
55 | { | 71 | + */ |
56 | int ret = -1; | 72 | GSList *nodes; |
57 | int old_flags; | 73 | } BlockJob; |
58 | @@ -XXX,XX +XXX,XX @@ error: | ||
59 | * makes them final by swapping the staging BlockDriverState contents into | ||
60 | * the active BlockDriverState contents. | ||
61 | */ | ||
62 | -void bdrv_reopen_commit(BDRVReopenState *reopen_state) | ||
63 | +static void bdrv_reopen_commit(BDRVReopenState *reopen_state) | ||
64 | { | ||
65 | BlockDriver *drv; | ||
66 | BlockDriverState *bs; | ||
67 | @@ -XXX,XX +XXX,XX @@ void bdrv_reopen_commit(BDRVReopenState *reopen_state) | ||
68 | * Abort the reopen, and delete and free the staged changes in | ||
69 | * reopen_state | ||
70 | */ | ||
71 | -void bdrv_reopen_abort(BDRVReopenState *reopen_state) | ||
72 | +static void bdrv_reopen_abort(BDRVReopenState *reopen_state) | ||
73 | { | ||
74 | BlockDriver *drv; | ||
75 | 74 | ||
76 | -- | 75 | -- |
77 | 2.30.2 | 76 | 2.37.3 |
78 | |||
79 | diff view generated by jsdifflib |
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Add new interface, allowing use of existing node list. It will be used | 3 | They all are called with job_lock held, in job_event_*_locked() |
4 | to fix bdrv_replace_node() in the further commit. | ||
5 | 4 | ||
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 5 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
6 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
7 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
7 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 8 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
8 | Message-Id: <20210428151804.439460-16-vsementsov@virtuozzo.com> | 9 | Message-Id: <20220926093214.506243-16-eesposit@redhat.com> |
9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
10 | --- | 11 | --- |
11 | block.c | 106 +++++++++++++++++++++++++++++++++++++------------------- | 12 | blockjob.c | 25 +++++++++++++++---------- |
12 | 1 file changed, 71 insertions(+), 35 deletions(-) | 13 | 1 file changed, 15 insertions(+), 10 deletions(-) |
13 | 14 | ||
14 | diff --git a/block.c b/block.c | 15 | diff --git a/blockjob.c b/blockjob.c |
15 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/block.c | 17 | --- a/blockjob.c |
17 | +++ b/block.c | 18 | +++ b/blockjob.c |
18 | @@ -XXX,XX +XXX,XX @@ static int bdrv_drv_set_perm(BlockDriverState *bs, uint64_t perm, | 19 | @@ -XXX,XX +XXX,XX @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs, |
19 | static int bdrv_node_check_perm(BlockDriverState *bs, BlockReopenQueue *q, | ||
20 | uint64_t cumulative_perms, | ||
21 | uint64_t cumulative_shared_perms, | ||
22 | - GSList *ignore_children, Error **errp) | ||
23 | + GSList *ignore_children, | ||
24 | + Transaction *tran, Error **errp) | ||
25 | { | ||
26 | BlockDriver *drv = bs->drv; | ||
27 | BdrvChild *c; | ||
28 | @@ -XXX,XX +XXX,XX @@ static int bdrv_node_check_perm(BlockDriverState *bs, BlockReopenQueue *q, | ||
29 | return 0; | ||
30 | } | ||
31 | |||
32 | - ret = bdrv_drv_set_perm(bs, cumulative_perms, cumulative_shared_perms, NULL, | ||
33 | + ret = bdrv_drv_set_perm(bs, cumulative_perms, cumulative_shared_perms, tran, | ||
34 | errp); | ||
35 | if (ret < 0) { | ||
36 | return ret; | ||
37 | @@ -XXX,XX +XXX,XX @@ static int bdrv_node_check_perm(BlockDriverState *bs, BlockReopenQueue *q, | ||
38 | bdrv_child_perm(bs, c->bs, c, c->role, q, | ||
39 | cumulative_perms, cumulative_shared_perms, | ||
40 | &cur_perm, &cur_shared); | ||
41 | - bdrv_child_set_perm_safe(c, cur_perm, cur_shared, NULL); | ||
42 | + bdrv_child_set_perm_safe(c, cur_perm, cur_shared, tran); | ||
43 | } | ||
44 | |||
45 | return 0; | 20 | return 0; |
46 | } | 21 | } |
47 | 22 | ||
48 | -static int bdrv_check_perm(BlockDriverState *bs, BlockReopenQueue *q, | 23 | -static void block_job_on_idle(Notifier *n, void *opaque) |
49 | - uint64_t cumulative_perms, | 24 | +/* Called with job_mutex lock held. */ |
50 | - uint64_t cumulative_shared_perms, | 25 | +static void block_job_on_idle_locked(Notifier *n, void *opaque) |
51 | - GSList *ignore_children, Error **errp) | ||
52 | +/* | ||
53 | + * If use_cumulative_perms is true, use cumulative_perms and | ||
54 | + * cumulative_shared_perms for first element of the list. Otherwise just refresh | ||
55 | + * all permissions. | ||
56 | + */ | ||
57 | +static int bdrv_check_perm_common(GSList *list, BlockReopenQueue *q, | ||
58 | + bool use_cumulative_perms, | ||
59 | + uint64_t cumulative_perms, | ||
60 | + uint64_t cumulative_shared_perms, | ||
61 | + GSList *ignore_children, | ||
62 | + Transaction *tran, Error **errp) | ||
63 | { | 26 | { |
64 | int ret; | 27 | aio_wait_kick(); |
65 | - BlockDriverState *root = bs; | ||
66 | - g_autoptr(GSList) list = bdrv_topological_dfs(NULL, NULL, root); | ||
67 | + BlockDriverState *bs; | ||
68 | |||
69 | - for ( ; list; list = list->next) { | ||
70 | + if (use_cumulative_perms) { | ||
71 | bs = list->data; | ||
72 | |||
73 | - if (bs != root) { | ||
74 | - if (bdrv_parent_perms_conflict(bs, ignore_children, errp)) { | ||
75 | - return -EINVAL; | ||
76 | - } | ||
77 | + ret = bdrv_node_check_perm(bs, q, cumulative_perms, | ||
78 | + cumulative_shared_perms, | ||
79 | + ignore_children, tran, errp); | ||
80 | + if (ret < 0) { | ||
81 | + return ret; | ||
82 | + } | ||
83 | |||
84 | - bdrv_get_cumulative_perm(bs, &cumulative_perms, | ||
85 | - &cumulative_shared_perms); | ||
86 | + list = list->next; | ||
87 | + } | ||
88 | + | ||
89 | + for ( ; list; list = list->next) { | ||
90 | + bs = list->data; | ||
91 | + | ||
92 | + if (bdrv_parent_perms_conflict(bs, ignore_children, errp)) { | ||
93 | + return -EINVAL; | ||
94 | } | ||
95 | |||
96 | + bdrv_get_cumulative_perm(bs, &cumulative_perms, | ||
97 | + &cumulative_shared_perms); | ||
98 | + | ||
99 | ret = bdrv_node_check_perm(bs, q, cumulative_perms, | ||
100 | cumulative_shared_perms, | ||
101 | - ignore_children, errp); | ||
102 | + ignore_children, tran, errp); | ||
103 | if (ret < 0) { | ||
104 | return ret; | ||
105 | } | ||
106 | @@ -XXX,XX +XXX,XX @@ static int bdrv_check_perm(BlockDriverState *bs, BlockReopenQueue *q, | ||
107 | return 0; | ||
108 | } | 28 | } |
109 | 29 | @@ -XXX,XX +XXX,XX @@ static void block_job_iostatus_set_err(BlockJob *job, int error) | |
110 | +static int bdrv_check_perm(BlockDriverState *bs, BlockReopenQueue *q, | ||
111 | + uint64_t cumulative_perms, | ||
112 | + uint64_t cumulative_shared_perms, | ||
113 | + GSList *ignore_children, Error **errp) | ||
114 | +{ | ||
115 | + g_autoptr(GSList) list = bdrv_topological_dfs(NULL, NULL, bs); | ||
116 | + return bdrv_check_perm_common(list, q, true, cumulative_perms, | ||
117 | + cumulative_shared_perms, ignore_children, | ||
118 | + NULL, errp); | ||
119 | +} | ||
120 | + | ||
121 | +static int bdrv_list_refresh_perms(GSList *list, BlockReopenQueue *q, | ||
122 | + Transaction *tran, Error **errp) | ||
123 | +{ | ||
124 | + return bdrv_check_perm_common(list, q, false, 0, 0, NULL, tran, errp); | ||
125 | +} | ||
126 | + | ||
127 | /* | ||
128 | * Notifies drivers that after a previous bdrv_check_perm() call, the | ||
129 | * permission update is not performed and any preparations made for it (e.g. | ||
130 | @@ -XXX,XX +XXX,XX @@ static void bdrv_node_abort_perm_update(BlockDriverState *bs) | ||
131 | } | 30 | } |
132 | } | 31 | } |
133 | 32 | ||
134 | -static void bdrv_abort_perm_update(BlockDriverState *bs) | 33 | -static void block_job_event_cancelled(Notifier *n, void *opaque) |
135 | +static void bdrv_list_abort_perm_update(GSList *list) | 34 | +/* Called with job_mutex lock held. */ |
35 | +static void block_job_event_cancelled_locked(Notifier *n, void *opaque) | ||
136 | { | 36 | { |
137 | - g_autoptr(GSList) list = bdrv_topological_dfs(NULL, NULL, bs); | 37 | BlockJob *job = opaque; |
138 | - | 38 | uint64_t progress_current, progress_total; |
139 | for ( ; list; list = list->next) { | 39 | @@ -XXX,XX +XXX,XX @@ static void block_job_event_cancelled(Notifier *n, void *opaque) |
140 | bdrv_node_abort_perm_update((BlockDriverState *)list->data); | 40 | job->speed); |
141 | } | ||
142 | } | 41 | } |
143 | 42 | ||
144 | +static void bdrv_abort_perm_update(BlockDriverState *bs) | 43 | -static void block_job_event_completed(Notifier *n, void *opaque) |
145 | +{ | 44 | +/* Called with job_mutex lock held. */ |
146 | + g_autoptr(GSList) list = bdrv_topological_dfs(NULL, NULL, bs); | 45 | +static void block_job_event_completed_locked(Notifier *n, void *opaque) |
147 | + return bdrv_list_abort_perm_update(list); | ||
148 | +} | ||
149 | + | ||
150 | static void bdrv_node_set_perm(BlockDriverState *bs) | ||
151 | { | 46 | { |
152 | BlockDriver *drv = bs->drv; | 47 | BlockJob *job = opaque; |
153 | @@ -XXX,XX +XXX,XX @@ static void bdrv_node_set_perm(BlockDriverState *bs) | 48 | const char *msg = NULL; |
154 | } | 49 | @@ -XXX,XX +XXX,XX @@ static void block_job_event_completed(Notifier *n, void *opaque) |
50 | msg); | ||
155 | } | 51 | } |
156 | 52 | ||
157 | -static void bdrv_set_perm(BlockDriverState *bs) | 53 | -static void block_job_event_pending(Notifier *n, void *opaque) |
158 | +static void bdrv_list_set_perm(GSList *list) | 54 | +/* Called with job_mutex lock held. */ |
55 | +static void block_job_event_pending_locked(Notifier *n, void *opaque) | ||
159 | { | 56 | { |
160 | - g_autoptr(GSList) list = bdrv_topological_dfs(NULL, NULL, bs); | 57 | BlockJob *job = opaque; |
161 | - | 58 | |
162 | for ( ; list; list = list->next) { | 59 | @@ -XXX,XX +XXX,XX @@ static void block_job_event_pending(Notifier *n, void *opaque) |
163 | bdrv_node_set_perm((BlockDriverState *)list->data); | 60 | job->job.id); |
164 | } | ||
165 | } | 61 | } |
166 | 62 | ||
167 | +static void bdrv_set_perm(BlockDriverState *bs) | 63 | -static void block_job_event_ready(Notifier *n, void *opaque) |
168 | +{ | 64 | +/* Called with job_mutex lock held. */ |
169 | + g_autoptr(GSList) list = bdrv_topological_dfs(NULL, NULL, bs); | 65 | +static void block_job_event_ready_locked(Notifier *n, void *opaque) |
170 | + return bdrv_list_set_perm(list); | ||
171 | +} | ||
172 | + | ||
173 | void bdrv_get_cumulative_perm(BlockDriverState *bs, uint64_t *perm, | ||
174 | uint64_t *shared_perm) | ||
175 | { | 66 | { |
176 | @@ -XXX,XX +XXX,XX @@ static int bdrv_check_update_perm(BlockDriverState *bs, BlockReopenQueue *q, | 67 | BlockJob *job = opaque; |
177 | static int bdrv_refresh_perms(BlockDriverState *bs, Error **errp) | 68 | uint64_t progress_current, progress_total; |
178 | { | 69 | @@ -XXX,XX +XXX,XX @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver, |
179 | int ret; | 70 | |
180 | - uint64_t perm, shared_perm; | 71 | ratelimit_init(&job->limit); |
181 | + Transaction *tran = tran_new(); | 72 | |
182 | + g_autoptr(GSList) list = bdrv_topological_dfs(NULL, NULL, bs); | 73 | - job->finalize_cancelled_notifier.notify = block_job_event_cancelled; |
183 | 74 | - job->finalize_completed_notifier.notify = block_job_event_completed; | |
184 | - if (bdrv_parent_perms_conflict(bs, NULL, errp)) { | 75 | - job->pending_notifier.notify = block_job_event_pending; |
185 | - return -EPERM; | 76 | - job->ready_notifier.notify = block_job_event_ready; |
186 | - } | 77 | - job->idle_notifier.notify = block_job_on_idle; |
187 | - bdrv_get_cumulative_perm(bs, &perm, &shared_perm); | 78 | + job->finalize_cancelled_notifier.notify = block_job_event_cancelled_locked; |
188 | - ret = bdrv_check_perm(bs, NULL, perm, shared_perm, NULL, errp); | 79 | + job->finalize_completed_notifier.notify = block_job_event_completed_locked; |
189 | - if (ret < 0) { | 80 | + job->pending_notifier.notify = block_job_event_pending_locked; |
190 | - bdrv_abort_perm_update(bs); | 81 | + job->ready_notifier.notify = block_job_event_ready_locked; |
191 | - return ret; | 82 | + job->idle_notifier.notify = block_job_on_idle_locked; |
192 | - } | 83 | |
193 | - bdrv_set_perm(bs); | 84 | WITH_JOB_LOCK_GUARD() { |
194 | + ret = bdrv_list_refresh_perms(list, NULL, tran, errp); | 85 | notifier_list_add(&job->job.on_finalize_cancelled, |
195 | + tran_finalize(tran, ret); | ||
196 | |||
197 | - return 0; | ||
198 | + return ret; | ||
199 | } | ||
200 | |||
201 | int bdrv_child_try_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared, | ||
202 | -- | 86 | -- |
203 | 2.30.2 | 87 | 2.37.3 |
204 | |||
205 | diff view generated by jsdifflib |
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Add the test that shows that concept of ignore_children is incomplete. | 3 | iostatus is the only field (together with .job) that needs |
4 | Actually, when we want to update something, ignoring permission of some | 4 | protection using the job mutex. |
5 | existing BdrvChild, we should ignore also the propagated effect of this | ||
6 | child to the other children. But that's not done. Better approach | ||
7 | (update permissions on already updated graph) will be implemented | ||
8 | later. | ||
9 | 5 | ||
10 | Now the test fails, so it's added with -d argument to not break make | 6 | It is set in the main loop (GLOBAL_STATE functions) but read |
11 | check. | 7 | in I/O code (block_job_error_action). |
12 | 8 | ||
13 | Test fails with | 9 | In order to protect it, change block_job_iostatus_set_err |
10 | to block_job_iostatus_set_err_locked(), always called under | ||
11 | job lock. | ||
14 | 12 | ||
15 | "Conflicts with use by fl1 as 'backing', which does not allow 'write' on base" | 13 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
16 | |||
17 | because when updating permissions we can ignore the original top->fl1
18 | BdrvChild. But we don't ignore the exclusive write permission in the
19 | fl1->base BdrvChild, which is propagated. The correct thing to do is to
20 | make the graph change first and then update permissions from the top node.
21 | |||
22 | To run test do | ||
23 | |||
24 | ./test-bdrv-graph-mod -d -p /bdrv-graph-mod/parallel-exclusive-write | ||
25 | |||
26 | from <build-directory>/tests. | ||
27 | |||
28 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
29 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 14 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
30 | Message-Id: <20210428151804.439460-2-vsementsov@virtuozzo.com> | 15 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> |
16 | Message-Id: <20220926093214.506243-17-eesposit@redhat.com> | ||
17 | [kwolf: Fixed up type of iostatus] | ||
31 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 18 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
32 | --- | 19 | --- |
33 | tests/unit/test-bdrv-graph-mod.c | 70 +++++++++++++++++++++++++++++++- | 20 | block/mirror.c | 6 +++++- |
34 | 1 file changed, 69 insertions(+), 1 deletion(-) | 21 | blockjob.c | 5 +++-- |
22 | 2 files changed, 8 insertions(+), 3 deletions(-) | ||
35 | 23 | ||
36 | diff --git a/tests/unit/test-bdrv-graph-mod.c b/tests/unit/test-bdrv-graph-mod.c | 24 | diff --git a/block/mirror.c b/block/mirror.c |
37 | index XXXXXXX..XXXXXXX 100644 | 25 | index XXXXXXX..XXXXXXX 100644 |
38 | --- a/tests/unit/test-bdrv-graph-mod.c | 26 | --- a/block/mirror.c |
39 | +++ b/tests/unit/test-bdrv-graph-mod.c | 27 | +++ b/block/mirror.c |
40 | @@ -XXX,XX +XXX,XX @@ | 28 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn mirror_run(Job *job, Error **errp) |
41 | /* | 29 | BlockDriverState *bs = s->mirror_top_bs->backing->bs; |
42 | * Block node graph modifications tests | 30 | BlockDriverState *target_bs = blk_bs(s->target); |
43 | * | 31 | bool need_drain = true; |
44 | - * Copyright (c) 2019 Virtuozzo International GmbH. All rights reserved. | 32 | + BlockDeviceIoStatus iostatus; |
45 | + * Copyright (c) 2019-2021 Virtuozzo International GmbH. All rights reserved. | 33 | int64_t length; |
46 | * | 34 | int64_t target_length; |
47 | * This program is free software; you can redistribute it and/or modify | 35 | BlockDriverInfo bdi; |
48 | * it under the terms of the GNU General Public License as published by | 36 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn mirror_run(Job *job, Error **errp) |
49 | @@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_no_perm = { | 37 | * We do so every BLKOCK_JOB_SLICE_TIME nanoseconds, or when there is |
50 | .bdrv_child_perm = no_perm_default_perms, | 38 | * an error, or when the source is clean, whichever comes first. */ |
51 | }; | 39 | delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns; |
52 | 40 | + WITH_JOB_LOCK_GUARD() { | |
53 | +static void exclusive_write_perms(BlockDriverState *bs, BdrvChild *c, | 41 | + iostatus = s->common.iostatus; |
54 | + BdrvChildRole role, | 42 | + } |
55 | + BlockReopenQueue *reopen_queue, | 43 | if (delta < BLOCK_JOB_SLICE_TIME && |
56 | + uint64_t perm, uint64_t shared, | 44 | - s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) { |
57 | + uint64_t *nperm, uint64_t *nshared) | 45 | + iostatus == BLOCK_DEVICE_IO_STATUS_OK) { |
58 | +{ | 46 | if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 || |
59 | + *nperm = BLK_PERM_WRITE; | 47 | (cnt == 0 && s->in_flight > 0)) { |
60 | + *nshared = BLK_PERM_ALL & ~BLK_PERM_WRITE; | 48 | trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight); |
61 | +} | 49 | diff --git a/blockjob.c b/blockjob.c |
62 | + | 50 | index XXXXXXX..XXXXXXX 100644 |
63 | +static BlockDriver bdrv_exclusive_writer = { | 51 | --- a/blockjob.c |
64 | + .format_name = "exclusive-writer", | 52 | +++ b/blockjob.c |
65 | + .bdrv_child_perm = exclusive_write_perms, | 53 | @@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp) |
66 | +}; | 54 | return block_job_query_locked(job, errp); |
67 | + | 55 | } |
68 | static BlockDriverState *no_perm_node(const char *name) | 56 | |
57 | -static void block_job_iostatus_set_err(BlockJob *job, int error) | ||
58 | +/* Called with job lock held */ | ||
59 | +static void block_job_iostatus_set_err_locked(BlockJob *job, int error) | ||
69 | { | 60 | { |
70 | return bdrv_new_open_driver(&bdrv_no_perm, name, BDRV_O_RDWR, &error_abort); | 61 | if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) { |
71 | @@ -XXX,XX +XXX,XX @@ static BlockDriverState *pass_through_node(const char *name) | 62 | job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE : |
72 | BDRV_O_RDWR, &error_abort); | 63 | @@ -XXX,XX +XXX,XX @@ BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err, |
73 | } | 64 | */ |
74 | 65 | job->job.user_paused = true; | |
75 | +static BlockDriverState *exclusive_writer_node(const char *name) | 66 | } |
76 | +{ | 67 | + block_job_iostatus_set_err_locked(job, error); |
77 | + return bdrv_new_open_driver(&bdrv_exclusive_writer, name, | 68 | } |
78 | + BDRV_O_RDWR, &error_abort); | 69 | - block_job_iostatus_set_err(job, error); |
79 | +} | 70 | } |
80 | + | 71 | return action; |
81 | /* | ||
82 | * test_update_perm_tree | ||
83 | * | ||
84 | @@ -XXX,XX +XXX,XX @@ static void test_should_update_child(void) | ||
85 | blk_unref(root); | ||
86 | } | ||
87 | |||
88 | +/* | ||
89 | + * test_parallel_exclusive_write | ||
90 | + * | ||
91 | + * Check that when we replace node, old permissions of the node being removed | ||
92 | + * doesn't break the replacement. | ||
93 | + */ | ||
94 | +static void test_parallel_exclusive_write(void) | ||
95 | +{ | ||
96 | + BlockDriverState *top = exclusive_writer_node("top"); | ||
97 | + BlockDriverState *base = no_perm_node("base"); | ||
98 | + BlockDriverState *fl1 = pass_through_node("fl1"); | ||
99 | + BlockDriverState *fl2 = pass_through_node("fl2"); | ||
100 | + | ||
101 | + /* | ||
102 | + * bdrv_attach_child() eats child bs reference, so we need two @base | ||
103 | + * references for two filters: | ||
104 | + */ | ||
105 | + bdrv_ref(base); | ||
106 | + | ||
107 | + bdrv_attach_child(top, fl1, "backing", &child_of_bds, BDRV_CHILD_DATA, | ||
108 | + &error_abort); | ||
109 | + bdrv_attach_child(fl1, base, "backing", &child_of_bds, BDRV_CHILD_FILTERED, | ||
110 | + &error_abort); | ||
111 | + bdrv_attach_child(fl2, base, "backing", &child_of_bds, BDRV_CHILD_FILTERED, | ||
112 | + &error_abort); | ||
113 | + | ||
114 | + bdrv_replace_node(fl1, fl2, &error_abort); | ||
115 | + | ||
116 | + bdrv_unref(fl2); | ||
117 | + bdrv_unref(top); | ||
118 | +} | ||
119 | + | ||
120 | int main(int argc, char *argv[]) | ||
121 | { | ||
122 | + int i; | ||
123 | + bool debug = false; | ||
124 | + | ||
125 | + for (i = 1; i < argc; i++) { | ||
126 | + if (!strcmp(argv[i], "-d")) { | ||
127 | + debug = true; | ||
128 | + break; | ||
129 | + } | ||
130 | + } | ||
131 | + | ||
132 | bdrv_init(); | ||
133 | qemu_init_main_loop(&error_abort); | ||
134 | |||
135 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char *argv[]) | ||
136 | g_test_add_func("/bdrv-graph-mod/should-update-child", | ||
137 | test_should_update_child); | ||
138 | |||
139 | + if (debug) { | ||
140 | + g_test_add_func("/bdrv-graph-mod/parallel-exclusive-write", | ||
141 | + test_parallel_exclusive_write); | ||
142 | + } | ||
143 | + | ||
144 | return g_test_run(); | ||
145 | } | 72 | } |
146 | -- | 73 | -- |
147 | 2.30.2 | 74 | 2.37.3 |
148 | |||
149 | diff view generated by jsdifflib |
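The right-hand mirror.c hunk follows a pattern that recurs throughout the series: hold job_mutex only long enough to snapshot a protected field into a local variable, then act on the copy. A minimal sketch of that shape follows; the MirrorBlockJob type is assumed from mirror.c and do_mirror_io() is a made-up placeholder for the real I/O submission path:

```c
static void iostatus_snapshot_sketch(MirrorBlockJob *s)
{
    BlockDeviceIoStatus iostatus;

    WITH_JOB_LOCK_GUARD() {
        /* s->common.iostatus is protected by job_mutex */
        iostatus = s->common.iostatus;
    }

    /* Use the snapshot outside the locked section */
    if (iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        do_mirror_io(s);   /* placeholder, not a real QEMU function */
    }
}
```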
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | We don't have bdrv_replace_child(), so it's time for | 3 | Some callback implementations use bdrv_* APIs that assume the |
4 | bdrv_replace_child_safe() to take its place. | 4 | AioContext lock is held. Make sure this invariant is documented. |
5 | 5 | ||
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 6 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
7 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
8 | Message-Id: <20220926093214.506243-18-eesposit@redhat.com> | ||
7 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 9 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
8 | Message-Id: <20210428151804.439460-36-vsementsov@virtuozzo.com> | ||
9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
10 | --- | 11 | --- |
11 | block.c | 10 +++++----- | 12 | include/qemu/job.h | 27 +++++++++++++++++++++++++-- |
12 | 1 file changed, 5 insertions(+), 5 deletions(-) | 13 | 1 file changed, 25 insertions(+), 2 deletions(-) |
13 | 14 | ||
14 | diff --git a/block.c b/block.c | 15 | diff --git a/include/qemu/job.h b/include/qemu/job.h |
15 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/block.c | 17 | --- a/include/qemu/job.h |
17 | +++ b/block.c | 18 | +++ b/include/qemu/job.h |
18 | @@ -XXX,XX +XXX,XX @@ static TransactionActionDrv bdrv_replace_child_drv = { | 19 | @@ -XXX,XX +XXX,XX @@ typedef struct Job { |
20 | /** True if this job should automatically dismiss itself */ | ||
21 | bool auto_dismiss; | ||
22 | |||
23 | - /** The completion function that will be called when the job completes. */ | ||
24 | + /** | ||
25 | + * The completion function that will be called when the job completes. | ||
26 | + * Called with AioContext lock held, since many callback implementations | ||
27 | + * use bdrv_* functions that require to hold the lock. | ||
28 | + */ | ||
29 | BlockCompletionFunc *cb; | ||
30 | |||
31 | /** The opaque value that is passed to the completion function. */ | ||
32 | @@ -XXX,XX +XXX,XX @@ struct JobDriver { | ||
33 | * | ||
34 | * This callback will not be invoked if the job has already failed. | ||
35 | * If it fails, abort and then clean will be called. | ||
36 | + * | ||
37 | + * Called with AioContext lock held, since many callbacs implementations | ||
38 | + * use bdrv_* functions that require to hold the lock. | ||
39 | */ | ||
40 | int (*prepare)(Job *job); | ||
41 | |||
42 | @@ -XXX,XX +XXX,XX @@ struct JobDriver { | ||
43 | * | ||
44 | * All jobs will complete with a call to either .commit() or .abort() but | ||
45 | * never both. | ||
46 | + * | ||
47 | + * Called with AioContext lock held, since many callback implementations | ||
48 | + * use bdrv_* functions that require to hold the lock. | ||
49 | */ | ||
50 | void (*commit)(Job *job); | ||
51 | |||
52 | @@ -XXX,XX +XXX,XX @@ struct JobDriver { | ||
53 | * | ||
54 | * All jobs will complete with a call to either .commit() or .abort() but | ||
55 | * never both. | ||
56 | + * | ||
57 | + * Called with AioContext lock held, since many callback implementations | ||
58 | + * use bdrv_* functions that require to hold the lock. | ||
59 | */ | ||
60 | void (*abort)(Job *job); | ||
61 | |||
62 | @@ -XXX,XX +XXX,XX @@ struct JobDriver { | ||
63 | * .commit() or .abort(). Regardless of which callback is invoked after | ||
64 | * completion, .clean() will always be called, even if the job does not | ||
65 | * belong to a transaction group. | ||
66 | + * | ||
67 | + * Called with AioContext lock held, since many callbacs implementations | ||
68 | + * use bdrv_* functions that require to hold the lock. | ||
69 | */ | ||
70 | void (*clean)(Job *job); | ||
71 | |||
72 | @@ -XXX,XX +XXX,XX @@ struct JobDriver { | ||
73 | * READY). | ||
74 | * (If the callback is NULL, the job is assumed to terminate | ||
75 | * without I/O.) | ||
76 | + * | ||
77 | + * Called with AioContext lock held, since many callback implementations | ||
78 | + * use bdrv_* functions that require to hold the lock. | ||
79 | */ | ||
80 | bool (*cancel)(Job *job, bool force); | ||
81 | |||
82 | |||
83 | - /** Called when the job is freed */ | ||
84 | + /** | ||
85 | + * Called when the job is freed. | ||
86 | + * Called with AioContext lock held, since many callback implementations | ||
87 | + * use bdrv_* functions that require to hold the lock. | ||
88 | + */ | ||
89 | void (*free)(Job *job); | ||
19 | }; | 90 | }; |
20 | 91 | ||
21 | /* | ||
22 | - * bdrv_replace_child_safe | ||
23 | + * bdrv_replace_child | ||
24 | * | ||
25 | * Note: real unref of old_bs is done only on commit. | ||
26 | */ | ||
27 | -static void bdrv_replace_child_safe(BdrvChild *child, BlockDriverState *new_bs, | ||
28 | - Transaction *tran) | ||
29 | +static void bdrv_replace_child(BdrvChild *child, BlockDriverState *new_bs, | ||
30 | + Transaction *tran) | ||
31 | { | ||
32 | BdrvReplaceChildState *s = g_new(BdrvReplaceChildState, 1); | ||
33 | *s = (BdrvReplaceChildState) { | ||
34 | @@ -XXX,XX +XXX,XX @@ static void bdrv_remove_filter_or_cow_child(BlockDriverState *bs, | ||
35 | } | ||
36 | |||
37 | if (child->bs) { | ||
38 | - bdrv_replace_child_safe(child, NULL, tran); | ||
39 | + bdrv_replace_child(child, NULL, tran); | ||
40 | } | ||
41 | |||
42 | s = g_new(BdrvRemoveFilterOrCowChild, 1); | ||
43 | @@ -XXX,XX +XXX,XX @@ static int bdrv_replace_node_noperm(BlockDriverState *from, | ||
44 | c->name, from->node_name); | ||
45 | return -EPERM; | ||
46 | } | ||
47 | - bdrv_replace_child_safe(c, to, tran); | ||
48 | + bdrv_replace_child(c, to, tran); | ||
49 | } | ||
50 | |||
51 | return 0; | ||
52 | -- | 92 | -- |
53 | 2.30.2 | 93 | 2.37.3 |
54 | |||
55 | diff view generated by jsdifflib |
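The documentation added on the right is best read as a contract for driver authors: the hooks it annotates (.prepare, .commit, .abort, .clean, .cancel, .free) are invoked with the AioContext lock already held, so they may call bdrv_* functions without taking it themselves. A purely illustrative skeleton, with invented driver and hook names, just to show where that guarantee applies:

```c
static int example_prepare(Job *job)
{
    /* Safe to call bdrv_* helpers here: the core holds the AioContext lock */
    return 0;
}

static void example_free(Job *job)
{
    /* Also called with the AioContext lock held */
}

static const JobDriver example_driver = {
    .prepare = example_prepare,
    .free    = example_free,
    /* .commit, .abort, .clean and .cancel follow the same contract */
};
```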
1 | For a successful conversion of an image, we must make sure that its | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | content doesn't change during the conversion. | ||
3 | 2 | ||
4 | A special case of this is using the same image file both as the source | 3 | Change the job_{lock/unlock} and macros to use job_mutex. |
5 | and as the destination. If both input and output format are raw, the | ||
6 | operation would just be useless work, with other formats it is a sure | ||
7 | way to destroy the image. This will now fail because the image file | ||
8 | can't be opened a second time for the output when opening it for the | ||
9 | input has already acquired file locks to unshare BLK_PERM_WRITE. | ||
10 | 4 | ||
11 | Nevertheless, if there is some reason in a special case why it is | 5 | Now that they are not nop anymore, remove the aiocontext |
12 | actually okay to allow writes to the image while it is being converted, | 6 | to avoid deadlocks. |
13 | -U can still be used to force sharing all permissions. | ||
14 | 7 | ||
15 | Note that for most image formats, BLK_PERM_WRITE would already be | 8 | Therefore: |
16 | unshared by the format driver, so this only really makes a difference | 9 | - when possible, remove completely the aiocontext lock/unlock pair |
17 | for raw source images (but any output format). | 10 | - if it is used by some other function too, reduce the locking |
11 | section as much as possible, leaving the job API outside. | ||
12 | - change AIO_WAIT_WHILE in AIO_WAIT_WHILE_UNLOCKED, since we | ||
13 | are not using the aiocontext lock anymore | ||
18 | 14 | ||
19 | Reported-by: Xueqiang Wei <xuwei@redhat.com> | 15 | The only functions that still need the aiocontext lock are: |
20 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 16 | - the JobDriver callbacks, already documented in job.h |
21 | Reviewed-by: Eric Blake <eblake@redhat.com> | 17 | - job_cancel_sync() in replication.c is called with aio_context_lock |
22 | Message-Id: <20210422164344.283389-3-kwolf@redhat.com> | 18 | taken, but now job is using AIO_WAIT_WHILE_UNLOCKED so we need to |
23 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 19 | release the lock. |
20 | |||
21 | Reduce the locking section to only cover the callback invocation | ||
22 | and document the functions that take the AioContext lock, | ||
23 | to avoid taking it twice. | ||
24 | |||
25 | Also remove real_job_{lock/unlock}, as they are replaced by the | ||
26 | public functions. | ||
27 | |||
28 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> | ||
29 | Message-Id: <20220926093214.506243-19-eesposit@redhat.com> | ||
30 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
31 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
24 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 32 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
25 | --- | 33 | --- |
26 | qemu-img.c | 2 +- | 34 | include/qemu/job.h | 17 ++--- |
27 | 1 file changed, 1 insertion(+), 1 deletion(-) | 35 | block/replication.c | 2 + |
36 | blockdev.c | 72 +++----------------- | ||
37 | job-qmp.c | 46 +++---------- | ||
38 | job.c | 111 +++++++++---------------------- | ||
39 | qemu-img.c | 2 - | ||
40 | tests/unit/test-bdrv-drain.c | 4 +- | ||
41 | tests/unit/test-block-iothread.c | 2 +- | ||
42 | tests/unit/test-blockjob.c | 19 +++--- | ||
43 | 9 files changed, 72 insertions(+), 203 deletions(-) | ||
28 | 44 | ||
45 | diff --git a/include/qemu/job.h b/include/qemu/job.h | ||
46 | index XXXXXXX..XXXXXXX 100644 | ||
47 | --- a/include/qemu/job.h | ||
48 | +++ b/include/qemu/job.h | ||
49 | @@ -XXX,XX +XXX,XX @@ typedef struct Job { | ||
50 | AioContext *aio_context; | ||
51 | |||
52 | |||
53 | - /** Protected by AioContext lock */ | ||
54 | + /** Protected by job_mutex */ | ||
55 | |||
56 | /** Reference count of the block job */ | ||
57 | int refcnt; | ||
58 | @@ -XXX,XX +XXX,XX @@ typedef struct Job { | ||
59 | /** | ||
60 | * Set to false by the job while the coroutine has yielded and may be | ||
61 | * re-entered by job_enter(). There may still be I/O or event loop activity | ||
62 | - * pending. Accessed under block_job_mutex (in blockjob.c). | ||
63 | + * pending. Accessed under job_mutex. | ||
64 | * | ||
65 | * When the job is deferred to the main loop, busy is true as long as the | ||
66 | * bottom half is still pending. | ||
67 | @@ -XXX,XX +XXX,XX @@ typedef enum JobCreateFlags { | ||
68 | |||
69 | extern QemuMutex job_mutex; | ||
70 | |||
71 | -#define JOB_LOCK_GUARD() /* QEMU_LOCK_GUARD(&job_mutex) */ | ||
72 | +#define JOB_LOCK_GUARD() QEMU_LOCK_GUARD(&job_mutex) | ||
73 | |||
74 | -#define WITH_JOB_LOCK_GUARD() /* WITH_QEMU_LOCK_GUARD(&job_mutex) */ | ||
75 | +#define WITH_JOB_LOCK_GUARD() WITH_QEMU_LOCK_GUARD(&job_mutex) | ||
76 | |||
77 | /** | ||
78 | * job_lock: | ||
79 | @@ -XXX,XX +XXX,XX @@ void job_ref_locked(Job *job); | ||
80 | /** | ||
81 | * Release a reference that was previously acquired with job_ref() or | ||
82 | * job_create(). If it's the last reference to the object, it will be freed. | ||
83 | + * | ||
84 | + * Takes AioContext lock internally to invoke a job->driver callback. | ||
85 | */ | ||
86 | void job_unref(Job *job); | ||
87 | |||
88 | @@ -XXX,XX +XXX,XX @@ void job_user_cancel_locked(Job *job, bool force, Error **errp); | ||
89 | * Returns the return value from the job if the job actually completed | ||
90 | * during the call, or -ECANCELED if it was canceled. | ||
91 | * | ||
92 | - * Callers must hold the AioContext lock of job->aio_context. | ||
93 | + * Called with job_lock *not* held. | ||
94 | */ | ||
95 | int job_cancel_sync(Job *job, bool force); | ||
96 | |||
97 | @@ -XXX,XX +XXX,XX @@ void job_cancel_sync_all(void); | ||
98 | * function). | ||
99 | * | ||
100 | * Returns the return value from the job. | ||
101 | - * | ||
102 | - * Callers must hold the AioContext lock of job->aio_context. | ||
103 | + * Called with job_lock *not* held. | ||
104 | */ | ||
105 | int job_complete_sync(Job *job, Error **errp); | ||
106 | |||
107 | @@ -XXX,XX +XXX,XX @@ void job_dismiss_locked(Job **job, Error **errp); | ||
108 | * Returns 0 if the job is successfully completed, -ECANCELED if the job was | ||
109 | * cancelled before completing, and -errno in other error cases. | ||
110 | * | ||
111 | - * Callers must hold the AioContext lock of job->aio_context. | ||
112 | + * Called with job_lock *not* held. | ||
113 | */ | ||
114 | int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), | ||
115 | Error **errp); | ||
116 | diff --git a/block/replication.c b/block/replication.c | ||
117 | index XXXXXXX..XXXXXXX 100644 | ||
118 | --- a/block/replication.c | ||
119 | +++ b/block/replication.c | ||
120 | @@ -XXX,XX +XXX,XX @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp) | ||
121 | * disk, secondary disk in backup_job_completed(). | ||
122 | */ | ||
123 | if (s->backup_job) { | ||
124 | + aio_context_release(aio_context); | ||
125 | job_cancel_sync(&s->backup_job->job, true); | ||
126 | + aio_context_acquire(aio_context); | ||
127 | } | ||
128 | |||
129 | if (!failover) { | ||
130 | diff --git a/blockdev.c b/blockdev.c | ||
131 | index XXXXXXX..XXXXXXX 100644 | ||
132 | --- a/blockdev.c | ||
133 | +++ b/blockdev.c | ||
134 | @@ -XXX,XX +XXX,XX @@ void blockdev_mark_auto_del(BlockBackend *blk) | ||
135 | for (job = block_job_next_locked(NULL); job; | ||
136 | job = block_job_next_locked(job)) { | ||
137 | if (block_job_has_bdrv(job, blk_bs(blk))) { | ||
138 | - AioContext *aio_context = job->job.aio_context; | ||
139 | - aio_context_acquire(aio_context); | ||
140 | - | ||
141 | job_cancel_locked(&job->job, false); | ||
142 | - | ||
143 | - aio_context_release(aio_context); | ||
144 | } | ||
145 | } | ||
146 | |||
147 | @@ -XXX,XX +XXX,XX @@ static void drive_backup_abort(BlkActionState *common) | ||
148 | DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common); | ||
149 | |||
150 | if (state->job) { | ||
151 | - AioContext *aio_context; | ||
152 | - | ||
153 | - aio_context = bdrv_get_aio_context(state->bs); | ||
154 | - aio_context_acquire(aio_context); | ||
155 | - | ||
156 | job_cancel_sync(&state->job->job, true); | ||
157 | - | ||
158 | - aio_context_release(aio_context); | ||
159 | } | ||
160 | } | ||
161 | |||
162 | @@ -XXX,XX +XXX,XX @@ static void blockdev_backup_abort(BlkActionState *common) | ||
163 | BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common); | ||
164 | |||
165 | if (state->job) { | ||
166 | - AioContext *aio_context; | ||
167 | - | ||
168 | - aio_context = bdrv_get_aio_context(state->bs); | ||
169 | - aio_context_acquire(aio_context); | ||
170 | - | ||
171 | job_cancel_sync(&state->job->job, true); | ||
172 | - | ||
173 | - aio_context_release(aio_context); | ||
174 | } | ||
175 | } | ||
176 | |||
177 | @@ -XXX,XX +XXX,XX @@ out: | ||
178 | } | ||
179 | |||
180 | /* | ||
181 | - * Get a block job using its ID and acquire its AioContext. | ||
182 | - * Called with job_mutex held. | ||
183 | + * Get a block job using its ID. Called with job_mutex held. | ||
184 | */ | ||
185 | -static BlockJob *find_block_job_locked(const char *id, | ||
186 | - AioContext **aio_context, | ||
187 | - Error **errp) | ||
188 | +static BlockJob *find_block_job_locked(const char *id, Error **errp) | ||
189 | { | ||
190 | BlockJob *job; | ||
191 | |||
192 | assert(id != NULL); | ||
193 | |||
194 | - *aio_context = NULL; | ||
195 | - | ||
196 | job = block_job_get_locked(id); | ||
197 | |||
198 | if (!job) { | ||
199 | @@ -XXX,XX +XXX,XX @@ static BlockJob *find_block_job_locked(const char *id, | ||
200 | return NULL; | ||
201 | } | ||
202 | |||
203 | - *aio_context = block_job_get_aio_context(job); | ||
204 | - aio_context_acquire(*aio_context); | ||
205 | - | ||
206 | return job; | ||
207 | } | ||
208 | |||
209 | void qmp_block_job_set_speed(const char *device, int64_t speed, Error **errp) | ||
210 | { | ||
211 | - AioContext *aio_context; | ||
212 | BlockJob *job; | ||
213 | |||
214 | JOB_LOCK_GUARD(); | ||
215 | - job = find_block_job_locked(device, &aio_context, errp); | ||
216 | + job = find_block_job_locked(device, errp); | ||
217 | |||
218 | if (!job) { | ||
219 | return; | ||
220 | } | ||
221 | |||
222 | block_job_set_speed_locked(job, speed, errp); | ||
223 | - aio_context_release(aio_context); | ||
224 | } | ||
225 | |||
226 | void qmp_block_job_cancel(const char *device, | ||
227 | bool has_force, bool force, Error **errp) | ||
228 | { | ||
229 | - AioContext *aio_context; | ||
230 | BlockJob *job; | ||
231 | |||
232 | JOB_LOCK_GUARD(); | ||
233 | - job = find_block_job_locked(device, &aio_context, errp); | ||
234 | + job = find_block_job_locked(device, errp); | ||
235 | |||
236 | if (!job) { | ||
237 | return; | ||
238 | @@ -XXX,XX +XXX,XX @@ void qmp_block_job_cancel(const char *device, | ||
239 | if (job_user_paused_locked(&job->job) && !force) { | ||
240 | error_setg(errp, "The block job for device '%s' is currently paused", | ||
241 | device); | ||
242 | - goto out; | ||
243 | + return; | ||
244 | } | ||
245 | |||
246 | trace_qmp_block_job_cancel(job); | ||
247 | job_user_cancel_locked(&job->job, force, errp); | ||
248 | -out: | ||
249 | - aio_context_release(aio_context); | ||
250 | } | ||
251 | |||
252 | void qmp_block_job_pause(const char *device, Error **errp) | ||
253 | { | ||
254 | - AioContext *aio_context; | ||
255 | BlockJob *job; | ||
256 | |||
257 | JOB_LOCK_GUARD(); | ||
258 | - job = find_block_job_locked(device, &aio_context, errp); | ||
259 | + job = find_block_job_locked(device, errp); | ||
260 | |||
261 | if (!job) { | ||
262 | return; | ||
263 | @@ -XXX,XX +XXX,XX @@ void qmp_block_job_pause(const char *device, Error **errp) | ||
264 | |||
265 | trace_qmp_block_job_pause(job); | ||
266 | job_user_pause_locked(&job->job, errp); | ||
267 | - aio_context_release(aio_context); | ||
268 | } | ||
269 | |||
270 | void qmp_block_job_resume(const char *device, Error **errp) | ||
271 | { | ||
272 | - AioContext *aio_context; | ||
273 | BlockJob *job; | ||
274 | |||
275 | JOB_LOCK_GUARD(); | ||
276 | - job = find_block_job_locked(device, &aio_context, errp); | ||
277 | + job = find_block_job_locked(device, errp); | ||
278 | |||
279 | if (!job) { | ||
280 | return; | ||
281 | @@ -XXX,XX +XXX,XX @@ void qmp_block_job_resume(const char *device, Error **errp) | ||
282 | |||
283 | trace_qmp_block_job_resume(job); | ||
284 | job_user_resume_locked(&job->job, errp); | ||
285 | - aio_context_release(aio_context); | ||
286 | } | ||
287 | |||
288 | void qmp_block_job_complete(const char *device, Error **errp) | ||
289 | { | ||
290 | - AioContext *aio_context; | ||
291 | BlockJob *job; | ||
292 | |||
293 | JOB_LOCK_GUARD(); | ||
294 | - job = find_block_job_locked(device, &aio_context, errp); | ||
295 | + job = find_block_job_locked(device, errp); | ||
296 | |||
297 | if (!job) { | ||
298 | return; | ||
299 | @@ -XXX,XX +XXX,XX @@ void qmp_block_job_complete(const char *device, Error **errp) | ||
300 | |||
301 | trace_qmp_block_job_complete(job); | ||
302 | job_complete_locked(&job->job, errp); | ||
303 | - aio_context_release(aio_context); | ||
304 | } | ||
305 | |||
306 | void qmp_block_job_finalize(const char *id, Error **errp) | ||
307 | { | ||
308 | - AioContext *aio_context; | ||
309 | BlockJob *job; | ||
310 | |||
311 | JOB_LOCK_GUARD(); | ||
312 | - job = find_block_job_locked(id, &aio_context, errp); | ||
313 | + job = find_block_job_locked(id, errp); | ||
314 | |||
315 | if (!job) { | ||
316 | return; | ||
317 | @@ -XXX,XX +XXX,XX @@ void qmp_block_job_finalize(const char *id, Error **errp) | ||
318 | job_ref_locked(&job->job); | ||
319 | job_finalize_locked(&job->job, errp); | ||
320 | |||
321 | - /* | ||
322 | - * Job's context might have changed via job_finalize (and job_txn_apply | ||
323 | - * automatically acquires the new one), so make sure we release the correct | ||
324 | - * one. | ||
325 | - */ | ||
326 | - aio_context = block_job_get_aio_context(job); | ||
327 | job_unref_locked(&job->job); | ||
328 | - aio_context_release(aio_context); | ||
329 | } | ||
330 | |||
331 | void qmp_block_job_dismiss(const char *id, Error **errp) | ||
332 | { | ||
333 | - AioContext *aio_context; | ||
334 | BlockJob *bjob; | ||
335 | Job *job; | ||
336 | |||
337 | JOB_LOCK_GUARD(); | ||
338 | - bjob = find_block_job_locked(id, &aio_context, errp); | ||
339 | + bjob = find_block_job_locked(id, errp); | ||
340 | |||
341 | if (!bjob) { | ||
342 | return; | ||
343 | @@ -XXX,XX +XXX,XX @@ void qmp_block_job_dismiss(const char *id, Error **errp) | ||
344 | trace_qmp_block_job_dismiss(bjob); | ||
345 | job = &bjob->job; | ||
346 | job_dismiss_locked(&job, errp); | ||
347 | - aio_context_release(aio_context); | ||
348 | } | ||
349 | |||
350 | void qmp_change_backing_file(const char *device, | ||
351 | @@ -XXX,XX +XXX,XX @@ BlockJobInfoList *qmp_query_block_jobs(Error **errp) | ||
352 | for (job = block_job_next_locked(NULL); job; | ||
353 | job = block_job_next_locked(job)) { | ||
354 | BlockJobInfo *value; | ||
355 | - AioContext *aio_context; | ||
356 | |||
357 | if (block_job_is_internal(job)) { | ||
358 | continue; | ||
359 | } | ||
360 | - aio_context = block_job_get_aio_context(job); | ||
361 | - aio_context_acquire(aio_context); | ||
362 | value = block_job_query_locked(job, errp); | ||
363 | - aio_context_release(aio_context); | ||
364 | if (!value) { | ||
365 | qapi_free_BlockJobInfoList(head); | ||
366 | return NULL; | ||
367 | diff --git a/job-qmp.c b/job-qmp.c | ||
368 | index XXXXXXX..XXXXXXX 100644 | ||
369 | --- a/job-qmp.c | ||
370 | +++ b/job-qmp.c | ||
371 | @@ -XXX,XX +XXX,XX @@ | ||
372 | #include "trace/trace-root.h" | ||
373 | |||
374 | /* | ||
375 | - * Get a job using its ID and acquire its AioContext. | ||
376 | - * Called with job_mutex held. | ||
377 | + * Get a job using its ID. Called with job_mutex held. | ||
378 | */ | ||
379 | -static Job *find_job_locked(const char *id, | ||
380 | - AioContext **aio_context, | ||
381 | - Error **errp) | ||
382 | +static Job *find_job_locked(const char *id, Error **errp) | ||
383 | { | ||
384 | Job *job; | ||
385 | |||
386 | - *aio_context = NULL; | ||
387 | - | ||
388 | job = job_get_locked(id); | ||
389 | if (!job) { | ||
390 | error_setg(errp, "Job not found"); | ||
391 | return NULL; | ||
392 | } | ||
393 | |||
394 | - *aio_context = job->aio_context; | ||
395 | - aio_context_acquire(*aio_context); | ||
396 | - | ||
397 | return job; | ||
398 | } | ||
399 | |||
400 | void qmp_job_cancel(const char *id, Error **errp) | ||
401 | { | ||
402 | - AioContext *aio_context; | ||
403 | Job *job; | ||
404 | |||
405 | JOB_LOCK_GUARD(); | ||
406 | - job = find_job_locked(id, &aio_context, errp); | ||
407 | + job = find_job_locked(id, errp); | ||
408 | |||
409 | if (!job) { | ||
410 | return; | ||
411 | @@ -XXX,XX +XXX,XX @@ void qmp_job_cancel(const char *id, Error **errp) | ||
412 | |||
413 | trace_qmp_job_cancel(job); | ||
414 | job_user_cancel_locked(job, true, errp); | ||
415 | - aio_context_release(aio_context); | ||
416 | } | ||
417 | |||
418 | void qmp_job_pause(const char *id, Error **errp) | ||
419 | { | ||
420 | - AioContext *aio_context; | ||
421 | Job *job; | ||
422 | |||
423 | JOB_LOCK_GUARD(); | ||
424 | - job = find_job_locked(id, &aio_context, errp); | ||
425 | + job = find_job_locked(id, errp); | ||
426 | |||
427 | if (!job) { | ||
428 | return; | ||
429 | @@ -XXX,XX +XXX,XX @@ void qmp_job_pause(const char *id, Error **errp) | ||
430 | |||
431 | trace_qmp_job_pause(job); | ||
432 | job_user_pause_locked(job, errp); | ||
433 | - aio_context_release(aio_context); | ||
434 | } | ||
435 | |||
436 | void qmp_job_resume(const char *id, Error **errp) | ||
437 | { | ||
438 | - AioContext *aio_context; | ||
439 | Job *job; | ||
440 | |||
441 | JOB_LOCK_GUARD(); | ||
442 | - job = find_job_locked(id, &aio_context, errp); | ||
443 | + job = find_job_locked(id, errp); | ||
444 | |||
445 | if (!job) { | ||
446 | return; | ||
447 | @@ -XXX,XX +XXX,XX @@ void qmp_job_resume(const char *id, Error **errp) | ||
448 | |||
449 | trace_qmp_job_resume(job); | ||
450 | job_user_resume_locked(job, errp); | ||
451 | - aio_context_release(aio_context); | ||
452 | } | ||
453 | |||
454 | void qmp_job_complete(const char *id, Error **errp) | ||
455 | { | ||
456 | - AioContext *aio_context; | ||
457 | Job *job; | ||
458 | |||
459 | JOB_LOCK_GUARD(); | ||
460 | - job = find_job_locked(id, &aio_context, errp); | ||
461 | + job = find_job_locked(id, errp); | ||
462 | |||
463 | if (!job) { | ||
464 | return; | ||
465 | @@ -XXX,XX +XXX,XX @@ void qmp_job_complete(const char *id, Error **errp) | ||
466 | |||
467 | trace_qmp_job_complete(job); | ||
468 | job_complete_locked(job, errp); | ||
469 | - aio_context_release(aio_context); | ||
470 | } | ||
471 | |||
472 | void qmp_job_finalize(const char *id, Error **errp) | ||
473 | { | ||
474 | - AioContext *aio_context; | ||
475 | Job *job; | ||
476 | |||
477 | JOB_LOCK_GUARD(); | ||
478 | - job = find_job_locked(id, &aio_context, errp); | ||
479 | + job = find_job_locked(id, errp); | ||
480 | |||
481 | if (!job) { | ||
482 | return; | ||
483 | @@ -XXX,XX +XXX,XX @@ void qmp_job_finalize(const char *id, Error **errp) | ||
484 | job_ref_locked(job); | ||
485 | job_finalize_locked(job, errp); | ||
486 | |||
487 | - /* | ||
488 | - * Job's context might have changed via job_finalize (and job_txn_apply | ||
489 | - * automatically acquires the new one), so make sure we release the correct | ||
490 | - * one. | ||
491 | - */ | ||
492 | - aio_context = job->aio_context; | ||
493 | job_unref_locked(job); | ||
494 | - aio_context_release(aio_context); | ||
495 | } | ||
496 | |||
497 | void qmp_job_dismiss(const char *id, Error **errp) | ||
498 | { | ||
499 | - AioContext *aio_context; | ||
500 | Job *job; | ||
501 | |||
502 | JOB_LOCK_GUARD(); | ||
503 | - job = find_job_locked(id, &aio_context, errp); | ||
504 | + job = find_job_locked(id, errp); | ||
505 | |||
506 | if (!job) { | ||
507 | return; | ||
508 | @@ -XXX,XX +XXX,XX @@ void qmp_job_dismiss(const char *id, Error **errp) | ||
509 | |||
510 | trace_qmp_job_dismiss(job); | ||
511 | job_dismiss_locked(&job, errp); | ||
512 | - aio_context_release(aio_context); | ||
513 | } | ||
514 | |||
515 | /* Called with job_mutex held. */ | ||
516 | @@ -XXX,XX +XXX,XX @@ JobInfoList *qmp_query_jobs(Error **errp) | ||
517 | |||
518 | for (job = job_next_locked(NULL); job; job = job_next_locked(job)) { | ||
519 | JobInfo *value; | ||
520 | - AioContext *aio_context; | ||
521 | |||
522 | if (job_is_internal(job)) { | ||
523 | continue; | ||
524 | } | ||
525 | - aio_context = job->aio_context; | ||
526 | - aio_context_acquire(aio_context); | ||
527 | value = job_query_single_locked(job, errp); | ||
528 | - aio_context_release(aio_context); | ||
529 | if (!value) { | ||
530 | qapi_free_JobInfoList(head); | ||
531 | return NULL; | ||
532 | diff --git a/job.c b/job.c | ||
533 | index XXXXXXX..XXXXXXX 100644 | ||
534 | --- a/job.c | ||
535 | +++ b/job.c | ||
536 | @@ -XXX,XX +XXX,XX @@ | ||
537 | * | ||
538 | * The second includes functions used by the job drivers and sometimes | ||
539 | * by the core block layer. These delegate the locking to the callee instead. | ||
540 | - * | ||
541 | - * TODO Actually make this true | ||
542 | */ | ||
543 | |||
544 | /* | ||
545 | @@ -XXX,XX +XXX,XX @@ struct JobTxn { | ||
546 | }; | ||
547 | |||
548 | void job_lock(void) | ||
549 | -{ | ||
550 | - /* nop */ | ||
551 | -} | ||
552 | - | ||
553 | -void job_unlock(void) | ||
554 | -{ | ||
555 | - /* nop */ | ||
556 | -} | ||
557 | - | ||
558 | -static void real_job_lock(void) | ||
559 | { | ||
560 | qemu_mutex_lock(&job_mutex); | ||
561 | } | ||
562 | |||
563 | -static void real_job_unlock(void) | ||
564 | +void job_unlock(void) | ||
565 | { | ||
566 | qemu_mutex_unlock(&job_mutex); | ||
567 | } | ||
568 | @@ -XXX,XX +XXX,XX @@ static void job_txn_del_job_locked(Job *job) | ||
569 | /* Called with job_mutex held, but releases it temporarily. */ | ||
570 | static int job_txn_apply_locked(Job *job, int fn(Job *)) | ||
571 | { | ||
572 | - AioContext *inner_ctx; | ||
573 | Job *other_job, *next; | ||
574 | JobTxn *txn = job->txn; | ||
575 | int rc = 0; | ||
576 | @@ -XXX,XX +XXX,XX @@ static int job_txn_apply_locked(Job *job, int fn(Job *)) | ||
577 | * break AIO_WAIT_WHILE from within fn. | ||
578 | */ | ||
579 | job_ref_locked(job); | ||
580 | - aio_context_release(job->aio_context); | ||
581 | |||
582 | QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) { | ||
583 | - inner_ctx = other_job->aio_context; | ||
584 | - aio_context_acquire(inner_ctx); | ||
585 | rc = fn(other_job); | ||
586 | - aio_context_release(inner_ctx); | ||
587 | if (rc) { | ||
588 | break; | ||
589 | } | ||
590 | } | ||
591 | |||
592 | - /* | ||
593 | - * Note that job->aio_context might have been changed by calling fn, so we | ||
594 | - * can't use a local variable to cache it. | ||
595 | - */ | ||
596 | - aio_context_acquire(job->aio_context); | ||
597 | job_unref_locked(job); | ||
598 | return rc; | ||
599 | } | ||
600 | @@ -XXX,XX +XXX,XX @@ void job_unref_locked(Job *job) | ||
601 | assert(!job->txn); | ||
602 | |||
603 | if (job->driver->free) { | ||
604 | + AioContext *aio_context = job->aio_context; | ||
605 | job_unlock(); | ||
606 | + /* FIXME: aiocontext lock is required because cb calls blk_unref */ | ||
607 | + aio_context_acquire(aio_context); | ||
608 | job->driver->free(job); | ||
609 | + aio_context_release(aio_context); | ||
610 | job_lock(); | ||
611 | } | ||
612 | |||
613 | @@ -XXX,XX +XXX,XX @@ void job_enter_cond_locked(Job *job, bool(*fn)(Job *job)) | ||
614 | return; | ||
615 | } | ||
616 | |||
617 | - real_job_lock(); | ||
618 | if (job->busy) { | ||
619 | - real_job_unlock(); | ||
620 | return; | ||
621 | } | ||
622 | |||
623 | if (fn && !fn(job)) { | ||
624 | - real_job_unlock(); | ||
625 | return; | ||
626 | } | ||
627 | |||
628 | assert(!job->deferred_to_main_loop); | ||
629 | timer_del(&job->sleep_timer); | ||
630 | job->busy = true; | ||
631 | - real_job_unlock(); | ||
632 | job_unlock(); | ||
633 | aio_co_wake(job->co); | ||
634 | job_lock(); | ||
635 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn job_do_yield_locked(Job *job, uint64_t ns) | ||
636 | { | ||
637 | AioContext *next_aio_context; | ||
638 | |||
639 | - real_job_lock(); | ||
640 | if (ns != -1) { | ||
641 | timer_mod(&job->sleep_timer, ns); | ||
642 | } | ||
643 | job->busy = false; | ||
644 | job_event_idle_locked(job); | ||
645 | - real_job_unlock(); | ||
646 | job_unlock(); | ||
647 | qemu_coroutine_yield(); | ||
648 | job_lock(); | ||
649 | @@ -XXX,XX +XXX,XX @@ static void job_clean(Job *job) | ||
650 | } | ||
651 | } | ||
652 | |||
653 | -/* Called with job_mutex held, but releases it temporarily */ | ||
654 | +/* | ||
655 | + * Called with job_mutex held, but releases it temporarily. | ||
656 | + * Takes AioContext lock internally to invoke a job->driver callback. | ||
657 | + */ | ||
658 | static int job_finalize_single_locked(Job *job) | ||
659 | { | ||
660 | int job_ret; | ||
661 | + AioContext *ctx = job->aio_context; | ||
662 | |||
663 | assert(job_is_completed_locked(job)); | ||
664 | |||
665 | @@ -XXX,XX +XXX,XX @@ static int job_finalize_single_locked(Job *job) | ||
666 | |||
667 | job_ret = job->ret; | ||
668 | job_unlock(); | ||
669 | + aio_context_acquire(ctx); | ||
670 | |||
671 | if (!job_ret) { | ||
672 | job_commit(job); | ||
673 | @@ -XXX,XX +XXX,XX @@ static int job_finalize_single_locked(Job *job) | ||
674 | } | ||
675 | job_clean(job); | ||
676 | |||
677 | - job_lock(); | ||
678 | - | ||
679 | if (job->cb) { | ||
680 | - job_ret = job->ret; | ||
681 | - job_unlock(); | ||
682 | job->cb(job->opaque, job_ret); | ||
683 | - job_lock(); | ||
684 | } | ||
685 | |||
686 | + aio_context_release(ctx); | ||
687 | + job_lock(); | ||
688 | + | ||
689 | /* Emit events only if we actually started */ | ||
690 | if (job_started_locked(job)) { | ||
691 | if (job_is_cancelled_locked(job)) { | ||
692 | @@ -XXX,XX +XXX,XX @@ static int job_finalize_single_locked(Job *job) | ||
693 | return 0; | ||
694 | } | ||
695 | |||
696 | -/* Called with job_mutex held, but releases it temporarily */ | ||
697 | +/* | ||
698 | + * Called with job_mutex held, but releases it temporarily. | ||
699 | + * Takes AioContext lock internally to invoke a job->driver callback. | ||
700 | + */ | ||
701 | static void job_cancel_async_locked(Job *job, bool force) | ||
702 | { | ||
703 | + AioContext *ctx = job->aio_context; | ||
704 | GLOBAL_STATE_CODE(); | ||
705 | if (job->driver->cancel) { | ||
706 | job_unlock(); | ||
707 | + aio_context_acquire(ctx); | ||
708 | force = job->driver->cancel(job, force); | ||
709 | + aio_context_release(ctx); | ||
710 | job_lock(); | ||
711 | } else { | ||
712 | /* No .cancel() means the job will behave as if force-cancelled */ | ||
713 | @@ -XXX,XX +XXX,XX @@ static void job_cancel_async_locked(Job *job, bool force) | ||
714 | } | ||
715 | } | ||
716 | |||
717 | -/* Called with job_mutex held, but releases it temporarily. */ | ||
718 | +/* | ||
719 | + * Called with job_mutex held, but releases it temporarily. | ||
720 | + * Takes AioContext lock internally to invoke a job->driver callback. | ||
721 | + */ | ||
722 | static void job_completed_txn_abort_locked(Job *job) | ||
723 | { | ||
724 | - AioContext *ctx; | ||
725 | JobTxn *txn = job->txn; | ||
726 | Job *other_job; | ||
727 | |||
728 | @@ -XXX,XX +XXX,XX @@ static void job_completed_txn_abort_locked(Job *job) | ||
729 | txn->aborting = true; | ||
730 | job_txn_ref_locked(txn); | ||
731 | |||
732 | - /* | ||
733 | - * We can only hold the single job's AioContext lock while calling | ||
734 | - * job_finalize_single() because the finalization callbacks can involve | ||
735 | - * calls of AIO_WAIT_WHILE(), which could deadlock otherwise. | ||
736 | - * Note that the job's AioContext may change when it is finalized. | ||
737 | - */ | ||
738 | job_ref_locked(job); | ||
739 | - aio_context_release(job->aio_context); | ||
740 | |||
741 | /* Other jobs are effectively cancelled by us, set the status for | ||
742 | * them; this job, however, may or may not be cancelled, depending | ||
743 | * on the caller, so leave it. */ | ||
744 | QLIST_FOREACH(other_job, &txn->jobs, txn_list) { | ||
745 | if (other_job != job) { | ||
746 | - ctx = other_job->aio_context; | ||
747 | - aio_context_acquire(ctx); | ||
748 | /* | ||
749 | * This is a transaction: If one job failed, no result will matter. | ||
750 | * Therefore, pass force=true to terminate all other jobs as quickly | ||
751 | * as possible. | ||
752 | */ | ||
753 | job_cancel_async_locked(other_job, true); | ||
754 | - aio_context_release(ctx); | ||
755 | } | ||
756 | } | ||
757 | while (!QLIST_EMPTY(&txn->jobs)) { | ||
758 | other_job = QLIST_FIRST(&txn->jobs); | ||
759 | - /* | ||
760 | - * The job's AioContext may change, so store it in @ctx so we | ||
761 | - * release the same context that we have acquired before. | ||
762 | - */ | ||
763 | - ctx = other_job->aio_context; | ||
764 | - aio_context_acquire(ctx); | ||
765 | if (!job_is_completed_locked(other_job)) { | ||
766 | assert(job_cancel_requested_locked(other_job)); | ||
767 | job_finish_sync_locked(other_job, NULL, NULL); | ||
768 | } | ||
769 | job_finalize_single_locked(other_job); | ||
770 | - aio_context_release(ctx); | ||
771 | } | ||
772 | |||
773 | - /* | ||
774 | - * Use job_ref()/job_unref() so we can read the AioContext here | ||
775 | - * even if the job went away during job_finalize_single(). | ||
776 | - */ | ||
777 | - aio_context_acquire(job->aio_context); | ||
778 | job_unref_locked(job); | ||
779 | - | ||
780 | job_txn_unref_locked(txn); | ||
781 | } | ||
782 | |||
783 | @@ -XXX,XX +XXX,XX @@ static void job_completed_txn_abort_locked(Job *job) | ||
784 | static int job_prepare_locked(Job *job) | ||
785 | { | ||
786 | int ret; | ||
787 | + AioContext *ctx = job->aio_context; | ||
788 | |||
789 | GLOBAL_STATE_CODE(); | ||
790 | + | ||
791 | if (job->ret == 0 && job->driver->prepare) { | ||
792 | job_unlock(); | ||
793 | + aio_context_acquire(ctx); | ||
794 | ret = job->driver->prepare(job); | ||
795 | + aio_context_release(ctx); | ||
796 | job_lock(); | ||
797 | job->ret = ret; | ||
798 | job_update_rc_locked(job); | ||
799 | } | ||
800 | + | ||
801 | return job->ret; | ||
802 | } | ||
803 | |||
804 | @@ -XXX,XX +XXX,XX @@ static void job_completed_locked(Job *job) | ||
805 | static void job_exit(void *opaque) | ||
806 | { | ||
807 | Job *job = (Job *)opaque; | ||
808 | - AioContext *ctx; | ||
809 | JOB_LOCK_GUARD(); | ||
810 | - | ||
811 | job_ref_locked(job); | ||
812 | - aio_context_acquire(job->aio_context); | ||
813 | |||
814 | /* This is a lie, we're not quiescent, but still doing the completion | ||
815 | * callbacks. However, completion callbacks tend to involve operations that | ||
816 | @@ -XXX,XX +XXX,XX @@ static void job_exit(void *opaque) | ||
817 | job_event_idle_locked(job); | ||
818 | |||
819 | job_completed_locked(job); | ||
820 | - | ||
821 | - /* | ||
822 | - * Note that calling job_completed can move the job to a different | ||
823 | - * aio_context, so we cannot cache from above. job_txn_apply takes care of | ||
824 | - * acquiring the new lock, and we ref/unref to avoid job_completed freeing | ||
825 | - * the job underneath us. | ||
826 | - */ | ||
827 | - ctx = job->aio_context; | ||
828 | job_unref_locked(job); | ||
829 | - aio_context_release(ctx); | ||
830 | } | ||
831 | |||
832 | /** | ||
833 | @@ -XXX,XX +XXX,XX @@ int job_cancel_sync(Job *job, bool force) | ||
834 | void job_cancel_sync_all(void) | ||
835 | { | ||
836 | Job *job; | ||
837 | - AioContext *aio_context; | ||
838 | JOB_LOCK_GUARD(); | ||
839 | |||
840 | while ((job = job_next_locked(NULL))) { | ||
841 | - aio_context = job->aio_context; | ||
842 | - aio_context_acquire(aio_context); | ||
843 | job_cancel_sync_locked(job, true); | ||
844 | - aio_context_release(aio_context); | ||
845 | } | ||
846 | } | ||
847 | |||
848 | @@ -XXX,XX +XXX,XX @@ int job_finish_sync_locked(Job *job, | ||
849 | } | ||
850 | |||
851 | job_unlock(); | ||
852 | - AIO_WAIT_WHILE(job->aio_context, | ||
853 | - (job_enter(job), !job_is_completed(job))); | ||
854 | + AIO_WAIT_WHILE_UNLOCKED(job->aio_context, | ||
855 | + (job_enter(job), !job_is_completed(job))); | ||
856 | job_lock(); | ||
857 | |||
858 | ret = (job_is_cancelled_locked(job) && job->ret == 0) | ||
29 | diff --git a/qemu-img.c b/qemu-img.c | 859 | diff --git a/qemu-img.c b/qemu-img.c |
30 | index XXXXXXX..XXXXXXX 100644 | 860 | index XXXXXXX..XXXXXXX 100644 |
31 | --- a/qemu-img.c | 861 | --- a/qemu-img.c |
32 | +++ b/qemu-img.c | 862 | +++ b/qemu-img.c |
33 | @@ -XXX,XX +XXX,XX @@ static void set_rate_limit(BlockBackend *blk, int64_t rate_limit) | 863 | @@ -XXX,XX +XXX,XX @@ static void run_block_job(BlockJob *job, Error **errp) |
34 | 864 | AioContext *aio_context = block_job_get_aio_context(job); | |
35 | static int img_convert(int argc, char **argv) | 865 | int ret = 0; |
36 | { | 866 | |
37 | - int c, bs_i, flags, src_flags = 0; | 867 | - aio_context_acquire(aio_context); |
38 | + int c, bs_i, flags, src_flags = BDRV_O_NO_SHARE; | 868 | job_lock(); |
39 | const char *fmt = NULL, *out_fmt = NULL, *cache = "unsafe", | 869 | job_ref_locked(&job->job); |
40 | *src_cache = BDRV_DEFAULT_CACHE, *out_baseimg = NULL, | 870 | do { |
41 | *out_filename, *out_baseimg_param, *snapshot_name = NULL; | 871 | @@ -XXX,XX +XXX,XX @@ static void run_block_job(BlockJob *job, Error **errp) |
872 | } | ||
873 | job_unref_locked(&job->job); | ||
874 | job_unlock(); | ||
875 | - aio_context_release(aio_context); | ||
876 | |||
877 | /* publish completion progress only when success */ | ||
878 | if (!ret) { | ||
879 | diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c | ||
880 | index XXXXXXX..XXXXXXX 100644 | ||
881 | --- a/tests/unit/test-bdrv-drain.c | ||
882 | +++ b/tests/unit/test-bdrv-drain.c | ||
883 | @@ -XXX,XX +XXX,XX @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, | ||
884 | tjob->prepare_ret = -EIO; | ||
885 | break; | ||
886 | } | ||
887 | + aio_context_release(ctx); | ||
888 | |||
889 | job_start(&job->job); | ||
890 | - aio_context_release(ctx); | ||
891 | |||
892 | if (use_iothread) { | ||
893 | /* job_co_entry() is run in the I/O thread, wait for the actual job | ||
894 | @@ -XXX,XX +XXX,XX @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, | ||
895 | g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */ | ||
896 | } | ||
897 | |||
898 | - aio_context_acquire(ctx); | ||
899 | WITH_JOB_LOCK_GUARD() { | ||
900 | ret = job_complete_sync_locked(&job->job, &error_abort); | ||
901 | } | ||
902 | g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO)); | ||
903 | |||
904 | + aio_context_acquire(ctx); | ||
905 | if (use_iothread) { | ||
906 | blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort); | ||
907 | assert(blk_get_aio_context(blk_target) == qemu_get_aio_context()); | ||
908 | diff --git a/tests/unit/test-block-iothread.c b/tests/unit/test-block-iothread.c | ||
909 | index XXXXXXX..XXXXXXX 100644 | ||
910 | --- a/tests/unit/test-block-iothread.c | ||
911 | +++ b/tests/unit/test-block-iothread.c | ||
912 | @@ -XXX,XX +XXX,XX @@ static void test_attach_blockjob(void) | ||
913 | aio_poll(qemu_get_aio_context(), false); | ||
914 | } | ||
915 | |||
916 | - aio_context_acquire(ctx); | ||
917 | WITH_JOB_LOCK_GUARD() { | ||
918 | job_complete_sync_locked(&tjob->common.job, &error_abort); | ||
919 | } | ||
920 | + aio_context_acquire(ctx); | ||
921 | blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort); | ||
922 | aio_context_release(ctx); | ||
923 | |||
924 | diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c | ||
925 | index XXXXXXX..XXXXXXX 100644 | ||
926 | --- a/tests/unit/test-blockjob.c | ||
927 | +++ b/tests/unit/test-blockjob.c | ||
928 | @@ -XXX,XX +XXX,XX @@ static void cancel_common(CancelJob *s) | ||
929 | BlockJob *job = &s->common; | ||
930 | BlockBackend *blk = s->blk; | ||
931 | JobStatus sts = job->job.status; | ||
932 | - AioContext *ctx; | ||
933 | - | ||
934 | - ctx = job->job.aio_context; | ||
935 | - aio_context_acquire(ctx); | ||
936 | + AioContext *ctx = job->job.aio_context; | ||
937 | |||
938 | job_cancel_sync(&job->job, true); | ||
939 | WITH_JOB_LOCK_GUARD() { | ||
940 | @@ -XXX,XX +XXX,XX @@ static void cancel_common(CancelJob *s) | ||
941 | assert(job->job.status == JOB_STATUS_NULL); | ||
942 | job_unref_locked(&job->job); | ||
943 | } | ||
944 | - destroy_blk(blk); | ||
945 | |||
946 | + aio_context_acquire(ctx); | ||
947 | + destroy_blk(blk); | ||
948 | aio_context_release(ctx); | ||
949 | + | ||
950 | } | ||
951 | |||
952 | static void test_cancel_created(void) | ||
953 | @@ -XXX,XX +XXX,XX @@ static void test_cancel_concluded(void) | ||
954 | aio_poll(qemu_get_aio_context(), true); | ||
955 | assert_job_status_is(job, JOB_STATUS_PENDING); | ||
956 | |||
957 | - aio_context_acquire(job->aio_context); | ||
958 | WITH_JOB_LOCK_GUARD() { | ||
959 | job_finalize_locked(job, &error_abort); | ||
960 | + assert(job->status == JOB_STATUS_CONCLUDED); | ||
961 | } | ||
962 | - aio_context_release(job->aio_context); | ||
963 | - assert_job_status_is(job, JOB_STATUS_CONCLUDED); | ||
964 | |||
965 | cancel_common(s); | ||
966 | } | ||
967 | @@ -XXX,XX +XXX,XX @@ static void test_complete_in_standby(void) | ||
968 | |||
969 | /* Wait for the job to become READY */ | ||
970 | job_start(job); | ||
971 | - aio_context_acquire(ctx); | ||
972 | /* | ||
973 | * Here we are waiting for the status to change, so don't bother | ||
974 | * protecting the read every time. | ||
975 | */ | ||
976 | - AIO_WAIT_WHILE(ctx, job->status != JOB_STATUS_READY); | ||
977 | - aio_context_release(ctx); | ||
978 | + AIO_WAIT_WHILE_UNLOCKED(ctx, job->status != JOB_STATUS_READY); | ||
979 | |||
980 | /* Begin the drained section, pausing the job */ | ||
981 | bdrv_drain_all_begin(); | ||
982 | @@ -XXX,XX +XXX,XX @@ static void test_complete_in_standby(void) | ||
983 | aio_context_acquire(ctx); | ||
984 | /* This will schedule the job to resume it */ | ||
985 | bdrv_drain_all_end(); | ||
986 | + aio_context_release(ctx); | ||
987 | |||
988 | WITH_JOB_LOCK_GUARD() { | ||
989 | /* But the job cannot run, so it will remain on standby */ | ||
990 | @@ -XXX,XX +XXX,XX @@ static void test_complete_in_standby(void) | ||
991 | job_dismiss_locked(&job, &error_abort); | ||
992 | } | ||
993 | |||
994 | + aio_context_acquire(ctx); | ||
995 | destroy_blk(blk); | ||
996 | aio_context_release(ctx); | ||
997 | iothread_join(iothread); | ||
42 | -- | 998 | -- |
43 | 2.30.2 | 999 | 2.37.3 |
44 | |||
45 | diff view generated by jsdifflib |
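After the right-hand conversion, every QMP job handler ends up with the same shape: the whole body runs under JOB_LOCK_GUARD() and no AioContext lock is acquired or released anywhere. The job-qmp.c pause handler, reconstructed from the hunks above (the diff elides some context, so treat this as a sketch rather than a verbatim quote), shows the pattern:

```c
void qmp_job_pause(const char *id, Error **errp)
{
    Job *job;

    JOB_LOCK_GUARD();                  /* replaces aio_context_acquire() */
    job = find_job_locked(id, errp);   /* no AioContext out-parameter any more */
    if (!job) {
        return;
    }

    trace_qmp_job_pause(job);
    job_user_pause_locked(job, errp);
    /* no aio_context_release() needed on any return path */
}
```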
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Passing parent aio context is redundant, as child_class and parent | 3 | Not sure what the atomic here was supposed to do, since job.busy |
4 | opaque pointer are enough to retrieve it. Drop the argument and use new | 4 | is protected by the job lock. Since the whole function |
5 | bdrv_child_get_parent_aio_context() interface. | 5 | is called under job_mutex, just remove the atomic. |
6 | 6 | ||
7 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 7 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
8 | Reviewed-by: Alberto Garcia <berto@igalia.com> | 8 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> |
9 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 10 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
10 | Message-Id: <20210428151804.439460-7-vsementsov@virtuozzo.com> | 11 | Message-Id: <20220926093214.506243-20-eesposit@redhat.com> |
11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
12 | --- | 13 | --- |
13 | include/block/block_int.h | 1 - | 14 | blockjob.c | 2 +- |
14 | block.c | 8 +++++--- | 15 | 1 file changed, 1 insertion(+), 1 deletion(-) |
15 | block/block-backend.c | 4 ++-- | ||
16 | blockjob.c | 3 +-- | ||
17 | 4 files changed, 8 insertions(+), 8 deletions(-) | ||
18 | 16 | ||
19 | diff --git a/include/block/block_int.h b/include/block/block_int.h | ||
20 | index XXXXXXX..XXXXXXX 100644 | ||
21 | --- a/include/block/block_int.h | ||
22 | +++ b/include/block/block_int.h | ||
23 | @@ -XXX,XX +XXX,XX @@ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs, | ||
24 | const char *child_name, | ||
25 | const BdrvChildClass *child_class, | ||
26 | BdrvChildRole child_role, | ||
27 | - AioContext *ctx, | ||
28 | uint64_t perm, uint64_t shared_perm, | ||
29 | void *opaque, Error **errp); | ||
30 | void bdrv_root_unref_child(BdrvChild *child); | ||
31 | diff --git a/block.c b/block.c | ||
32 | index XXXXXXX..XXXXXXX 100644 | ||
33 | --- a/block.c | ||
34 | +++ b/block.c | ||
35 | @@ -XXX,XX +XXX,XX @@ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs, | ||
36 | const char *child_name, | ||
37 | const BdrvChildClass *child_class, | ||
38 | BdrvChildRole child_role, | ||
39 | - AioContext *ctx, | ||
40 | uint64_t perm, uint64_t shared_perm, | ||
41 | void *opaque, Error **errp) | ||
42 | { | ||
43 | BdrvChild *child; | ||
44 | Error *local_err = NULL; | ||
45 | int ret; | ||
46 | + AioContext *ctx; | ||
47 | |||
48 | ret = bdrv_check_update_perm(child_bs, NULL, perm, shared_perm, NULL, errp); | ||
49 | if (ret < 0) { | ||
50 | @@ -XXX,XX +XXX,XX @@ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs, | ||
51 | .opaque = opaque, | ||
52 | }; | ||
53 | |||
54 | + ctx = bdrv_child_get_parent_aio_context(child); | ||
55 | + | ||
56 | /* If the AioContexts don't match, first try to move the subtree of | ||
57 | * child_bs into the AioContext of the new parent. If this doesn't work, | ||
58 | * try moving the parent into the AioContext of child_bs instead. */ | ||
59 | @@ -XXX,XX +XXX,XX @@ BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs, | ||
60 | perm, shared_perm, &perm, &shared_perm); | ||
61 | |||
62 | child = bdrv_root_attach_child(child_bs, child_name, child_class, | ||
63 | - child_role, bdrv_get_aio_context(parent_bs), | ||
64 | - perm, shared_perm, parent_bs, errp); | ||
65 | + child_role, perm, shared_perm, parent_bs, | ||
66 | + errp); | ||
67 | if (child == NULL) { | ||
68 | return NULL; | ||
69 | } | ||
70 | diff --git a/block/block-backend.c b/block/block-backend.c | ||
71 | index XXXXXXX..XXXXXXX 100644 | ||
72 | --- a/block/block-backend.c | ||
73 | +++ b/block/block-backend.c | ||
74 | @@ -XXX,XX +XXX,XX @@ BlockBackend *blk_new_open(const char *filename, const char *reference, | ||
75 | |||
76 | blk->root = bdrv_root_attach_child(bs, "root", &child_root, | ||
77 | BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, | ||
78 | - blk->ctx, perm, BLK_PERM_ALL, blk, errp); | ||
79 | + perm, BLK_PERM_ALL, blk, errp); | ||
80 | if (!blk->root) { | ||
81 | blk_unref(blk); | ||
82 | return NULL; | ||
83 | @@ -XXX,XX +XXX,XX @@ int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp) | ||
84 | bdrv_ref(bs); | ||
85 | blk->root = bdrv_root_attach_child(bs, "root", &child_root, | ||
86 | BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, | ||
87 | - blk->ctx, blk->perm, blk->shared_perm, | ||
88 | + blk->perm, blk->shared_perm, | ||
89 | blk, errp); | ||
90 | if (blk->root == NULL) { | ||
91 | return -EPERM; | ||
92 | diff --git a/blockjob.c b/blockjob.c | 17 | diff --git a/blockjob.c b/blockjob.c |
93 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
94 | --- a/blockjob.c | 19 | --- a/blockjob.c |
95 | +++ b/blockjob.c | 20 | +++ b/blockjob.c |
96 | @@ -XXX,XX +XXX,XX @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs, | 21 | @@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp) |
97 | if (need_context_ops && job->job.aio_context != qemu_get_aio_context()) { | 22 | info = g_new0(BlockJobInfo, 1); |
98 | aio_context_release(job->job.aio_context); | 23 | info->type = g_strdup(job_type_str(&job->job)); |
99 | } | 24 | info->device = g_strdup(job->job.id); |
100 | - c = bdrv_root_attach_child(bs, name, &child_job, 0, | 25 | - info->busy = qatomic_read(&job->job.busy); |
101 | - job->job.aio_context, perm, shared_perm, job, | 26 | + info->busy = job->job.busy; |
102 | + c = bdrv_root_attach_child(bs, name, &child_job, 0, perm, shared_perm, job, | 27 | info->paused = job->job.pause_count > 0; |
103 | errp); | 28 | info->offset = progress_current; |
104 | if (need_context_ops && job->job.aio_context != qemu_get_aio_context()) { | 29 | info->len = progress_total; |
105 | aio_context_acquire(job->job.aio_context); | ||
106 | -- | 30 | -- |
107 | 2.30.2 | 31 | 2.37.3 |
108 | |||
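A side note on the qatomic_read(&job->job.busy) removal in the right-hand patch above: once a field is only ever accessed with its mutex held, a plain read is enough. The following standalone sketch (plain C and pthreads, illustrative names only, not QEMU code) shows that pattern.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for Job: 'busy' is protected by 'lock'. */
    typedef struct {
        pthread_mutex_t lock;
        bool busy;
    } FakeJob;

    static FakeJob fake_job = { .lock = PTHREAD_MUTEX_INITIALIZER, .busy = true };

    /* Caller must hold fake_job.lock, so no atomic access is needed here. */
    static bool fake_job_busy_locked(FakeJob *job)
    {
        return job->busy;
    }

    int main(void)
    {
        pthread_mutex_lock(&fake_job.lock);
        printf("busy=%d\n", fake_job_busy_locked(&fake_job));
        pthread_mutex_unlock(&fake_job.lock);
        return 0;
    }
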
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 3 | These public functions are not used anywhere, thus can be dropped. |
4 | |||
5 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> | ||
6 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
4 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 7 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
5 | Message-Id: <20210428151804.439460-32-vsementsov@virtuozzo.com> | 8 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> |
9 | Message-Id: <20220926093214.506243-21-eesposit@redhat.com> | ||
6 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
7 | --- | 11 | --- |
8 | block.c | 103 -------------------------------------------------------- | 12 | include/block/blockjob.h | 31 ++++++++++++------------------- |
9 | 1 file changed, 103 deletions(-) | 13 | blockjob.c | 16 ++-------------- |
14 | 2 files changed, 14 insertions(+), 33 deletions(-) | ||
10 | 15 | ||
11 | diff --git a/block.c b/block.c | 16 | diff --git a/include/block/blockjob.h b/include/block/blockjob.h |
12 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/block.c | 18 | --- a/include/block/blockjob.h |
14 | +++ b/block.c | 19 | +++ b/include/block/blockjob.h |
15 | @@ -XXX,XX +XXX,XX @@ static int bdrv_fill_options(QDict **options, const char *filename, | 20 | @@ -XXX,XX +XXX,XX @@ typedef struct BlockJob { |
16 | return 0; | 21 | */ |
22 | |||
23 | /** | ||
24 | - * block_job_next: | ||
25 | + * block_job_next_locked: | ||
26 | * @job: A block job, or %NULL. | ||
27 | * | ||
28 | * Get the next element from the list of block jobs after @job, or the | ||
29 | * first one if @job is %NULL. | ||
30 | * | ||
31 | * Returns the requested job, or %NULL if there are no more jobs left. | ||
32 | + * Called with job lock held. | ||
33 | */ | ||
34 | -BlockJob *block_job_next(BlockJob *job); | ||
35 | - | ||
36 | -/* Same as block_job_next(), but called with job lock held. */ | ||
37 | BlockJob *block_job_next_locked(BlockJob *job); | ||
38 | |||
39 | /** | ||
40 | @@ -XXX,XX +XXX,XX @@ BlockJob *block_job_next_locked(BlockJob *job); | ||
41 | * Get the block job identified by @id (which must not be %NULL). | ||
42 | * | ||
43 | * Returns the requested job, or %NULL if it doesn't exist. | ||
44 | + * Called with job lock *not* held. | ||
45 | */ | ||
46 | BlockJob *block_job_get(const char *id); | ||
47 | |||
48 | @@ -XXX,XX +XXX,XX @@ void block_job_remove_all_bdrv(BlockJob *job); | ||
49 | bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs); | ||
50 | |||
51 | /** | ||
52 | - * block_job_set_speed: | ||
53 | + * block_job_set_speed_locked: | ||
54 | * @job: The job to set the speed for. | ||
55 | * @speed: The new value | ||
56 | * @errp: Error object. | ||
57 | * | ||
58 | * Set a rate-limiting parameter for the job; the actual meaning may | ||
59 | * vary depending on the job type. | ||
60 | - */ | ||
61 | -bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp); | ||
62 | - | ||
63 | -/* | ||
64 | - * Same as block_job_set_speed(), but called with job lock held. | ||
65 | - * Might release the lock temporarily. | ||
66 | + * | ||
67 | + * Called with job lock held, but might release it temporarily. | ||
68 | */ | ||
69 | bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp); | ||
70 | |||
71 | /** | ||
72 | - * block_job_query: | ||
73 | + * block_job_query_locked: | ||
74 | * @job: The job to get information about. | ||
75 | * | ||
76 | * Return information about a job. | ||
77 | + * | ||
78 | + * Called with job lock held. | ||
79 | */ | ||
80 | -BlockJobInfo *block_job_query(BlockJob *job, Error **errp); | ||
81 | - | ||
82 | -/* Same as block_job_query(), but called with job lock held. */ | ||
83 | BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp); | ||
84 | |||
85 | /** | ||
86 | - * block_job_iostatus_reset: | ||
87 | + * block_job_iostatus_reset_locked: | ||
88 | * @job: The job whose I/O status should be reset. | ||
89 | * | ||
90 | * Reset I/O status on @job and on BlockDriverState objects it uses, | ||
91 | * other than job->blk. | ||
92 | + * | ||
93 | + * Called with job lock held. | ||
94 | */ | ||
95 | -void block_job_iostatus_reset(BlockJob *job); | ||
96 | - | ||
97 | -/* Same as block_job_iostatus_reset(), but called with job lock held. */ | ||
98 | void block_job_iostatus_reset_locked(BlockJob *job); | ||
99 | |||
100 | /* | ||
101 | diff --git a/blockjob.c b/blockjob.c | ||
102 | index XXXXXXX..XXXXXXX 100644 | ||
103 | --- a/blockjob.c | ||
104 | +++ b/blockjob.c | ||
105 | @@ -XXX,XX +XXX,XX @@ BlockJob *block_job_next_locked(BlockJob *bjob) | ||
106 | return job ? container_of(job, BlockJob, job) : NULL; | ||
17 | } | 107 | } |
18 | 108 | ||
19 | -static int bdrv_check_update_perm(BlockDriverState *bs, BlockReopenQueue *q, | 109 | -BlockJob *block_job_next(BlockJob *bjob) |
20 | - uint64_t new_used_perm, | ||
21 | - uint64_t new_shared_perm, | ||
22 | - Error **errp); | ||
23 | - | ||
24 | typedef struct BlockReopenQueueEntry { | ||
25 | bool prepared; | ||
26 | bool perms_checked; | ||
27 | @@ -XXX,XX +XXX,XX @@ static int bdrv_check_perm_common(GSList *list, BlockReopenQueue *q, | ||
28 | return 0; | ||
29 | } | ||
30 | |||
31 | -static int bdrv_check_perm(BlockDriverState *bs, BlockReopenQueue *q, | ||
32 | - uint64_t cumulative_perms, | ||
33 | - uint64_t cumulative_shared_perms, Error **errp) | ||
34 | -{ | 110 | -{ |
35 | - g_autoptr(GSList) list = bdrv_topological_dfs(NULL, NULL, bs); | 111 | - JOB_LOCK_GUARD(); |
36 | - return bdrv_check_perm_common(list, q, true, cumulative_perms, | 112 | - return block_job_next_locked(bjob); |
37 | - cumulative_shared_perms, NULL, errp); | ||
38 | -} | 113 | -} |
39 | - | 114 | - |
40 | static int bdrv_list_refresh_perms(GSList *list, BlockReopenQueue *q, | 115 | BlockJob *block_job_get_locked(const char *id) |
41 | Transaction *tran, Error **errp) | ||
42 | { | 116 | { |
43 | return bdrv_check_perm_common(list, q, false, 0, 0, tran, errp); | 117 | Job *job = job_get_locked(id); |
118 | @@ -XXX,XX +XXX,XX @@ bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp) | ||
119 | return true; | ||
44 | } | 120 | } |
45 | 121 | ||
46 | -/* | 122 | -bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) |
47 | - * Notifies drivers that after a previous bdrv_check_perm() call, the | 123 | +static bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) |
48 | - * permission update is not performed and any preparations made for it (e.g. | 124 | { |
49 | - * taken file locks) need to be undone. | 125 | JOB_LOCK_GUARD(); |
50 | - */ | 126 | return block_job_set_speed_locked(job, speed, errp); |
51 | -static void bdrv_node_abort_perm_update(BlockDriverState *bs) | 127 | @@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp) |
128 | return info; | ||
129 | } | ||
130 | |||
131 | -BlockJobInfo *block_job_query(BlockJob *job, Error **errp) | ||
52 | -{ | 132 | -{ |
53 | - BlockDriver *drv = bs->drv; | 133 | - JOB_LOCK_GUARD(); |
54 | - BdrvChild *c; | 134 | - return block_job_query_locked(job, errp); |
55 | - | ||
56 | - if (!drv) { | ||
57 | - return; | ||
58 | - } | ||
59 | - | ||
60 | - bdrv_drv_set_perm_abort(bs); | ||
61 | - | ||
62 | - QLIST_FOREACH(c, &bs->children, next) { | ||
63 | - bdrv_child_set_perm_abort(c); | ||
64 | - } | ||
65 | -} | 135 | -} |
66 | - | 136 | - |
67 | -static void bdrv_list_abort_perm_update(GSList *list) | 137 | /* Called with job lock held */ |
68 | -{ | 138 | static void block_job_iostatus_set_err_locked(BlockJob *job, int error) |
69 | - for ( ; list; list = list->next) { | ||
70 | - bdrv_node_abort_perm_update((BlockDriverState *)list->data); | ||
71 | - } | ||
72 | -} | ||
73 | - | ||
74 | -__attribute__((unused)) | ||
75 | -static void bdrv_abort_perm_update(BlockDriverState *bs) | ||
76 | -{ | ||
77 | - g_autoptr(GSList) list = bdrv_topological_dfs(NULL, NULL, bs); | ||
78 | - return bdrv_list_abort_perm_update(list); | ||
79 | -} | ||
80 | - | ||
81 | static void bdrv_node_set_perm(BlockDriverState *bs) | ||
82 | { | 139 | { |
83 | BlockDriver *drv = bs->drv; | 140 | @@ -XXX,XX +XXX,XX @@ void block_job_iostatus_reset_locked(BlockJob *job) |
84 | @@ -XXX,XX +XXX,XX @@ char *bdrv_perm_names(uint64_t perm) | 141 | job->iostatus = BLOCK_DEVICE_IO_STATUS_OK; |
85 | return g_string_free(result, FALSE); | ||
86 | } | 142 | } |
87 | 143 | ||
88 | -/* | 144 | -void block_job_iostatus_reset(BlockJob *job) |
89 | - * Checks whether a new reference to @bs can be added if the new user requires | 145 | +static void block_job_iostatus_reset(BlockJob *job) |
90 | - * @new_used_perm/@new_shared_perm as its permissions. If @ignore_children is | ||
91 | - * set, the BdrvChild objects in this list are ignored in the calculations; | ||
92 | - * this allows checking permission updates for an existing reference. | ||
93 | - * | ||
94 | - * Needs to be followed by a call to either bdrv_set_perm() or | ||
95 | - * bdrv_abort_perm_update(). */ | ||
96 | -__attribute__((unused)) | ||
97 | -static int bdrv_check_update_perm(BlockDriverState *bs, BlockReopenQueue *q, | ||
98 | - uint64_t new_used_perm, | ||
99 | - uint64_t new_shared_perm, | ||
100 | - Error **errp) | ||
101 | -{ | ||
102 | - BdrvChild *c; | ||
103 | - uint64_t cumulative_perms = new_used_perm; | ||
104 | - uint64_t cumulative_shared_perms = new_shared_perm; | ||
105 | - | ||
106 | - | ||
107 | - /* There is no reason why anyone couldn't tolerate write_unchanged */ | ||
108 | - assert(new_shared_perm & BLK_PERM_WRITE_UNCHANGED); | ||
109 | - | ||
110 | - QLIST_FOREACH(c, &bs->parents, next_parent) { | ||
111 | - if ((new_used_perm & c->shared_perm) != new_used_perm) { | ||
112 | - char *user = bdrv_child_user_desc(c); | ||
113 | - char *perm_names = bdrv_perm_names(new_used_perm & ~c->shared_perm); | ||
114 | - | ||
115 | - error_setg(errp, "Conflicts with use by %s as '%s', which does not " | ||
116 | - "allow '%s' on %s", | ||
117 | - user, c->name, perm_names, bdrv_get_node_name(c->bs)); | ||
118 | - g_free(user); | ||
119 | - g_free(perm_names); | ||
120 | - return -EPERM; | ||
121 | - } | ||
122 | - | ||
123 | - if ((c->perm & new_shared_perm) != c->perm) { | ||
124 | - char *user = bdrv_child_user_desc(c); | ||
125 | - char *perm_names = bdrv_perm_names(c->perm & ~new_shared_perm); | ||
126 | - | ||
127 | - error_setg(errp, "Conflicts with use by %s as '%s', which uses " | ||
128 | - "'%s' on %s", | ||
129 | - user, c->name, perm_names, bdrv_get_node_name(c->bs)); | ||
130 | - g_free(user); | ||
131 | - g_free(perm_names); | ||
132 | - return -EPERM; | ||
133 | - } | ||
134 | - | ||
135 | - cumulative_perms |= c->perm; | ||
136 | - cumulative_shared_perms &= c->shared_perm; | ||
137 | - } | ||
138 | - | ||
139 | - return bdrv_check_perm(bs, q, cumulative_perms, cumulative_shared_perms, | ||
140 | - errp); | ||
141 | -} | ||
142 | |||
143 | static int bdrv_refresh_perms(BlockDriverState *bs, Error **errp) | ||
144 | { | 146 | { |
147 | JOB_LOCK_GUARD(); | ||
148 | block_job_iostatus_reset_locked(job); | ||
145 | -- | 149 | -- |
146 | 2.30.2 | 150 | 2.37.3 |
147 | |||
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | This argument is always NULL. Drop it. | 3 | These public functions are not used anywhere, thus can be dropped. |
4 | Also, since this is the final job API that doesn't use AioContext | ||
5 | lock and replaces it with job_lock, adjust all remaining function | ||
6 | documentation to clearly specify if the job lock is taken or not. | ||
4 | 7 | ||
5 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 8 | Also document the locking requirements for a few functions |
9 | where the second version is not removed. | ||
10 | |||
11 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> | ||
12 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
6 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 13 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
7 | Message-Id: <20210428151804.439460-26-vsementsov@virtuozzo.com> | 14 | Message-Id: <20220926093214.506243-22-eesposit@redhat.com> |
15 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
8 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 16 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
9 | --- | 17 | --- |
10 | block.c | 38 +++++++++++--------------------------- | 18 | include/qemu/job.h | 110 +++++++++++++------------------------ |
11 | 1 file changed, 11 insertions(+), 27 deletions(-) | 19 | job.c | 107 ++---------------------------------- |
20 | tests/unit/test-blockjob.c | 4 +- | ||
21 | 3 files changed, 46 insertions(+), 175 deletions(-) | ||
12 | 22 | ||
13 | diff --git a/block.c b/block.c | 23 | diff --git a/include/qemu/job.h b/include/qemu/job.h |
14 | index XXXXXXX..XXXXXXX 100644 | 24 | index XXXXXXX..XXXXXXX 100644 |
15 | --- a/block.c | 25 | --- a/include/qemu/job.h |
16 | +++ b/block.c | 26 | +++ b/include/qemu/job.h |
17 | @@ -XXX,XX +XXX,XX @@ static int bdrv_fill_options(QDict **options, const char *filename, | 27 | @@ -XXX,XX +XXX,XX @@ JobTxn *job_txn_new(void); |
18 | static int bdrv_check_update_perm(BlockDriverState *bs, BlockReopenQueue *q, | 28 | /** |
19 | uint64_t new_used_perm, | 29 | * Release a reference that was previously acquired with job_txn_add_job or |
20 | uint64_t new_shared_perm, | 30 | * job_txn_new. If it's the last reference to the object, it will be freed. |
21 | - GSList *ignore_children, | 31 | + * |
22 | Error **errp); | 32 | + * Called with job lock *not* held. |
23 | 33 | */ | |
24 | typedef struct BlockReopenQueueEntry { | 34 | void job_txn_unref(JobTxn *txn); |
25 | @@ -XXX,XX +XXX,XX @@ static bool bdrv_a_allow_b(BdrvChild *a, BdrvChild *b, Error **errp) | 35 | |
36 | @@ -XXX,XX +XXX,XX @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn, | ||
37 | /** | ||
38 | * Add a reference to Job refcnt, it will be decreased with job_unref, and then | ||
39 | * be freed if it comes to be the last reference. | ||
40 | + * | ||
41 | + * Called with job lock held. | ||
42 | */ | ||
43 | -void job_ref(Job *job); | ||
44 | - | ||
45 | -/* Same as job_ref(), but called with job lock held. */ | ||
46 | void job_ref_locked(Job *job); | ||
47 | |||
48 | /** | ||
49 | - * Release a reference that was previously acquired with job_ref() or | ||
50 | + * Release a reference that was previously acquired with job_ref_locked() or | ||
51 | * job_create(). If it's the last reference to the object, it will be freed. | ||
52 | * | ||
53 | * Takes AioContext lock internally to invoke a job->driver callback. | ||
54 | + * Called with job lock held. | ||
55 | */ | ||
56 | -void job_unref(Job *job); | ||
57 | - | ||
58 | -/* Same as job_unref(), but called with job lock held. */ | ||
59 | void job_unref_locked(Job *job); | ||
60 | |||
61 | /** | ||
62 | @@ -XXX,XX +XXX,XX @@ void job_progress_increase_remaining(Job *job, uint64_t delta); | ||
63 | * Conditionally enter the job coroutine if the job is ready to run, not | ||
64 | * already busy and fn() returns true. fn() is called while under the job_lock | ||
65 | * critical section. | ||
66 | - */ | ||
67 | -void job_enter_cond(Job *job, bool(*fn)(Job *job)); | ||
68 | - | ||
69 | -/* | ||
70 | - * Same as job_enter_cond(), but called with job lock held. | ||
71 | - * Might release the lock temporarily. | ||
72 | + * | ||
73 | + * Called with job lock held, but might release it temporarily. | ||
74 | */ | ||
75 | void job_enter_cond_locked(Job *job, bool(*fn)(Job *job)); | ||
76 | |||
77 | @@ -XXX,XX +XXX,XX @@ bool job_cancel_requested(Job *job); | ||
78 | |||
79 | /** | ||
80 | * Returns whether the job is in a completed state. | ||
81 | - * Called with job_mutex *not* held. | ||
82 | + * Called with job lock held. | ||
83 | */ | ||
84 | -bool job_is_completed(Job *job); | ||
85 | - | ||
86 | -/* Same as job_is_completed(), but called with job lock held. */ | ||
87 | bool job_is_completed_locked(Job *job); | ||
88 | |||
89 | /** | ||
90 | @@ -XXX,XX +XXX,XX @@ bool job_is_ready_locked(Job *job); | ||
91 | /** | ||
92 | * Request @job to pause at the next pause point. Must be paired with | ||
93 | * job_resume(). If the job is supposed to be resumed by user action, call | ||
94 | - * job_user_pause() instead. | ||
95 | + * job_user_pause_locked() instead. | ||
96 | + * | ||
97 | + * Called with job lock *not* held. | ||
98 | */ | ||
99 | void job_pause(Job *job); | ||
100 | |||
101 | /* Same as job_pause(), but called with job lock held. */ | ||
102 | void job_pause_locked(Job *job); | ||
103 | |||
104 | -/** Resumes a @job paused with job_pause. */ | ||
105 | +/** Resumes a @job paused with job_pause. Called with job lock *not* held. */ | ||
106 | void job_resume(Job *job); | ||
107 | |||
108 | /* | ||
109 | @@ -XXX,XX +XXX,XX @@ void job_resume_locked(Job *job); | ||
110 | /** | ||
111 | * Asynchronously pause the specified @job. | ||
112 | * Do not allow a resume until a matching call to job_user_resume. | ||
113 | + * Called with job lock held. | ||
114 | */ | ||
115 | -void job_user_pause(Job *job, Error **errp); | ||
116 | - | ||
117 | -/* Same as job_user_pause(), but called with job lock held. */ | ||
118 | void job_user_pause_locked(Job *job, Error **errp); | ||
119 | |||
120 | -/** Returns true if the job is user-paused. */ | ||
121 | -bool job_user_paused(Job *job); | ||
122 | - | ||
123 | -/* Same as job_user_paused(), but called with job lock held. */ | ||
124 | +/** | ||
125 | + * Returns true if the job is user-paused. | ||
126 | + * Called with job lock held. | ||
127 | + */ | ||
128 | bool job_user_paused_locked(Job *job); | ||
129 | |||
130 | /** | ||
131 | * Resume the specified @job. | ||
132 | - * Must be paired with a preceding job_user_pause. | ||
133 | - */ | ||
134 | -void job_user_resume(Job *job, Error **errp); | ||
135 | - | ||
136 | -/* | ||
137 | - * Same as job_user_resume(), but called with job lock held. | ||
138 | - * Might release the lock temporarily. | ||
139 | + * Must be paired with a preceding job_user_pause_locked. | ||
140 | + * Called with job lock held, but might release it temporarily. | ||
141 | */ | ||
142 | void job_user_resume_locked(Job *job, Error **errp); | ||
143 | |||
144 | @@ -XXX,XX +XXX,XX @@ void job_user_resume_locked(Job *job, Error **errp); | ||
145 | * first one if @job is %NULL. | ||
146 | * | ||
147 | * Returns the requested job, or %NULL if there are no more jobs left. | ||
148 | + * Called with job lock *not* held. | ||
149 | */ | ||
150 | Job *job_next(Job *job); | ||
151 | |||
152 | @@ -XXX,XX +XXX,XX @@ Job *job_next_locked(Job *job); | ||
153 | * Get the job identified by @id (which must not be %NULL). | ||
154 | * | ||
155 | * Returns the requested job, or %NULL if it doesn't exist. | ||
156 | + * Called with job lock held. | ||
157 | */ | ||
158 | -Job *job_get(const char *id); | ||
159 | - | ||
160 | -/* Same as job_get(), but called with job lock held. */ | ||
161 | Job *job_get_locked(const char *id); | ||
162 | |||
163 | /** | ||
164 | * Check whether the verb @verb can be applied to @job in its current state. | ||
165 | * Returns 0 if the verb can be applied; otherwise errp is set and -EPERM | ||
166 | * returned. | ||
167 | + * | ||
168 | + * Called with job lock held. | ||
169 | */ | ||
170 | -int job_apply_verb(Job *job, JobVerb verb, Error **errp); | ||
171 | - | ||
172 | -/* Same as job_apply_verb, but called with job lock held. */ | ||
173 | int job_apply_verb_locked(Job *job, JobVerb verb, Error **errp); | ||
174 | |||
175 | /** | ||
176 | @@ -XXX,XX +XXX,XX @@ void job_early_fail(Job *job); | ||
177 | */ | ||
178 | void job_transition_to_ready(Job *job); | ||
179 | |||
180 | -/** Asynchronously complete the specified @job. */ | ||
181 | -void job_complete(Job *job, Error **errp); | ||
182 | - | ||
183 | -/* | ||
184 | - * Same as job_complete(), but called with job lock held. | ||
185 | - * Might release the lock temporarily. | ||
186 | +/** | ||
187 | + * Asynchronously complete the specified @job. | ||
188 | + * Called with job lock held, but might release it temporarily. | ||
189 | */ | ||
190 | void job_complete_locked(Job *job, Error **errp); | ||
191 | |||
192 | /** | ||
193 | * Asynchronously cancel the specified @job. If @force is true, the job should | ||
194 | * be cancelled immediately without waiting for a consistent state. | ||
195 | + * Called with job lock held. | ||
196 | */ | ||
197 | -void job_cancel(Job *job, bool force); | ||
198 | - | ||
199 | -/* Same as job_cancel(), but called with job lock held. */ | ||
200 | void job_cancel_locked(Job *job, bool force); | ||
201 | |||
202 | /** | ||
203 | - * Cancels the specified job like job_cancel(), but may refuse to do so if the | ||
204 | - * operation isn't meaningful in the current state of the job. | ||
205 | + * Cancels the specified job like job_cancel_locked(), but may refuse | ||
206 | + * to do so if the operation isn't meaningful in the current state of the job. | ||
207 | + * Called with job lock held. | ||
208 | */ | ||
209 | -void job_user_cancel(Job *job, bool force, Error **errp); | ||
210 | - | ||
211 | -/* Same as job_user_cancel(), but called with job lock held. */ | ||
212 | void job_user_cancel_locked(Job *job, bool force, Error **errp); | ||
213 | |||
214 | /** | ||
215 | @@ -XXX,XX +XXX,XX @@ void job_cancel_sync_all(void); | ||
216 | |||
217 | /** | ||
218 | * @job: The job to be completed. | ||
219 | - * @errp: Error object which may be set by job_complete(); this is not | ||
220 | + * @errp: Error object which may be set by job_complete_locked(); this is not | ||
221 | * necessarily set on every error, the job return value has to be | ||
222 | * checked as well. | ||
223 | * | ||
224 | @@ -XXX,XX +XXX,XX @@ void job_cancel_sync_all(void); | ||
225 | * function). | ||
226 | * | ||
227 | * Returns the return value from the job. | ||
228 | - * Called with job_lock *not* held. | ||
229 | + * Called with job_lock held. | ||
230 | */ | ||
231 | -int job_complete_sync(Job *job, Error **errp); | ||
232 | - | ||
233 | -/* Same as job_complete_sync, but called with job lock held. */ | ||
234 | int job_complete_sync_locked(Job *job, Error **errp); | ||
235 | |||
236 | /** | ||
237 | @@ -XXX,XX +XXX,XX @@ int job_complete_sync_locked(Job *job, Error **errp); | ||
238 | * FIXME: Make the below statement universally true: | ||
239 | * For jobs that support the manual workflow mode, all graph changes that occur | ||
240 | * as a result will occur after this command and before a successful reply. | ||
241 | + * | ||
242 | + * Called with job lock held. | ||
243 | */ | ||
244 | -void job_finalize(Job *job, Error **errp); | ||
245 | - | ||
246 | -/* Same as job_finalize(), but called with job lock held. */ | ||
247 | void job_finalize_locked(Job *job, Error **errp); | ||
248 | |||
249 | /** | ||
250 | * Remove the concluded @job from the query list and resets the passed pointer | ||
251 | * to %NULL. Returns an error if the job is not actually concluded. | ||
252 | + * | ||
253 | + * Called with job lock held. | ||
254 | */ | ||
255 | -void job_dismiss(Job **job, Error **errp); | ||
256 | - | ||
257 | -/* Same as job_dismiss(), but called with job lock held. */ | ||
258 | void job_dismiss_locked(Job **job, Error **errp); | ||
259 | |||
260 | /** | ||
261 | @@ -XXX,XX +XXX,XX @@ void job_dismiss_locked(Job **job, Error **errp); | ||
262 | * Returns 0 if the job is successfully completed, -ECANCELED if the job was | ||
263 | * cancelled before completing, and -errno in other error cases. | ||
264 | * | ||
265 | - * Called with job_lock *not* held. | ||
266 | - */ | ||
267 | -int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), | ||
268 | - Error **errp); | ||
269 | - | ||
270 | -/* | ||
271 | - * Same as job_finish_sync(), but called with job lock held. | ||
272 | - * Might release the lock temporarily. | ||
273 | + * Called with job_lock held, but might release it temporarily. | ||
274 | */ | ||
275 | int job_finish_sync_locked(Job *job, void (*finish)(Job *, Error **errp), | ||
276 | Error **errp); | ||
277 | diff --git a/job.c b/job.c | ||
278 | index XXXXXXX..XXXXXXX 100644 | ||
279 | --- a/job.c | ||
280 | +++ b/job.c | ||
281 | @@ -XXX,XX +XXX,XX @@ int job_apply_verb_locked(Job *job, JobVerb verb, Error **errp) | ||
282 | return -EPERM; | ||
283 | } | ||
284 | |||
285 | -int job_apply_verb(Job *job, JobVerb verb, Error **errp) | ||
286 | -{ | ||
287 | - JOB_LOCK_GUARD(); | ||
288 | - return job_apply_verb_locked(job, verb, errp); | ||
289 | -} | ||
290 | - | ||
291 | JobType job_type(const Job *job) | ||
292 | { | ||
293 | return job->driver->job_type; | ||
294 | @@ -XXX,XX +XXX,XX @@ bool job_is_completed_locked(Job *job) | ||
26 | return false; | 295 | return false; |
27 | } | 296 | } |
28 | 297 | ||
29 | -static bool bdrv_parent_perms_conflict(BlockDriverState *bs, | 298 | -bool job_is_completed(Job *job) |
30 | - GSList *ignore_children, | 299 | +static bool job_is_completed(Job *job) |
31 | - Error **errp) | 300 | { |
32 | +static bool bdrv_parent_perms_conflict(BlockDriverState *bs, Error **errp) | 301 | JOB_LOCK_GUARD(); |
33 | { | 302 | return job_is_completed_locked(job); |
34 | BdrvChild *a, *b; | 303 | @@ -XXX,XX +XXX,XX @@ Job *job_get_locked(const char *id) |
35 | 304 | return NULL; | |
36 | @@ -XXX,XX +XXX,XX @@ static bool bdrv_parent_perms_conflict(BlockDriverState *bs, | 305 | } |
37 | * directions. | 306 | |
38 | */ | 307 | -Job *job_get(const char *id) |
39 | QLIST_FOREACH(a, &bs->parents, next_parent) { | 308 | -{ |
40 | - if (g_slist_find(ignore_children, a)) { | 309 | - JOB_LOCK_GUARD(); |
41 | - continue; | 310 | - return job_get_locked(id); |
42 | - } | 311 | -} |
43 | - | 312 | - |
44 | QLIST_FOREACH(b, &bs->parents, next_parent) { | 313 | void job_set_aio_context(Job *job, AioContext *ctx) |
45 | - if (a == b || g_slist_find(ignore_children, b)) { | 314 | { |
46 | + if (a == b) { | 315 | /* protect against read in job_finish_sync_locked and job_start */ |
47 | continue; | 316 | @@ -XXX,XX +XXX,XX @@ void job_ref_locked(Job *job) |
48 | } | 317 | ++job->refcnt; |
49 | 318 | } | |
50 | @@ -XXX,XX +XXX,XX @@ static void bdrv_replace_child_safe(BdrvChild *child, BlockDriverState *new_bs, | 319 | |
51 | static int bdrv_node_check_perm(BlockDriverState *bs, BlockReopenQueue *q, | 320 | -void job_ref(Job *job) |
52 | uint64_t cumulative_perms, | 321 | -{ |
53 | uint64_t cumulative_shared_perms, | 322 | - JOB_LOCK_GUARD(); |
54 | - GSList *ignore_children, | 323 | - job_ref_locked(job); |
55 | Transaction *tran, Error **errp) | 324 | -} |
56 | { | 325 | - |
57 | BlockDriver *drv = bs->drv; | 326 | void job_unref_locked(Job *job) |
58 | @@ -XXX,XX +XXX,XX @@ static int bdrv_check_perm_common(GSList *list, BlockReopenQueue *q, | 327 | { |
59 | bool use_cumulative_perms, | 328 | GLOBAL_STATE_CODE(); |
60 | uint64_t cumulative_perms, | 329 | @@ -XXX,XX +XXX,XX @@ void job_unref_locked(Job *job) |
61 | uint64_t cumulative_shared_perms, | 330 | } |
62 | - GSList *ignore_children, | 331 | } |
63 | Transaction *tran, Error **errp) | 332 | |
64 | { | 333 | -void job_unref(Job *job) |
65 | int ret; | 334 | -{ |
66 | @@ -XXX,XX +XXX,XX @@ static int bdrv_check_perm_common(GSList *list, BlockReopenQueue *q, | 335 | - JOB_LOCK_GUARD(); |
67 | 336 | - job_unref_locked(job); | |
68 | ret = bdrv_node_check_perm(bs, q, cumulative_perms, | 337 | -} |
69 | cumulative_shared_perms, | 338 | - |
70 | - ignore_children, tran, errp); | 339 | void job_progress_update(Job *job, uint64_t done) |
71 | + tran, errp); | 340 | { |
72 | if (ret < 0) { | 341 | progress_work_done(&job->progress, done); |
73 | return ret; | 342 | @@ -XXX,XX +XXX,XX @@ void job_enter_cond_locked(Job *job, bool(*fn)(Job *job)) |
74 | } | 343 | job_lock(); |
75 | @@ -XXX,XX +XXX,XX @@ static int bdrv_check_perm_common(GSList *list, BlockReopenQueue *q, | 344 | } |
76 | for ( ; list; list = list->next) { | 345 | |
77 | bs = list->data; | 346 | -void job_enter_cond(Job *job, bool(*fn)(Job *job)) |
78 | 347 | -{ | |
79 | - if (bdrv_parent_perms_conflict(bs, ignore_children, errp)) { | 348 | - JOB_LOCK_GUARD(); |
80 | + if (bdrv_parent_perms_conflict(bs, errp)) { | 349 | - job_enter_cond_locked(job, fn); |
81 | return -EINVAL; | 350 | -} |
82 | } | 351 | - |
83 | 352 | void job_enter(Job *job) | |
84 | @@ -XXX,XX +XXX,XX @@ static int bdrv_check_perm_common(GSList *list, BlockReopenQueue *q, | 353 | { |
85 | 354 | JOB_LOCK_GUARD(); | |
86 | ret = bdrv_node_check_perm(bs, q, cumulative_perms, | 355 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn job_pause_point(Job *job) |
87 | cumulative_shared_perms, | 356 | job_pause_point_locked(job); |
88 | - ignore_children, tran, errp); | 357 | } |
89 | + tran, errp); | 358 | |
90 | if (ret < 0) { | 359 | -static void coroutine_fn job_yield_locked(Job *job) |
91 | return ret; | 360 | +void coroutine_fn job_yield(Job *job) |
92 | } | 361 | { |
93 | @@ -XXX,XX +XXX,XX @@ static int bdrv_check_perm_common(GSList *list, BlockReopenQueue *q, | 362 | + JOB_LOCK_GUARD(); |
94 | 363 | assert(job->busy); | |
95 | static int bdrv_check_perm(BlockDriverState *bs, BlockReopenQueue *q, | 364 | |
96 | uint64_t cumulative_perms, | 365 | /* Check cancellation *before* setting busy = false, too! */ |
97 | - uint64_t cumulative_shared_perms, | 366 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn job_yield_locked(Job *job) |
98 | - GSList *ignore_children, Error **errp) | 367 | job_pause_point_locked(job); |
99 | + uint64_t cumulative_shared_perms, Error **errp) | 368 | } |
100 | { | 369 | |
101 | g_autoptr(GSList) list = bdrv_topological_dfs(NULL, NULL, bs); | 370 | -void coroutine_fn job_yield(Job *job) |
102 | return bdrv_check_perm_common(list, q, true, cumulative_perms, | 371 | -{ |
103 | - cumulative_shared_perms, ignore_children, | 372 | - JOB_LOCK_GUARD(); |
104 | - NULL, errp); | 373 | - job_yield_locked(job); |
105 | + cumulative_shared_perms, NULL, errp); | 374 | -} |
106 | } | 375 | - |
107 | 376 | void coroutine_fn job_sleep_ns(Job *job, int64_t ns) | |
108 | static int bdrv_list_refresh_perms(GSList *list, BlockReopenQueue *q, | 377 | { |
109 | Transaction *tran, Error **errp) | 378 | JOB_LOCK_GUARD(); |
110 | { | 379 | @@ -XXX,XX +XXX,XX @@ void job_user_pause_locked(Job *job, Error **errp) |
111 | - return bdrv_check_perm_common(list, q, false, 0, 0, NULL, tran, errp); | 380 | job_pause_locked(job); |
112 | + return bdrv_check_perm_common(list, q, false, 0, 0, tran, errp); | 381 | } |
113 | } | 382 | |
383 | -void job_user_pause(Job *job, Error **errp) | ||
384 | -{ | ||
385 | - JOB_LOCK_GUARD(); | ||
386 | - job_user_pause_locked(job, errp); | ||
387 | -} | ||
388 | - | ||
389 | bool job_user_paused_locked(Job *job) | ||
390 | { | ||
391 | return job->user_paused; | ||
392 | } | ||
393 | |||
394 | -bool job_user_paused(Job *job) | ||
395 | -{ | ||
396 | - JOB_LOCK_GUARD(); | ||
397 | - return job_user_paused_locked(job); | ||
398 | -} | ||
399 | - | ||
400 | void job_user_resume_locked(Job *job, Error **errp) | ||
401 | { | ||
402 | assert(job); | ||
403 | @@ -XXX,XX +XXX,XX @@ void job_user_resume_locked(Job *job, Error **errp) | ||
404 | job_resume_locked(job); | ||
405 | } | ||
406 | |||
407 | -void job_user_resume(Job *job, Error **errp) | ||
408 | -{ | ||
409 | - JOB_LOCK_GUARD(); | ||
410 | - job_user_resume_locked(job, errp); | ||
411 | -} | ||
412 | - | ||
413 | /* Called with job_mutex held, but releases it temporarily. */ | ||
414 | static void job_do_dismiss_locked(Job *job) | ||
415 | { | ||
416 | @@ -XXX,XX +XXX,XX @@ void job_dismiss_locked(Job **jobptr, Error **errp) | ||
417 | *jobptr = NULL; | ||
418 | } | ||
419 | |||
420 | -void job_dismiss(Job **jobptr, Error **errp) | ||
421 | -{ | ||
422 | - JOB_LOCK_GUARD(); | ||
423 | - job_dismiss_locked(jobptr, errp); | ||
424 | -} | ||
425 | - | ||
426 | void job_early_fail(Job *job) | ||
427 | { | ||
428 | JOB_LOCK_GUARD(); | ||
429 | @@ -XXX,XX +XXX,XX @@ void job_finalize_locked(Job *job, Error **errp) | ||
430 | job_do_finalize_locked(job); | ||
431 | } | ||
432 | |||
433 | -void job_finalize(Job *job, Error **errp) | ||
434 | -{ | ||
435 | - JOB_LOCK_GUARD(); | ||
436 | - job_finalize_locked(job, errp); | ||
437 | -} | ||
438 | - | ||
439 | /* Called with job_mutex held. */ | ||
440 | static int job_transition_to_pending_locked(Job *job) | ||
441 | { | ||
442 | @@ -XXX,XX +XXX,XX @@ void job_cancel_locked(Job *job, bool force) | ||
443 | } | ||
444 | } | ||
445 | |||
446 | -void job_cancel(Job *job, bool force) | ||
447 | -{ | ||
448 | - JOB_LOCK_GUARD(); | ||
449 | - job_cancel_locked(job, force); | ||
450 | -} | ||
451 | - | ||
452 | void job_user_cancel_locked(Job *job, bool force, Error **errp) | ||
453 | { | ||
454 | if (job_apply_verb_locked(job, JOB_VERB_CANCEL, errp)) { | ||
455 | @@ -XXX,XX +XXX,XX @@ void job_user_cancel_locked(Job *job, bool force, Error **errp) | ||
456 | job_cancel_locked(job, force); | ||
457 | } | ||
458 | |||
459 | -void job_user_cancel(Job *job, bool force, Error **errp) | ||
460 | -{ | ||
461 | - JOB_LOCK_GUARD(); | ||
462 | - job_user_cancel_locked(job, force, errp); | ||
463 | -} | ||
464 | - | ||
465 | -/* A wrapper around job_cancel() taking an Error ** parameter so it may be | ||
466 | - * used with job_finish_sync() without the need for (rather nasty) function | ||
467 | - * pointer casts there. | ||
468 | +/* A wrapper around job_cancel_locked() taking an Error ** parameter so it may | ||
469 | + * be used with job_finish_sync_locked() without the need for (rather nasty) | ||
470 | + * function pointer casts there. | ||
471 | * | ||
472 | * Called with job_mutex held. | ||
473 | */ | ||
474 | @@ -XXX,XX +XXX,XX @@ int job_complete_sync_locked(Job *job, Error **errp) | ||
475 | return job_finish_sync_locked(job, job_complete_locked, errp); | ||
476 | } | ||
477 | |||
478 | -int job_complete_sync(Job *job, Error **errp) | ||
479 | -{ | ||
480 | - JOB_LOCK_GUARD(); | ||
481 | - return job_complete_sync_locked(job, errp); | ||
482 | -} | ||
483 | - | ||
484 | void job_complete_locked(Job *job, Error **errp) | ||
485 | { | ||
486 | /* Should not be reachable via external interface for internal jobs */ | ||
487 | @@ -XXX,XX +XXX,XX @@ void job_complete_locked(Job *job, Error **errp) | ||
488 | job_lock(); | ||
489 | } | ||
490 | |||
491 | -void job_complete(Job *job, Error **errp) | ||
492 | -{ | ||
493 | - JOB_LOCK_GUARD(); | ||
494 | - job_complete_locked(job, errp); | ||
495 | -} | ||
496 | - | ||
497 | int job_finish_sync_locked(Job *job, | ||
498 | void (*finish)(Job *, Error **errp), | ||
499 | Error **errp) | ||
500 | @@ -XXX,XX +XXX,XX @@ int job_finish_sync_locked(Job *job, | ||
501 | job_unref_locked(job); | ||
502 | return ret; | ||
503 | } | ||
504 | - | ||
505 | -int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp) | ||
506 | -{ | ||
507 | - JOB_LOCK_GUARD(); | ||
508 | - return job_finish_sync_locked(job, finish, errp); | ||
509 | -} | ||
510 | diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c | ||
511 | index XXXXXXX..XXXXXXX 100644 | ||
512 | --- a/tests/unit/test-blockjob.c | ||
513 | +++ b/tests/unit/test-blockjob.c | ||
514 | @@ -XXX,XX +XXX,XX @@ static const BlockJobDriver test_yielding_driver = { | ||
515 | }; | ||
114 | 516 | ||
115 | /* | 517 | /* |
116 | @@ -XXX,XX +XXX,XX @@ char *bdrv_perm_names(uint64_t perm) | 518 | - * Test that job_complete() works even on jobs that are in a paused |
117 | static int bdrv_check_update_perm(BlockDriverState *bs, BlockReopenQueue *q, | 519 | + * Test that job_complete_locked() works even on jobs that are in a paused |
118 | uint64_t new_used_perm, | 520 | * state (i.e., STANDBY). |
119 | uint64_t new_shared_perm, | 521 | * |
120 | - GSList *ignore_children, | 522 | * To do this, run YieldingJob in an IO thread, get it into the READY |
121 | Error **errp) | 523 | @@ -XXX,XX +XXX,XX @@ static const BlockJobDriver test_yielding_driver = { |
122 | { | 524 | * acquire the context so the job will not be entered and will thus |
123 | BdrvChild *c; | 525 | * remain on STANDBY. |
124 | @@ -XXX,XX +XXX,XX @@ static int bdrv_check_update_perm(BlockDriverState *bs, BlockReopenQueue *q, | 526 | * |
125 | assert(new_shared_perm & BLK_PERM_WRITE_UNCHANGED); | 527 | - * job_complete() should still work without error. |
126 | 528 | + * job_complete_locked() should still work without error. | |
127 | QLIST_FOREACH(c, &bs->parents, next_parent) { | 529 | * |
128 | - if (g_slist_find(ignore_children, c)) { | 530 | * Note that on the QMP interface, it is impossible to lock an IO |
129 | - continue; | 531 | * thread before a drained section ends. In practice, the |
130 | - } | ||
131 | - | ||
132 | if ((new_used_perm & c->shared_perm) != new_used_perm) { | ||
133 | char *user = bdrv_child_user_desc(c); | ||
134 | char *perm_names = bdrv_perm_names(new_used_perm & ~c->shared_perm); | ||
135 | @@ -XXX,XX +XXX,XX @@ static int bdrv_check_update_perm(BlockDriverState *bs, BlockReopenQueue *q, | ||
136 | } | ||
137 | |||
138 | return bdrv_check_perm(bs, q, cumulative_perms, cumulative_shared_perms, | ||
139 | - ignore_children, errp); | ||
140 | + errp); | ||
141 | } | ||
142 | |||
143 | static int bdrv_refresh_perms(BlockDriverState *bs, Error **errp) | ||
144 | @@ -XXX,XX +XXX,XX @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp) | ||
145 | QTAILQ_FOREACH(bs_entry, bs_queue, entry) { | ||
146 | BDRVReopenState *state = &bs_entry->state; | ||
147 | ret = bdrv_check_perm(state->bs, bs_queue, state->perm, | ||
148 | - state->shared_perm, NULL, errp); | ||
149 | + state->shared_perm, errp); | ||
150 | if (ret < 0) { | ||
151 | goto cleanup_perm; | ||
152 | } | ||
153 | @@ -XXX,XX +XXX,XX @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp) | ||
154 | bs_queue, state->perm, state->shared_perm, | ||
155 | &nperm, &nshared); | ||
156 | ret = bdrv_check_update_perm(state->new_backing_bs, NULL, | ||
157 | - nperm, nshared, NULL, errp); | ||
158 | + nperm, nshared, errp); | ||
159 | if (ret < 0) { | ||
160 | goto cleanup_perm; | ||
161 | } | ||
162 | -- | 532 | -- |
163 | 2.30.2 | 533 | 2.37.3 |
164 | |||
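As a concrete (non-QEMU) illustration of the *_locked() convention documented in the right-hand patch above: the _locked variant assumes the caller already holds the lock, while the unlocked name, where it still exists, is only a thin wrapper that takes the lock itself. A minimal sketch with plain pthreads and made-up names:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t job_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int pause_count;

    /* Caller must hold job_mutex. */
    static void example_pause_locked(void)
    {
        pause_count++;
    }

    /* Wrapper for callers that do not hold job_mutex. */
    static void example_pause(void)
    {
        pthread_mutex_lock(&job_mutex);
        example_pause_locked();
        pthread_mutex_unlock(&job_mutex);
    }

    int main(void)
    {
        example_pause();
        printf("pause_count=%d\n", pause_count);
        return 0;
    }
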
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | The field is unused (only ever set, but never read) since commit |
---|---|---|---|
2 | ac9185603. Additionally, the commit message of commit 34fa110e already | ||
3 | explained earlier why it's unreliable. Remove it. | ||
2 | 4 | ||
3 | The ignore_children thing doesn't help to track all propagated permissions | 5 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
4 | of children we want to ignore. The simplest way to correctly update | 6 | Message-Id: <20220923142838.91043-1-kwolf@redhat.com> |
5 | permissions is to update the graph first and then do the permission update. In | ||
6 | this case we just refresh permissions for the whole subgraph (in | ||
7 | topological-sort defined order) and everything is correctly calculated | ||
8 | automatically without any ignore_children. | ||
9 | |||
10 | So, refactor bdrv_replace_node_common to first do graph update and then | ||
11 | refresh the permissions. | ||
12 | |||
13 | Test test_parallel_exclusive_write() now passes, so move it out of the | ||
14 | debugging "if". | ||
15 | |||
16 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
17 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
18 | Message-Id: <20210428151804.439460-18-vsementsov@virtuozzo.com> | ||
19 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 7 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
20 | --- | 8 | --- |
21 | block.c | 43 +++++++++++++------------------- | 9 | block/file-posix.c | 9 --------- |
22 | tests/unit/test-bdrv-graph-mod.c | 4 +-- | 10 | 1 file changed, 9 deletions(-) |
23 | 2 files changed, 20 insertions(+), 27 deletions(-) | ||
24 | 11 | ||
25 | diff --git a/block.c b/block.c | 12 | diff --git a/block/file-posix.c b/block/file-posix.c |
26 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
27 | --- a/block.c | 14 | --- a/block/file-posix.c |
28 | +++ b/block.c | 15 | +++ b/block/file-posix.c |
29 | @@ -XXX,XX +XXX,XX @@ static TransactionActionDrv bdrv_replace_child_drv = { | 16 | @@ -XXX,XX +XXX,XX @@ typedef struct BDRVRawState { |
30 | * | 17 | |
31 | * Note: real unref of old_bs is done only on commit. | 18 | bool has_discard:1; |
32 | */ | 19 | bool has_write_zeroes:1; |
33 | -__attribute__((unused)) | 20 | - bool discard_zeroes:1; |
34 | static void bdrv_replace_child_safe(BdrvChild *child, BlockDriverState *new_bs, | 21 | bool use_linux_aio:1; |
35 | Transaction *tran) | 22 | bool use_linux_io_uring:1; |
36 | { | 23 | int page_cache_inconsistent; /* errno from fdatasync failure */ |
37 | @@ -XXX,XX +XXX,XX @@ static int bdrv_replace_node_common(BlockDriverState *from, | 24 | @@ -XXX,XX +XXX,XX @@ static int raw_open_common(BlockDriverState *bs, QDict *options, |
38 | bool auto_skip, Error **errp) | 25 | ret = -EINVAL; |
39 | { | 26 | goto fail; |
40 | BdrvChild *c, *next; | 27 | } else { |
41 | - GSList *list = NULL, *p; | 28 | - s->discard_zeroes = true; |
42 | - uint64_t perm = 0, shared = BLK_PERM_ALL; | 29 | s->has_fallocate = true; |
43 | + Transaction *tran = tran_new(); | ||
44 | + g_autoptr(GHashTable) found = NULL; | ||
45 | + g_autoptr(GSList) refresh_list = NULL; | ||
46 | int ret; | ||
47 | |||
48 | /* Make sure that @from doesn't go away until we have successfully attached | ||
49 | @@ -XXX,XX +XXX,XX @@ static int bdrv_replace_node_common(BlockDriverState *from, | ||
50 | assert(bdrv_get_aio_context(from) == bdrv_get_aio_context(to)); | ||
51 | bdrv_drained_begin(from); | ||
52 | |||
53 | - /* Put all parents into @list and calculate their cumulative permissions */ | ||
54 | + /* | ||
55 | + * Do the replacement without permission update. | ||
56 | + * Replacement may influence the permissions, we should calculate new | ||
57 | + * permissions based on new graph. If we fail, we'll roll-back the | ||
58 | + * replacement. | ||
59 | + */ | ||
60 | QLIST_FOREACH_SAFE(c, &from->parents, next_parent, next) { | ||
61 | assert(c->bs == from); | ||
62 | if (!should_update_child(c, to)) { | ||
63 | @@ -XXX,XX +XXX,XX @@ static int bdrv_replace_node_common(BlockDriverState *from, | ||
64 | c->name, from->node_name); | ||
65 | goto out; | ||
66 | } | 30 | } |
67 | - list = g_slist_prepend(list, c); | 31 | } else { |
68 | - perm |= c->perm; | 32 | @@ -XXX,XX +XXX,XX @@ static int raw_open_common(BlockDriverState *bs, QDict *options, |
69 | - shared &= c->shared_perm; | ||
70 | + bdrv_replace_child_safe(c, to, tran); | ||
71 | } | 33 | } |
72 | 34 | ||
73 | - /* Check whether the required permissions can be granted on @to, ignoring | 35 | if (S_ISBLK(st.st_mode)) { |
74 | - * all BdrvChild in @list so that they can't block themselves. */ | 36 | -#ifdef BLKDISCARDZEROES |
75 | - ret = bdrv_check_update_perm(to, NULL, perm, shared, list, errp); | 37 | - unsigned int arg; |
76 | - if (ret < 0) { | 38 | - if (ioctl(s->fd, BLKDISCARDZEROES, &arg) == 0 && arg) { |
77 | - bdrv_abort_perm_update(to); | 39 | - s->discard_zeroes = true; |
78 | - goto out; | 40 | - } |
79 | - } | 41 | -#endif |
80 | + found = g_hash_table_new(NULL, NULL); | 42 | #ifdef __linux__ |
81 | 43 | /* On Linux 3.10, BLKDISCARD leaves stale data in the page cache. Do | |
82 | - /* Now actually perform the change. We performed the permission check for | 44 | * not rely on the contents of discarded blocks unless using O_DIRECT. |
83 | - * all elements of @list at once, so set the permissions all at once at the | 45 | * Same for BLKZEROOUT. |
84 | - * very end. */ | 46 | */ |
85 | - for (p = list; p != NULL; p = p->next) { | 47 | if (!(bs->open_flags & BDRV_O_NOCACHE)) { |
86 | - c = p->data; | 48 | - s->discard_zeroes = false; |
87 | + refresh_list = bdrv_topological_dfs(refresh_list, found, to); | 49 | s->has_write_zeroes = false; |
88 | + refresh_list = bdrv_topological_dfs(refresh_list, found, from); | 50 | } |
89 | 51 | #endif | |
90 | - bdrv_ref(to); | ||
91 | - bdrv_replace_child_noperm(c, to); | ||
92 | - bdrv_unref(from); | ||
93 | + ret = bdrv_list_refresh_perms(refresh_list, NULL, tran, errp); | ||
94 | + if (ret < 0) { | ||
95 | + goto out; | ||
96 | } | ||
97 | |||
98 | - bdrv_set_perm(to); | ||
99 | - | ||
100 | ret = 0; | ||
101 | |||
102 | out: | ||
103 | - g_slist_free(list); | ||
104 | + tran_finalize(tran, ret); | ||
105 | + | ||
106 | bdrv_drained_end(from); | ||
107 | bdrv_unref(from); | ||
108 | |||
109 | diff --git a/tests/unit/test-bdrv-graph-mod.c b/tests/unit/test-bdrv-graph-mod.c | ||
110 | index XXXXXXX..XXXXXXX 100644 | ||
111 | --- a/tests/unit/test-bdrv-graph-mod.c | ||
112 | +++ b/tests/unit/test-bdrv-graph-mod.c | ||
113 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char *argv[]) | ||
114 | test_should_update_child); | ||
115 | g_test_add_func("/bdrv-graph-mod/parallel-perm-update", | ||
116 | test_parallel_perm_update); | ||
117 | + g_test_add_func("/bdrv-graph-mod/parallel-exclusive-write", | ||
118 | + test_parallel_exclusive_write); | ||
119 | |||
120 | if (debug) { | ||
121 | - g_test_add_func("/bdrv-graph-mod/parallel-exclusive-write", | ||
122 | - test_parallel_exclusive_write); | ||
123 | g_test_add_func("/bdrv-graph-mod/append-greedy-filter", | ||
124 | test_append_greedy_filter); | ||
125 | } | ||
126 | -- | 52 | -- |
127 | 2.30.2 | 53 | 2.37.3 |
128 | |||
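The left-hand block.c patch above boils down to a small transaction pattern: apply the graph change first, then recompute permissions for the affected subgraph, and undo the change if that fails. A rough standalone sketch of the idea (illustrative names, not the actual QEMU transaction helpers):

    #include <stdbool.h>
    #include <stdio.h>

    /* One reversible step: remembers how to undo itself. */
    typedef struct {
        void (*abort)(void);
    } Action;

    static bool graph_updated;

    static void undo_graph_update(void)
    {
        graph_updated = false;
    }

    /* Step 1: perform the change and record the undo handler. */
    static Action update_graph(void)
    {
        graph_updated = true;
        return (Action){ .abort = undo_graph_update };
    }

    /* Step 2: validate the new state; pretend the permission refresh fails. */
    static bool refresh_permissions(void)
    {
        return false;
    }

    int main(void)
    {
        Action act = update_graph();

        if (!refresh_permissions()) {
            act.abort();   /* roll the graph change back */
        }
        printf("graph_updated=%d\n", graph_updated);
        return 0;
    }
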