1 | The following changes since commit 22ef7ba8e8ce7fef297549b3defcac333742b804: | 1 | The following changes since commit f1d33f55c47dfdaf8daacd618588ad3ae4c452d1: |
---|---|---|---|
2 | 2 | ||
3 | Merge remote-tracking branch 'remotes/famz/tags/staging-pull-request' into staging (2018-03-13 11:42:45 +0000) | 3 | Merge tag 'pull-testing-gdbstub-plugins-gitdm-061022-3' of https://github.com/stsquad/qemu into staging (2022-10-06 07:11:56 -0400) |
4 | 4 | ||
5 | are available in the git repository at: | 5 | are available in the Git repository at: |
6 | 6 | ||
7 | git://repo.or.cz/qemu/kevin.git tags/for-upstream | 7 | git://repo.or.cz/qemu/kevin.git tags/for-upstream |
8 | 8 | ||
9 | for you to fetch changes up to be6c885842efded81a20f4ca24f0d4e123a80c00: | 9 | for you to fetch changes up to a7ca2eb488ff149c898f43abe103f8bd8e3ca3c4: |
10 | 10 | ||
11 | block/mirror: change the semantic of 'force' of block-job-cancel (2018-03-13 16:54:47 +0100) | 11 | file-posix: Remove unused s->discard_zeroes (2022-10-07 12:11:41 +0200) |
12 | 12 | ||
13 | ---------------------------------------------------------------- | 13 | ---------------------------------------------------------------- |
14 | Block layer patches | 14 | Block layer patches |
15 | 15 | ||
16 | - job: replace AioContext lock with job_mutex | ||
17 | - Fixes to make coroutine_fn annotations more accurate | ||
18 | - QAPI schema: Fix incorrect example | ||
19 | - Code cleanup | ||
20 | |||
16 | ---------------------------------------------------------------- | 21 | ---------------------------------------------------------------- |
17 | Fam Zheng (2): | 22 | Alberto Faria (1): |
18 | block: Fix flags in reopen queue | 23 | coroutine: Drop coroutine_fn annotation from qemu_coroutine_self() |
19 | iotests: Add regression test for commit base locking | ||
20 | 24 | ||
21 | John Snow (21): | 25 | Emanuele Giuseppe Esposito (20): |
22 | blockjobs: fix set-speed kick | 26 | job.c: make job_mutex and job_lock/unlock() public |
23 | blockjobs: model single jobs as transactions | 27 | job.h: categorize fields in struct Job |
24 | Blockjobs: documentation touchup | 28 | job.c: API functions not used outside should be static |
25 | blockjobs: add status enum | 29 | aio-wait.h: introduce AIO_WAIT_WHILE_UNLOCKED |
26 | blockjobs: add state transition table | 30 | job.c: add job_lock/unlock while keeping job.h intact |
27 | iotests: add pause_wait | 31 | job: move and update comments from blockjob.c |
28 | blockjobs: add block_job_verb permission table | 32 | blockjob: introduce block_job _locked() APIs |
29 | blockjobs: add ABORTING state | 33 | jobs: add job lock in find_* functions |
30 | blockjobs: add CONCLUDED state | 34 | jobs: use job locks also in the unit tests |
31 | blockjobs: add NULL state | 35 | block/mirror.c: use of job helpers in drivers |
32 | blockjobs: add block_job_dismiss | 36 | jobs: group together API calls under the same job lock |
33 | blockjobs: ensure abort is called for cancelled jobs | 37 | jobs: protect job.aio_context with BQL and job_mutex |
34 | blockjobs: add commit, abort, clean helpers | 38 | blockjob.h: categorize fields in struct BlockJob |
35 | blockjobs: add block_job_txn_apply function | 39 | blockjob: rename notifier callbacks as _locked |
36 | blockjobs: add prepare callback | 40 | blockjob: protect iostatus field in BlockJob struct |
37 | blockjobs: add waiting status | 41 | job.h: categorize JobDriver callbacks that need the AioContext lock |
38 | blockjobs: add PENDING status and event | 42 | job.c: enable job lock/unlock and remove Aiocontext locks |
39 | blockjobs: add block-job-finalize | 43 | block_job_query: remove atomic read |
40 | blockjobs: Expose manual property | 44 | blockjob: remove unused functions |
41 | iotests: test manual job dismissal | 45 | job: remove unused functions |
42 | tests/test-blockjob: test cancellations | ||
43 | 46 | ||
44 | Kevin Wolf (14): | 47 | Kevin Wolf (2): |
45 | luks: Separate image file creation from formatting | 48 | quorum: Remove unnecessary forward declaration |
46 | luks: Create block_crypto_co_create_generic() | 49 | file-posix: Remove unused s->discard_zeroes |
47 | luks: Support .bdrv_co_create | ||
48 | luks: Turn invalid assertion into check | ||
49 | luks: Catch integer overflow for huge sizes | ||
50 | qemu-iotests: Test luks QMP image creation | ||
51 | parallels: Support .bdrv_co_create | ||
52 | qemu-iotests: Enable write tests for parallels | ||
53 | qcow: Support .bdrv_co_create | ||
54 | qed: Support .bdrv_co_create | ||
55 | vdi: Make comments consistent with other drivers | ||
56 | vhdx: Support .bdrv_co_create | ||
57 | vpc: Support .bdrv_co_create | ||
58 | vpc: Require aligned size in .bdrv_co_create | ||
59 | 50 | ||
60 | Liang Li (1): | 51 | Marc-André Lureau (3): |
61 | block/mirror: change the semantic of 'force' of block-job-cancel | 52 | 9p: add missing coroutine_fn annotations |
53 | migration: add missing coroutine_fn annotations | ||
54 | test-coroutine: add missing coroutine_fn annotations | ||
62 | 55 | ||
63 | Max Reitz (3): | 56 | Markus Armbruster (1): |
64 | vdi: Pull option parsing from vdi_co_create | 57 | Revert "qapi: fix examples of blockdev-add with qcow2" |
65 | vdi: Move file creation to vdi_co_create_opts | ||
66 | vdi: Implement .bdrv_co_create | ||
67 | 58 | ||
68 | qapi/block-core.json | 363 ++++++++++++++++++++++++++++++++++++++++-- | 59 | Paolo Bonzini (23): |
69 | include/block/blockjob.h | 71 ++++++++- | 60 | block/nvme: separate nvme_get_free_req cases for coroutine/non-coroutine context |
70 | include/block/blockjob_int.h | 17 +- | 61 | block: add missing coroutine_fn annotations |
71 | block.c | 8 + | 62 | qcow2: remove incorrect coroutine_fn annotations |
72 | block/backup.c | 5 +- | 63 | nbd: remove incorrect coroutine_fn annotations |
73 | block/commit.c | 2 +- | 64 | coroutine: remove incorrect coroutine_fn annotations |
74 | block/crypto.c | 150 ++++++++++++----- | 65 | blkverify: add missing coroutine_fn annotations |
75 | block/mirror.c | 12 +- | 66 | file-posix: add missing coroutine_fn annotations |
76 | block/parallels.c | 199 +++++++++++++++++------ | 67 | iscsi: add missing coroutine_fn annotations |
77 | block/qcow.c | 196 +++++++++++++++-------- | 68 | nbd: add missing coroutine_fn annotations |
78 | block/qed.c | 204 ++++++++++++++++-------- | 69 | nfs: add missing coroutine_fn annotations |
79 | block/stream.c | 2 +- | 70 | nvme: add missing coroutine_fn annotations |
80 | block/vdi.c | 147 +++++++++++++---- | 71 | parallels: add missing coroutine_fn annotations |
81 | block/vhdx.c | 216 +++++++++++++++++++------ | 72 | qcow2: add missing coroutine_fn annotations |
82 | block/vpc.c | 241 +++++++++++++++++++++------- | 73 | copy-before-write: add missing coroutine_fn annotations |
83 | blockdev.c | 71 +++++++-- | 74 | curl: add missing coroutine_fn annotations |
84 | blockjob.c | 358 +++++++++++++++++++++++++++++++++++------ | 75 | qed: add missing coroutine_fn annotations |
85 | tests/test-bdrv-drain.c | 5 +- | 76 | quorum: add missing coroutine_fn annotations |
86 | tests/test-blockjob-txn.c | 27 ++-- | 77 | throttle: add missing coroutine_fn annotations |
87 | tests/test-blockjob.c | 233 ++++++++++++++++++++++++++- | 78 | vmdk: add missing coroutine_fn annotations |
88 | block/trace-events | 7 + | 79 | job: add missing coroutine_fn annotations |
89 | hmp-commands.hx | 3 +- | 80 | coroutine-lock: add missing coroutine_fn annotations |
90 | tests/qemu-iotests/030 | 6 +- | 81 | raw-format: add missing coroutine_fn annotations |
91 | tests/qemu-iotests/055 | 17 +- | 82 | job: detect change of aiocontext within job coroutine |
92 | tests/qemu-iotests/056 | 187 ++++++++++++++++++++++ | ||
93 | tests/qemu-iotests/056.out | 4 +- | ||
94 | tests/qemu-iotests/109.out | 24 +-- | ||
95 | tests/qemu-iotests/153 | 12 ++ | ||
96 | tests/qemu-iotests/153.out | 5 + | ||
97 | tests/qemu-iotests/181 | 2 +- | ||
98 | tests/qemu-iotests/209 | 210 ++++++++++++++++++++++++ | ||
99 | tests/qemu-iotests/209.out | 136 ++++++++++++++++ | ||
100 | tests/qemu-iotests/check | 1 - | ||
101 | tests/qemu-iotests/common.rc | 2 +- | ||
102 | tests/qemu-iotests/group | 1 + | ||
103 | tests/qemu-iotests/iotests.py | 12 +- | ||
104 | 36 files changed, 2642 insertions(+), 514 deletions(-) | ||
105 | create mode 100755 tests/qemu-iotests/209 | ||
106 | create mode 100644 tests/qemu-iotests/209.out | ||
107 | 83 | ||
84 | qapi/block-core.json | 10 +- | ||
85 | block/qcow2.h | 19 +- | ||
86 | hw/9pfs/9p.h | 9 +- | ||
87 | include/block/aio-wait.h | 17 +- | ||
88 | include/block/blockjob.h | 59 +++- | ||
89 | include/block/nbd.h | 2 +- | ||
90 | include/qemu/coroutine.h | 4 +- | ||
91 | include/qemu/job.h | 306 +++++++++++++----- | ||
92 | block.c | 24 +- | ||
93 | block/blkverify.c | 2 +- | ||
94 | block/block-backend.c | 10 +- | ||
95 | block/copy-before-write.c | 9 +- | ||
96 | block/curl.c | 2 +- | ||
97 | block/file-posix.c | 11 +- | ||
98 | block/io.c | 22 +- | ||
99 | block/iscsi.c | 3 +- | ||
100 | block/mirror.c | 19 +- | ||
101 | block/nbd.c | 11 +- | ||
102 | block/nfs.c | 2 +- | ||
103 | block/nvme.c | 54 ++-- | ||
104 | block/parallels.c | 5 +- | ||
105 | block/qcow2-cluster.c | 21 +- | ||
106 | block/qcow2-refcount.c | 6 +- | ||
107 | block/qcow2.c | 5 +- | ||
108 | block/qed.c | 4 +- | ||
109 | block/quorum.c | 38 +-- | ||
110 | block/raw-format.c | 3 +- | ||
111 | block/replication.c | 3 + | ||
112 | block/throttle.c | 2 +- | ||
113 | block/vmdk.c | 22 +- | ||
114 | blockdev.c | 129 ++++---- | ||
115 | blockjob.c | 132 ++++---- | ||
116 | job-qmp.c | 92 +++--- | ||
117 | job.c | 674 +++++++++++++++++++++++++-------------- | ||
118 | migration/migration.c | 3 +- | ||
119 | monitor/qmp-cmds.c | 7 +- | ||
120 | qemu-img.c | 17 +- | ||
121 | tests/unit/test-bdrv-drain.c | 80 +++-- | ||
122 | tests/unit/test-block-iothread.c | 8 +- | ||
123 | tests/unit/test-blockjob-txn.c | 24 +- | ||
124 | tests/unit/test-blockjob.c | 136 ++++---- | ||
125 | tests/unit/test-coroutine.c | 2 +- | ||
126 | util/qemu-coroutine-lock.c | 14 +- | ||
127 | util/qemu-coroutine.c | 2 +- | ||
128 | 44 files changed, 1237 insertions(+), 787 deletions(-) | ||
129 | |||
1 | From: John Snow <jsnow@redhat.com> | 1 | From: Markus Armbruster <armbru@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Add a new state ABORTING. | 3 | This reverts commit b6522938327141235b97ab38e40c6c4512587373. |
4 | 4 | ||
5 | This makes transitions from normative states to error states explicit | 5 | Kevin Wolf NAKed this patch, because: |
6 | in the STM, and serves as a disambiguation for which states may complete | ||
7 | normally when normal end-states (CONCLUDED) are added in future commits. | ||
8 | 6 | ||
9 | Notably, Paused/Standby jobs do not transition directly to aborting, | 7 | 'file' is a required member (defined in BlockdevOptionsGenericFormat), |
10 | as they must wake up first and cooperate in their cancellation. | 8 | removing it makes the example invalid. 'data-file' is only an additional |
9 | optional member to be used for external data files (i.e. when the guest | ||
10 | data is kept separate from the metadata in the .qcow2 file). | ||
11 | 11 | ||
12 | Transitions: | 12 | However, it had already been merged then. Revert. |
13 | Created -> Aborting: can be cancelled (by the system) | ||
14 | Running -> Aborting: can be cancelled or encounter an error | ||
15 | Ready -> Aborting: can be cancelled or encounter an error | ||
16 | 13 | ||
17 | Verbs: | 14 | Signed-off-by: Markus Armbruster <armbru@redhat.com> |
18 | None. The job must finish cleaning itself up and report its final status. | 15 | Message-Id: <20220930171908.846769-1-armbru@redhat.com> |
19 | 16 | Reviewed-by: Victor Toso <victortoso@redhat.com> | |
20 | +---------+ | ||
21 | |UNDEFINED| | ||
22 | +--+------+ | ||
23 | | | ||
24 | +--v----+ | ||
25 | +---------+CREATED| | ||
26 | | +--+----+ | ||
27 | | | | ||
28 | | +--v----+ +------+ | ||
29 | +---------+RUNNING<----->PAUSED| | ||
30 | | +--+----+ +------+ | ||
31 | | | | ||
32 | | +--v--+ +-------+ | ||
33 | +---------+READY<------->STANDBY| | ||
34 | | +-----+ +-------+ | ||
35 | | | ||
36 | +--v-----+ | ||
37 | |ABORTING| | ||
38 | +--------+ | ||
39 | |||
40 | Signed-off-by: John Snow <jsnow@redhat.com> | ||
41 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
42 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 17 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
43 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 18 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
44 | --- | 19 | --- |
45 | qapi/block-core.json | 7 ++++++- | 20 | qapi/block-core.json | 10 +++++----- |
46 | blockjob.c | 31 ++++++++++++++++++------------- | 21 | 1 file changed, 5 insertions(+), 5 deletions(-) |
47 | 2 files changed, 24 insertions(+), 14 deletions(-) | ||
48 | 22 | ||
49 | diff --git a/qapi/block-core.json b/qapi/block-core.json | 23 | diff --git a/qapi/block-core.json b/qapi/block-core.json |
50 | index XXXXXXX..XXXXXXX 100644 | 24 | index XXXXXXX..XXXXXXX 100644 |
51 | --- a/qapi/block-core.json | 25 | --- a/qapi/block-core.json |
52 | +++ b/qapi/block-core.json | 26 | +++ b/qapi/block-core.json |
53 | @@ -XXX,XX +XXX,XX @@ | 27 | @@ -XXX,XX +XXX,XX @@ |
54 | # @standby: The job is ready, but paused. This is nearly identical to @paused. | 28 | # -> { "execute": "blockdev-add", |
55 | # The job may return to @ready or otherwise be canceled. | 29 | # "arguments": { "driver": "qcow2", |
30 | # "node-name": "node1534", | ||
31 | -# "data-file": { "driver": "file", | ||
32 | -# "filename": "hd1.qcow2" }, | ||
33 | +# "file": { "driver": "file", | ||
34 | +# "filename": "hd1.qcow2" }, | ||
35 | # "backing": null } } | ||
56 | # | 36 | # |
57 | +# @aborting: The job is in the process of being aborted, and will finish with | 37 | # <- { "return": {} } |
58 | +# an error. | 38 | @@ -XXX,XX +XXX,XX @@ |
59 | +# This status may not be visible to the management process. | 39 | # "arguments": { |
60 | +# | 40 | # "driver": "qcow2", |
61 | # Since: 2.12 | 41 | # "node-name": "test1", |
62 | ## | 42 | -# "data-file": { |
63 | { 'enum': 'BlockJobStatus', | 43 | +# "file": { |
64 | - 'data': ['undefined', 'created', 'running', 'paused', 'ready', 'standby'] } | 44 | # "driver": "file", |
65 | + 'data': ['undefined', 'created', 'running', 'paused', 'ready', 'standby', | 45 | # "filename": "test.qcow2" |
66 | + 'aborting' ] } | 46 | # } |
67 | 47 | @@ -XXX,XX +XXX,XX @@ | |
68 | ## | 48 | # "cache": { |
69 | # @BlockJobInfo: | 49 | # "direct": true |
70 | diff --git a/blockjob.c b/blockjob.c | 50 | # }, |
71 | index XXXXXXX..XXXXXXX 100644 | 51 | -# "data-file": { |
72 | --- a/blockjob.c | 52 | +# "file": { |
73 | +++ b/blockjob.c | 53 | # "driver": "file", |
74 | @@ -XXX,XX +XXX,XX @@ static QemuMutex block_job_mutex; | 54 | # "filename": "/tmp/test.qcow2" |
75 | 55 | # }, | |
76 | /* BlockJob State Transition Table */ | 56 | @@ -XXX,XX +XXX,XX @@ |
77 | bool BlockJobSTT[BLOCK_JOB_STATUS__MAX][BLOCK_JOB_STATUS__MAX] = { | 57 | # "arguments": { |
78 | - /* U, C, R, P, Y, S */ | 58 | # "driver": "qcow2", |
79 | - /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0}, | 59 | # "node-name": "node0", |
80 | - /* C: */ [BLOCK_JOB_STATUS_CREATED] = {0, 0, 1, 0, 0, 0}, | 60 | -# "data-file": { |
81 | - /* R: */ [BLOCK_JOB_STATUS_RUNNING] = {0, 0, 0, 1, 1, 0}, | 61 | +# "file": { |
82 | - /* P: */ [BLOCK_JOB_STATUS_PAUSED] = {0, 0, 1, 0, 0, 0}, | 62 | # "driver": "file", |
83 | - /* Y: */ [BLOCK_JOB_STATUS_READY] = {0, 0, 0, 0, 0, 1}, | 63 | # "filename": "test.qcow2" |
84 | - /* S: */ [BLOCK_JOB_STATUS_STANDBY] = {0, 0, 0, 0, 1, 0}, | 64 | # } |
85 | + /* U, C, R, P, Y, S, X */ | ||
86 | + /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0}, | ||
87 | + /* C: */ [BLOCK_JOB_STATUS_CREATED] = {0, 0, 1, 0, 0, 0, 1}, | ||
88 | + /* R: */ [BLOCK_JOB_STATUS_RUNNING] = {0, 0, 0, 1, 1, 0, 1}, | ||
89 | + /* P: */ [BLOCK_JOB_STATUS_PAUSED] = {0, 0, 1, 0, 0, 0, 0}, | ||
90 | + /* Y: */ [BLOCK_JOB_STATUS_READY] = {0, 0, 0, 0, 0, 1, 1}, | ||
91 | + /* S: */ [BLOCK_JOB_STATUS_STANDBY] = {0, 0, 0, 0, 1, 0, 0}, | ||
92 | + /* X: */ [BLOCK_JOB_STATUS_ABORTING] = {0, 0, 0, 0, 0, 0, 0}, | ||
93 | }; | ||
94 | |||
95 | bool BlockJobVerbTable[BLOCK_JOB_VERB__MAX][BLOCK_JOB_STATUS__MAX] = { | ||
96 | - /* U, C, R, P, Y, S */ | ||
97 | - [BLOCK_JOB_VERB_CANCEL] = {0, 1, 1, 1, 1, 1}, | ||
98 | - [BLOCK_JOB_VERB_PAUSE] = {0, 1, 1, 1, 1, 1}, | ||
99 | - [BLOCK_JOB_VERB_RESUME] = {0, 1, 1, 1, 1, 1}, | ||
100 | - [BLOCK_JOB_VERB_SET_SPEED] = {0, 1, 1, 1, 1, 1}, | ||
101 | - [BLOCK_JOB_VERB_COMPLETE] = {0, 0, 0, 0, 1, 0}, | ||
102 | + /* U, C, R, P, Y, S, X */ | ||
103 | + [BLOCK_JOB_VERB_CANCEL] = {0, 1, 1, 1, 1, 1, 0}, | ||
104 | + [BLOCK_JOB_VERB_PAUSE] = {0, 1, 1, 1, 1, 1, 0}, | ||
105 | + [BLOCK_JOB_VERB_RESUME] = {0, 1, 1, 1, 1, 1, 0}, | ||
106 | + [BLOCK_JOB_VERB_SET_SPEED] = {0, 1, 1, 1, 1, 1, 0}, | ||
107 | + [BLOCK_JOB_VERB_COMPLETE] = {0, 0, 0, 0, 1, 0, 0}, | ||
108 | }; | ||
109 | |||
110 | static void block_job_state_transition(BlockJob *job, BlockJobStatus s1) | ||
111 | @@ -XXX,XX +XXX,XX @@ static void block_job_completed_single(BlockJob *job) | ||
112 | { | ||
113 | assert(job->completed); | ||
114 | |||
115 | + if (job->ret || block_job_is_cancelled(job)) { | ||
116 | + block_job_state_transition(job, BLOCK_JOB_STATUS_ABORTING); | ||
117 | + } | ||
118 | + | ||
119 | if (!job->ret) { | ||
120 | if (job->driver->commit) { | ||
121 | job->driver->commit(job); | ||
122 | -- | 65 | -- |
123 | 2.13.6 | 66 | 2.37.3 |
124 | |||
New patch | |||
---|---|---|---|
1 | From: Alberto Faria <afaria@redhat.com> | ||
1 | 2 | ||
3 | qemu_coroutine_self() can be called from outside coroutine context, | ||
4 | returning the leader coroutine, and several such invocations currently | ||
5 | exist (mostly in qcow2 tracing calls). | ||
6 | |||
7 | Signed-off-by: Alberto Faria <afaria@redhat.com> | ||
8 | Message-Id: <20221005175209.975797-1-afaria@redhat.com> | ||
9 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
10 | Reviewed-by: Paolo Bonzini <pbonzini@redhat.com> | ||
11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
12 | --- | ||
13 | include/qemu/coroutine.h | 2 +- | ||
14 | 1 file changed, 1 insertion(+), 1 deletion(-) | ||
15 | |||
16 | diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/include/qemu/coroutine.h | ||
19 | +++ b/include/qemu/coroutine.h | ||
20 | @@ -XXX,XX +XXX,XX @@ AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co); | ||
21 | /** | ||
22 | * Get the currently executing coroutine | ||
23 | */ | ||
24 | -Coroutine *coroutine_fn qemu_coroutine_self(void); | ||
25 | +Coroutine *qemu_coroutine_self(void); | ||
26 | |||
27 | /** | ||
28 | * Return whether or not currently inside a coroutine | ||
29 | -- | ||
30 | 2.37.3 |
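
The change above is easier to follow with a small illustration. The sketch below is not part of the patch; it only restates the commit message: qemu_coroutine_self() stays meaningful outside coroutine context, where it returns the per-thread leader coroutine, so the coroutine_fn restriction was never needed. The helper name log_current_context() is invented for this example.

```c
/* Illustrative only, not from the patch; assumes it is compiled inside
 * the QEMU tree so the usual headers are available. */
#include "qemu/osdep.h"
#include "qemu/coroutine.h"

static void log_current_context(void)
{
    /* Legal both inside and outside coroutine context: outside a
     * coroutine it returns the thread's leader coroutine. */
    Coroutine *co = qemu_coroutine_self();

    if (qemu_in_coroutine()) {
        /* co is the coroutine currently running on this thread */
    } else {
        /* co is the leader coroutine, still a valid pointer that can be
         * used e.g. as an identifier in trace events */
    }
    (void)co;
}
```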
1 | Perform the rounding to match a CHS geometry only in the legacy code | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | path in .bdrv_co_create_opts. QMP now requires that the user already | ||
3 | passes a CHS aligned image size, unless force-size=true is given. | ||
4 | 2 | ||
5 | CHS alignment is required to make the image compatible with Virtual PC, | 3 | nvme_get_free_req has very difference semantics when called in |
6 | but not for use with newer Microsoft hypervisors. | 4 | coroutine context (where it waits) and in non-coroutine context |
5 | (where it doesn't). Split the two cases to make it clear what | ||
6 | is being requested. | ||
7 | 7 | ||
8 | Cc: qemu-block@nongnu.org | ||
9 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
10 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
11 | Message-Id: <20220922084924.201610-2-pbonzini@redhat.com> | ||
12 | [kwolf: Fixed up coding style] | ||
13 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
8 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 14 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
9 | Reviewed-by: Max Reitz <mreitz@redhat.com> | ||
10 | --- | 15 | --- |
11 | block/vpc.c | 113 +++++++++++++++++++++++++++++++++++++++++++----------------- | 16 | block/nvme.c | 48 ++++++++++++++++++++++++++++-------------------- |
12 | 1 file changed, 82 insertions(+), 31 deletions(-) | 17 | 1 file changed, 28 insertions(+), 20 deletions(-) |
13 | 18 | ||
14 | diff --git a/block/vpc.c b/block/vpc.c | 19 | diff --git a/block/nvme.c b/block/nvme.c |
15 | index XXXXXXX..XXXXXXX 100644 | 20 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/block/vpc.c | 21 | --- a/block/nvme.c |
17 | +++ b/block/vpc.c | 22 | +++ b/block/nvme.c |
18 | @@ -XXX,XX +XXX,XX @@ static int create_fixed_disk(BlockBackend *blk, uint8_t *buf, | 23 | @@ -XXX,XX +XXX,XX @@ static void nvme_kick(NVMeQueuePair *q) |
19 | return ret; | 24 | q->need_kick = 0; |
20 | } | 25 | } |
21 | 26 | ||
22 | +static int calculate_rounded_image_size(BlockdevCreateOptionsVpc *vpc_opts, | 27 | -/* Find a free request element if any, otherwise: |
23 | + uint16_t *out_cyls, | 28 | - * a) if in coroutine context, try to wait for one to become available; |
24 | + uint8_t *out_heads, | 29 | - * b) if not in coroutine, return NULL; |
25 | + uint8_t *out_secs_per_cyl, | 30 | - */ |
26 | + int64_t *out_total_sectors, | 31 | -static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q) |
27 | + Error **errp) | 32 | +static NVMeRequest *nvme_get_free_req_nofail_locked(NVMeQueuePair *q) |
33 | { | ||
34 | NVMeRequest *req; | ||
35 | |||
36 | - qemu_mutex_lock(&q->lock); | ||
37 | - | ||
38 | - while (q->free_req_head == -1) { | ||
39 | - if (qemu_in_coroutine()) { | ||
40 | - trace_nvme_free_req_queue_wait(q->s, q->index); | ||
41 | - qemu_co_queue_wait(&q->free_req_queue, &q->lock); | ||
42 | - } else { | ||
43 | - qemu_mutex_unlock(&q->lock); | ||
44 | - return NULL; | ||
45 | - } | ||
46 | - } | ||
47 | - | ||
48 | req = &q->reqs[q->free_req_head]; | ||
49 | q->free_req_head = req->free_req_next; | ||
50 | req->free_req_next = -1; | ||
51 | - | ||
52 | - qemu_mutex_unlock(&q->lock); | ||
53 | return req; | ||
54 | } | ||
55 | |||
56 | +/* Return a free request element if any, otherwise return NULL. */ | ||
57 | +static NVMeRequest *nvme_get_free_req_nowait(NVMeQueuePair *q) | ||
28 | +{ | 58 | +{ |
29 | + int64_t total_size = vpc_opts->size; | 59 | + QEMU_LOCK_GUARD(&q->lock); |
30 | + uint16_t cyls = 0; | 60 | + if (q->free_req_head == -1) { |
31 | + uint8_t heads = 0; | 61 | + return NULL; |
32 | + uint8_t secs_per_cyl = 0; | 62 | + } |
33 | + int64_t total_sectors; | 63 | + return nvme_get_free_req_nofail_locked(q); |
34 | + int i; | 64 | +} |
35 | + | 65 | + |
36 | + /* | 66 | +/* |
37 | + * Calculate matching total_size and geometry. Increase the number of | 67 | + * Wait for a free request to become available if necessary, then |
38 | + * sectors requested until we get enough (or fail). This ensures that | 68 | + * return it. |
39 | + * qemu-img convert doesn't truncate images, but rather rounds up. | 69 | + */ |
40 | + * | 70 | +static coroutine_fn NVMeRequest *nvme_get_free_req(NVMeQueuePair *q) |
41 | + * If the image size can't be represented by a spec conformant CHS geometry, | 71 | +{ |
42 | + * we set the geometry to 65535 x 16 x 255 (CxHxS) sectors and use | 72 | + QEMU_LOCK_GUARD(&q->lock); |
43 | + * the image size from the VHD footer to calculate total_sectors. | 73 | + |
44 | + */ | 74 | + while (q->free_req_head == -1) { |
45 | + if (vpc_opts->force_size) { | 75 | + trace_nvme_free_req_queue_wait(q->s, q->index); |
46 | + /* This will force the use of total_size for sector count, below */ | 76 | + qemu_co_queue_wait(&q->free_req_queue, &q->lock); |
47 | + cyls = VHD_CHS_MAX_C; | ||
48 | + heads = VHD_CHS_MAX_H; | ||
49 | + secs_per_cyl = VHD_CHS_MAX_S; | ||
50 | + } else { | ||
51 | + total_sectors = MIN(VHD_MAX_GEOMETRY, total_size / BDRV_SECTOR_SIZE); | ||
52 | + for (i = 0; total_sectors > (int64_t)cyls * heads * secs_per_cyl; i++) { | ||
53 | + calculate_geometry(total_sectors + i, &cyls, &heads, &secs_per_cyl); | ||
54 | + } | ||
55 | + } | 77 | + } |
56 | + | 78 | + |
57 | + if ((int64_t)cyls * heads * secs_per_cyl == VHD_MAX_GEOMETRY) { | 79 | + return nvme_get_free_req_nofail_locked(q); |
58 | + total_sectors = total_size / BDRV_SECTOR_SIZE; | ||
59 | + /* Allow a maximum disk size of 2040 GiB */ | ||
60 | + if (total_sectors > VHD_MAX_SECTORS) { | ||
61 | + error_setg(errp, "Disk size is too large, max size is 2040 GiB"); | ||
62 | + return -EFBIG; | ||
63 | + } | ||
64 | + } else { | ||
65 | + total_sectors = (int64_t) cyls * heads * secs_per_cyl; | ||
66 | + } | ||
67 | + | ||
68 | + *out_total_sectors = total_sectors; | ||
69 | + if (out_cyls) { | ||
70 | + *out_cyls = cyls; | ||
71 | + *out_heads = heads; | ||
72 | + *out_secs_per_cyl = secs_per_cyl; | ||
73 | + } | ||
74 | + | ||
75 | + return 0; | ||
76 | +} | 80 | +} |
77 | + | 81 | + |
78 | static int coroutine_fn vpc_co_create(BlockdevCreateOptions *opts, | 82 | /* With q->lock */ |
79 | Error **errp) | 83 | static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req) |
80 | { | 84 | { |
81 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vpc_co_create(BlockdevCreateOptions *opts, | 85 | @@ -XXX,XX +XXX,XX @@ static int nvme_admin_cmd_sync(BlockDriverState *bs, NvmeCmd *cmd) |
82 | 86 | AioContext *aio_context = bdrv_get_aio_context(bs); | |
83 | uint8_t buf[1024]; | 87 | NVMeRequest *req; |
84 | VHDFooter *footer = (VHDFooter *) buf; | 88 | int ret = -EINPROGRESS; |
85 | - int i; | 89 | - req = nvme_get_free_req(q); |
86 | uint16_t cyls = 0; | 90 | + req = nvme_get_free_req_nowait(q); |
87 | uint8_t heads = 0; | 91 | if (!req) { |
88 | uint8_t secs_per_cyl = 0; | 92 | return -EBUSY; |
89 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vpc_co_create(BlockdevCreateOptions *opts, | ||
90 | } | 93 | } |
91 | blk_set_allow_write_beyond_eof(blk, true); | ||
92 | |||
93 | - /* | ||
94 | - * Calculate matching total_size and geometry. Increase the number of | ||
95 | - * sectors requested until we get enough (or fail). This ensures that | ||
96 | - * qemu-img convert doesn't truncate images, but rather rounds up. | ||
97 | - * | ||
98 | - * If the image size can't be represented by a spec conformant CHS geometry, | ||
99 | - * we set the geometry to 65535 x 16 x 255 (CxHxS) sectors and use | ||
100 | - * the image size from the VHD footer to calculate total_sectors. | ||
101 | - */ | ||
102 | - if (vpc_opts->force_size) { | ||
103 | - /* This will force the use of total_size for sector count, below */ | ||
104 | - cyls = VHD_CHS_MAX_C; | ||
105 | - heads = VHD_CHS_MAX_H; | ||
106 | - secs_per_cyl = VHD_CHS_MAX_S; | ||
107 | - } else { | ||
108 | - total_sectors = MIN(VHD_MAX_GEOMETRY, total_size / BDRV_SECTOR_SIZE); | ||
109 | - for (i = 0; total_sectors > (int64_t)cyls * heads * secs_per_cyl; i++) { | ||
110 | - calculate_geometry(total_sectors + i, &cyls, &heads, &secs_per_cyl); | ||
111 | - } | ||
112 | + /* Get geometry and check that it matches the image size*/ | ||
113 | + ret = calculate_rounded_image_size(vpc_opts, &cyls, &heads, &secs_per_cyl, | ||
114 | + &total_sectors, errp); | ||
115 | + if (ret < 0) { | ||
116 | + goto out; | ||
117 | } | ||
118 | |||
119 | - if ((int64_t)cyls * heads * secs_per_cyl == VHD_MAX_GEOMETRY) { | ||
120 | - total_sectors = total_size / BDRV_SECTOR_SIZE; | ||
121 | - /* Allow a maximum disk size of 2040 GiB */ | ||
122 | - if (total_sectors > VHD_MAX_SECTORS) { | ||
123 | - error_setg(errp, "Disk size is too large, max size is 2040 GiB"); | ||
124 | - ret = -EFBIG; | ||
125 | - goto out; | ||
126 | - } | ||
127 | - } else { | ||
128 | - total_sectors = (int64_t)cyls * heads * secs_per_cyl; | ||
129 | - total_size = total_sectors * BDRV_SECTOR_SIZE; | ||
130 | + if (total_size != total_sectors * BDRV_SECTOR_SIZE) { | ||
131 | + error_setg(errp, "The requested image size cannot be represented in " | ||
132 | + "CHS geometry"); | ||
133 | + error_append_hint(errp, "Try size=%llu or force-size=on (the " | ||
134 | + "latter makes the image incompatible with " | ||
135 | + "Virtual PC)", | ||
136 | + total_sectors * BDRV_SECTOR_SIZE); | ||
137 | + ret = -EINVAL; | ||
138 | + goto out; | ||
139 | } | ||
140 | |||
141 | /* Prepare the Hard Disk Footer */ | ||
142 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vpc_co_create_opts(const char *filename, | ||
143 | create_options->u.vpc.size = | ||
144 | ROUND_UP(create_options->u.vpc.size, BDRV_SECTOR_SIZE); | ||
145 | |||
146 | + if (!create_options->u.vpc.force_size) { | ||
147 | + int64_t total_sectors; | ||
148 | + ret = calculate_rounded_image_size(&create_options->u.vpc, NULL, NULL, | ||
149 | + NULL, &total_sectors, errp); | ||
150 | + if (ret < 0) { | ||
151 | + goto fail; | ||
152 | + } | ||
153 | + | ||
154 | + create_options->u.vpc.size = total_sectors * BDRV_SECTOR_SIZE; | ||
155 | + } | ||
156 | + | ||
157 | + | ||
158 | /* Create the vpc image (format layer) */ | ||
159 | ret = vpc_co_create(create_options, errp); | ||
160 | |||
161 | -- | 94 | -- |
162 | 2.13.6 | 95 | 2.37.3 |
163 | |||
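As a reading aid for the split above, here is a hedged caller-side sketch. nvme_get_free_req_nowait(), nvme_get_free_req(), NVMeQueuePair and NVMeRequest are taken from the patch; the two wrapper functions are hypothetical and would have to live in block/nvme.c, where the helpers are static.

```c
/* Hypothetical callers, for illustration only (assumed to sit in
 * block/nvme.c next to the static helpers introduced above). */

/* Non-coroutine path: must tolerate a NULL return. */
static int nvme_try_submit_sketch(NVMeQueuePair *q)
{
    NVMeRequest *req = nvme_get_free_req_nowait(q);
    if (!req) {
        return -EBUSY;      /* no free slot; the caller decides what to do */
    }
    /* ... fill in the NvmeCmd and submit it ... */
    return 0;
}

/* Coroutine path: may yield on free_req_queue, never returns NULL. */
static int coroutine_fn nvme_co_submit_sketch(NVMeQueuePair *q)
{
    NVMeRequest *req = nvme_get_free_req(q);
    /* ... fill in the NvmeCmd and submit it ... */
    (void)req;
    return 0;
}
```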
1 | From: Fam Zheng <famz@redhat.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Reopen flags are not synchronized according to the | 3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call |
4 | bdrv_reopen_queue_child precedence until bdrv_reopen_prepare. It is a | 4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to |
5 | bit too late: we already check the consistency in bdrv_check_perm before | 5 | functions where this holds. |
6 | that. | ||
7 | 6 | ||
8 | This fixes the bug that when bdrv_reopen a RO node as RW, the flags for | 7 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
9 | backing child are wrong. Before, we could recurse with flags.rw=1; now, | 8 | Message-Id: <20220922084924.201610-3-pbonzini@redhat.com> |
10 | role->inherit_options + update_flags_from_options will make sure to | 9 | [kwolf: Fixed up coding style] |
11 | clear the bit when necessary. Note that this will not clear an | 10 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
12 | explicitly set bit, as in the case of parallel block jobs (e.g. | ||
13 | test_stream_parallel in 030), because the explicit options include | ||
14 | 'read-only=false' (for an intermediate node used by a different job). | ||
15 | |||
16 | Signed-off-by: Fam Zheng <famz@redhat.com> | ||
17 | Reviewed-by: Max Reitz <mreitz@redhat.com> | ||
18 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
19 | --- | 12 | --- |
20 | block.c | 8 ++++++++ | 13 | block.c | 7 ++++--- |
21 | 1 file changed, 8 insertions(+) | 14 | block/block-backend.c | 10 +++++----- |
15 | block/io.c | 22 +++++++++++----------- | ||
16 | 3 files changed, 20 insertions(+), 19 deletions(-) | ||
22 | 17 | ||
23 | diff --git a/block.c b/block.c | 18 | diff --git a/block.c b/block.c |
24 | index XXXXXXX..XXXXXXX 100644 | 19 | index XXXXXXX..XXXXXXX 100644 |
25 | --- a/block.c | 20 | --- a/block.c |
26 | +++ b/block.c | 21 | +++ b/block.c |
27 | @@ -XXX,XX +XXX,XX @@ static BlockReopenQueue *bdrv_reopen_queue_child(BlockReopenQueue *bs_queue, | 22 | @@ -XXX,XX +XXX,XX @@ static int64_t create_file_fallback_truncate(BlockBackend *blk, |
28 | 23 | * Helper function for bdrv_create_file_fallback(): Zero the first | |
29 | /* Inherit from parent node */ | 24 | * sector to remove any potentially pre-existing image header. |
30 | if (parent_options) { | 25 | */ |
31 | + QemuOpts *opts; | 26 | -static int create_file_fallback_zero_first_sector(BlockBackend *blk, |
32 | + QDict *options_copy; | 27 | - int64_t current_size, |
33 | assert(!flags); | 28 | - Error **errp) |
34 | role->inherit_options(&flags, options, parent_flags, parent_options); | 29 | +static int coroutine_fn |
35 | + options_copy = qdict_clone_shallow(options); | 30 | +create_file_fallback_zero_first_sector(BlockBackend *blk, |
36 | + opts = qemu_opts_create(&bdrv_runtime_opts, NULL, 0, &error_abort); | 31 | + int64_t current_size, |
37 | + qemu_opts_absorb_qdict(opts, options_copy, NULL); | 32 | + Error **errp) |
38 | + update_flags_from_options(&flags, opts); | 33 | { |
39 | + qemu_opts_del(opts); | 34 | int64_t bytes_to_clear; |
40 | + QDECREF(options_copy); | 35 | int ret; |
41 | } | 36 | diff --git a/block/block-backend.c b/block/block-backend.c |
42 | 37 | index XXXXXXX..XXXXXXX 100644 | |
43 | /* Old values are used for options that aren't set yet */ | 38 | --- a/block/block-backend.c |
39 | +++ b/block/block-backend.c | ||
40 | @@ -XXX,XX +XXX,XX @@ static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, | ||
41 | return &acb->common; | ||
42 | } | ||
43 | |||
44 | -static void blk_aio_read_entry(void *opaque) | ||
45 | +static void coroutine_fn blk_aio_read_entry(void *opaque) | ||
46 | { | ||
47 | BlkAioEmAIOCB *acb = opaque; | ||
48 | BlkRwCo *rwco = &acb->rwco; | ||
49 | @@ -XXX,XX +XXX,XX @@ static void blk_aio_read_entry(void *opaque) | ||
50 | blk_aio_complete(acb); | ||
51 | } | ||
52 | |||
53 | -static void blk_aio_write_entry(void *opaque) | ||
54 | +static void coroutine_fn blk_aio_write_entry(void *opaque) | ||
55 | { | ||
56 | BlkAioEmAIOCB *acb = opaque; | ||
57 | BlkRwCo *rwco = &acb->rwco; | ||
58 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn blk_co_ioctl(BlockBackend *blk, unsigned long int req, | ||
59 | return ret; | ||
60 | } | ||
61 | |||
62 | -static void blk_aio_ioctl_entry(void *opaque) | ||
63 | +static void coroutine_fn blk_aio_ioctl_entry(void *opaque) | ||
64 | { | ||
65 | BlkAioEmAIOCB *acb = opaque; | ||
66 | BlkRwCo *rwco = &acb->rwco; | ||
67 | @@ -XXX,XX +XXX,XX @@ blk_co_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes) | ||
68 | return bdrv_co_pdiscard(blk->root, offset, bytes); | ||
69 | } | ||
70 | |||
71 | -static void blk_aio_pdiscard_entry(void *opaque) | ||
72 | +static void coroutine_fn blk_aio_pdiscard_entry(void *opaque) | ||
73 | { | ||
74 | BlkAioEmAIOCB *acb = opaque; | ||
75 | BlkRwCo *rwco = &acb->rwco; | ||
76 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn blk_co_do_flush(BlockBackend *blk) | ||
77 | return bdrv_co_flush(blk_bs(blk)); | ||
78 | } | ||
79 | |||
80 | -static void blk_aio_flush_entry(void *opaque) | ||
81 | +static void coroutine_fn blk_aio_flush_entry(void *opaque) | ||
82 | { | ||
83 | BlkAioEmAIOCB *acb = opaque; | ||
84 | BlkRwCo *rwco = &acb->rwco; | ||
85 | diff --git a/block/io.c b/block/io.c | ||
86 | index XXXXXXX..XXXXXXX 100644 | ||
87 | --- a/block/io.c | ||
88 | +++ b/block/io.c | ||
89 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req) | ||
90 | /** | ||
91 | * Add an active request to the tracked requests list | ||
92 | */ | ||
93 | -static void tracked_request_begin(BdrvTrackedRequest *req, | ||
94 | - BlockDriverState *bs, | ||
95 | - int64_t offset, | ||
96 | - int64_t bytes, | ||
97 | - enum BdrvTrackedRequestType type) | ||
98 | +static void coroutine_fn tracked_request_begin(BdrvTrackedRequest *req, | ||
99 | + BlockDriverState *bs, | ||
100 | + int64_t offset, | ||
101 | + int64_t bytes, | ||
102 | + enum BdrvTrackedRequestType type) | ||
103 | { | ||
104 | bdrv_check_request(offset, bytes, &error_abort); | ||
105 | |||
106 | @@ -XXX,XX +XXX,XX @@ static bool tracked_request_overlaps(BdrvTrackedRequest *req, | ||
107 | } | ||
108 | |||
109 | /* Called with self->bs->reqs_lock held */ | ||
110 | -static BdrvTrackedRequest * | ||
111 | +static coroutine_fn BdrvTrackedRequest * | ||
112 | bdrv_find_conflicting_request(BdrvTrackedRequest *self) | ||
113 | { | ||
114 | BdrvTrackedRequest *req; | ||
115 | @@ -XXX,XX +XXX,XX @@ static bool bdrv_init_padding(BlockDriverState *bs, | ||
116 | return true; | ||
117 | } | ||
118 | |||
119 | -static int bdrv_padding_rmw_read(BdrvChild *child, | ||
120 | - BdrvTrackedRequest *req, | ||
121 | - BdrvRequestPadding *pad, | ||
122 | - bool zero_middle) | ||
123 | +static coroutine_fn int bdrv_padding_rmw_read(BdrvChild *child, | ||
124 | + BdrvTrackedRequest *req, | ||
125 | + BdrvRequestPadding *pad, | ||
126 | + bool zero_middle) | ||
127 | { | ||
128 | QEMUIOVector local_qiov; | ||
129 | BlockDriverState *bs = child->bs; | ||
130 | @@ -XXX,XX +XXX,XX @@ out: | ||
131 | return ret; | ||
132 | } | ||
133 | |||
134 | -int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf) | ||
135 | +int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf) | ||
136 | { | ||
137 | BlockDriver *drv = bs->drv; | ||
138 | CoroutineIOCompletion co = { | ||
44 | -- | 139 | -- |
45 | 2.13.6 | 140 | 2.37.3 |
46 | |||
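The rule behind this run of annotation patches, that callers of a coroutine_fn must be coroutine_fn themselves or guard the call with qemu_in_coroutine(), can be summarized in a short sketch. It is not taken from any of the patches: the two example_* functions are invented, while blk_co_flush(), blk_flush() and qemu_in_coroutine() are existing QEMU APIs.

```c
/* Minimal sketch of the annotation rule, illustrative only. */

/* Allowed: a coroutine_fn may call another coroutine_fn directly. */
static int coroutine_fn example_co_sync(BlockBackend *blk)
{
    return blk_co_flush(blk);
}

/* A mixed-context function must guard the coroutine_fn call ... */
static int example_sync(BlockBackend *blk)
{
    if (qemu_in_coroutine()) {
        return blk_co_flush(blk);
    }
    /* ... and fall back to the synchronous wrapper otherwise. */
    return blk_flush(blk);
}
```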
1 | This adds the .bdrv_co_create driver callback to vhdx, which | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | enables image creation over QMP. | ||
3 | 2 | ||
3 | This is incorrect because qcow2_mark_clean() calls qcow2_flush_caches(). | ||
4 | qcow2_mark_clean() is called from non-coroutine context in | ||
5 | qcow2_inactivate() and qcow2_amend_options(). | ||
6 | |||
7 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
8 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
9 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
10 | Message-Id: <20220922084924.201610-4-pbonzini@redhat.com> | ||
11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
4 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
5 | Reviewed-by: Max Reitz <mreitz@redhat.com> | ||
6 | --- | 13 | --- |
7 | qapi/block-core.json | 40 +++++++++- | 14 | block/qcow2.h | 4 ++-- |
8 | block/vhdx.c | 216 ++++++++++++++++++++++++++++++++++++++------------- | 15 | block/qcow2-refcount.c | 4 ++-- |
9 | 2 files changed, 203 insertions(+), 53 deletions(-) | 16 | 2 files changed, 4 insertions(+), 4 deletions(-) |
10 | 17 | ||
11 | diff --git a/qapi/block-core.json b/qapi/block-core.json | 18 | diff --git a/block/qcow2.h b/block/qcow2.h |
12 | index XXXXXXX..XXXXXXX 100644 | 19 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/qapi/block-core.json | 20 | --- a/block/qcow2.h |
14 | +++ b/qapi/block-core.json | 21 | +++ b/block/qcow2.h |
15 | @@ -XXX,XX +XXX,XX @@ | 22 | @@ -XXX,XX +XXX,XX @@ void qcow2_free_any_cluster(BlockDriverState *bs, uint64_t l2_entry, |
16 | '*static': 'bool' } } | 23 | int qcow2_update_snapshot_refcount(BlockDriverState *bs, |
17 | 24 | int64_t l1_table_offset, int l1_size, int addend); | |
18 | ## | 25 | |
19 | +# @BlockdevVhdxSubformat: | 26 | -int coroutine_fn qcow2_flush_caches(BlockDriverState *bs); |
20 | +# | 27 | -int coroutine_fn qcow2_write_caches(BlockDriverState *bs); |
21 | +# @dynamic: Growing image file | 28 | +int qcow2_flush_caches(BlockDriverState *bs); |
22 | +# @fixed: Preallocated fixed-size image file | 29 | +int qcow2_write_caches(BlockDriverState *bs); |
23 | +# | 30 | int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res, |
24 | +# Since: 2.12 | 31 | BdrvCheckMode fix); |
25 | +## | 32 | |
26 | +{ 'enum': 'BlockdevVhdxSubformat', | 33 | diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c |
27 | + 'data': [ 'dynamic', 'fixed' ] } | ||
28 | + | ||
29 | +## | ||
30 | +# @BlockdevCreateOptionsVhdx: | ||
31 | +# | ||
32 | +# Driver specific image creation options for vhdx. | ||
33 | +# | ||
34 | +# @file Node to create the image format on | ||
35 | +# @size Size of the virtual disk in bytes | ||
36 | +# @log-size Log size in bytes, must be a multiple of 1 MB | ||
37 | +# (default: 1 MB) | ||
38 | +# @block-size Block size in bytes, must be a multiple of 1 MB and not | ||
39 | +# larger than 256 MB (default: automatically choose a block | ||
40 | +# size depending on the image size) | ||
41 | +# @subformat vhdx subformat (default: dynamic) | ||
42 | +# @block-state-zero Force use of payload blocks of type 'ZERO'. Non-standard, | ||
43 | +# but default. Do not set to 'off' when using 'qemu-img | ||
44 | +# convert' with subformat=dynamic. | ||
45 | +# | ||
46 | +# Since: 2.12 | ||
47 | +## | ||
48 | +{ 'struct': 'BlockdevCreateOptionsVhdx', | ||
49 | + 'data': { 'file': 'BlockdevRef', | ||
50 | + 'size': 'size', | ||
51 | + '*log-size': 'size', | ||
52 | + '*block-size': 'size', | ||
53 | + '*subformat': 'BlockdevVhdxSubformat', | ||
54 | + '*block-state-zero': 'bool' } } | ||
55 | + | ||
56 | +## | ||
57 | # @BlockdevCreateNotSupported: | ||
58 | # | ||
59 | # This is used for all drivers that don't support creating images. | ||
60 | @@ -XXX,XX +XXX,XX @@ | ||
61 | 'ssh': 'BlockdevCreateOptionsSsh', | ||
62 | 'throttle': 'BlockdevCreateNotSupported', | ||
63 | 'vdi': 'BlockdevCreateOptionsVdi', | ||
64 | - 'vhdx': 'BlockdevCreateNotSupported', | ||
65 | + 'vhdx': 'BlockdevCreateOptionsVhdx', | ||
66 | 'vmdk': 'BlockdevCreateNotSupported', | ||
67 | 'vpc': 'BlockdevCreateNotSupported', | ||
68 | 'vvfat': 'BlockdevCreateNotSupported', | ||
69 | diff --git a/block/vhdx.c b/block/vhdx.c | ||
70 | index XXXXXXX..XXXXXXX 100644 | 34 | index XXXXXXX..XXXXXXX 100644 |
71 | --- a/block/vhdx.c | 35 | --- a/block/qcow2-refcount.c |
72 | +++ b/block/vhdx.c | 36 | +++ b/block/qcow2-refcount.c |
73 | @@ -XXX,XX +XXX,XX @@ | 37 | @@ -XXX,XX +XXX,XX @@ void qcow2_free_any_cluster(BlockDriverState *bs, uint64_t l2_entry, |
74 | #include "block/vhdx.h" | 38 | } |
75 | #include "migration/blocker.h" | 39 | } |
76 | #include "qemu/uuid.h" | 40 | |
77 | +#include "qapi/qmp/qdict.h" | 41 | -int coroutine_fn qcow2_write_caches(BlockDriverState *bs) |
78 | +#include "qapi/qobject-input-visitor.h" | 42 | +int qcow2_write_caches(BlockDriverState *bs) |
79 | +#include "qapi/qapi-visit-block-core.h" | ||
80 | |||
81 | /* Options for VHDX creation */ | ||
82 | |||
83 | @@ -XXX,XX +XXX,XX @@ typedef enum VHDXImageType { | ||
84 | VHDX_TYPE_DIFFERENCING, /* Currently unsupported */ | ||
85 | } VHDXImageType; | ||
86 | |||
87 | +static QemuOptsList vhdx_create_opts; | ||
88 | + | ||
89 | /* Several metadata and region table data entries are identified by | ||
90 | * guids in a MS-specific GUID format. */ | ||
91 | |||
92 | @@ -XXX,XX +XXX,XX @@ exit: | ||
93 | * .---- ~ ----------- ~ ------------ ~ ---------------- ~ -----------. | ||
94 | * 1MB | ||
95 | */ | ||
96 | -static int coroutine_fn vhdx_co_create_opts(const char *filename, QemuOpts *opts, | ||
97 | - Error **errp) | ||
98 | +static int coroutine_fn vhdx_co_create(BlockdevCreateOptions *opts, | ||
99 | + Error **errp) | ||
100 | { | 43 | { |
101 | + BlockdevCreateOptionsVhdx *vhdx_opts; | 44 | BDRVQcow2State *s = bs->opaque; |
102 | + BlockBackend *blk = NULL; | 45 | int ret; |
103 | + BlockDriverState *bs = NULL; | 46 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn qcow2_write_caches(BlockDriverState *bs) |
104 | + | 47 | return 0; |
105 | int ret = 0; | ||
106 | - uint64_t image_size = (uint64_t) 2 * GiB; | ||
107 | - uint32_t log_size = 1 * MiB; | ||
108 | - uint32_t block_size = 0; | ||
109 | + uint64_t image_size; | ||
110 | + uint32_t log_size; | ||
111 | + uint32_t block_size; | ||
112 | uint64_t signature; | ||
113 | uint64_t metadata_offset; | ||
114 | bool use_zero_blocks = false; | ||
115 | |||
116 | gunichar2 *creator = NULL; | ||
117 | glong creator_items; | ||
118 | - BlockBackend *blk; | ||
119 | - char *type = NULL; | ||
120 | VHDXImageType image_type; | ||
121 | - Error *local_err = NULL; | ||
122 | |||
123 | - image_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), | ||
124 | - BDRV_SECTOR_SIZE); | ||
125 | - log_size = qemu_opt_get_size_del(opts, VHDX_BLOCK_OPT_LOG_SIZE, 0); | ||
126 | - block_size = qemu_opt_get_size_del(opts, VHDX_BLOCK_OPT_BLOCK_SIZE, 0); | ||
127 | - type = qemu_opt_get_del(opts, BLOCK_OPT_SUBFMT); | ||
128 | - use_zero_blocks = qemu_opt_get_bool_del(opts, VHDX_BLOCK_OPT_ZERO, true); | ||
129 | + assert(opts->driver == BLOCKDEV_DRIVER_VHDX); | ||
130 | + vhdx_opts = &opts->u.vhdx; | ||
131 | |||
132 | + /* Validate options and set default values */ | ||
133 | + image_size = vhdx_opts->size; | ||
134 | if (image_size > VHDX_MAX_IMAGE_SIZE) { | ||
135 | error_setg_errno(errp, EINVAL, "Image size too large; max of 64TB"); | ||
136 | - ret = -EINVAL; | ||
137 | - goto exit; | ||
138 | + return -EINVAL; | ||
139 | } | ||
140 | |||
141 | - if (type == NULL) { | ||
142 | - type = g_strdup("dynamic"); | ||
143 | + if (!vhdx_opts->has_log_size) { | ||
144 | + log_size = DEFAULT_LOG_SIZE; | ||
145 | + } else { | ||
146 | + log_size = vhdx_opts->log_size; | ||
147 | + } | ||
148 | + if (log_size < MiB || (log_size % MiB) != 0) { | ||
149 | + error_setg_errno(errp, EINVAL, "Log size must be a multiple of 1 MB"); | ||
150 | + return -EINVAL; | ||
151 | } | ||
152 | |||
153 | - if (!strcmp(type, "dynamic")) { | ||
154 | + if (!vhdx_opts->has_block_state_zero) { | ||
155 | + use_zero_blocks = true; | ||
156 | + } else { | ||
157 | + use_zero_blocks = vhdx_opts->block_state_zero; | ||
158 | + } | ||
159 | + | ||
160 | + if (!vhdx_opts->has_subformat) { | ||
161 | + vhdx_opts->subformat = BLOCKDEV_VHDX_SUBFORMAT_DYNAMIC; | ||
162 | + } | ||
163 | + | ||
164 | + switch (vhdx_opts->subformat) { | ||
165 | + case BLOCKDEV_VHDX_SUBFORMAT_DYNAMIC: | ||
166 | image_type = VHDX_TYPE_DYNAMIC; | ||
167 | - } else if (!strcmp(type, "fixed")) { | ||
168 | + break; | ||
169 | + case BLOCKDEV_VHDX_SUBFORMAT_FIXED: | ||
170 | image_type = VHDX_TYPE_FIXED; | ||
171 | - } else if (!strcmp(type, "differencing")) { | ||
172 | - error_setg_errno(errp, ENOTSUP, | ||
173 | - "Differencing files not yet supported"); | ||
174 | - ret = -ENOTSUP; | ||
175 | - goto exit; | ||
176 | - } else { | ||
177 | - error_setg(errp, "Invalid subformat '%s'", type); | ||
178 | - ret = -EINVAL; | ||
179 | - goto exit; | ||
180 | + break; | ||
181 | + default: | ||
182 | + g_assert_not_reached(); | ||
183 | } | ||
184 | |||
185 | /* These are pretty arbitrary, and mainly designed to keep the BAT | ||
186 | * size reasonable to load into RAM */ | ||
187 | - if (block_size == 0) { | ||
188 | + if (vhdx_opts->has_block_size) { | ||
189 | + block_size = vhdx_opts->block_size; | ||
190 | + } else { | ||
191 | if (image_size > 32 * TiB) { | ||
192 | block_size = 64 * MiB; | ||
193 | } else if (image_size > (uint64_t) 100 * GiB) { | ||
194 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vhdx_co_create_opts(const char *filename, QemuOpts *opts | ||
195 | } | ||
196 | } | ||
197 | |||
198 | - | ||
199 | - /* make the log size close to what was specified, but must be | ||
200 | - * min 1MB, and multiple of 1MB */ | ||
201 | - log_size = ROUND_UP(log_size, MiB); | ||
202 | - | ||
203 | - block_size = ROUND_UP(block_size, MiB); | ||
204 | - block_size = block_size > VHDX_BLOCK_SIZE_MAX ? VHDX_BLOCK_SIZE_MAX : | ||
205 | - block_size; | ||
206 | - | ||
207 | - ret = bdrv_create_file(filename, opts, &local_err); | ||
208 | - if (ret < 0) { | ||
209 | - error_propagate(errp, local_err); | ||
210 | - goto exit; | ||
211 | + if (block_size < MiB || (block_size % MiB) != 0) { | ||
212 | + error_setg_errno(errp, EINVAL, "Block size must be a multiple of 1 MB"); | ||
213 | + return -EINVAL; | ||
214 | + } | ||
215 | + if (block_size > VHDX_BLOCK_SIZE_MAX) { | ||
216 | + error_setg_errno(errp, EINVAL, "Block size must not exceed %d", | ||
217 | + VHDX_BLOCK_SIZE_MAX); | ||
218 | + return -EINVAL; | ||
219 | } | ||
220 | |||
221 | - blk = blk_new_open(filename, NULL, NULL, | ||
222 | - BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, | ||
223 | - &local_err); | ||
224 | - if (blk == NULL) { | ||
225 | - error_propagate(errp, local_err); | ||
226 | - ret = -EIO; | ||
227 | - goto exit; | ||
228 | + /* Create BlockBackend to write to the image */ | ||
229 | + bs = bdrv_open_blockdev_ref(vhdx_opts->file, errp); | ||
230 | + if (bs == NULL) { | ||
231 | + return -EIO; | ||
232 | } | ||
233 | |||
234 | + blk = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL); | ||
235 | + ret = blk_insert_bs(blk, bs, errp); | ||
236 | + if (ret < 0) { | ||
237 | + goto delete_and_exit; | ||
238 | + } | ||
239 | blk_set_allow_write_beyond_eof(blk, true); | ||
240 | |||
241 | /* Create (A) */ | ||
242 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vhdx_co_create_opts(const char *filename, QemuOpts *opts | ||
243 | |||
244 | delete_and_exit: | ||
245 | blk_unref(blk); | ||
246 | -exit: | ||
247 | - g_free(type); | ||
248 | + bdrv_unref(bs); | ||
249 | g_free(creator); | ||
250 | return ret; | ||
251 | } | 48 | } |
252 | 49 | ||
253 | +static int coroutine_fn vhdx_co_create_opts(const char *filename, | 50 | -int coroutine_fn qcow2_flush_caches(BlockDriverState *bs) |
254 | + QemuOpts *opts, | 51 | +int qcow2_flush_caches(BlockDriverState *bs) |
255 | + Error **errp) | 52 | { |
256 | +{ | 53 | int ret = qcow2_write_caches(bs); |
257 | + BlockdevCreateOptions *create_options = NULL; | 54 | if (ret < 0) { |
258 | + QDict *qdict = NULL; | ||
259 | + QObject *qobj; | ||
260 | + Visitor *v; | ||
261 | + BlockDriverState *bs = NULL; | ||
262 | + Error *local_err = NULL; | ||
263 | + int ret; | ||
264 | + | ||
265 | + static const QDictRenames opt_renames[] = { | ||
266 | + { VHDX_BLOCK_OPT_LOG_SIZE, "log-size" }, | ||
267 | + { VHDX_BLOCK_OPT_BLOCK_SIZE, "block-size" }, | ||
268 | + { VHDX_BLOCK_OPT_ZERO, "block-state-zero" }, | ||
269 | + { NULL, NULL }, | ||
270 | + }; | ||
271 | + | ||
272 | + /* Parse options and convert legacy syntax */ | ||
273 | + qdict = qemu_opts_to_qdict_filtered(opts, NULL, &vhdx_create_opts, true); | ||
274 | + | ||
275 | + if (!qdict_rename_keys(qdict, opt_renames, errp)) { | ||
276 | + ret = -EINVAL; | ||
277 | + goto fail; | ||
278 | + } | ||
279 | + | ||
280 | + /* Create and open the file (protocol layer) */ | ||
281 | + ret = bdrv_create_file(filename, opts, &local_err); | ||
282 | + if (ret < 0) { | ||
283 | + error_propagate(errp, local_err); | ||
284 | + goto fail; | ||
285 | + } | ||
286 | + | ||
287 | + bs = bdrv_open(filename, NULL, NULL, | ||
288 | + BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp); | ||
289 | + if (bs == NULL) { | ||
290 | + ret = -EIO; | ||
291 | + goto fail; | ||
292 | + } | ||
293 | + | ||
294 | + /* Now get the QAPI type BlockdevCreateOptions */ | ||
295 | + qdict_put_str(qdict, "driver", "vhdx"); | ||
296 | + qdict_put_str(qdict, "file", bs->node_name); | ||
297 | + | ||
298 | + qobj = qdict_crumple(qdict, errp); | ||
299 | + QDECREF(qdict); | ||
300 | + qdict = qobject_to_qdict(qobj); | ||
301 | + if (qdict == NULL) { | ||
302 | + ret = -EINVAL; | ||
303 | + goto fail; | ||
304 | + } | ||
305 | + | ||
306 | + v = qobject_input_visitor_new_keyval(QOBJECT(qdict)); | ||
307 | + visit_type_BlockdevCreateOptions(v, NULL, &create_options, &local_err); | ||
308 | + visit_free(v); | ||
309 | + | ||
310 | + if (local_err) { | ||
311 | + error_propagate(errp, local_err); | ||
312 | + ret = -EINVAL; | ||
313 | + goto fail; | ||
314 | + } | ||
315 | + | ||
316 | + /* Silently round up sizes: | ||
317 | + * The image size is rounded to 512 bytes. Make the block and log size | ||
318 | + * close to what was specified, but must be at least 1MB, and a multiple of | ||
319 | + * 1 MB. Also respect VHDX_BLOCK_SIZE_MAX for block sizes. block_size = 0 | ||
320 | + * means auto, which is represented by a missing key in QAPI. */ | ||
321 | + assert(create_options->driver == BLOCKDEV_DRIVER_VHDX); | ||
322 | + create_options->u.vhdx.size = | ||
323 | + ROUND_UP(create_options->u.vhdx.size, BDRV_SECTOR_SIZE); | ||
324 | + | ||
325 | + if (create_options->u.vhdx.has_log_size) { | ||
326 | + create_options->u.vhdx.log_size = | ||
327 | + ROUND_UP(create_options->u.vhdx.log_size, MiB); | ||
328 | + } | ||
329 | + if (create_options->u.vhdx.has_block_size) { | ||
330 | + create_options->u.vhdx.block_size = | ||
331 | + ROUND_UP(create_options->u.vhdx.block_size, MiB); | ||
332 | + | ||
333 | + if (create_options->u.vhdx.block_size == 0) { | ||
334 | + create_options->u.vhdx.has_block_size = false; | ||
335 | + } | ||
336 | + if (create_options->u.vhdx.block_size > VHDX_BLOCK_SIZE_MAX) { | ||
337 | + create_options->u.vhdx.block_size = VHDX_BLOCK_SIZE_MAX; | ||
338 | + } | ||
339 | + } | ||
340 | + | ||
341 | + /* Create the vhdx image (format layer) */ | ||
342 | + ret = vhdx_co_create(create_options, errp); | ||
343 | + | ||
344 | +fail: | ||
345 | + QDECREF(qdict); | ||
346 | + bdrv_unref(bs); | ||
347 | + qapi_free_BlockdevCreateOptions(create_options); | ||
348 | + return ret; | ||
349 | +} | ||
350 | + | ||
351 | /* If opened r/w, the VHDX driver will automatically replay the log, | ||
352 | * if one is present, inside the vhdx_open() call. | ||
353 | * | ||
354 | @@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_vhdx = { | ||
355 | .bdrv_child_perm = bdrv_format_default_perms, | ||
356 | .bdrv_co_readv = vhdx_co_readv, | ||
357 | .bdrv_co_writev = vhdx_co_writev, | ||
358 | + .bdrv_co_create = vhdx_co_create, | ||
359 | .bdrv_co_create_opts = vhdx_co_create_opts, | ||
360 | .bdrv_get_info = vhdx_get_info, | ||
361 | .bdrv_co_check = vhdx_co_check, | ||
362 | -- | 55 | -- |
363 | 2.13.6 | 56 | 2.37.3 |
364 | |||
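For context, the offending call chain from the commit message can be condensed into a sketch. This is not the real code: qcow2_inactivate_sketch() stands in for qcow2_inactivate() and qcow2_amend_options(), both of which run outside coroutine context, which is why the coroutine_fn annotations had to be dropped.

```c
/* Condensed, illustrative call chain (assumed to live in block/qcow2.c). */
static int qcow2_inactivate_sketch(BlockDriverState *bs)
{
    /* Runs in plain (non-coroutine) context, e.g. during inactivation. */
    int ret = qcow2_mark_clean(bs);  /* internally calls qcow2_flush_caches() */

    /* With coroutine_fn still on qcow2_flush_caches(), this chain would
     * violate the annotation, hence its removal in the patch above. */
    return ret;
}
```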
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | nbd_co_establish_connection_cancel() cancels a coroutine but is not called | ||
4 | from coroutine context itself, for example in nbd_cancel_in_flight() | ||
5 | and in timer callbacks reconnect_delay_timer_cb() and open_timer_cb(). | ||
6 | |||
7 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
8 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
9 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
10 | Message-Id: <20220922084924.201610-5-pbonzini@redhat.com> | ||
11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
13 | --- | ||
14 | include/block/nbd.h | 2 +- | ||
15 | 1 file changed, 1 insertion(+), 1 deletion(-) | ||
16 | |||
17 | diff --git a/include/block/nbd.h b/include/block/nbd.h | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/include/block/nbd.h | ||
20 | +++ b/include/block/nbd.h | ||
21 | @@ -XXX,XX +XXX,XX @@ QIOChannel *coroutine_fn | ||
22 | nbd_co_establish_connection(NBDClientConnection *conn, NBDExportInfo *info, | ||
23 | bool blocking, Error **errp); | ||
24 | |||
25 | -void coroutine_fn nbd_co_establish_connection_cancel(NBDClientConnection *conn); | ||
26 | +void nbd_co_establish_connection_cancel(NBDClientConnection *conn); | ||
27 | |||
28 | #endif | ||
29 | -- | ||
30 | 2.37.3 |
1 | This adds the .bdrv_co_create driver callback to qcow, which | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | enables image creation over QMP. | ||
3 | 2 | ||
3 | qemu_coroutine_get_aio_context inspects a coroutine, but it does | ||
4 | not have to be called from the coroutine itself (or from any | ||
5 | coroutine). | ||
6 | |||
7 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
8 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
9 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
10 | Message-Id: <20220922084924.201610-6-pbonzini@redhat.com> | ||
11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
4 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
5 | Reviewed-by: Max Reitz <mreitz@redhat.com> | ||
6 | Reviewed-by: Jeff Cody <jcody@redhat.com> | ||
7 | --- | 13 | --- |
8 | qapi/block-core.json | 21 +++++- | 14 | include/qemu/coroutine.h | 2 +- |
9 | block/qcow.c | 196 ++++++++++++++++++++++++++++++++++----------------- | 15 | util/qemu-coroutine.c | 2 +- |
10 | 2 files changed, 150 insertions(+), 67 deletions(-) | 16 | 2 files changed, 2 insertions(+), 2 deletions(-) |
11 | 17 | ||
12 | diff --git a/qapi/block-core.json b/qapi/block-core.json | 18 | diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h |
13 | index XXXXXXX..XXXXXXX 100644 | 19 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/qapi/block-core.json | 20 | --- a/include/qemu/coroutine.h |
15 | +++ b/qapi/block-core.json | 21 | +++ b/include/qemu/coroutine.h |
16 | @@ -XXX,XX +XXX,XX @@ | 22 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_coroutine_yield(void); |
17 | '*cluster-size': 'size' } } | 23 | /** |
18 | 24 | * Get the AioContext of the given coroutine | |
19 | ## | 25 | */ |
20 | +# @BlockdevCreateOptionsQcow: | 26 | -AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co); |
21 | +# | 27 | +AioContext *qemu_coroutine_get_aio_context(Coroutine *co); |
22 | +# Driver specific image creation options for qcow. | 28 | |
23 | +# | 29 | /** |
24 | +# @file Node to create the image format on | 30 | * Get the currently executing coroutine |
25 | +# @size Size of the virtual disk in bytes | 31 | diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c |
26 | +# @backing-file File name of the backing file if a backing file | ||
27 | +# should be used | ||
28 | +# @encrypt Encryption options if the image should be encrypted | ||
29 | +# | ||
30 | +# Since: 2.12 | ||
31 | +## | ||
32 | +{ 'struct': 'BlockdevCreateOptionsQcow', | ||
33 | + 'data': { 'file': 'BlockdevRef', | ||
34 | + 'size': 'size', | ||
35 | + '*backing-file': 'str', | ||
36 | + '*encrypt': 'QCryptoBlockCreateOptions' } } | ||
37 | + | ||
38 | +## | ||
39 | # @BlockdevQcow2Version: | ||
40 | # | ||
41 | # @v2: The original QCOW2 format as introduced in qemu 0.10 (version 2) | ||
42 | @@ -XXX,XX +XXX,XX @@ | ||
43 | 'null-co': 'BlockdevCreateNotSupported', | ||
44 | 'nvme': 'BlockdevCreateNotSupported', | ||
45 | 'parallels': 'BlockdevCreateOptionsParallels', | ||
46 | + 'qcow': 'BlockdevCreateOptionsQcow', | ||
47 | 'qcow2': 'BlockdevCreateOptionsQcow2', | ||
48 | - 'qcow': 'BlockdevCreateNotSupported', | ||
49 | 'qed': 'BlockdevCreateNotSupported', | ||
50 | 'quorum': 'BlockdevCreateNotSupported', | ||
51 | 'raw': 'BlockdevCreateNotSupported', | ||
52 | diff --git a/block/qcow.c b/block/qcow.c | ||
53 | index XXXXXXX..XXXXXXX 100644 | 32 | index XXXXXXX..XXXXXXX 100644 |
54 | --- a/block/qcow.c | 33 | --- a/util/qemu-coroutine.c |
55 | +++ b/block/qcow.c | 34 | +++ b/util/qemu-coroutine.c |
56 | @@ -XXX,XX +XXX,XX @@ | 35 | @@ -XXX,XX +XXX,XX @@ bool qemu_coroutine_entered(Coroutine *co) |
57 | #include <zlib.h> | 36 | return co->caller; |
58 | #include "qapi/qmp/qdict.h" | ||
59 | #include "qapi/qmp/qstring.h" | ||
60 | +#include "qapi/qobject-input-visitor.h" | ||
61 | +#include "qapi/qapi-visit-block-core.h" | ||
62 | #include "crypto/block.h" | ||
63 | #include "migration/blocker.h" | ||
64 | #include "block/crypto.h" | ||
65 | @@ -XXX,XX +XXX,XX @@ typedef struct BDRVQcowState { | ||
66 | Error *migration_blocker; | ||
67 | } BDRVQcowState; | ||
68 | |||
69 | +static QemuOptsList qcow_create_opts; | ||
70 | + | ||
71 | static int decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset); | ||
72 | |||
73 | static int qcow_probe(const uint8_t *buf, int buf_size, const char *filename) | ||
74 | @@ -XXX,XX +XXX,XX @@ static void qcow_close(BlockDriverState *bs) | ||
75 | error_free(s->migration_blocker); | ||
76 | } | 37 | } |
77 | 38 | ||
78 | -static int coroutine_fn qcow_co_create_opts(const char *filename, QemuOpts *opts, | 39 | -AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co) |
79 | - Error **errp) | 40 | +AioContext *qemu_coroutine_get_aio_context(Coroutine *co) |
80 | +static int coroutine_fn qcow_co_create(BlockdevCreateOptions *opts, | ||
81 | + Error **errp) | ||
82 | { | 41 | { |
83 | + BlockdevCreateOptionsQcow *qcow_opts; | 42 | return co->ctx; |
84 | int header_size, backing_filename_len, l1_size, shift, i; | ||
85 | QCowHeader header; | ||
86 | uint8_t *tmp; | ||
87 | int64_t total_size = 0; | ||
88 | - char *backing_file = NULL; | ||
89 | - Error *local_err = NULL; | ||
90 | int ret; | ||
91 | + BlockDriverState *bs; | ||
92 | BlockBackend *qcow_blk; | ||
93 | - char *encryptfmt = NULL; | ||
94 | - QDict *options; | ||
95 | - QDict *encryptopts = NULL; | ||
96 | - QCryptoBlockCreateOptions *crypto_opts = NULL; | ||
97 | QCryptoBlock *crypto = NULL; | ||
98 | |||
99 | - /* Read out options */ | ||
100 | - total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), | ||
101 | - BDRV_SECTOR_SIZE); | ||
102 | + assert(opts->driver == BLOCKDEV_DRIVER_QCOW); | ||
103 | + qcow_opts = &opts->u.qcow; | ||
104 | + | ||
105 | + /* Sanity checks */ | ||
106 | + total_size = qcow_opts->size; | ||
107 | if (total_size == 0) { | ||
108 | error_setg(errp, "Image size is too small, cannot be zero length"); | ||
109 | - ret = -EINVAL; | ||
110 | - goto cleanup; | ||
111 | + return -EINVAL; | ||
112 | } | ||
113 | |||
114 | - backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE); | ||
115 | - encryptfmt = qemu_opt_get_del(opts, BLOCK_OPT_ENCRYPT_FORMAT); | ||
116 | - if (encryptfmt) { | ||
117 | - if (qemu_opt_get(opts, BLOCK_OPT_ENCRYPT)) { | ||
118 | - error_setg(errp, "Options " BLOCK_OPT_ENCRYPT " and " | ||
119 | - BLOCK_OPT_ENCRYPT_FORMAT " are mutually exclusive"); | ||
120 | - ret = -EINVAL; | ||
121 | - goto cleanup; | ||
122 | - } | ||
123 | - } else if (qemu_opt_get_bool_del(opts, BLOCK_OPT_ENCRYPT, false)) { | ||
124 | - encryptfmt = g_strdup("aes"); | ||
125 | + if (qcow_opts->has_encrypt && | ||
126 | + qcow_opts->encrypt->format != Q_CRYPTO_BLOCK_FORMAT_QCOW) | ||
127 | + { | ||
128 | + error_setg(errp, "Unsupported encryption format"); | ||
129 | + return -EINVAL; | ||
130 | } | ||
131 | |||
132 | - ret = bdrv_create_file(filename, opts, &local_err); | ||
133 | - if (ret < 0) { | ||
134 | - error_propagate(errp, local_err); | ||
135 | - goto cleanup; | ||
136 | + /* Create BlockBackend to write to the image */ | ||
137 | + bs = bdrv_open_blockdev_ref(qcow_opts->file, errp); | ||
138 | + if (bs == NULL) { | ||
139 | + return -EIO; | ||
140 | } | ||
141 | |||
142 | - qcow_blk = blk_new_open(filename, NULL, NULL, | ||
143 | - BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, | ||
144 | - &local_err); | ||
145 | - if (qcow_blk == NULL) { | ||
146 | - error_propagate(errp, local_err); | ||
147 | - ret = -EIO; | ||
148 | - goto cleanup; | ||
149 | + qcow_blk = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL); | ||
150 | + ret = blk_insert_bs(qcow_blk, bs, errp); | ||
151 | + if (ret < 0) { | ||
152 | + goto exit; | ||
153 | } | ||
154 | - | ||
155 | blk_set_allow_write_beyond_eof(qcow_blk, true); | ||
156 | |||
157 | + /* Create image format */ | ||
158 | ret = blk_truncate(qcow_blk, 0, PREALLOC_MODE_OFF, errp); | ||
159 | if (ret < 0) { | ||
160 | goto exit; | ||
161 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn qcow_co_create_opts(const char *filename, QemuOpts *opts | ||
162 | header.size = cpu_to_be64(total_size); | ||
163 | header_size = sizeof(header); | ||
164 | backing_filename_len = 0; | ||
165 | - if (backing_file) { | ||
166 | - if (strcmp(backing_file, "fat:")) { | ||
167 | + if (qcow_opts->has_backing_file) { | ||
168 | + if (strcmp(qcow_opts->backing_file, "fat:")) { | ||
169 | header.backing_file_offset = cpu_to_be64(header_size); | ||
170 | - backing_filename_len = strlen(backing_file); | ||
171 | + backing_filename_len = strlen(qcow_opts->backing_file); | ||
172 | header.backing_file_size = cpu_to_be32(backing_filename_len); | ||
173 | header_size += backing_filename_len; | ||
174 | } else { | ||
175 | /* special backing file for vvfat */ | ||
176 | - g_free(backing_file); | ||
177 | - backing_file = NULL; | ||
178 | + qcow_opts->has_backing_file = false; | ||
179 | } | ||
180 | header.cluster_bits = 9; /* 512 byte cluster to avoid copying | ||
181 | unmodified sectors */ | ||
182 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn qcow_co_create_opts(const char *filename, QemuOpts *opts | ||
183 | |||
184 | header.l1_table_offset = cpu_to_be64(header_size); | ||
185 | |||
186 | - options = qemu_opts_to_qdict(opts, NULL); | ||
187 | - qdict_extract_subqdict(options, &encryptopts, "encrypt."); | ||
188 | - QDECREF(options); | ||
189 | - if (encryptfmt) { | ||
190 | - if (!g_str_equal(encryptfmt, "aes")) { | ||
191 | - error_setg(errp, "Unknown encryption format '%s', expected 'aes'", | ||
192 | - encryptfmt); | ||
193 | - ret = -EINVAL; | ||
194 | - goto exit; | ||
195 | - } | ||
196 | + if (qcow_opts->has_encrypt) { | ||
197 | header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES); | ||
198 | |||
199 | - crypto_opts = block_crypto_create_opts_init( | ||
200 | - Q_CRYPTO_BLOCK_FORMAT_QCOW, encryptopts, errp); | ||
201 | - if (!crypto_opts) { | ||
202 | - ret = -EINVAL; | ||
203 | - goto exit; | ||
204 | - } | ||
205 | - | ||
206 | - crypto = qcrypto_block_create(crypto_opts, "encrypt.", | ||
207 | + crypto = qcrypto_block_create(qcow_opts->encrypt, "encrypt.", | ||
208 | NULL, NULL, NULL, errp); | ||
209 | if (!crypto) { | ||
210 | ret = -EINVAL; | ||
211 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn qcow_co_create_opts(const char *filename, QemuOpts *opts | ||
212 | goto exit; | ||
213 | } | ||
214 | |||
215 | - if (backing_file) { | ||
216 | + if (qcow_opts->has_backing_file) { | ||
217 | ret = blk_pwrite(qcow_blk, sizeof(header), | ||
218 | - backing_file, backing_filename_len, 0); | ||
219 | + qcow_opts->backing_file, backing_filename_len, 0); | ||
220 | if (ret != backing_filename_len) { | ||
221 | goto exit; | ||
222 | } | ||
223 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn qcow_co_create_opts(const char *filename, QemuOpts *opts | ||
224 | ret = 0; | ||
225 | exit: | ||
226 | blk_unref(qcow_blk); | ||
227 | -cleanup: | ||
228 | - QDECREF(encryptopts); | ||
229 | - g_free(encryptfmt); | ||
230 | qcrypto_block_free(crypto); | ||
231 | - qapi_free_QCryptoBlockCreateOptions(crypto_opts); | ||
232 | - g_free(backing_file); | ||
233 | + return ret; | ||
234 | +} | ||
235 | + | ||
236 | +static int coroutine_fn qcow_co_create_opts(const char *filename, | ||
237 | + QemuOpts *opts, Error **errp) | ||
238 | +{ | ||
239 | + BlockdevCreateOptions *create_options = NULL; | ||
240 | + BlockDriverState *bs = NULL; | ||
241 | + QDict *qdict = NULL; | ||
242 | + QObject *qobj; | ||
243 | + Visitor *v; | ||
244 | + const char *val; | ||
245 | + Error *local_err = NULL; | ||
246 | + int ret; | ||
247 | + | ||
248 | + static const QDictRenames opt_renames[] = { | ||
249 | + { BLOCK_OPT_BACKING_FILE, "backing-file" }, | ||
250 | + { BLOCK_OPT_ENCRYPT, BLOCK_OPT_ENCRYPT_FORMAT }, | ||
251 | + { NULL, NULL }, | ||
252 | + }; | ||
253 | + | ||
254 | + /* Parse options and convert legacy syntax */ | ||
255 | + qdict = qemu_opts_to_qdict_filtered(opts, NULL, &qcow_create_opts, true); | ||
256 | + | ||
257 | + val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT); | ||
258 | + if (val && !strcmp(val, "on")) { | ||
259 | + qdict_put_str(qdict, BLOCK_OPT_ENCRYPT, "qcow"); | ||
260 | + } else if (val && !strcmp(val, "off")) { | ||
261 | + qdict_del(qdict, BLOCK_OPT_ENCRYPT); | ||
262 | + } | ||
263 | + | ||
264 | + val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT); | ||
265 | + if (val && !strcmp(val, "aes")) { | ||
266 | + qdict_put_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT, "qcow"); | ||
267 | + } | ||
268 | + | ||
269 | + if (!qdict_rename_keys(qdict, opt_renames, errp)) { | ||
270 | + ret = -EINVAL; | ||
271 | + goto fail; | ||
272 | + } | ||
273 | + | ||
274 | + /* Create and open the file (protocol layer) */ | ||
275 | + ret = bdrv_create_file(filename, opts, &local_err); | ||
276 | + if (ret < 0) { | ||
277 | + error_propagate(errp, local_err); | ||
278 | + goto fail; | ||
279 | + } | ||
280 | + | ||
281 | + bs = bdrv_open(filename, NULL, NULL, | ||
282 | + BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp); | ||
283 | + if (bs == NULL) { | ||
284 | + ret = -EIO; | ||
285 | + goto fail; | ||
286 | + } | ||
287 | + | ||
288 | + /* Now get the QAPI type BlockdevCreateOptions */ | ||
289 | + qdict_put_str(qdict, "driver", "qcow"); | ||
290 | + qdict_put_str(qdict, "file", bs->node_name); | ||
291 | + | ||
292 | + qobj = qdict_crumple(qdict, errp); | ||
293 | + QDECREF(qdict); | ||
294 | + qdict = qobject_to_qdict(qobj); | ||
295 | + if (qdict == NULL) { | ||
296 | + ret = -EINVAL; | ||
297 | + goto fail; | ||
298 | + } | ||
299 | + | ||
300 | + v = qobject_input_visitor_new_keyval(QOBJECT(qdict)); | ||
301 | + visit_type_BlockdevCreateOptions(v, NULL, &create_options, &local_err); | ||
302 | + visit_free(v); | ||
303 | + | ||
304 | + if (local_err) { | ||
305 | + error_propagate(errp, local_err); | ||
306 | + ret = -EINVAL; | ||
307 | + goto fail; | ||
308 | + } | ||
309 | + | ||
310 | + /* Silently round up size */ | ||
311 | + assert(create_options->driver == BLOCKDEV_DRIVER_QCOW); | ||
312 | + create_options->u.qcow.size = | ||
313 | + ROUND_UP(create_options->u.qcow.size, BDRV_SECTOR_SIZE); | ||
314 | + | ||
315 | + /* Create the qcow image (format layer) */ | ||
316 | + ret = qcow_co_create(create_options, errp); | ||
317 | + if (ret < 0) { | ||
318 | + goto fail; | ||
319 | + } | ||
320 | + | ||
321 | + ret = 0; | ||
322 | +fail: | ||
323 | + QDECREF(qdict); | ||
324 | + bdrv_unref(bs); | ||
325 | + qapi_free_BlockdevCreateOptions(create_options); | ||
326 | return ret; | ||
327 | } | 43 | } |
328 | |||
329 | @@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_qcow = { | ||
330 | .bdrv_close = qcow_close, | ||
331 | .bdrv_child_perm = bdrv_format_default_perms, | ||
332 | .bdrv_reopen_prepare = qcow_reopen_prepare, | ||
333 | + .bdrv_co_create = qcow_co_create, | ||
334 | .bdrv_co_create_opts = qcow_co_create_opts, | ||
335 | .bdrv_has_zero_init = bdrv_has_zero_init_1, | ||
336 | .supports_backing = true, | ||
337 | -- | 44 | -- |
338 | 2.13.6 | 45 | 2.37.3 |
339 | |||
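A note on the qemu_coroutine_get_aio_context() change in the block above: the function takes the coroutine to inspect as an explicit argument, so nothing about it requires running inside that coroutine (or inside any coroutine). A minimal sketch with a hypothetical helper name, assuming QEMU's aio/coroutine headers:

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"
    #include "block/aio.h"

    /* Plain function, callable from any context: it only inspects @co. */
    static void my_wake_in_home_context(Coroutine *co)
    {
        AioContext *ctx = qemu_coroutine_get_aio_context(co);

        /* hand the wake-up to the AioContext the coroutine belongs to */
        aio_co_schedule(ctx, co);
    }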
1 | Originally we added parallels as a read-only format to qemu-iotests | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | where we ran only a few tests against a binary image. Since then, write and | ||
3 | image creation support has been added to the driver, so we can now | ||
4 | enable it in _supported_fmt generic. | ||
5 | 2 | ||
6 | The driver doesn't support migration yet, though, so we need to add it | 3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call |
7 | to the list of exceptions in 181. | 4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to |
5 | functions where this holds. | ||
8 | 6 | ||
7 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
8 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
9 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
10 | Message-Id: <20220922084924.201610-8-pbonzini@redhat.com> | ||
11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
10 | Reviewed-by: Max Reitz <mreitz@redhat.com> | ||
11 | Reviewed-by: Jeff Cody <jcody@redhat.com> | ||
12 | --- | 13 | --- |
13 | tests/qemu-iotests/181 | 2 +- | 14 | block/blkverify.c | 2 +- |
14 | tests/qemu-iotests/check | 1 - | 15 | 1 file changed, 1 insertion(+), 1 deletion(-) |
15 | 2 files changed, 1 insertion(+), 2 deletions(-) | ||
16 | 16 | ||
17 | diff --git a/tests/qemu-iotests/181 b/tests/qemu-iotests/181 | 17 | diff --git a/block/blkverify.c b/block/blkverify.c |
18 | index XXXXXXX..XXXXXXX 100755 | 18 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/tests/qemu-iotests/181 | 19 | --- a/block/blkverify.c |
20 | +++ b/tests/qemu-iotests/181 | 20 | +++ b/block/blkverify.c |
21 | @@ -XXX,XX +XXX,XX @@ trap "_cleanup; exit \$status" 0 1 2 3 15 | 21 | @@ -XXX,XX +XXX,XX @@ blkverify_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, |
22 | 22 | return blkverify_co_prwv(bs, &r, offset, bytes, qiov, qiov, flags, true); | |
23 | _supported_fmt generic | 23 | } |
24 | # Formats that do not support live migration | 24 | |
25 | -_unsupported_fmt qcow vdi vhdx vmdk vpc vvfat | 25 | -static int blkverify_co_flush(BlockDriverState *bs) |
26 | +_unsupported_fmt qcow vdi vhdx vmdk vpc vvfat parallels | 26 | +static int coroutine_fn blkverify_co_flush(BlockDriverState *bs) |
27 | _supported_proto generic | 27 | { |
28 | _supported_os Linux | 28 | BDRVBlkverifyState *s = bs->opaque; |
29 | |||
30 | diff --git a/tests/qemu-iotests/check b/tests/qemu-iotests/check | ||
31 | index XXXXXXX..XXXXXXX 100755 | ||
32 | --- a/tests/qemu-iotests/check | ||
33 | +++ b/tests/qemu-iotests/check | ||
34 | @@ -XXX,XX +XXX,XX @@ testlist options | ||
35 | |||
36 | -parallels) | ||
37 | IMGFMT=parallels | ||
38 | - IMGFMT_GENERIC=false | ||
39 | xpand=false | ||
40 | ;; | ||
41 | 29 | ||
42 | -- | 30 | -- |
43 | 2.13.6 | 31 | 2.37.3 |
44 | |||
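The rule quoted in these commit messages, that a coroutine_fn may only be called from another coroutine_fn or behind a run-time qemu_in_coroutine() check, in a minimal hypothetical sketch (assuming QEMU's coroutine header; the names are made up):

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"

    /* Hypothetical coroutine_fn: its contract allows it to yield. */
    static int coroutine_fn my_co_do_work(void *opaque)
    {
        return 0;   /* real code could call further coroutine_fn helpers here */
    }

    /* Legal caller #1: another coroutine_fn. */
    static int coroutine_fn my_co_wrapper(void *opaque)
    {
        return my_co_do_work(opaque);
    }

    /* Legal caller #2: mixed-context code with an explicit check. */
    static int my_entry_point(void *opaque)
    {
        if (qemu_in_coroutine()) {
            return my_co_do_work(opaque);
        }
        /* outside a coroutine, one would have to be created and entered */
        return -ENOTSUP;
    }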
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call | ||
4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to | ||
5 | functions where this holds. | ||
6 | |||
7 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
8 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
9 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
10 | Message-Id: <20220922084924.201610-9-pbonzini@redhat.com> | ||
11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
13 | --- | ||
14 | block/file-posix.c | 2 +- | ||
15 | 1 file changed, 1 insertion(+), 1 deletion(-) | ||
16 | |||
17 | diff --git a/block/file-posix.c b/block/file-posix.c | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/block/file-posix.c | ||
20 | +++ b/block/file-posix.c | ||
21 | @@ -XXX,XX +XXX,XX @@ static void raw_aio_unplug(BlockDriverState *bs) | ||
22 | #endif | ||
23 | } | ||
24 | |||
25 | -static int raw_co_flush_to_disk(BlockDriverState *bs) | ||
26 | +static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs) | ||
27 | { | ||
28 | BDRVRawState *s = bs->opaque; | ||
29 | RawPosixAIOData acb; | ||
30 | -- | ||
31 | 2.37.3 |
1 | From: John Snow <jsnow@redhat.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | The completed_single function is getting a little mucked up with | 3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call |
4 | checking to see which callbacks exist, so let's factor those checks out. | 4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to |
5 | functions where this holds. | ||
5 | 6 | ||
6 | Signed-off-by: John Snow <jsnow@redhat.com> | 7 | Reviewed-by: Alberto Faria <afaria@redhat.com> |
7 | Reviewed-by: Eric Blake <eblake@redhat.com> | 8 | Reviewed-by: Eric Blake <eblake@redhat.com> |
9 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
10 | Message-Id: <20220922084924.201610-10-pbonzini@redhat.com> | ||
11 | [kwolf: Fixed up coding style] | ||
8 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 12 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 13 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
10 | --- | 14 | --- |
11 | blockjob.c | 35 ++++++++++++++++++++++++++--------- | 15 | block/iscsi.c | 3 ++- |
12 | 1 file changed, 26 insertions(+), 9 deletions(-) | 16 | 1 file changed, 2 insertions(+), 1 deletion(-) |
13 | 17 | ||
14 | diff --git a/blockjob.c b/blockjob.c | 18 | diff --git a/block/iscsi.c b/block/iscsi.c |
15 | index XXXXXXX..XXXXXXX 100644 | 19 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/blockjob.c | 20 | --- a/block/iscsi.c |
17 | +++ b/blockjob.c | 21 | +++ b/block/iscsi.c |
18 | @@ -XXX,XX +XXX,XX @@ static void block_job_update_rc(BlockJob *job) | 22 | @@ -XXX,XX +XXX,XX @@ iscsi_co_generic_cb(struct iscsi_context *iscsi, int status, |
19 | } | 23 | } |
20 | } | 24 | } |
21 | 25 | ||
22 | +static void block_job_commit(BlockJob *job) | 26 | -static void iscsi_co_init_iscsitask(IscsiLun *iscsilun, struct IscsiTask *iTask) |
23 | +{ | 27 | +static void coroutine_fn |
24 | + assert(!job->ret); | 28 | +iscsi_co_init_iscsitask(IscsiLun *iscsilun, struct IscsiTask *iTask) |
25 | + if (job->driver->commit) { | ||
26 | + job->driver->commit(job); | ||
27 | + } | ||
28 | +} | ||
29 | + | ||
30 | +static void block_job_abort(BlockJob *job) | ||
31 | +{ | ||
32 | + assert(job->ret); | ||
33 | + if (job->driver->abort) { | ||
34 | + job->driver->abort(job); | ||
35 | + } | ||
36 | +} | ||
37 | + | ||
38 | +static void block_job_clean(BlockJob *job) | ||
39 | +{ | ||
40 | + if (job->driver->clean) { | ||
41 | + job->driver->clean(job); | ||
42 | + } | ||
43 | +} | ||
44 | + | ||
45 | static void block_job_completed_single(BlockJob *job) | ||
46 | { | 29 | { |
47 | assert(job->completed); | 30 | *iTask = (struct IscsiTask) { |
48 | @@ -XXX,XX +XXX,XX @@ static void block_job_completed_single(BlockJob *job) | 31 | .co = qemu_coroutine_self(), |
49 | block_job_update_rc(job); | ||
50 | |||
51 | if (!job->ret) { | ||
52 | - if (job->driver->commit) { | ||
53 | - job->driver->commit(job); | ||
54 | - } | ||
55 | + block_job_commit(job); | ||
56 | } else { | ||
57 | - if (job->driver->abort) { | ||
58 | - job->driver->abort(job); | ||
59 | - } | ||
60 | - } | ||
61 | - if (job->driver->clean) { | ||
62 | - job->driver->clean(job); | ||
63 | + block_job_abort(job); | ||
64 | } | ||
65 | + block_job_clean(job); | ||
66 | |||
67 | if (job->cb) { | ||
68 | job->cb(job->opaque, job->ret); | ||
69 | -- | 32 | -- |
70 | 2.13.6 | 33 | 2.37.3 |
71 | |||
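The iscsi change above (and the nfs one further down) follows the same reasoning: a helper that records the currently running coroutine into a request, so that a completion callback can wake it later, is only meaningful inside a coroutine, so it is annotated coroutine_fn and must only be called from coroutine_fn code. A hypothetical sketch of that pattern:

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"

    /* Hypothetical per-request state, modelled on the iscsi/nfs drivers. */
    typedef struct MyTask {
        Coroutine *co;       /* coroutine to wake when the request completes */
        bool complete;
    } MyTask;

    /* coroutine_fn: the stored coroutine is the caller itself. */
    static void coroutine_fn my_co_init_task(MyTask *task)
    {
        *task = (MyTask) {
            .co = qemu_coroutine_self(),
        };
    }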
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call | ||
4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to | ||
5 | functions where this holds. | ||
6 | |||
7 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
8 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
9 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
10 | Message-Id: <20220922084924.201610-11-pbonzini@redhat.com> | ||
11 | [kwolf: Fixed up coding style] | ||
12 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
13 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
14 | --- | ||
15 | block/nbd.c | 11 ++++++----- | ||
16 | 1 file changed, 6 insertions(+), 5 deletions(-) | ||
17 | |||
18 | diff --git a/block/nbd.c b/block/nbd.c | ||
19 | index XXXXXXX..XXXXXXX 100644 | ||
20 | --- a/block/nbd.c | ||
21 | +++ b/block/nbd.c | ||
22 | @@ -XXX,XX +XXX,XX @@ static void nbd_iter_request_error(NBDReplyChunkIter *iter, int ret) | ||
23 | * nbd_reply_chunk_iter_receive | ||
24 | * The pointer stored in @payload requires g_free() to free it. | ||
25 | */ | ||
26 | -static bool nbd_reply_chunk_iter_receive(BDRVNBDState *s, | ||
27 | - NBDReplyChunkIter *iter, | ||
28 | - uint64_t handle, | ||
29 | - QEMUIOVector *qiov, NBDReply *reply, | ||
30 | - void **payload) | ||
31 | +static bool coroutine_fn nbd_reply_chunk_iter_receive(BDRVNBDState *s, | ||
32 | + NBDReplyChunkIter *iter, | ||
33 | + uint64_t handle, | ||
34 | + QEMUIOVector *qiov, | ||
35 | + NBDReply *reply, | ||
36 | + void **payload) | ||
37 | { | ||
38 | int ret, request_ret; | ||
39 | NBDReply local_reply; | ||
40 | -- | ||
41 | 2.37.3 |
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call | ||
4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to | ||
5 | functions where this holds. | ||
6 | |||
7 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
9 | Message-Id: <20220922084924.201610-12-pbonzini@redhat.com> | ||
10 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
12 | --- | ||
13 | block/nfs.c | 2 +- | ||
14 | 1 file changed, 1 insertion(+), 1 deletion(-) | ||
15 | |||
16 | diff --git a/block/nfs.c b/block/nfs.c | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/block/nfs.c | ||
19 | +++ b/block/nfs.c | ||
20 | @@ -XXX,XX +XXX,XX @@ static void nfs_process_write(void *arg) | ||
21 | qemu_mutex_unlock(&client->mutex); | ||
22 | } | ||
23 | |||
24 | -static void nfs_co_init_task(BlockDriverState *bs, NFSRPC *task) | ||
25 | +static void coroutine_fn nfs_co_init_task(BlockDriverState *bs, NFSRPC *task) | ||
26 | { | ||
27 | *task = (NFSRPC) { | ||
28 | .co = qemu_coroutine_self(), | ||
29 | -- | ||
30 | 2.37.3 |
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call | ||
4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to | ||
5 | functions where this holds. | ||
6 | |||
7 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
9 | Message-Id: <20220922084924.201610-13-pbonzini@redhat.com> | ||
10 | [kwolf: Fixed up coding style] | ||
11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
13 | --- | ||
14 | block/nvme.c | 6 ++++-- | ||
15 | 1 file changed, 4 insertions(+), 2 deletions(-) | ||
16 | |||
17 | diff --git a/block/nvme.c b/block/nvme.c | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/block/nvme.c | ||
20 | +++ b/block/nvme.c | ||
21 | @@ -XXX,XX +XXX,XX @@ static inline bool nvme_qiov_aligned(BlockDriverState *bs, | ||
22 | return true; | ||
23 | } | ||
24 | |||
25 | -static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes, | ||
26 | - QEMUIOVector *qiov, bool is_write, int flags) | ||
27 | +static coroutine_fn int nvme_co_prw(BlockDriverState *bs, | ||
28 | + uint64_t offset, uint64_t bytes, | ||
29 | + QEMUIOVector *qiov, bool is_write, | ||
30 | + int flags) | ||
31 | { | ||
32 | BDRVNVMeState *s = bs->opaque; | ||
33 | int r; | ||
34 | -- | ||
35 | 2.37.3 |
1 | This adds the .bdrv_co_create driver callback to parallels, which | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | enables image creation over QMP. | ||
3 | 2 | ||
3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call | ||
4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to | ||
5 | functions where this holds. | ||
6 | |||
7 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
9 | Message-Id: <20220922084924.201610-14-pbonzini@redhat.com> | ||
10 | [kwolf: Fixed up coding style] | ||
11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
4 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
5 | Reviewed-by: Max Reitz <mreitz@redhat.com> | ||
6 | Reviewed-by: Jeff Cody <jcody@redhat.com> | ||
7 | --- | 13 | --- |
8 | qapi/block-core.json | 18 ++++- | 14 | block/parallels.c | 5 +++-- |
9 | block/parallels.c | 199 ++++++++++++++++++++++++++++++++++++++------------- | 15 | 1 file changed, 3 insertions(+), 2 deletions(-) |
10 | 2 files changed, 168 insertions(+), 49 deletions(-) | ||
11 | 16 | ||
12 | diff --git a/qapi/block-core.json b/qapi/block-core.json | ||
13 | index XXXXXXX..XXXXXXX 100644 | ||
14 | --- a/qapi/block-core.json | ||
15 | +++ b/qapi/block-core.json | ||
16 | @@ -XXX,XX +XXX,XX @@ | ||
17 | 'size': 'size' } } | ||
18 | |||
19 | ## | ||
20 | +# @BlockdevCreateOptionsParallels: | ||
21 | +# | ||
22 | +# Driver specific image creation options for parallels. | ||
23 | +# | ||
24 | +# @file Node to create the image format on | ||
25 | +# @size Size of the virtual disk in bytes | ||
26 | +# @cluster-size Cluster size in bytes (default: 1 MB) | ||
27 | +# | ||
28 | +# Since: 2.12 | ||
29 | +## | ||
30 | +{ 'struct': 'BlockdevCreateOptionsParallels', | ||
31 | + 'data': { 'file': 'BlockdevRef', | ||
32 | + 'size': 'size', | ||
33 | + '*cluster-size': 'size' } } | ||
34 | + | ||
35 | +## | ||
36 | # @BlockdevQcow2Version: | ||
37 | # | ||
38 | # @v2: The original QCOW2 format as introduced in qemu 0.10 (version 2) | ||
39 | @@ -XXX,XX +XXX,XX @@ | ||
40 | 'null-aio': 'BlockdevCreateNotSupported', | ||
41 | 'null-co': 'BlockdevCreateNotSupported', | ||
42 | 'nvme': 'BlockdevCreateNotSupported', | ||
43 | - 'parallels': 'BlockdevCreateNotSupported', | ||
44 | + 'parallels': 'BlockdevCreateOptionsParallels', | ||
45 | 'qcow2': 'BlockdevCreateOptionsQcow2', | ||
46 | 'qcow': 'BlockdevCreateNotSupported', | ||
47 | 'qed': 'BlockdevCreateNotSupported', | ||
48 | diff --git a/block/parallels.c b/block/parallels.c | 17 | diff --git a/block/parallels.c b/block/parallels.c |
49 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
50 | --- a/block/parallels.c | 19 | --- a/block/parallels.c |
51 | +++ b/block/parallels.c | 20 | +++ b/block/parallels.c |
52 | @@ -XXX,XX +XXX,XX @@ | 21 | @@ -XXX,XX +XXX,XX @@ static int64_t block_status(BDRVParallelsState *s, int64_t sector_num, |
53 | #include "sysemu/block-backend.h" | 22 | return start_off; |
54 | #include "qemu/module.h" | 23 | } |
55 | #include "qemu/option.h" | 24 | |
56 | +#include "qapi/qmp/qdict.h" | 25 | -static int64_t allocate_clusters(BlockDriverState *bs, int64_t sector_num, |
57 | +#include "qapi/qobject-input-visitor.h" | 26 | - int nb_sectors, int *pnum) |
58 | +#include "qapi/qapi-visit-block-core.h" | 27 | +static coroutine_fn int64_t allocate_clusters(BlockDriverState *bs, |
59 | #include "qemu/bswap.h" | 28 | + int64_t sector_num, |
60 | #include "qemu/bitmap.h" | 29 | + int nb_sectors, int *pnum) |
61 | #include "migration/blocker.h" | ||
62 | @@ -XXX,XX +XXX,XX @@ static QemuOptsList parallels_runtime_opts = { | ||
63 | }, | ||
64 | }; | ||
65 | |||
66 | +static QemuOptsList parallels_create_opts = { | ||
67 | + .name = "parallels-create-opts", | ||
68 | + .head = QTAILQ_HEAD_INITIALIZER(parallels_create_opts.head), | ||
69 | + .desc = { | ||
70 | + { | ||
71 | + .name = BLOCK_OPT_SIZE, | ||
72 | + .type = QEMU_OPT_SIZE, | ||
73 | + .help = "Virtual disk size", | ||
74 | + }, | ||
75 | + { | ||
76 | + .name = BLOCK_OPT_CLUSTER_SIZE, | ||
77 | + .type = QEMU_OPT_SIZE, | ||
78 | + .help = "Parallels image cluster size", | ||
79 | + .def_value_str = stringify(DEFAULT_CLUSTER_SIZE), | ||
80 | + }, | ||
81 | + { /* end of list */ } | ||
82 | + } | ||
83 | +}; | ||
84 | + | ||
85 | |||
86 | static int64_t bat2sect(BDRVParallelsState *s, uint32_t idx) | ||
87 | { | 30 | { |
88 | @@ -XXX,XX +XXX,XX @@ out: | 31 | int ret = 0; |
89 | } | 32 | BDRVParallelsState *s = bs->opaque; |
90 | |||
91 | |||
92 | -static int coroutine_fn parallels_co_create_opts(const char *filename, | ||
93 | - QemuOpts *opts, | ||
94 | - Error **errp) | ||
95 | +static int coroutine_fn parallels_co_create(BlockdevCreateOptions* opts, | ||
96 | + Error **errp) | ||
97 | { | ||
98 | + BlockdevCreateOptionsParallels *parallels_opts; | ||
99 | + BlockDriverState *bs; | ||
100 | + BlockBackend *blk; | ||
101 | int64_t total_size, cl_size; | ||
102 | - uint8_t tmp[BDRV_SECTOR_SIZE]; | ||
103 | - Error *local_err = NULL; | ||
104 | - BlockBackend *file; | ||
105 | uint32_t bat_entries, bat_sectors; | ||
106 | ParallelsHeader header; | ||
107 | + uint8_t tmp[BDRV_SECTOR_SIZE]; | ||
108 | int ret; | ||
109 | |||
110 | - total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), | ||
111 | - BDRV_SECTOR_SIZE); | ||
112 | - cl_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_CLUSTER_SIZE, | ||
113 | - DEFAULT_CLUSTER_SIZE), BDRV_SECTOR_SIZE); | ||
114 | + assert(opts->driver == BLOCKDEV_DRIVER_PARALLELS); | ||
115 | + parallels_opts = &opts->u.parallels; | ||
116 | + | ||
117 | + /* Sanity checks */ | ||
118 | + total_size = parallels_opts->size; | ||
119 | + | ||
120 | + if (parallels_opts->has_cluster_size) { | ||
121 | + cl_size = parallels_opts->cluster_size; | ||
122 | + } else { | ||
123 | + cl_size = DEFAULT_CLUSTER_SIZE; | ||
124 | + } | ||
125 | + | ||
126 | if (total_size >= MAX_PARALLELS_IMAGE_FACTOR * cl_size) { | ||
127 | - error_propagate(errp, local_err); | ||
128 | + error_setg(errp, "Image size is too large for this cluster size"); | ||
129 | return -E2BIG; | ||
130 | } | ||
131 | |||
132 | - ret = bdrv_create_file(filename, opts, &local_err); | ||
133 | - if (ret < 0) { | ||
134 | - error_propagate(errp, local_err); | ||
135 | - return ret; | ||
136 | + if (!QEMU_IS_ALIGNED(total_size, BDRV_SECTOR_SIZE)) { | ||
137 | + error_setg(errp, "Image size must be a multiple of 512 bytes"); | ||
138 | + return -EINVAL; | ||
139 | } | ||
140 | |||
141 | - file = blk_new_open(filename, NULL, NULL, | ||
142 | - BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, | ||
143 | - &local_err); | ||
144 | - if (file == NULL) { | ||
145 | - error_propagate(errp, local_err); | ||
146 | + if (!QEMU_IS_ALIGNED(cl_size, BDRV_SECTOR_SIZE)) { | ||
147 | + error_setg(errp, "Cluster size must be a multiple of 512 bytes"); | ||
148 | + return -EINVAL; | ||
149 | + } | ||
150 | + | ||
151 | + /* Create BlockBackend to write to the image */ | ||
152 | + bs = bdrv_open_blockdev_ref(parallels_opts->file, errp); | ||
153 | + if (bs == NULL) { | ||
154 | return -EIO; | ||
155 | } | ||
156 | |||
157 | - blk_set_allow_write_beyond_eof(file, true); | ||
158 | + blk = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL); | ||
159 | + ret = blk_insert_bs(blk, bs, errp); | ||
160 | + if (ret < 0) { | ||
161 | + goto out; | ||
162 | + } | ||
163 | + blk_set_allow_write_beyond_eof(blk, true); | ||
164 | |||
165 | - ret = blk_truncate(file, 0, PREALLOC_MODE_OFF, errp); | ||
166 | + /* Create image format */ | ||
167 | + ret = blk_truncate(blk, 0, PREALLOC_MODE_OFF, errp); | ||
168 | if (ret < 0) { | ||
169 | - goto exit; | ||
170 | + goto out; | ||
171 | } | ||
172 | |||
173 | bat_entries = DIV_ROUND_UP(total_size, cl_size); | ||
174 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn parallels_co_create_opts(const char *filename, | ||
175 | memset(tmp, 0, sizeof(tmp)); | ||
176 | memcpy(tmp, &header, sizeof(header)); | ||
177 | |||
178 | - ret = blk_pwrite(file, 0, tmp, BDRV_SECTOR_SIZE, 0); | ||
179 | + ret = blk_pwrite(blk, 0, tmp, BDRV_SECTOR_SIZE, 0); | ||
180 | if (ret < 0) { | ||
181 | goto exit; | ||
182 | } | ||
183 | - ret = blk_pwrite_zeroes(file, BDRV_SECTOR_SIZE, | ||
184 | + ret = blk_pwrite_zeroes(blk, BDRV_SECTOR_SIZE, | ||
185 | (bat_sectors - 1) << BDRV_SECTOR_BITS, 0); | ||
186 | if (ret < 0) { | ||
187 | goto exit; | ||
188 | } | ||
189 | - ret = 0; | ||
190 | |||
191 | -done: | ||
192 | - blk_unref(file); | ||
193 | + ret = 0; | ||
194 | +out: | ||
195 | + blk_unref(blk); | ||
196 | + bdrv_unref(bs); | ||
197 | return ret; | ||
198 | |||
199 | exit: | ||
200 | error_setg_errno(errp, -ret, "Failed to create Parallels image"); | ||
201 | - goto done; | ||
202 | + goto out; | ||
203 | +} | ||
204 | + | ||
205 | +static int coroutine_fn parallels_co_create_opts(const char *filename, | ||
206 | + QemuOpts *opts, | ||
207 | + Error **errp) | ||
208 | +{ | ||
209 | + BlockdevCreateOptions *create_options = NULL; | ||
210 | + Error *local_err = NULL; | ||
211 | + BlockDriverState *bs = NULL; | ||
212 | + QDict *qdict = NULL; | ||
213 | + QObject *qobj; | ||
214 | + Visitor *v; | ||
215 | + int ret; | ||
216 | + | ||
217 | + static const QDictRenames opt_renames[] = { | ||
218 | + { BLOCK_OPT_CLUSTER_SIZE, "cluster-size" }, | ||
219 | + { NULL, NULL }, | ||
220 | + }; | ||
221 | + | ||
222 | + /* Parse options and convert legacy syntax */ | ||
223 | + qdict = qemu_opts_to_qdict_filtered(opts, NULL, ¶llels_create_opts, | ||
224 | + true); | ||
225 | + | ||
226 | + if (!qdict_rename_keys(qdict, opt_renames, errp)) { | ||
227 | + ret = -EINVAL; | ||
228 | + goto done; | ||
229 | + } | ||
230 | + | ||
231 | + /* Create and open the file (protocol layer) */ | ||
232 | + ret = bdrv_create_file(filename, opts, &local_err); | ||
233 | + if (ret < 0) { | ||
234 | + error_propagate(errp, local_err); | ||
235 | + goto done; | ||
236 | + } | ||
237 | + | ||
238 | + bs = bdrv_open(filename, NULL, NULL, | ||
239 | + BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp); | ||
240 | + if (bs == NULL) { | ||
241 | + ret = -EIO; | ||
242 | + goto done; | ||
243 | + } | ||
244 | + | ||
245 | + /* Now get the QAPI type BlockdevCreateOptions */ | ||
246 | + qdict_put_str(qdict, "driver", "parallels"); | ||
247 | + qdict_put_str(qdict, "file", bs->node_name); | ||
248 | + | ||
249 | + qobj = qdict_crumple(qdict, errp); | ||
250 | + QDECREF(qdict); | ||
251 | + qdict = qobject_to_qdict(qobj); | ||
252 | + if (qdict == NULL) { | ||
253 | + ret = -EINVAL; | ||
254 | + goto done; | ||
255 | + } | ||
256 | + | ||
257 | + v = qobject_input_visitor_new_keyval(QOBJECT(qdict)); | ||
258 | + visit_type_BlockdevCreateOptions(v, NULL, &create_options, &local_err); | ||
259 | + visit_free(v); | ||
260 | + | ||
261 | + if (local_err) { | ||
262 | + error_propagate(errp, local_err); | ||
263 | + ret = -EINVAL; | ||
264 | + goto done; | ||
265 | + } | ||
266 | + | ||
267 | + /* Silently round up sizes */ | ||
268 | + create_options->u.parallels.size = | ||
269 | + ROUND_UP(create_options->u.parallels.size, BDRV_SECTOR_SIZE); | ||
270 | + create_options->u.parallels.cluster_size = | ||
271 | + ROUND_UP(create_options->u.parallels.cluster_size, BDRV_SECTOR_SIZE); | ||
272 | + | ||
273 | + /* Create the Parallels image (format layer) */ | ||
274 | + ret = parallels_co_create(create_options, errp); | ||
275 | + if (ret < 0) { | ||
276 | + goto done; | ||
277 | + } | ||
278 | + ret = 0; | ||
279 | + | ||
280 | +done: | ||
281 | + QDECREF(qdict); | ||
282 | + bdrv_unref(bs); | ||
283 | + qapi_free_BlockdevCreateOptions(create_options); | ||
284 | + return ret; | ||
285 | } | ||
286 | |||
287 | |||
288 | @@ -XXX,XX +XXX,XX @@ static void parallels_close(BlockDriverState *bs) | ||
289 | error_free(s->migration_blocker); | ||
290 | } | ||
291 | |||
292 | -static QemuOptsList parallels_create_opts = { | ||
293 | - .name = "parallels-create-opts", | ||
294 | - .head = QTAILQ_HEAD_INITIALIZER(parallels_create_opts.head), | ||
295 | - .desc = { | ||
296 | - { | ||
297 | - .name = BLOCK_OPT_SIZE, | ||
298 | - .type = QEMU_OPT_SIZE, | ||
299 | - .help = "Virtual disk size", | ||
300 | - }, | ||
301 | - { | ||
302 | - .name = BLOCK_OPT_CLUSTER_SIZE, | ||
303 | - .type = QEMU_OPT_SIZE, | ||
304 | - .help = "Parallels image cluster size", | ||
305 | - .def_value_str = stringify(DEFAULT_CLUSTER_SIZE), | ||
306 | - }, | ||
307 | - { /* end of list */ } | ||
308 | - } | ||
309 | -}; | ||
310 | - | ||
311 | static BlockDriver bdrv_parallels = { | ||
312 | .format_name = "parallels", | ||
313 | .instance_size = sizeof(BDRVParallelsState), | ||
314 | @@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_parallels = { | ||
315 | .bdrv_co_readv = parallels_co_readv, | ||
316 | .bdrv_co_writev = parallels_co_writev, | ||
317 | .supports_backing = true, | ||
318 | + .bdrv_co_create = parallels_co_create, | ||
319 | .bdrv_co_create_opts = parallels_co_create_opts, | ||
320 | .bdrv_co_check = parallels_co_check, | ||
321 | .create_opts = ¶llels_create_opts, | ||
322 | -- | 33 | -- |
323 | 2.13.6 | 34 | 2.37.3 |
324 | |||
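For readers following the older series in this comparison: every format driver converted to .bdrv_co_create has the same shape, a QAPI-typed creation function for QMP plus a thin QemuOpts wrapper for qemu-img, both wired into the BlockDriver. A heavily stripped-down, hypothetical sketch of that shape (assuming block/block_int.h for the BlockDriver definition); the real functions of course do the option parsing, protocol-file creation and header writing shown in the diffs:

    /* QMP path: options arrive already validated as a QAPI type. */
    static int coroutine_fn myfmt_co_create(BlockdevCreateOptions *opts,
                                            Error **errp)
    {
        return 0;   /* placeholder: the format header would be written here */
    }

    /* qemu-img path. */
    static int coroutine_fn myfmt_co_create_opts(const char *filename,
                                                 QemuOpts *opts, Error **errp)
    {
        /*
         * Placeholder: the real code converts @opts into a
         * BlockdevCreateOptions, creates and opens the protocol-layer file,
         * and then calls myfmt_co_create() on the result.
         */
        return 0;
    }

    static BlockDriver bdrv_myfmt = {
        .format_name          = "myfmt",
        .bdrv_co_create       = myfmt_co_create,
        .bdrv_co_create_opts  = myfmt_co_create_opts,
    };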
1 | From: Max Reitz <mreitz@redhat.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | In preparation for QAPI-fying VDI image creation, we have to create a | 3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call |
4 | BlockdevCreateOptionsVdi type which is received by a (future) | 4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to |
5 | vdi_co_create(). | 5 | functions where this holds. |
6 | 6 | ||
7 | vdi_co_create_opts() now converts the QemuOpts object into such a | 7 | Reviewed-by: Alberto Faria <afaria@redhat.com> |
8 | BlockdevCreateOptionsVdi object. The protocol-layer file is still | 8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
9 | created in vdi_co_do_create() (and BlockdevCreateOptionsVdi.file is set | 9 | Message-Id: <20220922084924.201610-15-pbonzini@redhat.com> |
10 | to an empty string), but that will be addressed by a follow-up patch. | 10 | [kwolf: Fixed up coding style] |
11 | 11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | |
12 | Note that cluster-size is not part of the QAPI schema because it is not | ||
13 | supported by default. | ||
14 | |||
15 | Signed-off-by: Max Reitz <mreitz@redhat.com> | ||
16 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
17 | --- | 13 | --- |
18 | qapi/block-core.json | 18 +++++++++++ | 14 | block/qcow2.h | 15 ++++++++------- |
19 | block/vdi.c | 91 ++++++++++++++++++++++++++++++++++++++++++++-------- | 15 | block/qcow2-cluster.c | 21 ++++++++++++--------- |
20 | 2 files changed, 95 insertions(+), 14 deletions(-) | 16 | block/qcow2-refcount.c | 2 +- |
17 | block/qcow2.c | 5 +++-- | ||
18 | 4 files changed, 24 insertions(+), 19 deletions(-) | ||
21 | 19 | ||
22 | diff --git a/qapi/block-core.json b/qapi/block-core.json | 20 | diff --git a/block/qcow2.h b/block/qcow2.h |
23 | index XXXXXXX..XXXXXXX 100644 | 21 | index XXXXXXX..XXXXXXX 100644 |
24 | --- a/qapi/block-core.json | 22 | --- a/block/qcow2.h |
25 | +++ b/qapi/block-core.json | 23 | +++ b/block/qcow2.h |
26 | @@ -XXX,XX +XXX,XX @@ | 24 | @@ -XXX,XX +XXX,XX @@ int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order, |
27 | 'size': 'size' } } | 25 | void *cb_opaque, Error **errp); |
28 | 26 | int qcow2_shrink_reftable(BlockDriverState *bs); | |
29 | ## | 27 | int64_t qcow2_get_last_cluster(BlockDriverState *bs, int64_t size); |
30 | +# @BlockdevCreateOptionsVdi: | 28 | -int qcow2_detect_metadata_preallocation(BlockDriverState *bs); |
31 | +# | 29 | +int coroutine_fn qcow2_detect_metadata_preallocation(BlockDriverState *bs); |
32 | +# Driver specific image creation options for VDI. | 30 | |
33 | +# | 31 | /* qcow2-cluster.c functions */ |
34 | +# @file Node to create the image format on | 32 | int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size, |
35 | +# @size Size of the virtual disk in bytes | 33 | @@ -XXX,XX +XXX,XX @@ int qcow2_encrypt_sectors(BDRVQcow2State *s, int64_t sector_num, |
36 | +# @static Whether to create a statically (true) or | 34 | int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset, |
37 | +# dynamically (false) allocated image | 35 | unsigned int *bytes, uint64_t *host_offset, |
38 | +# (default: false, i.e. dynamic) | 36 | QCow2SubclusterType *subcluster_type); |
39 | +# | 37 | -int qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset, |
40 | +# Since: 2.12 | 38 | - unsigned int *bytes, uint64_t *host_offset, |
41 | +## | 39 | - QCowL2Meta **m); |
42 | +{ 'struct': 'BlockdevCreateOptionsVdi', | 40 | +int coroutine_fn qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset, |
43 | + 'data': { 'file': 'BlockdevRef', | 41 | + unsigned int *bytes, |
44 | + 'size': 'size', | 42 | + uint64_t *host_offset, QCowL2Meta **m); |
45 | + '*static': 'bool' } } | 43 | int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, |
46 | + | 44 | uint64_t offset, |
47 | +## | 45 | int compressed_size, |
48 | # @BlockdevCreateNotSupported: | 46 | @@ -XXX,XX +XXX,XX @@ int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, |
49 | # | 47 | void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry, |
50 | # This is used for all drivers that don't support creating images. | 48 | uint64_t *coffset, int *csize); |
51 | diff --git a/block/vdi.c b/block/vdi.c | 49 | |
50 | -int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m); | ||
51 | +int coroutine_fn qcow2_alloc_cluster_link_l2(BlockDriverState *bs, | ||
52 | + QCowL2Meta *m); | ||
53 | void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m); | ||
54 | int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset, | ||
55 | uint64_t bytes, enum qcow2_discard_type type, | ||
56 | bool full_discard); | ||
57 | -int qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset, | ||
58 | - uint64_t bytes, int flags); | ||
59 | +int coroutine_fn qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset, | ||
60 | + uint64_t bytes, int flags); | ||
61 | |||
62 | int qcow2_expand_zero_clusters(BlockDriverState *bs, | ||
63 | BlockDriverAmendStatusCB *status_cb, | ||
64 | diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c | ||
52 | index XXXXXXX..XXXXXXX 100644 | 65 | index XXXXXXX..XXXXXXX 100644 |
53 | --- a/block/vdi.c | 66 | --- a/block/qcow2-cluster.c |
54 | +++ b/block/vdi.c | 67 | +++ b/block/qcow2-cluster.c |
55 | @@ -XXX,XX +XXX,XX @@ | 68 | @@ -XXX,XX +XXX,XX @@ int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, |
56 | 69 | return 0; | |
57 | #include "qemu/osdep.h" | 70 | } |
58 | #include "qapi/error.h" | 71 | |
59 | +#include "qapi/qmp/qdict.h" | 72 | -static int perform_cow(BlockDriverState *bs, QCowL2Meta *m) |
60 | +#include "qapi/qobject-input-visitor.h" | 73 | +static int coroutine_fn perform_cow(BlockDriverState *bs, QCowL2Meta *m) |
61 | +#include "qapi/qapi-visit-block-core.h" | 74 | { |
62 | #include "block/block_int.h" | 75 | BDRVQcow2State *s = bs->opaque; |
63 | #include "sysemu/block-backend.h" | 76 | Qcow2COWRegion *start = &m->cow_start; |
64 | #include "qemu/module.h" | 77 | @@ -XXX,XX +XXX,XX @@ fail: |
65 | @@ -XXX,XX +XXX,XX @@ | ||
66 | #define VDI_DISK_SIZE_MAX ((uint64_t)VDI_BLOCKS_IN_IMAGE_MAX * \ | ||
67 | (uint64_t)DEFAULT_CLUSTER_SIZE) | ||
68 | |||
69 | +static QemuOptsList vdi_create_opts; | ||
70 | + | ||
71 | typedef struct { | ||
72 | char text[0x40]; | ||
73 | uint32_t signature; | ||
74 | @@ -XXX,XX +XXX,XX @@ nonallocating_write: | ||
75 | return ret; | 78 | return ret; |
76 | } | 79 | } |
77 | 80 | ||
78 | -static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts, | 81 | -int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m) |
79 | - Error **errp) | 82 | +int coroutine_fn qcow2_alloc_cluster_link_l2(BlockDriverState *bs, |
80 | +static int coroutine_fn vdi_co_do_create(const char *filename, | 83 | + QCowL2Meta *m) |
81 | + QemuOpts *file_opts, | ||
82 | + BlockdevCreateOptionsVdi *vdi_opts, | ||
83 | + size_t block_size, Error **errp) | ||
84 | { | 84 | { |
85 | int ret = 0; | 85 | BDRVQcow2State *s = bs->opaque; |
86 | uint64_t bytes = 0; | 86 | int i, j = 0, l2_index, ret; |
87 | uint32_t blocks; | 87 | @@ -XXX,XX +XXX,XX @@ static int count_single_write_clusters(BlockDriverState *bs, int nb_clusters, |
88 | - size_t block_size = DEFAULT_CLUSTER_SIZE; | 88 | * information on cluster allocation may be invalid now. The caller |
89 | uint32_t image_type = VDI_TYPE_DYNAMIC; | 89 | * must start over anyway, so consider *cur_bytes undefined. |
90 | VdiHeader header; | 90 | */ |
91 | size_t i; | 91 | -static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset, |
92 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts, | 92 | - uint64_t *cur_bytes, QCowL2Meta **m) |
93 | logout("\n"); | 93 | +static int coroutine_fn handle_dependencies(BlockDriverState *bs, |
94 | 94 | + uint64_t guest_offset, | |
95 | /* Read out options. */ | 95 | + uint64_t *cur_bytes, QCowL2Meta **m) |
96 | - bytes = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), | 96 | { |
97 | - BDRV_SECTOR_SIZE); | 97 | BDRVQcow2State *s = bs->opaque; |
98 | -#if defined(CONFIG_VDI_BLOCK_SIZE) | 98 | QCowL2Meta *old_alloc; |
99 | - /* TODO: Additional checks (SECTOR_SIZE * 2^n, ...). */ | 99 | @@ -XXX,XX +XXX,XX @@ out: |
100 | - block_size = qemu_opt_get_size_del(opts, | 100 | * |
101 | - BLOCK_OPT_CLUSTER_SIZE, | 101 | * Return 0 on success and -errno in error cases |
102 | - DEFAULT_CLUSTER_SIZE); | 102 | */ |
103 | -#endif | 103 | -int qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset, |
104 | -#if defined(CONFIG_VDI_STATIC_IMAGE) | 104 | - unsigned int *bytes, uint64_t *host_offset, |
105 | - if (qemu_opt_get_bool_del(opts, BLOCK_OPT_STATIC, false)) { | 105 | - QCowL2Meta **m) |
106 | + bytes = vdi_opts->size; | 106 | +int coroutine_fn qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset, |
107 | + if (vdi_opts->q_static) { | 107 | + unsigned int *bytes, |
108 | image_type = VDI_TYPE_STATIC; | 108 | + uint64_t *host_offset, |
109 | } | 109 | + QCowL2Meta **m) |
110 | +#ifndef CONFIG_VDI_STATIC_IMAGE | 110 | { |
111 | + if (image_type == VDI_TYPE_STATIC) { | 111 | BDRVQcow2State *s = bs->opaque; |
112 | + ret = -ENOTSUP; | 112 | uint64_t start, remaining; |
113 | + error_setg(errp, "Statically allocated images cannot be created in " | 113 | @@ -XXX,XX +XXX,XX @@ out: |
114 | + "this build"); | ||
115 | + goto exit; | ||
116 | + } | ||
117 | +#endif | ||
118 | +#ifndef CONFIG_VDI_BLOCK_SIZE | ||
119 | + if (block_size != DEFAULT_CLUSTER_SIZE) { | ||
120 | + ret = -ENOTSUP; | ||
121 | + error_setg(errp, | ||
122 | + "A non-default cluster size is not supported in this build"); | ||
123 | + goto exit; | ||
124 | + } | ||
125 | #endif | ||
126 | |||
127 | if (bytes > VDI_DISK_SIZE_MAX) { | ||
128 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts, | ||
129 | goto exit; | ||
130 | } | ||
131 | |||
132 | - ret = bdrv_create_file(filename, opts, &local_err); | ||
133 | + ret = bdrv_create_file(filename, file_opts, &local_err); | ||
134 | if (ret < 0) { | ||
135 | error_propagate(errp, local_err); | ||
136 | goto exit; | ||
137 | @@ -XXX,XX +XXX,XX @@ exit: | ||
138 | return ret; | 114 | return ret; |
139 | } | 115 | } |
140 | 116 | ||
141 | +static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts, | 117 | -int qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset, |
142 | + Error **errp) | 118 | - uint64_t bytes, int flags) |
143 | +{ | 119 | +int coroutine_fn qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset, |
144 | + QDict *qdict = NULL; | 120 | + uint64_t bytes, int flags) |
145 | + BlockdevCreateOptionsVdi *create_options = NULL; | ||
146 | + uint64_t block_size = DEFAULT_CLUSTER_SIZE; | ||
147 | + Visitor *v; | ||
148 | + Error *local_err = NULL; | ||
149 | + int ret; | ||
150 | + | ||
151 | + /* Since CONFIG_VDI_BLOCK_SIZE is disabled by default, | ||
152 | + * cluster-size is not part of the QAPI schema; therefore we have | ||
153 | + * to parse it before creating the QAPI object. */ | ||
154 | +#if defined(CONFIG_VDI_BLOCK_SIZE) | ||
155 | + block_size = qemu_opt_get_size_del(opts, | ||
156 | + BLOCK_OPT_CLUSTER_SIZE, | ||
157 | + DEFAULT_CLUSTER_SIZE); | ||
158 | + if (block_size < BDRV_SECTOR_SIZE || block_size > UINT32_MAX || | ||
159 | + !is_power_of_2(block_size)) | ||
160 | + { | ||
161 | + error_setg(errp, "Invalid cluster size"); | ||
162 | + ret = -EINVAL; | ||
163 | + goto done; | ||
164 | + } | ||
165 | +#endif | ||
166 | + | ||
167 | + qdict = qemu_opts_to_qdict_filtered(opts, NULL, &vdi_create_opts, true); | ||
168 | + | ||
169 | + qdict_put_str(qdict, "file", ""); /* FIXME */ | ||
170 | + | ||
171 | + /* Get the QAPI object */ | ||
172 | + v = qobject_input_visitor_new_keyval(QOBJECT(qdict)); | ||
173 | + visit_type_BlockdevCreateOptionsVdi(v, NULL, &create_options, &local_err); | ||
174 | + visit_free(v); | ||
175 | + | ||
176 | + if (local_err) { | ||
177 | + error_propagate(errp, local_err); | ||
178 | + ret = -EINVAL; | ||
179 | + goto done; | ||
180 | + } | ||
181 | + | ||
182 | + create_options->size = ROUND_UP(create_options->size, BDRV_SECTOR_SIZE); | ||
183 | + | ||
184 | + ret = vdi_co_do_create(filename, opts, create_options, block_size, errp); | ||
185 | +done: | ||
186 | + QDECREF(qdict); | ||
187 | + qapi_free_BlockdevCreateOptionsVdi(create_options); | ||
188 | + return ret; | ||
189 | +} | ||
190 | + | ||
191 | static void vdi_close(BlockDriverState *bs) | ||
192 | { | 121 | { |
193 | BDRVVdiState *s = bs->opaque; | 122 | BDRVQcow2State *s = bs->opaque; |
123 | uint64_t end_offset = offset + bytes; | ||
124 | diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c | ||
125 | index XXXXXXX..XXXXXXX 100644 | ||
126 | --- a/block/qcow2-refcount.c | ||
127 | +++ b/block/qcow2-refcount.c | ||
128 | @@ -XXX,XX +XXX,XX @@ int64_t qcow2_get_last_cluster(BlockDriverState *bs, int64_t size) | ||
129 | return -EIO; | ||
130 | } | ||
131 | |||
132 | -int qcow2_detect_metadata_preallocation(BlockDriverState *bs) | ||
133 | +int coroutine_fn qcow2_detect_metadata_preallocation(BlockDriverState *bs) | ||
134 | { | ||
135 | BDRVQcow2State *s = bs->opaque; | ||
136 | int64_t i, end_cluster, cluster_count = 0, threshold; | ||
137 | diff --git a/block/qcow2.c b/block/qcow2.c | ||
138 | index XXXXXXX..XXXXXXX 100644 | ||
139 | --- a/block/qcow2.c | ||
140 | +++ b/block/qcow2.c | ||
141 | @@ -XXX,XX +XXX,XX @@ static bool merge_cow(uint64_t offset, unsigned bytes, | ||
142 | * Return 1 if the COW regions read as zeroes, 0 if not, < 0 on error. | ||
143 | * Note that returning 0 does not guarantee non-zero data. | ||
144 | */ | ||
145 | -static int is_zero_cow(BlockDriverState *bs, QCowL2Meta *m) | ||
146 | +static int coroutine_fn is_zero_cow(BlockDriverState *bs, QCowL2Meta *m) | ||
147 | { | ||
148 | /* | ||
149 | * This check is designed for optimization shortcut so it must be | ||
150 | @@ -XXX,XX +XXX,XX @@ static int is_zero_cow(BlockDriverState *bs, QCowL2Meta *m) | ||
151 | m->cow_end.nb_bytes); | ||
152 | } | ||
153 | |||
154 | -static int handle_alloc_space(BlockDriverState *bs, QCowL2Meta *l2meta) | ||
155 | +static int coroutine_fn handle_alloc_space(BlockDriverState *bs, | ||
156 | + QCowL2Meta *l2meta) | ||
157 | { | ||
158 | BDRVQcow2State *s = bs->opaque; | ||
159 | QCowL2Meta *m; | ||
194 | -- | 160 | -- |
195 | 2.13.6 | 161 | 2.37.3 |
196 | |||
1 | From: Fam Zheng <famz@redhat.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Signed-off-by: Fam Zheng <famz@redhat.com> | 3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call |
4 | Reviewed-by: Max Reitz <mreitz@redhat.com> | 4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to |
5 | functions where this holds. | ||
6 | |||
7 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
9 | Message-Id: <20220922084924.201610-16-pbonzini@redhat.com> | ||
10 | [kwolf: Fixed up coding style] | ||
11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
5 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
6 | --- | 13 | --- |
7 | tests/qemu-iotests/153 | 12 ++++++++++++ | 14 | block/copy-before-write.c | 9 +++++---- |
8 | tests/qemu-iotests/153.out | 5 +++++ | 15 | 1 file changed, 5 insertions(+), 4 deletions(-) |
9 | 2 files changed, 17 insertions(+) | ||
10 | 16 | ||
11 | diff --git a/tests/qemu-iotests/153 b/tests/qemu-iotests/153 | 17 | diff --git a/block/copy-before-write.c b/block/copy-before-write.c |
12 | index XXXXXXX..XXXXXXX 100755 | ||
13 | --- a/tests/qemu-iotests/153 | ||
14 | +++ b/tests/qemu-iotests/153 | ||
15 | @@ -XXX,XX +XXX,XX @@ rm -f "${TEST_IMG}.lnk" &>/dev/null | ||
16 | ln -s ${TEST_IMG} "${TEST_IMG}.lnk" || echo "Failed to create link" | ||
17 | _run_qemu_with_images "${TEST_IMG}.lnk" "${TEST_IMG}" | ||
18 | |||
19 | +echo | ||
20 | +echo "== Active commit to intermediate layer should work when base in use ==" | ||
21 | +_launch_qemu -drive format=$IMGFMT,file="${TEST_IMG}.a",id=drive0,if=none \ | ||
22 | + -device virtio-blk,drive=drive0 | ||
23 | + | ||
24 | +_send_qemu_cmd $QEMU_HANDLE \ | ||
25 | + "{ 'execute': 'qmp_capabilities' }" \ | ||
26 | + 'return' | ||
27 | +_run_cmd $QEMU_IMG commit -b "${TEST_IMG}.b" "${TEST_IMG}.c" | ||
28 | + | ||
29 | +_cleanup_qemu | ||
30 | + | ||
31 | _launch_qemu | ||
32 | |||
33 | _send_qemu_cmd $QEMU_HANDLE \ | ||
34 | diff --git a/tests/qemu-iotests/153.out b/tests/qemu-iotests/153.out | ||
35 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
36 | --- a/tests/qemu-iotests/153.out | 19 | --- a/block/copy-before-write.c |
37 | +++ b/tests/qemu-iotests/153.out | 20 | +++ b/block/copy-before-write.c |
38 | @@ -XXX,XX +XXX,XX @@ Is another process using the image? | 21 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn cbw_co_flush(BlockDriverState *bs) |
39 | == Symbolic link == | 22 | * It's guaranteed that guest writes will not interact in the region until |
40 | QEMU_PROG: -drive if=none,file=TEST_DIR/t.qcow2: Failed to get "write" lock | 23 | * cbw_snapshot_read_unlock() called. |
41 | Is another process using the image? | 24 | */ |
42 | + | 25 | -static BlockReq *cbw_snapshot_read_lock(BlockDriverState *bs, |
43 | +== Active commit to intermediate layer should work when base in use == | 26 | - int64_t offset, int64_t bytes, |
44 | +{"return": {}} | 27 | - int64_t *pnum, BdrvChild **file) |
45 | + | 28 | +static coroutine_fn BlockReq * |
46 | +_qemu_img_wrapper commit -b TEST_DIR/t.qcow2.b TEST_DIR/t.qcow2.c | 29 | +cbw_snapshot_read_lock(BlockDriverState *bs, int64_t offset, int64_t bytes, |
47 | {"return": {}} | 30 | + int64_t *pnum, BdrvChild **file) |
48 | Adding drive | 31 | { |
32 | BDRVCopyBeforeWriteState *s = bs->opaque; | ||
33 | BlockReq *req = g_new(BlockReq, 1); | ||
34 | @@ -XXX,XX +XXX,XX @@ static BlockReq *cbw_snapshot_read_lock(BlockDriverState *bs, | ||
35 | return req; | ||
36 | } | ||
37 | |||
38 | -static void cbw_snapshot_read_unlock(BlockDriverState *bs, BlockReq *req) | ||
39 | +static coroutine_fn void | ||
40 | +cbw_snapshot_read_unlock(BlockDriverState *bs, BlockReq *req) | ||
41 | { | ||
42 | BDRVCopyBeforeWriteState *s = bs->opaque; | ||
49 | 43 | ||
50 | -- | 44 | -- |
51 | 2.13.6 | 45 | 2.37.3 |
52 | |||
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call | ||
4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to | ||
5 | functions where this holds. | ||
6 | |||
7 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
9 | Message-Id: <20220922084924.201610-17-pbonzini@redhat.com> | ||
10 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
12 | --- | ||
13 | block/curl.c | 2 +- | ||
14 | 1 file changed, 1 insertion(+), 1 deletion(-) | ||
15 | |||
16 | diff --git a/block/curl.c b/block/curl.c | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/block/curl.c | ||
19 | +++ b/block/curl.c | ||
20 | @@ -XXX,XX +XXX,XX @@ out_noclean: | ||
21 | return -EINVAL; | ||
22 | } | ||
23 | |||
24 | -static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb) | ||
25 | +static void coroutine_fn curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb) | ||
26 | { | ||
27 | CURLState *state; | ||
28 | int running; | ||
29 | -- | ||
30 | 2.37.3 | ||
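The rule these annotation patches apply is the same throughout the series: a function marked coroutine_fn may only be entered from coroutine context, so every caller must either carry the annotation itself or guard the call with qemu_in_coroutine(). The patches only add the annotation where the existing callers already satisfy this; the self-contained toy below only illustrates the two call shapes that do. The coroutine_fn macro and the fake_in_coroutine flag are simplified stand-ins, not QEMU's real coroutine primitives.

/*
 * Toy illustration of the coroutine_fn calling rule.
 * Stand-ins only; QEMU's real annotation and qemu_in_coroutine()
 * live in the coroutine headers and are not reproduced here.
 */
#include <stdbool.h>
#include <stdio.h>

#define coroutine_fn            /* annotation only, expands to nothing */

static bool fake_in_coroutine;  /* stand-in for qemu_in_coroutine() */

static void coroutine_fn co_do_io(void)
{
    /* real coroutine_fn code could yield here; the toy just prints */
    printf("doing I/O in coroutine context\n");
}

/* Legal shape 1: the caller is itself coroutine_fn. */
static void coroutine_fn co_flush(void)
{
    co_do_io();
}

/* Legal shape 2: a mixed-context caller guards the call. */
static void flush_any_context(void)
{
    if (fake_in_coroutine) {
        co_do_io();
    } else {
        printf("not in a coroutine, taking the synchronous path\n");
    }
}

int main(void)
{
    flush_any_context();        /* synchronous path */
    fake_in_coroutine = true;   /* pretend a coroutine entered us */
    co_flush();
    flush_any_context();
    return 0;
}

Anything outside these two shapes violates the rule stated in the commit messages.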
1 | This adds the .bdrv_co_create driver callback to qed, which | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | enables image creation over QMP. | ||
3 | 2 | ||
3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call | ||
4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to | ||
5 | functions where this holds. | ||
6 | |||
7 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
9 | Message-Id: <20220922084924.201610-18-pbonzini@redhat.com> | ||
10 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
4 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
5 | Reviewed-by: Max Reitz <mreitz@redhat.com> | ||
6 | --- | 12 | --- |
7 | qapi/block-core.json | 25 ++++++- | 13 | block/qed.c | 4 ++-- |
8 | block/qed.c | 204 ++++++++++++++++++++++++++++++++++----------------- | 14 | 1 file changed, 2 insertions(+), 2 deletions(-) |
9 | 2 files changed, 162 insertions(+), 67 deletions(-) | ||
10 | 15 | ||
11 | diff --git a/qapi/block-core.json b/qapi/block-core.json | ||
12 | index XXXXXXX..XXXXXXX 100644 | ||
13 | --- a/qapi/block-core.json | ||
14 | +++ b/qapi/block-core.json | ||
15 | @@ -XXX,XX +XXX,XX @@ | ||
16 | '*refcount-bits': 'int' } } | ||
17 | |||
18 | ## | ||
19 | +# @BlockdevCreateOptionsQed: | ||
20 | +# | ||
21 | +# Driver specific image creation options for qed. | ||
22 | +# | ||
23 | +# @file Node to create the image format on | ||
24 | +# @size Size of the virtual disk in bytes | ||
25 | +# @backing-file File name of the backing file if a backing file | ||
26 | +# should be used | ||
27 | +# @backing-fmt Name of the block driver to use for the backing file | ||
28 | +# @cluster-size Cluster size in bytes (default: 65536) | ||
29 | +# @table-size L1/L2 table size (in clusters) | ||
30 | +# | ||
31 | +# Since: 2.12 | ||
32 | +## | ||
33 | +{ 'struct': 'BlockdevCreateOptionsQed', | ||
34 | + 'data': { 'file': 'BlockdevRef', | ||
35 | + 'size': 'size', | ||
36 | + '*backing-file': 'str', | ||
37 | + '*backing-fmt': 'BlockdevDriver', | ||
38 | + '*cluster-size': 'size', | ||
39 | + '*table-size': 'int' } } | ||
40 | + | ||
41 | +## | ||
42 | # @BlockdevCreateOptionsRbd: | ||
43 | # | ||
44 | # Driver specific image creation options for rbd/Ceph. | ||
45 | @@ -XXX,XX +XXX,XX @@ | ||
46 | 'parallels': 'BlockdevCreateOptionsParallels', | ||
47 | 'qcow': 'BlockdevCreateOptionsQcow', | ||
48 | 'qcow2': 'BlockdevCreateOptionsQcow2', | ||
49 | - 'qed': 'BlockdevCreateNotSupported', | ||
50 | + 'qed': 'BlockdevCreateOptionsQed', | ||
51 | 'quorum': 'BlockdevCreateNotSupported', | ||
52 | 'raw': 'BlockdevCreateNotSupported', | ||
53 | 'rbd': 'BlockdevCreateOptionsRbd', | ||
54 | diff --git a/block/qed.c b/block/qed.c | 16 | diff --git a/block/qed.c b/block/qed.c |
55 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
56 | --- a/block/qed.c | 18 | --- a/block/qed.c |
57 | +++ b/block/qed.c | 19 | +++ b/block/qed.c |
58 | @@ -XXX,XX +XXX,XX @@ | 20 | @@ -XXX,XX +XXX,XX @@ static CachedL2Table *qed_new_l2_table(BDRVQEDState *s) |
59 | #include "trace.h" | 21 | return l2_table; |
60 | #include "qed.h" | ||
61 | #include "sysemu/block-backend.h" | ||
62 | +#include "qapi/qmp/qdict.h" | ||
63 | +#include "qapi/qobject-input-visitor.h" | ||
64 | +#include "qapi/qapi-visit-block-core.h" | ||
65 | + | ||
66 | +static QemuOptsList qed_create_opts; | ||
67 | |||
68 | static int bdrv_qed_probe(const uint8_t *buf, int buf_size, | ||
69 | const char *filename) | ||
70 | @@ -XXX,XX +XXX,XX @@ static void bdrv_qed_close(BlockDriverState *bs) | ||
71 | qemu_vfree(s->l1_table); | ||
72 | } | 22 | } |
73 | 23 | ||
74 | -static int qed_create(const char *filename, uint32_t cluster_size, | 24 | -static bool qed_plug_allocating_write_reqs(BDRVQEDState *s) |
75 | - uint64_t image_size, uint32_t table_size, | 25 | +static bool coroutine_fn qed_plug_allocating_write_reqs(BDRVQEDState *s) |
76 | - const char *backing_file, const char *backing_fmt, | ||
77 | - QemuOpts *opts, Error **errp) | ||
78 | +static int coroutine_fn bdrv_qed_co_create(BlockdevCreateOptions *opts, | ||
79 | + Error **errp) | ||
80 | { | 26 | { |
81 | - QEDHeader header = { | 27 | qemu_co_mutex_lock(&s->table_lock); |
82 | - .magic = QED_MAGIC, | 28 | |
83 | - .cluster_size = cluster_size, | 29 | @@ -XXX,XX +XXX,XX @@ static bool qed_plug_allocating_write_reqs(BDRVQEDState *s) |
84 | - .table_size = table_size, | 30 | return true; |
85 | - .header_size = 1, | ||
86 | - .features = 0, | ||
87 | - .compat_features = 0, | ||
88 | - .l1_table_offset = cluster_size, | ||
89 | - .image_size = image_size, | ||
90 | - }; | ||
91 | + BlockdevCreateOptionsQed *qed_opts; | ||
92 | + BlockBackend *blk = NULL; | ||
93 | + BlockDriverState *bs = NULL; | ||
94 | + | ||
95 | + QEDHeader header; | ||
96 | QEDHeader le_header; | ||
97 | uint8_t *l1_table = NULL; | ||
98 | - size_t l1_size = header.cluster_size * header.table_size; | ||
99 | - Error *local_err = NULL; | ||
100 | + size_t l1_size; | ||
101 | int ret = 0; | ||
102 | - BlockBackend *blk; | ||
103 | |||
104 | - ret = bdrv_create_file(filename, opts, &local_err); | ||
105 | - if (ret < 0) { | ||
106 | - error_propagate(errp, local_err); | ||
107 | - return ret; | ||
108 | + assert(opts->driver == BLOCKDEV_DRIVER_QED); | ||
109 | + qed_opts = &opts->u.qed; | ||
110 | + | ||
111 | + /* Validate options and set default values */ | ||
112 | + if (!qed_opts->has_cluster_size) { | ||
113 | + qed_opts->cluster_size = QED_DEFAULT_CLUSTER_SIZE; | ||
114 | + } | ||
115 | + if (!qed_opts->has_table_size) { | ||
116 | + qed_opts->table_size = QED_DEFAULT_TABLE_SIZE; | ||
117 | } | ||
118 | |||
119 | - blk = blk_new_open(filename, NULL, NULL, | ||
120 | - BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, | ||
121 | - &local_err); | ||
122 | - if (blk == NULL) { | ||
123 | - error_propagate(errp, local_err); | ||
124 | + if (!qed_is_cluster_size_valid(qed_opts->cluster_size)) { | ||
125 | + error_setg(errp, "QED cluster size must be within range [%u, %u] " | ||
126 | + "and power of 2", | ||
127 | + QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE); | ||
128 | + return -EINVAL; | ||
129 | + } | ||
130 | + if (!qed_is_table_size_valid(qed_opts->table_size)) { | ||
131 | + error_setg(errp, "QED table size must be within range [%u, %u] " | ||
132 | + "and power of 2", | ||
133 | + QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE); | ||
134 | + return -EINVAL; | ||
135 | + } | ||
136 | + if (!qed_is_image_size_valid(qed_opts->size, qed_opts->cluster_size, | ||
137 | + qed_opts->table_size)) | ||
138 | + { | ||
139 | + error_setg(errp, "QED image size must be a non-zero multiple of " | ||
140 | + "cluster size and less than %" PRIu64 " bytes", | ||
141 | + qed_max_image_size(qed_opts->cluster_size, | ||
142 | + qed_opts->table_size)); | ||
143 | + return -EINVAL; | ||
144 | + } | ||
145 | + | ||
146 | + /* Create BlockBackend to write to the image */ | ||
147 | + bs = bdrv_open_blockdev_ref(qed_opts->file, errp); | ||
148 | + if (bs == NULL) { | ||
149 | return -EIO; | ||
150 | } | ||
151 | |||
152 | + blk = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL); | ||
153 | + ret = blk_insert_bs(blk, bs, errp); | ||
154 | + if (ret < 0) { | ||
155 | + goto out; | ||
156 | + } | ||
157 | blk_set_allow_write_beyond_eof(blk, true); | ||
158 | |||
159 | + /* Prepare image format */ | ||
160 | + header = (QEDHeader) { | ||
161 | + .magic = QED_MAGIC, | ||
162 | + .cluster_size = qed_opts->cluster_size, | ||
163 | + .table_size = qed_opts->table_size, | ||
164 | + .header_size = 1, | ||
165 | + .features = 0, | ||
166 | + .compat_features = 0, | ||
167 | + .l1_table_offset = qed_opts->cluster_size, | ||
168 | + .image_size = qed_opts->size, | ||
169 | + }; | ||
170 | + | ||
171 | + l1_size = header.cluster_size * header.table_size; | ||
172 | + | ||
173 | /* File must start empty and grow, check truncate is supported */ | ||
174 | ret = blk_truncate(blk, 0, PREALLOC_MODE_OFF, errp); | ||
175 | if (ret < 0) { | ||
176 | goto out; | ||
177 | } | ||
178 | |||
179 | - if (backing_file) { | ||
180 | + if (qed_opts->has_backing_file) { | ||
181 | header.features |= QED_F_BACKING_FILE; | ||
182 | header.backing_filename_offset = sizeof(le_header); | ||
183 | - header.backing_filename_size = strlen(backing_file); | ||
184 | + header.backing_filename_size = strlen(qed_opts->backing_file); | ||
185 | |||
186 | - if (qed_fmt_is_raw(backing_fmt)) { | ||
187 | - header.features |= QED_F_BACKING_FORMAT_NO_PROBE; | ||
188 | + if (qed_opts->has_backing_fmt) { | ||
189 | + const char *backing_fmt = BlockdevDriver_str(qed_opts->backing_fmt); | ||
190 | + if (qed_fmt_is_raw(backing_fmt)) { | ||
191 | + header.features |= QED_F_BACKING_FORMAT_NO_PROBE; | ||
192 | + } | ||
193 | } | ||
194 | } | ||
195 | |||
196 | @@ -XXX,XX +XXX,XX @@ static int qed_create(const char *filename, uint32_t cluster_size, | ||
197 | if (ret < 0) { | ||
198 | goto out; | ||
199 | } | ||
200 | - ret = blk_pwrite(blk, sizeof(le_header), backing_file, | ||
201 | + ret = blk_pwrite(blk, sizeof(le_header), qed_opts->backing_file, | ||
202 | header.backing_filename_size, 0); | ||
203 | if (ret < 0) { | ||
204 | goto out; | ||
205 | @@ -XXX,XX +XXX,XX @@ static int qed_create(const char *filename, uint32_t cluster_size, | ||
206 | out: | ||
207 | g_free(l1_table); | ||
208 | blk_unref(blk); | ||
209 | + bdrv_unref(bs); | ||
210 | return ret; | ||
211 | } | 31 | } |
212 | 32 | ||
213 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_qed_co_create_opts(const char *filename, | 33 | -static void qed_unplug_allocating_write_reqs(BDRVQEDState *s) |
214 | QemuOpts *opts, | 34 | +static void coroutine_fn qed_unplug_allocating_write_reqs(BDRVQEDState *s) |
215 | Error **errp) | ||
216 | { | 35 | { |
217 | - uint64_t image_size = 0; | 36 | qemu_co_mutex_lock(&s->table_lock); |
218 | - uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE; | 37 | assert(s->allocating_write_reqs_plugged); |
219 | - uint32_t table_size = QED_DEFAULT_TABLE_SIZE; | ||
220 | - char *backing_file = NULL; | ||
221 | - char *backing_fmt = NULL; | ||
222 | + BlockdevCreateOptions *create_options = NULL; | ||
223 | + QDict *qdict = NULL; | ||
224 | + QObject *qobj; | ||
225 | + Visitor *v; | ||
226 | + BlockDriverState *bs = NULL; | ||
227 | + Error *local_err = NULL; | ||
228 | int ret; | ||
229 | |||
230 | - image_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), | ||
231 | - BDRV_SECTOR_SIZE); | ||
232 | - backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE); | ||
233 | - backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT); | ||
234 | - cluster_size = qemu_opt_get_size_del(opts, | ||
235 | - BLOCK_OPT_CLUSTER_SIZE, | ||
236 | - QED_DEFAULT_CLUSTER_SIZE); | ||
237 | - table_size = qemu_opt_get_size_del(opts, BLOCK_OPT_TABLE_SIZE, | ||
238 | - QED_DEFAULT_TABLE_SIZE); | ||
239 | - | ||
240 | - if (!qed_is_cluster_size_valid(cluster_size)) { | ||
241 | - error_setg(errp, "QED cluster size must be within range [%u, %u] " | ||
242 | - "and power of 2", | ||
243 | - QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE); | ||
244 | + static const QDictRenames opt_renames[] = { | ||
245 | + { BLOCK_OPT_BACKING_FILE, "backing-file" }, | ||
246 | + { BLOCK_OPT_BACKING_FMT, "backing-fmt" }, | ||
247 | + { BLOCK_OPT_CLUSTER_SIZE, "cluster-size" }, | ||
248 | + { BLOCK_OPT_TABLE_SIZE, "table-size" }, | ||
249 | + { NULL, NULL }, | ||
250 | + }; | ||
251 | + | ||
252 | + /* Parse options and convert legacy syntax */ | ||
253 | + qdict = qemu_opts_to_qdict_filtered(opts, NULL, &qed_create_opts, true); | ||
254 | + | ||
255 | + if (!qdict_rename_keys(qdict, opt_renames, errp)) { | ||
256 | ret = -EINVAL; | ||
257 | - goto finish; | ||
258 | + goto fail; | ||
259 | } | ||
260 | - if (!qed_is_table_size_valid(table_size)) { | ||
261 | - error_setg(errp, "QED table size must be within range [%u, %u] " | ||
262 | - "and power of 2", | ||
263 | - QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE); | ||
264 | + | ||
265 | + /* Create and open the file (protocol layer) */ | ||
266 | + ret = bdrv_create_file(filename, opts, &local_err); | ||
267 | + if (ret < 0) { | ||
268 | + error_propagate(errp, local_err); | ||
269 | + goto fail; | ||
270 | + } | ||
271 | + | ||
272 | + bs = bdrv_open(filename, NULL, NULL, | ||
273 | + BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp); | ||
274 | + if (bs == NULL) { | ||
275 | + ret = -EIO; | ||
276 | + goto fail; | ||
277 | + } | ||
278 | + | ||
279 | + /* Now get the QAPI type BlockdevCreateOptions */ | ||
280 | + qdict_put_str(qdict, "driver", "qed"); | ||
281 | + qdict_put_str(qdict, "file", bs->node_name); | ||
282 | + | ||
283 | + qobj = qdict_crumple(qdict, errp); | ||
284 | + QDECREF(qdict); | ||
285 | + qdict = qobject_to_qdict(qobj); | ||
286 | + if (qdict == NULL) { | ||
287 | ret = -EINVAL; | ||
288 | - goto finish; | ||
289 | + goto fail; | ||
290 | } | ||
291 | - if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) { | ||
292 | - error_setg(errp, "QED image size must be a non-zero multiple of " | ||
293 | - "cluster size and less than %" PRIu64 " bytes", | ||
294 | - qed_max_image_size(cluster_size, table_size)); | ||
295 | + | ||
296 | + v = qobject_input_visitor_new_keyval(QOBJECT(qdict)); | ||
297 | + visit_type_BlockdevCreateOptions(v, NULL, &create_options, &local_err); | ||
298 | + visit_free(v); | ||
299 | + | ||
300 | + if (local_err) { | ||
301 | + error_propagate(errp, local_err); | ||
302 | ret = -EINVAL; | ||
303 | - goto finish; | ||
304 | + goto fail; | ||
305 | } | ||
306 | |||
307 | - ret = qed_create(filename, cluster_size, image_size, table_size, | ||
308 | - backing_file, backing_fmt, opts, errp); | ||
309 | + /* Silently round up size */ | ||
310 | + assert(create_options->driver == BLOCKDEV_DRIVER_QED); | ||
311 | + create_options->u.qed.size = | ||
312 | + ROUND_UP(create_options->u.qed.size, BDRV_SECTOR_SIZE); | ||
313 | + | ||
314 | + /* Create the qed image (format layer) */ | ||
315 | + ret = bdrv_qed_co_create(create_options, errp); | ||
316 | |||
317 | -finish: | ||
318 | - g_free(backing_file); | ||
319 | - g_free(backing_fmt); | ||
320 | +fail: | ||
321 | + QDECREF(qdict); | ||
322 | + bdrv_unref(bs); | ||
323 | + qapi_free_BlockdevCreateOptions(create_options); | ||
324 | return ret; | ||
325 | } | ||
326 | |||
327 | @@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_qed = { | ||
328 | .bdrv_close = bdrv_qed_close, | ||
329 | .bdrv_reopen_prepare = bdrv_qed_reopen_prepare, | ||
330 | .bdrv_child_perm = bdrv_format_default_perms, | ||
331 | + .bdrv_co_create = bdrv_qed_co_create, | ||
332 | .bdrv_co_create_opts = bdrv_qed_co_create_opts, | ||
333 | .bdrv_has_zero_init = bdrv_has_zero_init_1, | ||
334 | .bdrv_co_block_status = bdrv_qed_co_block_status, | ||
335 | -- | 38 | -- |
336 | 2.13.6 | 39 | 2.37.3 |
337 | |||
1 | From: Max Reitz <mreitz@redhat.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Signed-off-by: Max Reitz <mreitz@redhat.com> | 3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call |
4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to | ||
5 | functions where this holds. | ||
6 | |||
7 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
9 | Message-Id: <20220922084924.201610-19-pbonzini@redhat.com> | ||
10 | [kwolf: Fixed up coding style] | ||
11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
4 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
5 | --- | 13 | --- |
6 | qapi/block-core.json | 2 +- | 14 | block/quorum.c | 36 +++++++++++++++++++----------------- |
7 | block/vdi.c | 24 +++++++++++++++++++----- | 15 | 1 file changed, 19 insertions(+), 17 deletions(-) |
8 | 2 files changed, 20 insertions(+), 6 deletions(-) | ||
9 | 16 | ||
10 | diff --git a/qapi/block-core.json b/qapi/block-core.json | 17 | diff --git a/block/quorum.c b/block/quorum.c |
11 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
12 | --- a/qapi/block-core.json | 19 | --- a/block/quorum.c |
13 | +++ b/qapi/block-core.json | 20 | +++ b/block/quorum.c |
14 | @@ -XXX,XX +XXX,XX @@ | 21 | @@ -XXX,XX +XXX,XX @@ static bool quorum_64bits_compare(QuorumVoteValue *a, QuorumVoteValue *b) |
15 | 'sheepdog': 'BlockdevCreateOptionsSheepdog', | 22 | return a->l == b->l; |
16 | 'ssh': 'BlockdevCreateOptionsSsh', | 23 | } |
17 | 'throttle': 'BlockdevCreateNotSupported', | 24 | |
18 | - 'vdi': 'BlockdevCreateNotSupported', | 25 | -static QuorumAIOCB *quorum_aio_get(BlockDriverState *bs, |
19 | + 'vdi': 'BlockdevCreateOptionsVdi', | 26 | - QEMUIOVector *qiov, |
20 | 'vhdx': 'BlockdevCreateNotSupported', | 27 | - uint64_t offset, |
21 | 'vmdk': 'BlockdevCreateNotSupported', | 28 | - uint64_t bytes, |
22 | 'vpc': 'BlockdevCreateNotSupported', | 29 | - int flags) |
23 | diff --git a/block/vdi.c b/block/vdi.c | 30 | +static QuorumAIOCB *coroutine_fn quorum_aio_get(BlockDriverState *bs, |
24 | index XXXXXXX..XXXXXXX 100644 | 31 | + QEMUIOVector *qiov, |
25 | --- a/block/vdi.c | 32 | + uint64_t offset, uint64_t bytes, |
26 | +++ b/block/vdi.c | 33 | + int flags) |
27 | @@ -XXX,XX +XXX,XX @@ nonallocating_write: | 34 | { |
35 | BDRVQuorumState *s = bs->opaque; | ||
36 | QuorumAIOCB *acb = g_new(QuorumAIOCB, 1); | ||
37 | @@ -XXX,XX +XXX,XX @@ static void quorum_report_bad_versions(BDRVQuorumState *s, | ||
38 | } | ||
39 | } | ||
40 | |||
41 | -static void quorum_rewrite_entry(void *opaque) | ||
42 | +static void coroutine_fn quorum_rewrite_entry(void *opaque) | ||
43 | { | ||
44 | QuorumCo *co = opaque; | ||
45 | QuorumAIOCB *acb = co->acb; | ||
46 | @@ -XXX,XX +XXX,XX @@ free_exit: | ||
47 | quorum_free_vote_list(&acb->votes); | ||
48 | } | ||
49 | |||
50 | -static void read_quorum_children_entry(void *opaque) | ||
51 | +static void coroutine_fn read_quorum_children_entry(void *opaque) | ||
52 | { | ||
53 | QuorumCo *co = opaque; | ||
54 | QuorumAIOCB *acb = co->acb; | ||
55 | @@ -XXX,XX +XXX,XX @@ static void read_quorum_children_entry(void *opaque) | ||
56 | } | ||
57 | } | ||
58 | |||
59 | -static int read_quorum_children(QuorumAIOCB *acb) | ||
60 | +static int coroutine_fn read_quorum_children(QuorumAIOCB *acb) | ||
61 | { | ||
62 | BDRVQuorumState *s = acb->bs->opaque; | ||
63 | int i; | ||
64 | @@ -XXX,XX +XXX,XX @@ static int read_quorum_children(QuorumAIOCB *acb) | ||
65 | return acb->vote_ret; | ||
66 | } | ||
67 | |||
68 | -static int read_fifo_child(QuorumAIOCB *acb) | ||
69 | +static int coroutine_fn read_fifo_child(QuorumAIOCB *acb) | ||
70 | { | ||
71 | BDRVQuorumState *s = acb->bs->opaque; | ||
72 | int n, ret; | ||
73 | @@ -XXX,XX +XXX,XX @@ static int read_fifo_child(QuorumAIOCB *acb) | ||
28 | return ret; | 74 | return ret; |
29 | } | 75 | } |
30 | 76 | ||
31 | -static int coroutine_fn vdi_co_do_create(BlockdevCreateOptionsVdi *vdi_opts, | 77 | -static int quorum_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, |
32 | +static int coroutine_fn vdi_co_do_create(BlockdevCreateOptions *create_options, | 78 | - QEMUIOVector *qiov, BdrvRequestFlags flags) |
33 | size_t block_size, Error **errp) | 79 | +static int coroutine_fn quorum_co_preadv(BlockDriverState *bs, |
80 | + int64_t offset, int64_t bytes, | ||
81 | + QEMUIOVector *qiov, | ||
82 | + BdrvRequestFlags flags) | ||
34 | { | 83 | { |
35 | + BlockdevCreateOptionsVdi *vdi_opts; | 84 | BDRVQuorumState *s = bs->opaque; |
36 | int ret = 0; | 85 | QuorumAIOCB *acb = quorum_aio_get(bs, qiov, offset, bytes, flags); |
37 | uint64_t bytes = 0; | 86 | @@ -XXX,XX +XXX,XX @@ static int quorum_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, |
38 | uint32_t blocks; | ||
39 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_do_create(BlockdevCreateOptionsVdi *vdi_opts, | ||
40 | BlockBackend *blk = NULL; | ||
41 | uint32_t *bmap = NULL; | ||
42 | |||
43 | + assert(create_options->driver == BLOCKDEV_DRIVER_VDI); | ||
44 | + vdi_opts = &create_options->u.vdi; | ||
45 | + | ||
46 | logout("\n"); | ||
47 | |||
48 | /* Read out options. */ | ||
49 | @@ -XXX,XX +XXX,XX @@ exit: | ||
50 | return ret; | 87 | return ret; |
51 | } | 88 | } |
52 | 89 | ||
53 | +static int coroutine_fn vdi_co_create(BlockdevCreateOptions *create_options, | 90 | -static void write_quorum_entry(void *opaque) |
54 | + Error **errp) | 91 | +static void coroutine_fn write_quorum_entry(void *opaque) |
55 | +{ | ||
56 | + return vdi_co_do_create(create_options, DEFAULT_CLUSTER_SIZE, errp); | ||
57 | +} | ||
58 | + | ||
59 | static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts, | ||
60 | Error **errp) | ||
61 | { | 92 | { |
62 | QDict *qdict = NULL; | 93 | QuorumCo *co = opaque; |
63 | - BlockdevCreateOptionsVdi *create_options = NULL; | 94 | QuorumAIOCB *acb = co->acb; |
64 | + BlockdevCreateOptions *create_options = NULL; | 95 | @@ -XXX,XX +XXX,XX @@ static void write_quorum_entry(void *opaque) |
65 | BlockDriverState *bs_file = NULL; | ||
66 | uint64_t block_size = DEFAULT_CLUSTER_SIZE; | ||
67 | Visitor *v; | ||
68 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts, | ||
69 | goto done; | ||
70 | } | 96 | } |
71 | 97 | } | |
72 | + qdict_put_str(qdict, "driver", "vdi"); | 98 | |
73 | qdict_put_str(qdict, "file", bs_file->node_name); | 99 | -static int quorum_co_pwritev(BlockDriverState *bs, int64_t offset, |
74 | 100 | - int64_t bytes, QEMUIOVector *qiov, | |
75 | /* Get the QAPI object */ | 101 | - BdrvRequestFlags flags) |
76 | v = qobject_input_visitor_new_keyval(QOBJECT(qdict)); | 102 | +static int coroutine_fn quorum_co_pwritev(BlockDriverState *bs, int64_t offset, |
77 | - visit_type_BlockdevCreateOptionsVdi(v, NULL, &create_options, &local_err); | 103 | + int64_t bytes, QEMUIOVector *qiov, |
78 | + visit_type_BlockdevCreateOptions(v, NULL, &create_options, &local_err); | 104 | + BdrvRequestFlags flags) |
79 | visit_free(v); | 105 | { |
80 | 106 | BDRVQuorumState *s = bs->opaque; | |
81 | if (local_err) { | 107 | QuorumAIOCB *acb = quorum_aio_get(bs, qiov, offset, bytes, flags); |
82 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts, | 108 | @@ -XXX,XX +XXX,XX @@ static int quorum_co_pwritev(BlockDriverState *bs, int64_t offset, |
83 | goto done; | ||
84 | } | ||
85 | |||
86 | - create_options->size = ROUND_UP(create_options->size, BDRV_SECTOR_SIZE); | ||
87 | + assert(create_options->driver == BLOCKDEV_DRIVER_VDI); | ||
88 | + create_options->u.vdi.size = ROUND_UP(create_options->u.vdi.size, | ||
89 | + BDRV_SECTOR_SIZE); | ||
90 | |||
91 | ret = vdi_co_do_create(create_options, block_size, errp); | ||
92 | done: | ||
93 | QDECREF(qdict); | ||
94 | - qapi_free_BlockdevCreateOptionsVdi(create_options); | ||
95 | + qapi_free_BlockdevCreateOptions(create_options); | ||
96 | bdrv_unref(bs_file); | ||
97 | return ret; | 109 | return ret; |
98 | } | 110 | } |
99 | @@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_vdi = { | 111 | |
100 | .bdrv_reopen_prepare = vdi_reopen_prepare, | 112 | -static int quorum_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, |
101 | .bdrv_child_perm = bdrv_format_default_perms, | 113 | - int64_t bytes, BdrvRequestFlags flags) |
102 | .bdrv_co_create_opts = vdi_co_create_opts, | 114 | +static int coroutine_fn quorum_co_pwrite_zeroes(BlockDriverState *bs, |
103 | + .bdrv_co_create = vdi_co_create, | 115 | + int64_t offset, int64_t bytes, |
104 | .bdrv_has_zero_init = bdrv_has_zero_init_1, | 116 | + BdrvRequestFlags flags) |
105 | .bdrv_co_block_status = vdi_co_block_status, | 117 | |
106 | .bdrv_make_empty = vdi_make_empty, | 118 | { |
119 | return quorum_co_pwritev(bs, offset, bytes, NULL, | ||
107 | -- | 120 | -- |
108 | 2.13.6 | 121 | 2.37.3 |
109 | |||
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call | ||
4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to | ||
5 | functions where this holds. | ||
6 | |||
7 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
9 | Message-Id: <20220922084924.201610-20-pbonzini@redhat.com> | ||
10 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
12 | --- | ||
13 | block/throttle.c | 2 +- | ||
14 | 1 file changed, 1 insertion(+), 1 deletion(-) | ||
15 | |||
16 | diff --git a/block/throttle.c b/block/throttle.c | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/block/throttle.c | ||
19 | +++ b/block/throttle.c | ||
20 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn throttle_co_pwritev_compressed(BlockDriverState *bs, | ||
21 | BDRV_REQ_WRITE_COMPRESSED); | ||
22 | } | ||
23 | |||
24 | -static int throttle_co_flush(BlockDriverState *bs) | ||
25 | +static int coroutine_fn throttle_co_flush(BlockDriverState *bs) | ||
26 | { | ||
27 | return bdrv_co_flush(bs->file->bs); | ||
28 | } | ||
29 | -- | ||
30 | 2.37.3 | ||
1 | This adds the .bdrv_co_create driver callback to vpc, which | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | enables image creation over QMP. | ||
3 | 2 | ||
3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call | ||
4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to | ||
5 | functions where this holds. | ||
6 | |||
7 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
9 | Message-Id: <20220922084924.201610-21-pbonzini@redhat.com> | ||
10 | [kwolf: Fixed up coding style] | ||
11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
4 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
5 | Reviewed-by: Max Reitz <mreitz@redhat.com> | ||
6 | --- | 13 | --- |
7 | qapi/block-core.json | 33 ++++++++++- | 14 | block/vmdk.c | 22 ++++++++++++---------- |
8 | block/vpc.c | 152 ++++++++++++++++++++++++++++++++++++++------------- | 15 | 1 file changed, 12 insertions(+), 10 deletions(-) |
9 | 2 files changed, 147 insertions(+), 38 deletions(-) | ||
10 | 16 | ||
11 | diff --git a/qapi/block-core.json b/qapi/block-core.json | 17 | diff --git a/block/vmdk.c b/block/vmdk.c |
12 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/qapi/block-core.json | 19 | --- a/block/vmdk.c |
14 | +++ b/qapi/block-core.json | 20 | +++ b/block/vmdk.c |
15 | @@ -XXX,XX +XXX,XX @@ | 21 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vmdk_co_block_status(BlockDriverState *bs, |
16 | '*block-state-zero': 'bool' } } | ||
17 | |||
18 | ## | ||
19 | +# @BlockdevVpcSubformat: | ||
20 | +# | ||
21 | +# @dynamic: Growing image file | ||
22 | +# @fixed: Preallocated fixed-size image file | ||
23 | +# | ||
24 | +# Since: 2.12 | ||
25 | +## | ||
26 | +{ 'enum': 'BlockdevVpcSubformat', | ||
27 | + 'data': [ 'dynamic', 'fixed' ] } | ||
28 | + | ||
29 | +## | ||
30 | +# @BlockdevCreateOptionsVpc: | ||
31 | +# | ||
32 | +# Driver specific image creation options for vpc (VHD). | ||
33 | +# | ||
34 | +# @file Node to create the image format on | ||
35 | +# @size Size of the virtual disk in bytes | ||
36 | +# @subformat vhdx subformat (default: dynamic) | ||
37 | +# @force-size Force use of the exact byte size instead of rounding to the | ||
38 | +# next size that can be represented in CHS geometry | ||
39 | +# (default: false) | ||
40 | +# | ||
41 | +# Since: 2.12 | ||
42 | +## | ||
43 | +{ 'struct': 'BlockdevCreateOptionsVpc', | ||
44 | + 'data': { 'file': 'BlockdevRef', | ||
45 | + 'size': 'size', | ||
46 | + '*subformat': 'BlockdevVpcSubformat', | ||
47 | + '*force-size': 'bool' } } | ||
48 | + | ||
49 | +## | ||
50 | # @BlockdevCreateNotSupported: | ||
51 | # | ||
52 | # This is used for all drivers that don't support creating images. | ||
53 | @@ -XXX,XX +XXX,XX @@ | ||
54 | 'vdi': 'BlockdevCreateOptionsVdi', | ||
55 | 'vhdx': 'BlockdevCreateOptionsVhdx', | ||
56 | 'vmdk': 'BlockdevCreateNotSupported', | ||
57 | - 'vpc': 'BlockdevCreateNotSupported', | ||
58 | + 'vpc': 'BlockdevCreateOptionsVpc', | ||
59 | 'vvfat': 'BlockdevCreateNotSupported', | ||
60 | 'vxhs': 'BlockdevCreateNotSupported' | ||
61 | } } | ||
62 | diff --git a/block/vpc.c b/block/vpc.c | ||
63 | index XXXXXXX..XXXXXXX 100644 | ||
64 | --- a/block/vpc.c | ||
65 | +++ b/block/vpc.c | ||
66 | @@ -XXX,XX +XXX,XX @@ | ||
67 | #include "migration/blocker.h" | ||
68 | #include "qemu/bswap.h" | ||
69 | #include "qemu/uuid.h" | ||
70 | +#include "qapi/qmp/qdict.h" | ||
71 | +#include "qapi/qobject-input-visitor.h" | ||
72 | +#include "qapi/qapi-visit-block-core.h" | ||
73 | |||
74 | /**************************************************************/ | ||
75 | |||
76 | @@ -XXX,XX +XXX,XX @@ static QemuOptsList vpc_runtime_opts = { | ||
77 | } | ||
78 | }; | ||
79 | |||
80 | +static QemuOptsList vpc_create_opts; | ||
81 | + | ||
82 | static uint32_t vpc_checksum(uint8_t* buf, size_t size) | ||
83 | { | ||
84 | uint32_t res = 0; | ||
85 | @@ -XXX,XX +XXX,XX @@ static int create_fixed_disk(BlockBackend *blk, uint8_t *buf, | ||
86 | return ret; | 22 | return ret; |
87 | } | 23 | } |
88 | 24 | ||
89 | -static int coroutine_fn vpc_co_create_opts(const char *filename, QemuOpts *opts, | 25 | -static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset, |
90 | - Error **errp) | 26 | - int64_t offset_in_cluster, QEMUIOVector *qiov, |
91 | +static int coroutine_fn vpc_co_create(BlockdevCreateOptions *opts, | 27 | - uint64_t qiov_offset, uint64_t n_bytes, |
92 | + Error **errp) | 28 | - uint64_t offset) |
29 | +static int coroutine_fn | ||
30 | +vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset, | ||
31 | + int64_t offset_in_cluster, QEMUIOVector *qiov, | ||
32 | + uint64_t qiov_offset, uint64_t n_bytes, | ||
33 | + uint64_t offset) | ||
93 | { | 34 | { |
94 | + BlockdevCreateOptionsVpc *vpc_opts; | 35 | int ret; |
95 | + BlockBackend *blk = NULL; | 36 | VmdkGrainMarker *data = NULL; |
96 | + BlockDriverState *bs = NULL; | 37 | @@ -XXX,XX +XXX,XX @@ static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset, |
97 | + | ||
98 | uint8_t buf[1024]; | ||
99 | VHDFooter *footer = (VHDFooter *) buf; | ||
100 | - char *disk_type_param; | ||
101 | int i; | ||
102 | uint16_t cyls = 0; | ||
103 | uint8_t heads = 0; | ||
104 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vpc_co_create_opts(const char *filename, QemuOpts *opts, | ||
105 | int64_t total_size; | ||
106 | int disk_type; | ||
107 | int ret = -EIO; | ||
108 | - bool force_size; | ||
109 | - Error *local_err = NULL; | ||
110 | - BlockBackend *blk = NULL; | ||
111 | |||
112 | - /* Read out options */ | ||
113 | - total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), | ||
114 | - BDRV_SECTOR_SIZE); | ||
115 | - disk_type_param = qemu_opt_get_del(opts, BLOCK_OPT_SUBFMT); | ||
116 | - if (disk_type_param) { | ||
117 | - if (!strcmp(disk_type_param, "dynamic")) { | ||
118 | - disk_type = VHD_DYNAMIC; | ||
119 | - } else if (!strcmp(disk_type_param, "fixed")) { | ||
120 | - disk_type = VHD_FIXED; | ||
121 | - } else { | ||
122 | - error_setg(errp, "Invalid disk type, %s", disk_type_param); | ||
123 | - ret = -EINVAL; | ||
124 | - goto out; | ||
125 | - } | ||
126 | - } else { | ||
127 | + assert(opts->driver == BLOCKDEV_DRIVER_VPC); | ||
128 | + vpc_opts = &opts->u.vpc; | ||
129 | + | ||
130 | + /* Validate options and set default values */ | ||
131 | + total_size = vpc_opts->size; | ||
132 | + | ||
133 | + if (!vpc_opts->has_subformat) { | ||
134 | + vpc_opts->subformat = BLOCKDEV_VPC_SUBFORMAT_DYNAMIC; | ||
135 | + } | ||
136 | + switch (vpc_opts->subformat) { | ||
137 | + case BLOCKDEV_VPC_SUBFORMAT_DYNAMIC: | ||
138 | disk_type = VHD_DYNAMIC; | ||
139 | + break; | ||
140 | + case BLOCKDEV_VPC_SUBFORMAT_FIXED: | ||
141 | + disk_type = VHD_FIXED; | ||
142 | + break; | ||
143 | + default: | ||
144 | + g_assert_not_reached(); | ||
145 | } | ||
146 | |||
147 | - force_size = qemu_opt_get_bool_del(opts, VPC_OPT_FORCE_SIZE, false); | ||
148 | - | ||
149 | - ret = bdrv_create_file(filename, opts, &local_err); | ||
150 | - if (ret < 0) { | ||
151 | - error_propagate(errp, local_err); | ||
152 | - goto out; | ||
153 | + /* Create BlockBackend to write to the image */ | ||
154 | + bs = bdrv_open_blockdev_ref(vpc_opts->file, errp); | ||
155 | + if (bs == NULL) { | ||
156 | + return -EIO; | ||
157 | } | ||
158 | |||
159 | - blk = blk_new_open(filename, NULL, NULL, | ||
160 | - BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, | ||
161 | - &local_err); | ||
162 | - if (blk == NULL) { | ||
163 | - error_propagate(errp, local_err); | ||
164 | - ret = -EIO; | ||
165 | + blk = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL); | ||
166 | + ret = blk_insert_bs(blk, bs, errp); | ||
167 | + if (ret < 0) { | ||
168 | goto out; | ||
169 | } | ||
170 | - | ||
171 | blk_set_allow_write_beyond_eof(blk, true); | ||
172 | |||
173 | /* | ||
174 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vpc_co_create_opts(const char *filename, QemuOpts *opts, | ||
175 | * we set the geometry to 65535 x 16 x 255 (CxHxS) sectors and use | ||
176 | * the image size from the VHD footer to calculate total_sectors. | ||
177 | */ | ||
178 | - if (force_size) { | ||
179 | + if (vpc_opts->force_size) { | ||
180 | /* This will force the use of total_size for sector count, below */ | ||
181 | cyls = VHD_CHS_MAX_C; | ||
182 | heads = VHD_CHS_MAX_H; | ||
183 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vpc_co_create_opts(const char *filename, QemuOpts *opts, | ||
184 | memset(buf, 0, 1024); | ||
185 | |||
186 | memcpy(footer->creator, "conectix", 8); | ||
187 | - if (force_size) { | ||
188 | + if (vpc_opts->force_size) { | ||
189 | memcpy(footer->creator_app, "qem2", 4); | ||
190 | } else { | ||
191 | memcpy(footer->creator_app, "qemu", 4); | ||
192 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vpc_co_create_opts(const char *filename, QemuOpts *opts, | ||
193 | |||
194 | out: | ||
195 | blk_unref(blk); | ||
196 | - g_free(disk_type_param); | ||
197 | + bdrv_unref(bs); | ||
198 | + return ret; | ||
199 | +} | ||
200 | + | ||
201 | +static int coroutine_fn vpc_co_create_opts(const char *filename, | ||
202 | + QemuOpts *opts, Error **errp) | ||
203 | +{ | ||
204 | + BlockdevCreateOptions *create_options = NULL; | ||
205 | + QDict *qdict = NULL; | ||
206 | + QObject *qobj; | ||
207 | + Visitor *v; | ||
208 | + BlockDriverState *bs = NULL; | ||
209 | + Error *local_err = NULL; | ||
210 | + int ret; | ||
211 | + | ||
212 | + static const QDictRenames opt_renames[] = { | ||
213 | + { VPC_OPT_FORCE_SIZE, "force-size" }, | ||
214 | + { NULL, NULL }, | ||
215 | + }; | ||
216 | + | ||
217 | + /* Parse options and convert legacy syntax */ | ||
218 | + qdict = qemu_opts_to_qdict_filtered(opts, NULL, &vpc_create_opts, true); | ||
219 | + | ||
220 | + if (!qdict_rename_keys(qdict, opt_renames, errp)) { | ||
221 | + ret = -EINVAL; | ||
222 | + goto fail; | ||
223 | + } | ||
224 | + | ||
225 | + /* Create and open the file (protocol layer) */ | ||
226 | + ret = bdrv_create_file(filename, opts, &local_err); | ||
227 | + if (ret < 0) { | ||
228 | + error_propagate(errp, local_err); | ||
229 | + goto fail; | ||
230 | + } | ||
231 | + | ||
232 | + bs = bdrv_open(filename, NULL, NULL, | ||
233 | + BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp); | ||
234 | + if (bs == NULL) { | ||
235 | + ret = -EIO; | ||
236 | + goto fail; | ||
237 | + } | ||
238 | + | ||
239 | + /* Now get the QAPI type BlockdevCreateOptions */ | ||
240 | + qdict_put_str(qdict, "driver", "vpc"); | ||
241 | + qdict_put_str(qdict, "file", bs->node_name); | ||
242 | + | ||
243 | + qobj = qdict_crumple(qdict, errp); | ||
244 | + QDECREF(qdict); | ||
245 | + qdict = qobject_to_qdict(qobj); | ||
246 | + if (qdict == NULL) { | ||
247 | + ret = -EINVAL; | ||
248 | + goto fail; | ||
249 | + } | ||
250 | + | ||
251 | + v = qobject_input_visitor_new_keyval(QOBJECT(qdict)); | ||
252 | + visit_type_BlockdevCreateOptions(v, NULL, &create_options, &local_err); | ||
253 | + visit_free(v); | ||
254 | + | ||
255 | + if (local_err) { | ||
256 | + error_propagate(errp, local_err); | ||
257 | + ret = -EINVAL; | ||
258 | + goto fail; | ||
259 | + } | ||
260 | + | ||
261 | + /* Silently round up size */ | ||
262 | + assert(create_options->driver == BLOCKDEV_DRIVER_VPC); | ||
263 | + create_options->u.vpc.size = | ||
264 | + ROUND_UP(create_options->u.vpc.size, BDRV_SECTOR_SIZE); | ||
265 | + | ||
266 | + /* Create the vpc image (format layer) */ | ||
267 | + ret = vpc_co_create(create_options, errp); | ||
268 | + | ||
269 | +fail: | ||
270 | + QDECREF(qdict); | ||
271 | + bdrv_unref(bs); | ||
272 | + qapi_free_BlockdevCreateOptions(create_options); | ||
273 | return ret; | 38 | return ret; |
274 | } | 39 | } |
275 | 40 | ||
276 | + | 41 | -static int vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset, |
277 | static int vpc_has_zero_init(BlockDriverState *bs) | 42 | - int64_t offset_in_cluster, QEMUIOVector *qiov, |
43 | - int bytes) | ||
44 | +static int coroutine_fn | ||
45 | +vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset, | ||
46 | + int64_t offset_in_cluster, QEMUIOVector *qiov, | ||
47 | + int bytes) | ||
278 | { | 48 | { |
279 | BDRVVPCState *s = bs->opaque; | 49 | int ret; |
280 | @@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_vpc = { | 50 | int cluster_bytes, buf_bytes; |
281 | .bdrv_close = vpc_close, | 51 | @@ -XXX,XX +XXX,XX @@ fail: |
282 | .bdrv_reopen_prepare = vpc_reopen_prepare, | 52 | * |
283 | .bdrv_child_perm = bdrv_format_default_perms, | 53 | * Returns: error code with 0 for success. |
284 | + .bdrv_co_create = vpc_co_create, | 54 | */ |
285 | .bdrv_co_create_opts = vpc_co_create_opts, | 55 | -static int vmdk_pwritev(BlockDriverState *bs, uint64_t offset, |
286 | 56 | - uint64_t bytes, QEMUIOVector *qiov, | |
287 | .bdrv_co_preadv = vpc_co_preadv, | 57 | - bool zeroed, bool zero_dry_run) |
58 | +static int coroutine_fn vmdk_pwritev(BlockDriverState *bs, uint64_t offset, | ||
59 | + uint64_t bytes, QEMUIOVector *qiov, | ||
60 | + bool zeroed, bool zero_dry_run) | ||
61 | { | ||
62 | BDRVVmdkState *s = bs->opaque; | ||
63 | VmdkExtent *extent = NULL; | ||
288 | -- | 64 | -- |
289 | 2.13.6 | 65 | 2.37.3 |
290 | |||
1 | From: John Snow <jsnow@redhat.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Presently, even if a job is canceled post-completion as a result of | 3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call |
4 | a failing peer in a transaction, it will still call .commit because | 4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to |
5 | nothing has updated or changed its return code. | 5 | functions where this holds. |
6 | 6 | ||
7 | The reason why this does not cause problems currently is because | 7 | Reviewed-by: Alberto Faria <afaria@redhat.com> |
8 | backup's implementation of .commit checks for cancellation itself. | 8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
9 | 9 | Message-Id: <20220922084924.201610-22-pbonzini@redhat.com> | |
10 | I'd like to simplify this contract: | ||
11 | |||
12 | (1) Abort is called if the job/transaction fails | ||
13 | (2) Commit is called if the job/transaction succeeds | ||
14 | |||
15 | To this end: A job's return code, if 0, will be forcibly set as | ||
16 | -ECANCELED if that job has already concluded. Remove the now | ||
17 | redundant check in the backup job implementation. | ||
18 | |||
19 | We need to check for cancellation in both block_job_completed | ||
20 | AND block_job_completed_single, because jobs may be cancelled between | ||
21 | those two calls; for instance in transactions. This also necessitates | ||
22 | an ABORTING -> ABORTING transition to be allowed. | ||
23 | |||
24 | The check in block_job_completed could be removed, but there's no | ||
25 | point in starting to attempt to succeed a transaction that we know | ||
26 | in advance will fail. | ||
27 | |||
28 | This does NOT affect mirror jobs that are "canceled" during their | ||
29 | synchronous phase. The mirror job itself forcibly sets the canceled | ||
30 | property to false prior to ceding control, so such cases will invoke | ||
31 | the "commit" callback. | ||
32 | |||
33 | Signed-off-by: John Snow <jsnow@redhat.com> | ||
34 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
35 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 10 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
36 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
37 | --- | 12 | --- |
38 | block/backup.c | 2 +- | 13 | include/qemu/job.h | 2 +- |
39 | blockjob.c | 21 ++++++++++++++++----- | 14 | job.c | 2 +- |
40 | block/trace-events | 1 + | 15 | 2 files changed, 2 insertions(+), 2 deletions(-) |
41 | 3 files changed, 18 insertions(+), 6 deletions(-) | ||
42 | 16 | ||
43 | diff --git a/block/backup.c b/block/backup.c | 17 | diff --git a/include/qemu/job.h b/include/qemu/job.h |
44 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
45 | --- a/block/backup.c | 19 | --- a/include/qemu/job.h |
46 | +++ b/block/backup.c | 20 | +++ b/include/qemu/job.h |
47 | @@ -XXX,XX +XXX,XX @@ static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret) | 21 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn job_pause_point(Job *job); |
48 | BdrvDirtyBitmap *bm; | 22 | * |
49 | BlockDriverState *bs = blk_bs(job->common.blk); | 23 | * Yield the job coroutine. |
50 | 24 | */ | |
51 | - if (ret < 0 || block_job_is_cancelled(&job->common)) { | 25 | -void job_yield(Job *job); |
52 | + if (ret < 0) { | 26 | +void coroutine_fn job_yield(Job *job); |
53 | /* Merge the successor back into the parent, delete nothing. */ | 27 | |
54 | bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL); | 28 | /** |
55 | assert(bm); | 29 | * @job: The job that calls the function. |
56 | diff --git a/blockjob.c b/blockjob.c | 30 | diff --git a/job.c b/job.c |
57 | index XXXXXXX..XXXXXXX 100644 | 31 | index XXXXXXX..XXXXXXX 100644 |
58 | --- a/blockjob.c | 32 | --- a/job.c |
59 | +++ b/blockjob.c | 33 | +++ b/job.c |
60 | @@ -XXX,XX +XXX,XX @@ bool BlockJobSTT[BLOCK_JOB_STATUS__MAX][BLOCK_JOB_STATUS__MAX] = { | 34 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn job_pause_point(Job *job) |
61 | /* P: */ [BLOCK_JOB_STATUS_PAUSED] = {0, 0, 1, 0, 0, 0, 0, 0, 0}, | ||
62 | /* Y: */ [BLOCK_JOB_STATUS_READY] = {0, 0, 0, 0, 0, 1, 1, 1, 0}, | ||
63 | /* S: */ [BLOCK_JOB_STATUS_STANDBY] = {0, 0, 0, 0, 1, 0, 0, 0, 0}, | ||
64 | - /* X: */ [BLOCK_JOB_STATUS_ABORTING] = {0, 0, 0, 0, 0, 0, 0, 1, 0}, | ||
65 | + /* X: */ [BLOCK_JOB_STATUS_ABORTING] = {0, 0, 0, 0, 0, 0, 1, 1, 0}, | ||
66 | /* E: */ [BLOCK_JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0, 1}, | ||
67 | /* N: */ [BLOCK_JOB_STATUS_NULL] = {0, 0, 0, 0, 0, 0, 0, 0, 0}, | ||
68 | }; | ||
69 | @@ -XXX,XX +XXX,XX @@ static void block_job_conclude(BlockJob *job) | ||
70 | } | 35 | } |
71 | } | 36 | } |
72 | 37 | ||
73 | +static void block_job_update_rc(BlockJob *job) | 38 | -void job_yield(Job *job) |
74 | +{ | 39 | +void coroutine_fn job_yield(Job *job) |
75 | + if (!job->ret && block_job_is_cancelled(job)) { | ||
76 | + job->ret = -ECANCELED; | ||
77 | + } | ||
78 | + if (job->ret) { | ||
79 | + block_job_state_transition(job, BLOCK_JOB_STATUS_ABORTING); | ||
80 | + } | ||
81 | +} | ||
82 | + | ||
83 | static void block_job_completed_single(BlockJob *job) | ||
84 | { | 40 | { |
85 | assert(job->completed); | 41 | assert(job->busy); |
86 | |||
87 | - if (job->ret || block_job_is_cancelled(job)) { | ||
88 | - block_job_state_transition(job, BLOCK_JOB_STATUS_ABORTING); | ||
89 | - } | ||
90 | + /* Ensure abort is called for late-transactional failures */ | ||
91 | + block_job_update_rc(job); | ||
92 | |||
93 | if (!job->ret) { | ||
94 | if (job->driver->commit) { | ||
95 | @@ -XXX,XX +XXX,XX @@ void block_job_completed(BlockJob *job, int ret) | ||
96 | assert(blk_bs(job->blk)->job == job); | ||
97 | job->completed = true; | ||
98 | job->ret = ret; | ||
99 | - if (ret < 0 || block_job_is_cancelled(job)) { | ||
100 | + block_job_update_rc(job); | ||
101 | + trace_block_job_completed(job, ret, job->ret); | ||
102 | + if (job->ret) { | ||
103 | block_job_completed_txn_abort(job); | ||
104 | } else { | ||
105 | block_job_completed_txn_success(job); | ||
106 | diff --git a/block/trace-events b/block/trace-events | ||
107 | index XXXXXXX..XXXXXXX 100644 | ||
108 | --- a/block/trace-events | ||
109 | +++ b/block/trace-events | ||
110 | @@ -XXX,XX +XXX,XX @@ bdrv_open_common(void *bs, const char *filename, int flags, const char *format_n | ||
111 | bdrv_lock_medium(void *bs, bool locked) "bs %p locked %d" | ||
112 | |||
113 | # blockjob.c | ||
114 | +block_job_completed(void *job, int ret, int jret) "job %p ret %d corrected ret %d" | ||
115 | block_job_state_transition(void *job, int ret, const char *legal, const char *s0, const char *s1) "job %p (ret: %d) attempting %s transition (%s-->%s)" | ||
116 | block_job_apply_verb(void *job, const char *state, const char *verb, const char *legal) "job %p in state %s; applying verb %s (%s)" | ||
117 | 42 | ||
118 | -- | 43 | -- |
119 | 2.13.6 | 44 | 2.37.3 |
120 | |||
1 | From: John Snow <jsnow@redhat.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Instead of automatically transitioning from PENDING to CONCLUDED, gate | 3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call |
4 | the .prepare() and .commit() phases behind an explicit acknowledgement | 4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to |
5 | provided by the QMP monitor if auto_finalize = false has been requested. | 5 | functions where this holds. |
6 | 6 | ||
7 | This allows us to perform graph changes in prepare and/or commit so that | 7 | Reviewed-by: Alberto Faria <afaria@redhat.com> |
8 | graph changes do not occur autonomously without knowledge of the | 8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
9 | controlling management layer. | 9 | Message-Id: <20220922084924.201610-23-pbonzini@redhat.com> |
10 | 10 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | |
11 | Transactions that have reached the "PENDING" state together can all be | ||
12 | moved to invoke their finalization methods by issuing block_job_finalize | ||
13 | to any one job in the transaction. | ||
14 | |||
15 | Jobs in a transaction with mixed job->auto_finalize settings will all | ||
16 | remain stuck in the "PENDING" state, as if the entire transaction was | ||
17 | specified with auto_finalize = false. Jobs that specified | ||
18 | auto_finalize = true, however, will still not emit the PENDING event. | ||
19 | |||
20 | Signed-off-by: John Snow <jsnow@redhat.com> | ||
21 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
22 | --- | 12 | --- |
23 | qapi/block-core.json | 23 ++++++++++++++++++- | 13 | util/qemu-coroutine-lock.c | 14 +++++++------- |
24 | include/block/blockjob.h | 17 ++++++++++++++ | 14 | 1 file changed, 7 insertions(+), 7 deletions(-) |
25 | blockdev.c | 14 +++++++++++ | ||
26 | blockjob.c | 60 +++++++++++++++++++++++++++++++++++------------- | ||
27 | block/trace-events | 1 + | ||
28 | 5 files changed, 98 insertions(+), 17 deletions(-) | ||
29 | 15 | ||
30 | diff --git a/qapi/block-core.json b/qapi/block-core.json | 16 | diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c |
31 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
32 | --- a/qapi/block-core.json | 18 | --- a/util/qemu-coroutine-lock.c |
33 | +++ b/qapi/block-core.json | 19 | +++ b/util/qemu-coroutine-lock.c |
34 | @@ -XXX,XX +XXX,XX @@ | 20 | @@ -XXX,XX +XXX,XX @@ typedef struct CoWaitRecord { |
35 | # | 21 | QSLIST_ENTRY(CoWaitRecord) next; |
36 | # @dismiss: see @block-job-dismiss | 22 | } CoWaitRecord; |
37 | # | 23 | |
38 | +# @finalize: see @block-job-finalize | 24 | -static void push_waiter(CoMutex *mutex, CoWaitRecord *w) |
39 | +# | 25 | +static void coroutine_fn push_waiter(CoMutex *mutex, CoWaitRecord *w) |
40 | # Since: 2.12 | 26 | { |
41 | ## | 27 | w->co = qemu_coroutine_self(); |
42 | { 'enum': 'BlockJobVerb', | 28 | QSLIST_INSERT_HEAD_ATOMIC(&mutex->from_push, w, next); |
43 | - 'data': ['cancel', 'pause', 'resume', 'set-speed', 'complete', 'dismiss' ] } | 29 | @@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_init(CoRwlock *lock) |
44 | + 'data': ['cancel', 'pause', 'resume', 'set-speed', 'complete', 'dismiss', | ||
45 | + 'finalize' ] } | ||
46 | |||
47 | ## | ||
48 | # @BlockJobStatus: | ||
49 | @@ -XXX,XX +XXX,XX @@ | ||
50 | { 'command': 'block-job-dismiss', 'data': { 'id': 'str' } } | ||
51 | |||
52 | ## | ||
53 | +# @block-job-finalize: | ||
54 | +# | ||
55 | +# Once a job that has manual=true reaches the pending state, it can be | ||
56 | +# instructed to finalize any graph changes and do any necessary cleanup | ||
57 | +# via this command. | ||
58 | +# For jobs in a transaction, instructing one job to finalize will force | ||
59 | +# ALL jobs in the transaction to finalize, so it is only necessary to instruct | ||
60 | +# a single member job to finalize. | ||
61 | +# | ||
62 | +# @id: The job identifier. | ||
63 | +# | ||
64 | +# Returns: Nothing on success | ||
65 | +# | ||
66 | +# Since: 2.12 | ||
67 | +## | ||
68 | +{ 'command': 'block-job-finalize', 'data': { 'id': 'str' } } | ||
69 | + | ||
70 | +## | ||
71 | # @BlockdevDiscardOptions: | ||
72 | # | ||
73 | # Determines how to handle discard requests. | ||
74 | diff --git a/include/block/blockjob.h b/include/block/blockjob.h | ||
75 | index XXXXXXX..XXXXXXX 100644 | ||
76 | --- a/include/block/blockjob.h | ||
77 | +++ b/include/block/blockjob.h | ||
78 | @@ -XXX,XX +XXX,XX @@ void block_job_cancel(BlockJob *job); | ||
79 | */ | ||
80 | void block_job_complete(BlockJob *job, Error **errp); | ||
81 | |||
82 | + | ||
83 | +/** | ||
84 | + * block_job_finalize: | ||
85 | + * @job: The job to fully commit and finish. | ||
86 | + * @errp: Error object. | ||
87 | + * | ||
88 | + * For jobs that have finished their work and are pending | ||
89 | + * awaiting explicit acknowledgement to commit their work, | ||
90 | + * This will commit that work. | ||
91 | + * | ||
92 | + * FIXME: Make the below statement universally true: | ||
93 | + * For jobs that support the manual workflow mode, all graph | ||
94 | + * changes that occur as a result will occur after this command | ||
95 | + * and before a successful reply. | ||
96 | + */ | ||
97 | +void block_job_finalize(BlockJob *job, Error **errp); | ||
98 | + | ||
99 | /** | ||
100 | * block_job_dismiss: | ||
101 | * @job: The job to be dismissed. | ||
102 | diff --git a/blockdev.c b/blockdev.c | ||
103 | index XXXXXXX..XXXXXXX 100644 | ||
104 | --- a/blockdev.c | ||
105 | +++ b/blockdev.c | ||
106 | @@ -XXX,XX +XXX,XX @@ void qmp_block_job_complete(const char *device, Error **errp) | ||
107 | aio_context_release(aio_context); | ||
108 | } | 30 | } |
109 | 31 | ||
110 | +void qmp_block_job_finalize(const char *id, Error **errp) | 32 | /* Releases the internal CoMutex. */ |
111 | +{ | 33 | -static void qemu_co_rwlock_maybe_wake_one(CoRwlock *lock) |
112 | + AioContext *aio_context; | 34 | +static void coroutine_fn qemu_co_rwlock_maybe_wake_one(CoRwlock *lock) |
113 | + BlockJob *job = find_block_job(id, &aio_context, errp); | ||
114 | + | ||
115 | + if (!job) { | ||
116 | + return; | ||
117 | + } | ||
118 | + | ||
119 | + trace_qmp_block_job_finalize(job); | ||
120 | + block_job_finalize(job, errp); | ||
121 | + aio_context_release(aio_context); | ||
122 | +} | ||
123 | + | ||
124 | void qmp_block_job_dismiss(const char *id, Error **errp) | ||
125 | { | 35 | { |
126 | AioContext *aio_context; | 36 | CoRwTicket *tkt = QSIMPLEQ_FIRST(&lock->tickets); |
127 | diff --git a/blockjob.c b/blockjob.c | 37 | Coroutine *co = NULL; |
128 | index XXXXXXX..XXXXXXX 100644 | 38 | @@ -XXX,XX +XXX,XX @@ static void qemu_co_rwlock_maybe_wake_one(CoRwlock *lock) |
129 | --- a/blockjob.c | ||
130 | +++ b/blockjob.c | ||
131 | @@ -XXX,XX +XXX,XX @@ bool BlockJobVerbTable[BLOCK_JOB_VERB__MAX][BLOCK_JOB_STATUS__MAX] = { | ||
132 | [BLOCK_JOB_VERB_RESUME] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}, | ||
133 | [BLOCK_JOB_VERB_SET_SPEED] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}, | ||
134 | [BLOCK_JOB_VERB_COMPLETE] = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0}, | ||
135 | + [BLOCK_JOB_VERB_FINALIZE] = {0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}, | ||
136 | [BLOCK_JOB_VERB_DISMISS] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0}, | ||
137 | }; | ||
138 | |||
139 | @@ -XXX,XX +XXX,XX @@ static void block_job_clean(BlockJob *job) | ||
140 | } | 39 | } |
141 | } | 40 | } |
142 | 41 | ||
143 | -static int block_job_completed_single(BlockJob *job) | 42 | -void qemu_co_rwlock_rdlock(CoRwlock *lock) |
144 | +static int block_job_finalize_single(BlockJob *job) | 43 | +void coroutine_fn qemu_co_rwlock_rdlock(CoRwlock *lock) |
145 | { | 44 | { |
146 | assert(job->completed); | 45 | Coroutine *self = qemu_coroutine_self(); |
147 | 46 | ||
148 | @@ -XXX,XX +XXX,XX @@ static void block_job_completed_txn_abort(BlockJob *job) | 47 | @@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_rdlock(CoRwlock *lock) |
149 | assert(other_job->cancelled); | 48 | self->locks_held++; |
150 | block_job_finish_sync(other_job, NULL, NULL); | ||
151 | } | ||
152 | - block_job_completed_single(other_job); | ||
153 | + block_job_finalize_single(other_job); | ||
154 | aio_context_release(ctx); | ||
155 | } | ||
156 | |||
157 | block_job_txn_unref(txn); | ||
158 | } | 49 | } |
159 | 50 | ||
160 | +static int block_job_needs_finalize(BlockJob *job) | 51 | -void qemu_co_rwlock_unlock(CoRwlock *lock) |
161 | +{ | 52 | +void coroutine_fn qemu_co_rwlock_unlock(CoRwlock *lock) |
162 | + return !job->auto_finalize; | ||
163 | +} | ||
164 | + | ||
165 | +static void block_job_do_finalize(BlockJob *job) | ||
166 | +{ | ||
167 | + int rc; | ||
168 | + assert(job && job->txn); | ||
169 | + | ||
170 | + /* prepare the transaction to complete */ | ||
171 | + rc = block_job_txn_apply(job->txn, block_job_prepare, true); | ||
172 | + if (rc) { | ||
173 | + block_job_completed_txn_abort(job); | ||
174 | + } else { | ||
175 | + block_job_txn_apply(job->txn, block_job_finalize_single, true); | ||
176 | + } | ||
177 | +} | ||
178 | + | ||
179 | static void block_job_completed_txn_success(BlockJob *job) | ||
180 | { | 53 | { |
181 | BlockJobTxn *txn = job->txn; | 54 | Coroutine *self = qemu_coroutine_self(); |
182 | BlockJob *other_job; | 55 | |
183 | - int rc = 0; | 56 | @@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_unlock(CoRwlock *lock) |
184 | 57 | qemu_co_rwlock_maybe_wake_one(lock); | |
185 | block_job_state_transition(job, BLOCK_JOB_STATUS_WAITING); | ||
186 | |||
187 | @@ -XXX,XX +XXX,XX @@ static void block_job_completed_txn_success(BlockJob *job) | ||
188 | assert(other_job->ret == 0); | ||
189 | } | ||
190 | |||
191 | - /* Jobs may require some prep-work to complete without failure */ | ||
192 | - rc = block_job_txn_apply(txn, block_job_prepare, true); | ||
193 | - if (rc) { | ||
194 | - block_job_completed_txn_abort(job); | ||
195 | - return; | ||
196 | - } | ||
197 | - | ||
198 | - /* We are the last completed job, commit the transaction. */ | ||
199 | block_job_txn_apply(txn, block_job_event_pending, false); | ||
200 | - block_job_txn_apply(txn, block_job_completed_single, true); | ||
201 | + | ||
202 | + /* If no jobs need manual finalization, automatically do so */ | ||
203 | + if (block_job_txn_apply(txn, block_job_needs_finalize, false) == 0) { | ||
204 | + block_job_do_finalize(job); | ||
205 | + } | ||
206 | } | 58 | } |
207 | 59 | ||
208 | /* Assumes the block_job_mutex is held */ | 60 | -void qemu_co_rwlock_downgrade(CoRwlock *lock) |
209 | @@ -XXX,XX +XXX,XX @@ void block_job_complete(BlockJob *job, Error **errp) | 61 | +void coroutine_fn qemu_co_rwlock_downgrade(CoRwlock *lock) |
210 | job->driver->complete(job, errp); | 62 | { |
63 | qemu_co_mutex_lock(&lock->mutex); | ||
64 | assert(lock->owners == -1); | ||
65 | @@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_downgrade(CoRwlock *lock) | ||
66 | qemu_co_rwlock_maybe_wake_one(lock); | ||
211 | } | 67 | } |
212 | 68 | ||
213 | +void block_job_finalize(BlockJob *job, Error **errp) | 69 | -void qemu_co_rwlock_wrlock(CoRwlock *lock) |
214 | +{ | 70 | +void coroutine_fn qemu_co_rwlock_wrlock(CoRwlock *lock) |
215 | + assert(job && job->id && job->txn); | ||
216 | + if (block_job_apply_verb(job, BLOCK_JOB_VERB_FINALIZE, errp)) { | ||
217 | + return; | ||
218 | + } | ||
219 | + block_job_do_finalize(job); | ||
220 | +} | ||
221 | + | ||
222 | void block_job_dismiss(BlockJob **jobptr, Error **errp) | ||
223 | { | 71 | { |
224 | BlockJob *job = *jobptr; | 72 | Coroutine *self = qemu_coroutine_self(); |
225 | @@ -XXX,XX +XXX,XX @@ void block_job_cancel(BlockJob *job) | 73 | |
74 | @@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_wrlock(CoRwlock *lock) | ||
75 | self->locks_held++; | ||
76 | } | ||
77 | |||
78 | -void qemu_co_rwlock_upgrade(CoRwlock *lock) | ||
79 | +void coroutine_fn qemu_co_rwlock_upgrade(CoRwlock *lock) | ||
226 | { | 80 | { |
227 | if (job->status == BLOCK_JOB_STATUS_CONCLUDED) { | 81 | qemu_co_mutex_lock(&lock->mutex); |
228 | block_job_do_dismiss(job); | 82 | assert(lock->owners > 0); |
229 | - } else if (block_job_started(job)) { | ||
230 | - block_job_cancel_async(job); | ||
231 | - block_job_enter(job); | ||
232 | - } else { | ||
233 | + return; | ||
234 | + } | ||
235 | + block_job_cancel_async(job); | ||
236 | + if (!block_job_started(job)) { | ||
237 | block_job_completed(job, -ECANCELED); | ||
238 | + } else if (job->deferred_to_main_loop) { | ||
239 | + block_job_completed_txn_abort(job); | ||
240 | + } else { | ||
241 | + block_job_enter(job); | ||
242 | } | ||
243 | } | ||
244 | |||
245 | diff --git a/block/trace-events b/block/trace-events | ||
246 | index XXXXXXX..XXXXXXX 100644 | ||
247 | --- a/block/trace-events | ||
248 | +++ b/block/trace-events | ||
249 | @@ -XXX,XX +XXX,XX @@ qmp_block_job_cancel(void *job) "job %p" | ||
250 | qmp_block_job_pause(void *job) "job %p" | ||
251 | qmp_block_job_resume(void *job) "job %p" | ||
252 | qmp_block_job_complete(void *job) "job %p" | ||
253 | +qmp_block_job_finalize(void *job) "job %p" | ||
254 | qmp_block_job_dismiss(void *job) "job %p" | ||
255 | qmp_block_stream(void *bs, void *job) "bs %p job %p" | ||
256 | |||
257 | -- | 83 | -- |
258 | 2.13.6 | 84 | 2.37.3 |
259 | |||
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call | ||
4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to | ||
5 | functions where this holds. | ||
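As a rough illustration of this rule (not taken from the patch itself; it only assumes the public helpers declared in include/qemu/coroutine.h):

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"

    /* A coroutine_fn may call another coroutine_fn directly. */
    static void coroutine_fn example_co_helper(void)
    {
        qemu_coroutine_yield();
    }

    /* Code that can run in either context has to check first. */
    static void example_any_context(void)
    {
        if (qemu_in_coroutine()) {
            example_co_helper();
        }
        /* otherwise example_co_helper() must not be reached from here */
    }

Marking such functions coroutine_fn makes the constraint visible to reviewers and to static checks of coroutine_fn usage.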
6 | |||
7 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
9 | Message-Id: <20220922084924.201610-24-pbonzini@redhat.com> | ||
10 | [kwolf: Fixed up coding style] | ||
11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
13 | --- | ||
14 | block/raw-format.c | 3 ++- | ||
15 | 1 file changed, 2 insertions(+), 1 deletion(-) | ||
16 | |||
17 | diff --git a/block/raw-format.c b/block/raw-format.c | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/block/raw-format.c | ||
20 | +++ b/block/raw-format.c | ||
21 | @@ -XXX,XX +XXX,XX @@ static void raw_lock_medium(BlockDriverState *bs, bool locked) | ||
22 | bdrv_lock_medium(bs->file->bs, locked); | ||
23 | } | ||
24 | |||
25 | -static int raw_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) | ||
26 | +static int coroutine_fn raw_co_ioctl(BlockDriverState *bs, | ||
27 | + unsigned long int req, void *buf) | ||
28 | { | ||
29 | BDRVRawState *s = bs->opaque; | ||
30 | if (s->offset || s->has_size) { | ||
31 | -- | ||
32 | 2.37.3
1 | Everything that refers to the protocol layer or QemuOpts is moved out of | 1 | From: Marc-André Lureau <marcandre.lureau@redhat.com> |
---|---|---|---|
2 | block_crypto_create_generic(), so that the remaining function is | ||
3 | suitable to be called by a .bdrv_co_create implementation. | ||
4 | 2 | ||
5 | LUKS is the only driver that actually implements the old interface, and | 3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call |
6 | we don't intend to use it in any new drivers, so put the moved out code | 4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to |
7 | directly into a LUKS function rather than creating a generic | 5 | functions where this holds. |
8 | intermediate one. | ||
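Not part of the patch, but a rough sketch of the calling convention this split is meant to enable (names as used in this series, error handling trimmed):

    /* Hypothetical .bdrv_co_create-style caller of the refactored helper */
    static int coroutine_fn sketch_crypto_co_create(BlockdevRef *file,
                                                    int64_t size,
                                                    QCryptoBlockCreateOptions *create_opts,
                                                    Error **errp)
    {
        /* the protocol node already exists; just resolve the reference */
        BlockDriverState *bs = bdrv_open_blockdev_ref(file, errp);
        int ret;

        if (!bs) {
            return -EIO;
        }
        /* the helper now only does the format-layer (crypto) work */
        ret = block_crypto_co_create_generic(bs, size, create_opts, errp);
        bdrv_unref(bs);
        return ret;
    }

The real LUKS .bdrv_co_create added later in this series follows this shape; no filename, QemuOpts or protocol-layer creation is involved at this level.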
9 | 6 | ||
7 | Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com> | ||
8 | Acked-by: Greg Kurz <groug@kaod.org> | ||
9 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
10 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
11 | Message-Id: <20220922084924.201610-25-pbonzini@redhat.com> | ||
12 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 13 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
11 | Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> | ||
12 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
13 | --- | 14 | --- |
14 | block/crypto.c | 95 +++++++++++++++++++++++++++++++++++++--------------------- | 15 | hw/9pfs/9p.h | 9 ++++++--- |
15 | 1 file changed, 61 insertions(+), 34 deletions(-) | 16 | 1 file changed, 6 insertions(+), 3 deletions(-) |
16 | 17 | ||
17 | diff --git a/block/crypto.c b/block/crypto.c | 18 | diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h |
18 | index XXXXXXX..XXXXXXX 100644 | 19 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/block/crypto.c | 20 | --- a/hw/9pfs/9p.h |
20 | +++ b/block/crypto.c | 21 | +++ b/hw/9pfs/9p.h |
21 | @@ -XXX,XX +XXX,XX @@ static int block_crypto_open_generic(QCryptoBlockFormat format, | 22 | @@ -XXX,XX +XXX,XX @@ typedef struct V9fsGetlock |
23 | extern int open_fd_hw; | ||
24 | extern int total_open_fd; | ||
25 | |||
26 | -static inline void v9fs_path_write_lock(V9fsState *s) | ||
27 | +static inline void coroutine_fn | ||
28 | +v9fs_path_write_lock(V9fsState *s) | ||
29 | { | ||
30 | if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) { | ||
31 | qemu_co_rwlock_wrlock(&s->rename_lock); | ||
32 | } | ||
22 | } | 33 | } |
23 | 34 | ||
24 | 35 | -static inline void v9fs_path_read_lock(V9fsState *s) | |
25 | -static int block_crypto_create_generic(QCryptoBlockFormat format, | 36 | +static inline void coroutine_fn |
26 | - const char *filename, | 37 | +v9fs_path_read_lock(V9fsState *s) |
27 | - QemuOpts *opts, | ||
28 | - Error **errp) | ||
29 | +static int block_crypto_co_create_generic(BlockDriverState *bs, | ||
30 | + int64_t size, | ||
31 | + QCryptoBlockCreateOptions *opts, | ||
32 | + Error **errp) | ||
33 | { | 38 | { |
34 | - int ret = -EINVAL; | 39 | if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) { |
35 | - QCryptoBlockCreateOptions *create_opts = NULL; | 40 | qemu_co_rwlock_rdlock(&s->rename_lock); |
36 | + int ret; | ||
37 | + BlockBackend *blk; | ||
38 | QCryptoBlock *crypto = NULL; | ||
39 | - struct BlockCryptoCreateData data = { | ||
40 | - .size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), | ||
41 | - BDRV_SECTOR_SIZE), | ||
42 | - }; | ||
43 | - QDict *cryptoopts; | ||
44 | - | ||
45 | - /* Parse options */ | ||
46 | - cryptoopts = qemu_opts_to_qdict(opts, NULL); | ||
47 | + struct BlockCryptoCreateData data; | ||
48 | |||
49 | - create_opts = block_crypto_create_opts_init(format, cryptoopts, errp); | ||
50 | - if (!create_opts) { | ||
51 | - return -1; | ||
52 | - } | ||
53 | + blk = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL); | ||
54 | |||
55 | - /* Create protocol layer */ | ||
56 | - ret = bdrv_create_file(filename, opts, errp); | ||
57 | + ret = blk_insert_bs(blk, bs, errp); | ||
58 | if (ret < 0) { | ||
59 | - return ret; | ||
60 | + goto cleanup; | ||
61 | } | 41 | } |
62 | |||
63 | - data.blk = blk_new_open(filename, NULL, NULL, | ||
64 | - BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, | ||
65 | - errp); | ||
66 | - if (!data.blk) { | ||
67 | - return -EINVAL; | ||
68 | - } | ||
69 | + data = (struct BlockCryptoCreateData) { | ||
70 | + .blk = blk, | ||
71 | + .size = size, | ||
72 | + }; | ||
73 | |||
74 | - /* Create format layer */ | ||
75 | - crypto = qcrypto_block_create(create_opts, NULL, | ||
76 | + crypto = qcrypto_block_create(opts, NULL, | ||
77 | block_crypto_init_func, | ||
78 | block_crypto_write_func, | ||
79 | &data, | ||
80 | @@ -XXX,XX +XXX,XX @@ static int block_crypto_create_generic(QCryptoBlockFormat format, | ||
81 | |||
82 | ret = 0; | ||
83 | cleanup: | ||
84 | - QDECREF(cryptoopts); | ||
85 | qcrypto_block_free(crypto); | ||
86 | - blk_unref(data.blk); | ||
87 | - qapi_free_QCryptoBlockCreateOptions(create_opts); | ||
88 | + blk_unref(blk); | ||
89 | return ret; | ||
90 | } | 42 | } |
91 | 43 | ||
92 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn block_crypto_co_create_opts_luks(const char *filename, | 44 | -static inline void v9fs_path_unlock(V9fsState *s) |
93 | QemuOpts *opts, | 45 | +static inline void coroutine_fn |
94 | Error **errp) | 46 | +v9fs_path_unlock(V9fsState *s) |
95 | { | 47 | { |
96 | - return block_crypto_create_generic(Q_CRYPTO_BLOCK_FORMAT_LUKS, | 48 | if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) { |
97 | - filename, opts, errp); | 49 | qemu_co_rwlock_unlock(&s->rename_lock); |
98 | + QCryptoBlockCreateOptions *create_opts = NULL; | ||
99 | + BlockDriverState *bs = NULL; | ||
100 | + QDict *cryptoopts; | ||
101 | + int64_t size; | ||
102 | + int ret; | ||
103 | + | ||
104 | + /* Parse options */ | ||
105 | + size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0); | ||
106 | + | ||
107 | + cryptoopts = qemu_opts_to_qdict_filtered(opts, NULL, | ||
108 | + &block_crypto_create_opts_luks, | ||
109 | + true); | ||
110 | + | ||
111 | + create_opts = block_crypto_create_opts_init(Q_CRYPTO_BLOCK_FORMAT_LUKS, | ||
112 | + cryptoopts, errp); | ||
113 | + if (!create_opts) { | ||
114 | + ret = -EINVAL; | ||
115 | + goto fail; | ||
116 | + } | ||
117 | + | ||
118 | + /* Create protocol layer */ | ||
119 | + ret = bdrv_create_file(filename, opts, errp); | ||
120 | + if (ret < 0) { | ||
121 | + return ret; | ||
122 | + } | ||
123 | + | ||
124 | + bs = bdrv_open(filename, NULL, NULL, | ||
125 | + BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp); | ||
126 | + if (!bs) { | ||
127 | + ret = -EINVAL; | ||
128 | + goto fail; | ||
129 | + } | ||
130 | + | ||
131 | + /* Create format layer */ | ||
132 | + ret = block_crypto_co_create_generic(bs, size, create_opts, errp); | ||
133 | + if (ret < 0) { | ||
134 | + goto fail; | ||
135 | + } | ||
136 | + | ||
137 | + ret = 0; | ||
138 | +fail: | ||
139 | + bdrv_unref(bs); | ||
140 | + qapi_free_QCryptoBlockCreateOptions(create_opts); | ||
141 | + QDECREF(cryptoopts); | ||
142 | + return ret; | ||
143 | } | ||
144 | |||
145 | static int block_crypto_get_info_luks(BlockDriverState *bs, | ||
146 | -- | 50 | -- |
147 | 2.13.6 | 51 | 2.37.3 |
148 | 52 | ||
1 | When you request an image size close to UINT64_MAX, the addition of the | 1 | From: Marc-André Lureau <marcandre.lureau@redhat.com> |
---|---|---|---|
2 | crypto header may cause an integer overflow. Catch it instead of | ||
3 | silently truncating the image size. | ||
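The fix below uses the usual pattern for rejecting an addition that would overflow a signed 64-bit size; as a self-contained illustration (not the patch code itself):

    #include <stdint.h>
    #include <stdbool.h>

    /* True iff size + headerlen fits in an int64_t, computed without
     * ever forming a sum that could wrap around. */
    static bool total_fits_in_int64(uint64_t size, uint64_t headerlen)
    {
        return size <= INT64_MAX && headerlen <= INT64_MAX - size;
    }

Checking headerlen against INT64_MAX - size is safe because the first comparison already guarantees that the subtraction cannot wrap.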
4 | 2 | ||
3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call | ||
4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to | ||
5 | functions where this holds. | ||
6 | |||
7 | Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com> | ||
8 | Reviewed-by: Juan Quintela <quintela@redhat.com> | ||
9 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
10 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
11 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
12 | Message-Id: <20220922084924.201610-26-pbonzini@redhat.com> | ||
13 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
5 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 14 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
6 | Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> | ||
7 | --- | 15 | --- |
8 | block/crypto.c | 5 +++++ | 16 | migration/migration.c | 3 ++- |
9 | 1 file changed, 5 insertions(+) | 17 | 1 file changed, 2 insertions(+), 1 deletion(-) |
10 | 18 | ||
11 | diff --git a/block/crypto.c b/block/crypto.c | 19 | diff --git a/migration/migration.c b/migration/migration.c |
12 | index XXXXXXX..XXXXXXX 100644 | 20 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/block/crypto.c | 21 | --- a/migration/migration.c |
14 | +++ b/block/crypto.c | 22 | +++ b/migration/migration.c |
15 | @@ -XXX,XX +XXX,XX @@ static ssize_t block_crypto_init_func(QCryptoBlock *block, | 23 | @@ -XXX,XX +XXX,XX @@ static void process_incoming_migration_bh(void *opaque) |
24 | migration_incoming_state_destroy(); | ||
25 | } | ||
26 | |||
27 | -static void process_incoming_migration_co(void *opaque) | ||
28 | +static void coroutine_fn | ||
29 | +process_incoming_migration_co(void *opaque) | ||
16 | { | 30 | { |
17 | struct BlockCryptoCreateData *data = opaque; | 31 | MigrationIncomingState *mis = migration_incoming_get_current(); |
18 | 32 | PostcopyState ps; | |
19 | + if (data->size > INT64_MAX || headerlen > INT64_MAX - data->size) { | ||
20 | + error_setg(errp, "The requested file size is too large"); | ||
21 | + return -EFBIG; | ||
22 | + } | ||
23 | + | ||
24 | /* User provided size should reflect amount of space made | ||
25 | * available to the guest, so we must take account of that | ||
26 | * which will be used by the crypto header | ||
27 | -- | 33 | -- |
28 | 2.13.6 | 34 | 2.37.3 |
29 | 35 | ||
1 | This adds the .bdrv_co_create driver callback to luks, which enables | 1 | From: Marc-André Lureau <marcandre.lureau@redhat.com> |
---|---|---|---|
2 | image creation over QMP. | ||
3 | 2 | ||
3 | Callers of coroutine_fn must be coroutine_fn themselves, or the call | ||
4 | must be within "if (qemu_in_coroutine())". Apply coroutine_fn to | ||
5 | functions where this holds. | ||
6 | |||
7 | Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com> | ||
8 | Reviewed-by: Alberto Faria <afaria@redhat.com> | ||
9 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
10 | Message-Id: <20220922084924.201610-27-pbonzini@redhat.com> | ||
11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
4 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
5 | Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> | ||
6 | --- | 13 | --- |
7 | qapi/block-core.json | 17 ++++++++++++++++- | 14 | tests/unit/test-coroutine.c | 2 +- |
8 | block/crypto.c | 34 ++++++++++++++++++++++++++++++++++ | 15 | 1 file changed, 1 insertion(+), 1 deletion(-) |
9 | 2 files changed, 50 insertions(+), 1 deletion(-) | ||
10 | 16 | ||
11 | diff --git a/qapi/block-core.json b/qapi/block-core.json | 17 | diff --git a/tests/unit/test-coroutine.c b/tests/unit/test-coroutine.c |
12 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/qapi/block-core.json | 19 | --- a/tests/unit/test-coroutine.c |
14 | +++ b/qapi/block-core.json | 20 | +++ b/tests/unit/test-coroutine.c |
15 | @@ -XXX,XX +XXX,XX @@ | 21 | @@ -XXX,XX +XXX,XX @@ static void perf_baseline(void) |
16 | '*preallocation': 'PreallocMode' } } | 22 | g_test_message("Function call %u iterations: %f s", maxcycles, duration); |
17 | |||
18 | ## | ||
19 | +# @BlockdevCreateOptionsLUKS: | ||
20 | +# | ||
21 | +# Driver specific image creation options for LUKS. | ||
22 | +# | ||
23 | +# @file Node to create the image format on | ||
24 | +# @size Size of the virtual disk in bytes | ||
25 | +# | ||
26 | +# Since: 2.12 | ||
27 | +## | ||
28 | +{ 'struct': 'BlockdevCreateOptionsLUKS', | ||
29 | + 'base': 'QCryptoBlockCreateOptionsLUKS', | ||
30 | + 'data': { 'file': 'BlockdevRef', | ||
31 | + 'size': 'size' } } | ||
32 | + | ||
33 | +## | ||
34 | # @BlockdevCreateOptionsNfs: | ||
35 | # | ||
36 | # Driver specific image creation options for NFS. | ||
37 | @@ -XXX,XX +XXX,XX @@ | ||
38 | 'http': 'BlockdevCreateNotSupported', | ||
39 | 'https': 'BlockdevCreateNotSupported', | ||
40 | 'iscsi': 'BlockdevCreateNotSupported', | ||
41 | - 'luks': 'BlockdevCreateNotSupported', | ||
42 | + 'luks': 'BlockdevCreateOptionsLUKS', | ||
43 | 'nbd': 'BlockdevCreateNotSupported', | ||
44 | 'nfs': 'BlockdevCreateOptionsNfs', | ||
45 | 'null-aio': 'BlockdevCreateNotSupported', | ||
46 | diff --git a/block/crypto.c b/block/crypto.c | ||
47 | index XXXXXXX..XXXXXXX 100644 | ||
48 | --- a/block/crypto.c | ||
49 | +++ b/block/crypto.c | ||
50 | @@ -XXX,XX +XXX,XX @@ static int block_crypto_open_luks(BlockDriverState *bs, | ||
51 | bs, options, flags, errp); | ||
52 | } | 23 | } |
53 | 24 | ||
54 | +static int coroutine_fn | 25 | -static __attribute__((noinline)) void perf_cost_func(void *opaque) |
55 | +block_crypto_co_create_luks(BlockdevCreateOptions *create_options, Error **errp) | 26 | +static __attribute__((noinline)) void coroutine_fn perf_cost_func(void *opaque) |
56 | +{ | 27 | { |
57 | + BlockdevCreateOptionsLUKS *luks_opts; | 28 | qemu_coroutine_yield(); |
58 | + BlockDriverState *bs = NULL; | 29 | } |
59 | + QCryptoBlockCreateOptions create_opts; | ||
60 | + int ret; | ||
61 | + | ||
62 | + assert(create_options->driver == BLOCKDEV_DRIVER_LUKS); | ||
63 | + luks_opts = &create_options->u.luks; | ||
64 | + | ||
65 | + bs = bdrv_open_blockdev_ref(luks_opts->file, errp); | ||
66 | + if (bs == NULL) { | ||
67 | + return -EIO; | ||
68 | + } | ||
69 | + | ||
70 | + create_opts = (QCryptoBlockCreateOptions) { | ||
71 | + .format = Q_CRYPTO_BLOCK_FORMAT_LUKS, | ||
72 | + .u.luks = *qapi_BlockdevCreateOptionsLUKS_base(luks_opts), | ||
73 | + }; | ||
74 | + | ||
75 | + ret = block_crypto_co_create_generic(bs, luks_opts->size, &create_opts, | ||
76 | + errp); | ||
77 | + if (ret < 0) { | ||
78 | + goto fail; | ||
79 | + } | ||
80 | + | ||
81 | + ret = 0; | ||
82 | +fail: | ||
83 | + bdrv_unref(bs); | ||
84 | + return ret; | ||
85 | +} | ||
86 | + | ||
87 | static int coroutine_fn block_crypto_co_create_opts_luks(const char *filename, | ||
88 | QemuOpts *opts, | ||
89 | Error **errp) | ||
90 | @@ -XXX,XX +XXX,XX @@ BlockDriver bdrv_crypto_luks = { | ||
91 | .bdrv_open = block_crypto_open_luks, | ||
92 | .bdrv_close = block_crypto_close, | ||
93 | .bdrv_child_perm = bdrv_format_default_perms, | ||
94 | + .bdrv_co_create = block_crypto_co_create_luks, | ||
95 | .bdrv_co_create_opts = block_crypto_co_create_opts_luks, | ||
96 | .bdrv_truncate = block_crypto_truncate, | ||
97 | .create_opts = &block_crypto_create_opts_luks, | ||
98 | -- | 30 | -- |
99 | 2.13.6 | 31 | 2.37.3 |
100 | 32 | ||
1 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 1 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
---|---|---|---|
2 | Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> | 2 | Message-Id: <20221006122607.162769-1-kwolf@redhat.com> |
3 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
4 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
5 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
3 | --- | 6 | --- |
4 | tests/qemu-iotests/209 | 210 +++++++++++++++++++++++++++++++++++++++++++ | 7 | block/quorum.c | 2 -- |
5 | tests/qemu-iotests/209.out | 136 ++++++++++++++++++++++++++++ | 8 | 1 file changed, 2 deletions(-) |
6 | tests/qemu-iotests/common.rc | 2 +- | ||
7 | tests/qemu-iotests/group | 1 + | ||
8 | 4 files changed, 348 insertions(+), 1 deletion(-) | ||
9 | create mode 100755 tests/qemu-iotests/209 | ||
10 | create mode 100644 tests/qemu-iotests/209.out | ||
11 | 9 | ||
12 | diff --git a/tests/qemu-iotests/209 b/tests/qemu-iotests/209 | 10 | diff --git a/block/quorum.c b/block/quorum.c |
13 | new file mode 100755 | ||
14 | index XXXXXXX..XXXXXXX | ||
15 | --- /dev/null | ||
16 | +++ b/tests/qemu-iotests/209 | ||
17 | @@ -XXX,XX +XXX,XX @@ | ||
18 | +#!/bin/bash | ||
19 | +# | ||
20 | +# Test luks and file image creation | ||
21 | +# | ||
22 | +# Copyright (C) 2018 Red Hat, Inc. | ||
23 | +# | ||
24 | +# This program is free software; you can redistribute it and/or modify | ||
25 | +# it under the terms of the GNU General Public License as published by | ||
26 | +# the Free Software Foundation; either version 2 of the License, or | ||
27 | +# (at your option) any later version. | ||
28 | +# | ||
29 | +# This program is distributed in the hope that it will be useful, | ||
30 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
31 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
32 | +# GNU General Public License for more details. | ||
33 | +# | ||
34 | +# You should have received a copy of the GNU General Public License | ||
35 | +# along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
36 | +# | ||
37 | + | ||
38 | +# creator | ||
39 | +owner=kwolf@redhat.com | ||
40 | + | ||
41 | +seq=`basename $0` | ||
42 | +echo "QA output created by $seq" | ||
43 | + | ||
44 | +here=`pwd` | ||
45 | +status=1 # failure is the default! | ||
46 | + | ||
47 | +# get standard environment, filters and checks | ||
48 | +. ./common.rc | ||
49 | +. ./common.filter | ||
50 | + | ||
51 | +_supported_fmt luks | ||
52 | +_supported_proto file | ||
53 | +_supported_os Linux | ||
54 | + | ||
55 | +function do_run_qemu() | ||
56 | +{ | ||
57 | + echo Testing: "$@" | ||
58 | + $QEMU -nographic -qmp stdio -serial none "$@" | ||
59 | + echo | ||
60 | +} | ||
61 | + | ||
62 | +function run_qemu() | ||
63 | +{ | ||
64 | + do_run_qemu "$@" 2>&1 | _filter_testdir | _filter_qmp \ | ||
65 | + | _filter_qemu | _filter_imgfmt \ | ||
66 | + | _filter_actual_image_size | ||
67 | +} | ||
68 | + | ||
69 | +echo | ||
70 | +echo "=== Successful image creation (defaults) ===" | ||
71 | +echo | ||
72 | + | ||
73 | +size=$((128 * 1024 * 1024)) | ||
74 | + | ||
75 | +run_qemu -object secret,id=keysec0,data="foo" <<EOF | ||
76 | +{ "execute": "qmp_capabilities" } | ||
77 | +{ "execute": "x-blockdev-create", | ||
78 | + "arguments": { | ||
79 | + "driver": "file", | ||
80 | + "filename": "$TEST_IMG_FILE", | ||
81 | + "size": 0 | ||
82 | + } | ||
83 | +} | ||
84 | +{ "execute": "blockdev-add", | ||
85 | + "arguments": { | ||
86 | + "driver": "file", | ||
87 | + "node-name": "imgfile", | ||
88 | + "filename": "$TEST_IMG_FILE" | ||
89 | + } | ||
90 | +} | ||
91 | +{ "execute": "x-blockdev-create", | ||
92 | + "arguments": { | ||
93 | + "driver": "$IMGFMT", | ||
94 | + "file": "imgfile", | ||
95 | + "key-secret": "keysec0", | ||
96 | + "size": $size, | ||
97 | + "iter-time": 10 | ||
98 | + } | ||
99 | +} | ||
100 | +{ "execute": "quit" } | ||
101 | +EOF | ||
102 | + | ||
103 | +_img_info --format-specific | _filter_img_info --format-specific | ||
104 | + | ||
105 | +echo | ||
106 | +echo "=== Successful image creation (with non-default options) ===" | ||
107 | +echo | ||
108 | + | ||
109 | +# Choose a different size to show that we got a new image | ||
110 | +size=$((64 * 1024 * 1024)) | ||
111 | + | ||
112 | +run_qemu -object secret,id=keysec0,data="foo" <<EOF | ||
113 | +{ "execute": "qmp_capabilities" } | ||
114 | +{ "execute": "x-blockdev-create", | ||
115 | + "arguments": { | ||
116 | + "driver": "file", | ||
117 | + "filename": "$TEST_IMG_FILE", | ||
118 | + "size": 0 | ||
119 | + } | ||
120 | +} | ||
121 | +{ "execute": "x-blockdev-create", | ||
122 | + "arguments": { | ||
123 | + "driver": "$IMGFMT", | ||
124 | + "file": { | ||
125 | + "driver": "file", | ||
126 | + "filename": "$TEST_IMG_FILE" | ||
127 | + }, | ||
128 | + "size": $size, | ||
129 | + "key-secret": "keysec0", | ||
130 | + "cipher-alg": "twofish-128", | ||
131 | + "cipher-mode": "ctr", | ||
132 | + "ivgen-alg": "plain64", | ||
133 | + "ivgen-hash-alg": "md5", | ||
134 | + "hash-alg": "sha1", | ||
135 | + "iter-time": 10 | ||
136 | + } | ||
137 | +} | ||
138 | +{ "execute": "quit" } | ||
139 | +EOF | ||
140 | + | ||
141 | +_img_info --format-specific | _filter_img_info --format-specific | ||
142 | + | ||
143 | +echo | ||
144 | +echo "=== Invalid BlockdevRef ===" | ||
145 | +echo | ||
146 | + | ||
147 | +run_qemu <<EOF | ||
148 | +{ "execute": "qmp_capabilities" } | ||
149 | +{ "execute": "x-blockdev-create", | ||
150 | + "arguments": { | ||
151 | + "driver": "$IMGFMT", | ||
152 | + "file": "this doesn't exist", | ||
153 | + "size": $size | ||
154 | + } | ||
155 | +} | ||
156 | +{ "execute": "quit" } | ||
157 | +EOF | ||
158 | + | ||
159 | +echo | ||
160 | +echo "=== Zero size ===" | ||
161 | +echo | ||
162 | + | ||
163 | +run_qemu -blockdev driver=file,filename="$TEST_IMG_FILE",node-name=node0 \ | ||
164 | + -object secret,id=keysec0,data="foo" <<EOF | ||
165 | +{ "execute": "qmp_capabilities" } | ||
166 | +{ "execute": "x-blockdev-create", | ||
167 | + "arguments": { | ||
168 | + "driver": "$IMGFMT", | ||
169 | + "file": "node0", | ||
170 | + "key-secret": "keysec0", | ||
171 | + "size": 0, | ||
172 | + "iter-time": 10 | ||
173 | + } | ||
174 | +} | ||
175 | +{ "execute": "quit" } | ||
176 | +EOF | ||
177 | + | ||
178 | +_img_info | _filter_img_info | ||
179 | + | ||
180 | + | ||
181 | +echo | ||
182 | +echo "=== Invalid sizes ===" | ||
183 | +echo | ||
184 | + | ||
185 | +# TODO Negative image sizes aren't handled correctly, but this is a problem | ||
186 | +# with QAPI's implementation of the 'size' type and affects other commands as | ||
187 | +# well. Once this is fixed, we may want to add a test case here. | ||
188 | + | ||
189 | +# 1. 2^64 - 512 | ||
190 | +# 2. 2^63 = 8 EB (qemu-img enforces image sizes less than this) | ||
191 | +# 3. 2^63 - 512 (generally valid, but with the crypto header the file will | ||
192 | +# exceed 63 bits) | ||
193 | + | ||
194 | +run_qemu -blockdev driver=file,filename="$TEST_IMG_FILE",node-name=node0 \ | ||
195 | + -object secret,id=keysec0,data="foo" <<EOF | ||
196 | +{ "execute": "qmp_capabilities" } | ||
197 | +{ "execute": "x-blockdev-create", | ||
198 | + "arguments": { | ||
199 | + "driver": "$IMGFMT", | ||
200 | + "file": "node0", | ||
201 | + "key-secret": "keysec0", | ||
202 | + "size": 18446744073709551104 | ||
203 | + } | ||
204 | +} | ||
205 | +{ "execute": "x-blockdev-create", | ||
206 | + "arguments": { | ||
207 | + "driver": "$IMGFMT", | ||
208 | + "file": "node0", | ||
209 | + "key-secret": "keysec0", | ||
210 | + "size": 9223372036854775808 | ||
211 | + } | ||
212 | +} | ||
213 | +{ "execute": "x-blockdev-create", | ||
214 | + "arguments": { | ||
215 | + "driver": "$IMGFMT", | ||
216 | + "file": "node0", | ||
217 | + "key-secret": "keysec0", | ||
218 | + "size": 9223372036854775296 | ||
219 | + } | ||
220 | +} | ||
221 | +{ "execute": "quit" } | ||
222 | +EOF | ||
223 | + | ||
224 | +# success, all done | ||
225 | +echo "*** done" | ||
226 | +rm -f $seq.full | ||
227 | +status=0 | ||
228 | diff --git a/tests/qemu-iotests/209.out b/tests/qemu-iotests/209.out | ||
229 | new file mode 100644 | ||
230 | index XXXXXXX..XXXXXXX | ||
231 | --- /dev/null | ||
232 | +++ b/tests/qemu-iotests/209.out | ||
233 | @@ -XXX,XX +XXX,XX @@ | ||
234 | +QA output created by 209 | ||
235 | + | ||
236 | +=== Successful image creation (defaults) === | ||
237 | + | ||
238 | +Testing: -object secret,id=keysec0,data=foo | ||
239 | +QMP_VERSION | ||
240 | +{"return": {}} | ||
241 | +{"return": {}} | ||
242 | +{"return": {}} | ||
243 | +{"return": {}} | ||
244 | +{"return": {}} | ||
245 | +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | ||
246 | + | ||
247 | +image: json:{"driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/t.IMGFMT"}, "key-secret": "keysec0"} | ||
248 | +file format: IMGFMT | ||
249 | +virtual size: 128M (134217728 bytes) | ||
250 | +Format specific information: | ||
251 | + ivgen alg: plain64 | ||
252 | + hash alg: sha256 | ||
253 | + cipher alg: aes-256 | ||
254 | + uuid: 00000000-0000-0000-0000-000000000000 | ||
255 | + cipher mode: xts | ||
256 | + slots: | ||
257 | + [0]: | ||
258 | + active: true | ||
259 | + iters: 1024 | ||
260 | + key offset: 4096 | ||
261 | + stripes: 4000 | ||
262 | + [1]: | ||
263 | + active: false | ||
264 | + key offset: 262144 | ||
265 | + [2]: | ||
266 | + active: false | ||
267 | + key offset: 520192 | ||
268 | + [3]: | ||
269 | + active: false | ||
270 | + key offset: 778240 | ||
271 | + [4]: | ||
272 | + active: false | ||
273 | + key offset: 1036288 | ||
274 | + [5]: | ||
275 | + active: false | ||
276 | + key offset: 1294336 | ||
277 | + [6]: | ||
278 | + active: false | ||
279 | + key offset: 1552384 | ||
280 | + [7]: | ||
281 | + active: false | ||
282 | + key offset: 1810432 | ||
283 | + payload offset: 2068480 | ||
284 | + master key iters: 1024 | ||
285 | + | ||
286 | +=== Successful image creation (with non-default options) === | ||
287 | + | ||
288 | +Testing: -object secret,id=keysec0,data=foo | ||
289 | +QMP_VERSION | ||
290 | +{"return": {}} | ||
291 | +{"return": {}} | ||
292 | +{"return": {}} | ||
293 | +{"return": {}} | ||
294 | +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | ||
295 | + | ||
296 | +image: json:{"driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/t.IMGFMT"}, "key-secret": "keysec0"} | ||
297 | +file format: IMGFMT | ||
298 | +virtual size: 64M (67108864 bytes) | ||
299 | +Format specific information: | ||
300 | + ivgen alg: plain64 | ||
301 | + hash alg: sha1 | ||
302 | + cipher alg: twofish-128 | ||
303 | + uuid: 00000000-0000-0000-0000-000000000000 | ||
304 | + cipher mode: ctr | ||
305 | + slots: | ||
306 | + [0]: | ||
307 | + active: true | ||
308 | + iters: 1024 | ||
309 | + key offset: 4096 | ||
310 | + stripes: 4000 | ||
311 | + [1]: | ||
312 | + active: false | ||
313 | + key offset: 69632 | ||
314 | + [2]: | ||
315 | + active: false | ||
316 | + key offset: 135168 | ||
317 | + [3]: | ||
318 | + active: false | ||
319 | + key offset: 200704 | ||
320 | + [4]: | ||
321 | + active: false | ||
322 | + key offset: 266240 | ||
323 | + [5]: | ||
324 | + active: false | ||
325 | + key offset: 331776 | ||
326 | + [6]: | ||
327 | + active: false | ||
328 | + key offset: 397312 | ||
329 | + [7]: | ||
330 | + active: false | ||
331 | + key offset: 462848 | ||
332 | + payload offset: 528384 | ||
333 | + master key iters: 1024 | ||
334 | + | ||
335 | +=== Invalid BlockdevRef === | ||
336 | + | ||
337 | +Testing: | ||
338 | +QMP_VERSION | ||
339 | +{"return": {}} | ||
340 | +{"error": {"class": "GenericError", "desc": "Cannot find device=this doesn't exist nor node_name=this doesn't exist"}} | ||
341 | +{"return": {}} | ||
342 | +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | ||
343 | + | ||
344 | + | ||
345 | +=== Zero size === | ||
346 | + | ||
347 | +Testing: -blockdev driver=file,filename=TEST_DIR/t.IMGFMT,node-name=node0 -object secret,id=keysec0,data=foo | ||
348 | +QMP_VERSION | ||
349 | +{"return": {}} | ||
350 | +{"return": {}} | ||
351 | +{"return": {}} | ||
352 | +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | ||
353 | + | ||
354 | +image: json:{"driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/t.IMGFMT"}, "key-secret": "keysec0"} | ||
355 | +file format: IMGFMT | ||
356 | +virtual size: 0 (0 bytes) | ||
357 | + | ||
358 | +=== Invalid sizes === | ||
359 | + | ||
360 | +Testing: -blockdev driver=file,filename=TEST_DIR/t.IMGFMT,node-name=node0 -object secret,id=keysec0,data=foo | ||
361 | +QMP_VERSION | ||
362 | +{"return": {}} | ||
363 | +{"error": {"class": "GenericError", "desc": "The requested file size is too large"}} | ||
364 | +{"error": {"class": "GenericError", "desc": "The requested file size is too large"}} | ||
365 | +{"error": {"class": "GenericError", "desc": "The requested file size is too large"}} | ||
366 | +{"return": {}} | ||
367 | +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | ||
368 | + | ||
369 | +*** done | ||
370 | diff --git a/tests/qemu-iotests/common.rc b/tests/qemu-iotests/common.rc | ||
371 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
372 | --- a/tests/qemu-iotests/common.rc | 12 | --- a/block/quorum.c |
373 | +++ b/tests/qemu-iotests/common.rc | 13 | +++ b/block/quorum.c |
374 | @@ -XXX,XX +XXX,XX @@ _img_info() | 14 | @@ -XXX,XX +XXX,XX @@ static bool quorum_has_too_much_io_failed(QuorumAIOCB *acb) |
375 | 15 | return false; | |
376 | discard=0 | 16 | } |
377 | regex_json_spec_start='^ *"format-specific": \{' | 17 | |
378 | - $QEMU_IMG info "$@" "$TEST_IMG" 2>&1 | \ | 18 | -static int read_fifo_child(QuorumAIOCB *acb); |
379 | + $QEMU_IMG info $QEMU_IMG_EXTRA_ARGS "$@" "$TEST_IMG" 2>&1 | \ | 19 | - |
380 | sed -e "s#$IMGPROTO:$TEST_DIR#TEST_DIR#g" \ | 20 | static void quorum_copy_qiov(QEMUIOVector *dest, QEMUIOVector *source) |
381 | -e "s#$TEST_DIR#TEST_DIR#g" \ | 21 | { |
382 | -e "s#$IMGFMT#IMGFMT#g" \ | 22 | int i; |
383 | diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group | ||
384 | index XXXXXXX..XXXXXXX 100644 | ||
385 | --- a/tests/qemu-iotests/group | ||
386 | +++ b/tests/qemu-iotests/group | ||
387 | @@ -XXX,XX +XXX,XX @@ | ||
388 | 205 rw auto quick | ||
389 | 206 rw auto | ||
390 | 207 rw auto | ||
391 | +209 rw auto | ||
392 | -- | 23 | -- |
393 | 2.13.6 | 24 | 2.37.3 |
394 | 25 | ||
1 | From: John Snow <jsnow@redhat.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Add a new state that specifically demarcates when we begin to permanently | 3 | job mutex will be used to protect the job struct elements and list, |
4 | demolish a job after it has performed all work. This makes the transition | 4 | replacing AioContext locks. |
5 | explicit in the STM table and highlights conditions under which a job may | ||
6 | be demolished. | ||
7 | 5 | ||
8 | Alongside this state, add a new helper command "block_job_decommission", | 6 | Right now use a shared lock for all jobs, in order to keep things |
9 | which transitions to the NULL state and puts down our implicit reference. | 7 | simple. Once the AioContext lock is gone, we can introduce per-job |
10 | This separates instances in the code for "block_job_unref" which merely | 8 | locks. |
11 | undo a matching "block_job_ref" with instances intended to initiate the | ||
12 | full destruction of the object. | ||
13 | 9 | ||
14 | This decommission action also sets a number of fields to make sure that | 10 | To simplify the switch from aiocontext to job lock, introduce |
15 | block internals or external users that are holding a reference to a job | 11 | *nop* lock/unlock functions and macros. |
16 | to see when it "finishes" are convinced that the job object is "done." | 12 | We want to always call job_lock/unlock outside the AioContext locks, |
17 | This is necessary, for instance, to do a block_job_cancel_sync on a | 13 | and not vice-versa, otherwise we might get a deadlock. This is not |
18 | created object which will not make any progress. | 14 | straightforward to do, and that's why we start with nop functions. |
15 | Once everything is protected by job_lock/unlock, we can change the nop into | ||
16 | an actual mutex and remove the aiocontext lock. | ||
19 | 17 | ||
20 | Now, all jobs must go through block_job_decommission prior to being | 18 | Since job_mutex is already being used, add static |
21 | freed, giving us start-to-finish state machine coverage for jobs. | 19 | real_job_{lock/unlock} for the existing usage. |
22 | 20 | ||
23 | Transitions: | 21 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
24 | Created -> Null: Early failure event before the job is started | 22 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> |
25 | Concluded -> Null: Standard transition. | 23 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> |
26 | 24 | Message-Id: <20220926093214.506243-2-eesposit@redhat.com> | |
27 | Verbs: | ||
28 | None. This should not ever be visible to the monitor. | ||
29 | |||
30 | +---------+ | ||
31 | |UNDEFINED| | ||
32 | +--+------+ | ||
33 | | | ||
34 | +--v----+ | ||
35 | +---------+CREATED+------------------+ | ||
36 | | +--+----+ | | ||
37 | | | | | ||
38 | | +--v----+ +------+ | | ||
39 | +---------+RUNNING<----->PAUSED| | | ||
40 | | +--+-+--+ +------+ | | ||
41 | | | | | | ||
42 | | | +------------------+ | | ||
43 | | | | | | ||
44 | | +--v--+ +-------+ | | | ||
45 | +---------+READY<------->STANDBY| | | | ||
46 | | +--+--+ +-------+ | | | ||
47 | | | | | | ||
48 | +--v-----+ +--v------+ | | | ||
49 | |ABORTING+--->CONCLUDED<-------------+ | | ||
50 | +--------+ +--+------+ | | ||
51 | | | | ||
52 | +--v-+ | | ||
53 | |NULL<---------------------+ | ||
54 | +----+ | ||
55 | |||
56 | Signed-off-by: John Snow <jsnow@redhat.com> | ||
57 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 25 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
58 | --- | 26 | --- |
59 | qapi/block-core.json | 5 ++++- | 27 | include/qemu/job.h | 24 ++++++++++++++++++++++++ |
60 | blockjob.c | 50 ++++++++++++++++++++++++++++++++------------------ | 28 | job.c | 35 +++++++++++++++++++++++------------ |
61 | 2 files changed, 36 insertions(+), 19 deletions(-) | 29 | 2 files changed, 47 insertions(+), 12 deletions(-) |
62 | 30 | ||
63 | diff --git a/qapi/block-core.json b/qapi/block-core.json | 31 | diff --git a/include/qemu/job.h b/include/qemu/job.h |
64 | index XXXXXXX..XXXXXXX 100644 | 32 | index XXXXXXX..XXXXXXX 100644 |
65 | --- a/qapi/block-core.json | 33 | --- a/include/qemu/job.h |
66 | +++ b/qapi/block-core.json | 34 | +++ b/include/qemu/job.h |
35 | @@ -XXX,XX +XXX,XX @@ typedef enum JobCreateFlags { | ||
36 | JOB_MANUAL_DISMISS = 0x04, | ||
37 | } JobCreateFlags; | ||
38 | |||
39 | +extern QemuMutex job_mutex; | ||
40 | + | ||
41 | +#define JOB_LOCK_GUARD() /* QEMU_LOCK_GUARD(&job_mutex) */ | ||
42 | + | ||
43 | +#define WITH_JOB_LOCK_GUARD() /* WITH_QEMU_LOCK_GUARD(&job_mutex) */ | ||
44 | + | ||
45 | +/** | ||
46 | + * job_lock: | ||
47 | + * | ||
48 | + * Take the mutex protecting the list of jobs and their status. | ||
49 | + * Most functions called by the monitor need to call job_lock | ||
50 | + * and job_unlock manually. On the other hand, function called | ||
51 | + * by the block jobs themselves and by the block layer will take the | ||
52 | + * lock for you. | ||
53 | + */ | ||
54 | +void job_lock(void); | ||
55 | + | ||
56 | +/** | ||
57 | + * job_unlock: | ||
58 | + * | ||
59 | + * Release the mutex protecting the list of jobs and their status. | ||
60 | + */ | ||
61 | +void job_unlock(void); | ||
62 | + | ||
63 | /** | ||
64 | * Allocate and return a new job transaction. Jobs can be added to the | ||
65 | * transaction using job_txn_add_job(). | ||
66 | diff --git a/job.c b/job.c | ||
67 | index XXXXXXX..XXXXXXX 100644 | ||
68 | --- a/job.c | ||
69 | +++ b/job.c | ||
67 | @@ -XXX,XX +XXX,XX @@ | 70 | @@ -XXX,XX +XXX,XX @@ |
68 | # @concluded: The job has finished all work. If manual was set to true, the job | 71 | #include "trace/trace-root.h" |
69 | # will remain in the query list until it is dismissed. | 72 | #include "qapi/qapi-events-job.h" |
70 | # | 73 | |
71 | +# @null: The job is in the process of being dismantled. This state should not | 74 | +/* |
72 | +# ever be visible externally. | 75 | + * job_mutex protects the jobs list, but also makes the |
73 | +# | 76 | + * struct job fields thread-safe. |
74 | # Since: 2.12 | 77 | + */ |
75 | ## | 78 | +QemuMutex job_mutex; |
76 | { 'enum': 'BlockJobStatus', | 79 | + |
77 | 'data': ['undefined', 'created', 'running', 'paused', 'ready', 'standby', | 80 | static QLIST_HEAD(, Job) jobs = QLIST_HEAD_INITIALIZER(jobs); |
78 | - 'aborting', 'concluded' ] } | 81 | |
79 | + 'aborting', 'concluded', 'null' ] } | 82 | /* Job State Transition Table */ |
80 | 83 | @@ -XXX,XX +XXX,XX @@ struct JobTxn { | |
81 | ## | 84 | int refcnt; |
82 | # @BlockJobInfo: | ||
83 | diff --git a/blockjob.c b/blockjob.c | ||
84 | index XXXXXXX..XXXXXXX 100644 | ||
85 | --- a/blockjob.c | ||
86 | +++ b/blockjob.c | ||
87 | @@ -XXX,XX +XXX,XX @@ static QemuMutex block_job_mutex; | ||
88 | |||
89 | /* BlockJob State Transition Table */ | ||
90 | bool BlockJobSTT[BLOCK_JOB_STATUS__MAX][BLOCK_JOB_STATUS__MAX] = { | ||
91 | - /* U, C, R, P, Y, S, X, E */ | ||
92 | - /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0, 0}, | ||
93 | - /* C: */ [BLOCK_JOB_STATUS_CREATED] = {0, 0, 1, 0, 0, 0, 1, 0}, | ||
94 | - /* R: */ [BLOCK_JOB_STATUS_RUNNING] = {0, 0, 0, 1, 1, 0, 1, 1}, | ||
95 | - /* P: */ [BLOCK_JOB_STATUS_PAUSED] = {0, 0, 1, 0, 0, 0, 0, 0}, | ||
96 | - /* Y: */ [BLOCK_JOB_STATUS_READY] = {0, 0, 0, 0, 0, 1, 1, 1}, | ||
97 | - /* S: */ [BLOCK_JOB_STATUS_STANDBY] = {0, 0, 0, 0, 1, 0, 0, 0}, | ||
98 | - /* X: */ [BLOCK_JOB_STATUS_ABORTING] = {0, 0, 0, 0, 0, 0, 0, 1}, | ||
99 | - /* E: */ [BLOCK_JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0}, | ||
100 | + /* U, C, R, P, Y, S, X, E, N */ | ||
101 | + /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0, 0, 0}, | ||
102 | + /* C: */ [BLOCK_JOB_STATUS_CREATED] = {0, 0, 1, 0, 0, 0, 1, 0, 1}, | ||
103 | + /* R: */ [BLOCK_JOB_STATUS_RUNNING] = {0, 0, 0, 1, 1, 0, 1, 1, 0}, | ||
104 | + /* P: */ [BLOCK_JOB_STATUS_PAUSED] = {0, 0, 1, 0, 0, 0, 0, 0, 0}, | ||
105 | + /* Y: */ [BLOCK_JOB_STATUS_READY] = {0, 0, 0, 0, 0, 1, 1, 1, 0}, | ||
106 | + /* S: */ [BLOCK_JOB_STATUS_STANDBY] = {0, 0, 0, 0, 1, 0, 0, 0, 0}, | ||
107 | + /* X: */ [BLOCK_JOB_STATUS_ABORTING] = {0, 0, 0, 0, 0, 0, 0, 1, 0}, | ||
108 | + /* E: */ [BLOCK_JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0, 1}, | ||
109 | + /* N: */ [BLOCK_JOB_STATUS_NULL] = {0, 0, 0, 0, 0, 0, 0, 0, 0}, | ||
110 | }; | 85 | }; |
111 | 86 | ||
112 | bool BlockJobVerbTable[BLOCK_JOB_VERB__MAX][BLOCK_JOB_STATUS__MAX] = { | 87 | -/* Right now, this mutex is only needed to synchronize accesses to job->busy |
113 | - /* U, C, R, P, Y, S, X, E */ | 88 | - * and job->sleep_timer, such as concurrent calls to job_do_yield and |
114 | - [BLOCK_JOB_VERB_CANCEL] = {0, 1, 1, 1, 1, 1, 0, 0}, | 89 | - * job_enter. */ |
115 | - [BLOCK_JOB_VERB_PAUSE] = {0, 1, 1, 1, 1, 1, 0, 0}, | 90 | -static QemuMutex job_mutex; |
116 | - [BLOCK_JOB_VERB_RESUME] = {0, 1, 1, 1, 1, 1, 0, 0}, | 91 | +void job_lock(void) |
117 | - [BLOCK_JOB_VERB_SET_SPEED] = {0, 1, 1, 1, 1, 1, 0, 0}, | ||
118 | - [BLOCK_JOB_VERB_COMPLETE] = {0, 0, 0, 0, 1, 0, 0, 0}, | ||
119 | + /* U, C, R, P, Y, S, X, E, N */ | ||
120 | + [BLOCK_JOB_VERB_CANCEL] = {0, 1, 1, 1, 1, 1, 0, 0, 0}, | ||
121 | + [BLOCK_JOB_VERB_PAUSE] = {0, 1, 1, 1, 1, 1, 0, 0, 0}, | ||
122 | + [BLOCK_JOB_VERB_RESUME] = {0, 1, 1, 1, 1, 1, 0, 0, 0}, | ||
123 | + [BLOCK_JOB_VERB_SET_SPEED] = {0, 1, 1, 1, 1, 1, 0, 0, 0}, | ||
124 | + [BLOCK_JOB_VERB_COMPLETE] = {0, 0, 0, 0, 1, 0, 0, 0, 0}, | ||
125 | }; | ||
126 | |||
127 | static void block_job_state_transition(BlockJob *job, BlockJobStatus s1) | ||
128 | @@ -XXX,XX +XXX,XX @@ static void block_job_detach_aio_context(void *opaque); | ||
129 | void block_job_unref(BlockJob *job) | ||
130 | { | ||
131 | if (--job->refcnt == 0) { | ||
132 | + assert(job->status == BLOCK_JOB_STATUS_NULL); | ||
133 | BlockDriverState *bs = blk_bs(job->blk); | ||
134 | QLIST_REMOVE(job, job_list); | ||
135 | bs->job = NULL; | ||
136 | @@ -XXX,XX +XXX,XX @@ void block_job_start(BlockJob *job) | ||
137 | bdrv_coroutine_enter(blk_bs(job->blk), job->co); | ||
138 | } | ||
139 | |||
140 | +static void block_job_decommission(BlockJob *job) | ||
141 | +{ | 92 | +{ |
142 | + assert(job); | 93 | + /* nop */ |
143 | + job->completed = true; | ||
144 | + job->busy = false; | ||
145 | + job->paused = false; | ||
146 | + job->deferred_to_main_loop = true; | ||
147 | + block_job_state_transition(job, BLOCK_JOB_STATUS_NULL); | ||
148 | + block_job_unref(job); | ||
149 | +} | 94 | +} |
150 | + | 95 | + |
151 | static void block_job_conclude(BlockJob *job) | 96 | +void job_unlock(void) |
97 | +{ | ||
98 | + /* nop */ | ||
99 | +} | ||
100 | |||
101 | -static void job_lock(void) | ||
102 | +static void real_job_lock(void) | ||
152 | { | 103 | { |
153 | block_job_state_transition(job, BLOCK_JOB_STATUS_CONCLUDED); | 104 | qemu_mutex_lock(&job_mutex); |
154 | @@ -XXX,XX +XXX,XX @@ static void block_job_completed_single(BlockJob *job) | ||
155 | QLIST_REMOVE(job, txn_list); | ||
156 | block_job_txn_unref(job->txn); | ||
157 | block_job_conclude(job); | ||
158 | - block_job_unref(job); | ||
159 | + block_job_decommission(job); | ||
160 | } | 105 | } |
161 | 106 | ||
162 | static void block_job_cancel_async(BlockJob *job) | 107 | -static void job_unlock(void) |
163 | @@ -XXX,XX +XXX,XX @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver, | 108 | +static void real_job_unlock(void) |
164 | |||
165 | block_job_set_speed(job, speed, &local_err); | ||
166 | if (local_err) { | ||
167 | - block_job_unref(job); | ||
168 | + block_job_early_fail(job); | ||
169 | error_propagate(errp, local_err); | ||
170 | return NULL; | ||
171 | } | ||
172 | @@ -XXX,XX +XXX,XX @@ void block_job_pause_all(void) | ||
173 | |||
174 | void block_job_early_fail(BlockJob *job) | ||
175 | { | 109 | { |
176 | - block_job_unref(job); | 110 | qemu_mutex_unlock(&job_mutex); |
177 | + assert(job->status == BLOCK_JOB_STATUS_CREATED); | ||
178 | + block_job_decommission(job); | ||
179 | } | 111 | } |
180 | 112 | @@ -XXX,XX +XXX,XX @@ void job_enter_cond(Job *job, bool(*fn)(Job *job)) | |
181 | void block_job_completed(BlockJob *job, int ret) | 113 | return; |
114 | } | ||
115 | |||
116 | - job_lock(); | ||
117 | + real_job_lock(); | ||
118 | if (job->busy) { | ||
119 | - job_unlock(); | ||
120 | + real_job_unlock(); | ||
121 | return; | ||
122 | } | ||
123 | |||
124 | if (fn && !fn(job)) { | ||
125 | - job_unlock(); | ||
126 | + real_job_unlock(); | ||
127 | return; | ||
128 | } | ||
129 | |||
130 | assert(!job->deferred_to_main_loop); | ||
131 | timer_del(&job->sleep_timer); | ||
132 | job->busy = true; | ||
133 | - job_unlock(); | ||
134 | + real_job_unlock(); | ||
135 | aio_co_enter(job->aio_context, job->co); | ||
136 | } | ||
137 | |||
138 | @@ -XXX,XX +XXX,XX @@ void job_enter(Job *job) | ||
139 | * called explicitly. */ | ||
140 | static void coroutine_fn job_do_yield(Job *job, uint64_t ns) | ||
141 | { | ||
142 | - job_lock(); | ||
143 | + real_job_lock(); | ||
144 | if (ns != -1) { | ||
145 | timer_mod(&job->sleep_timer, ns); | ||
146 | } | ||
147 | job->busy = false; | ||
148 | job_event_idle(job); | ||
149 | - job_unlock(); | ||
150 | + real_job_unlock(); | ||
151 | qemu_coroutine_yield(); | ||
152 | |||
153 | /* Set by job_enter_cond() before re-entering the coroutine. */ | ||
182 | -- | 154 | -- |
183 | 2.13.6 | 155 | 2.37.3 |
184 | |||
1 | From: John Snow <jsnow@redhat.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Signed-off-by: John Snow <jsnow@redhat.com> | 3 | Categorize the fields in struct Job to understand which ones |
4 | need to be protected by the job mutex and which don't. | ||
5 | |||
6 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> | ||
7 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
8 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
9 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
10 | Message-Id: <20220926093214.506243-3-eesposit@redhat.com> | ||
4 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
5 | --- | 12 | --- |
6 | tests/qemu-iotests/056 | 187 +++++++++++++++++++++++++++++++++++++++++++++ | 13 | include/qemu/job.h | 61 +++++++++++++++++++++++++++------------------- |
7 | tests/qemu-iotests/056.out | 4 +- | 14 | 1 file changed, 36 insertions(+), 25 deletions(-) |
8 | 2 files changed, 189 insertions(+), 2 deletions(-) | ||
9 | 15 | ||
10 | diff --git a/tests/qemu-iotests/056 b/tests/qemu-iotests/056 | 16 | diff --git a/include/qemu/job.h b/include/qemu/job.h |
11 | index XXXXXXX..XXXXXXX 100755 | 17 | index XXXXXXX..XXXXXXX 100644 |
12 | --- a/tests/qemu-iotests/056 | 18 | --- a/include/qemu/job.h |
13 | +++ b/tests/qemu-iotests/056 | 19 | +++ b/include/qemu/job.h |
14 | @@ -XXX,XX +XXX,XX @@ backing_img = os.path.join(iotests.test_dir, 'backing.img') | 20 | @@ -XXX,XX +XXX,XX @@ typedef struct JobTxn JobTxn; |
15 | test_img = os.path.join(iotests.test_dir, 'test.img') | 21 | * Long-running operation. |
16 | target_img = os.path.join(iotests.test_dir, 'target.img') | 22 | */ |
17 | 23 | typedef struct Job { | |
18 | +def img_create(img, fmt=iotests.imgfmt, size='64M', **kwargs): | ||
19 | + fullname = os.path.join(iotests.test_dir, '%s.%s' % (img, fmt)) | ||
20 | + optargs = [] | ||
21 | + for k,v in kwargs.iteritems(): | ||
22 | + optargs = optargs + ['-o', '%s=%s' % (k,v)] | ||
23 | + args = ['create', '-f', fmt] + optargs + [fullname, size] | ||
24 | + iotests.qemu_img(*args) | ||
25 | + return fullname | ||
26 | + | 24 | + |
27 | +def try_remove(img): | 25 | + /* Fields set at initialization (job_create), and never modified */ |
28 | + try: | ||
29 | + os.remove(img) | ||
30 | + except OSError: | ||
31 | + pass | ||
32 | + | 26 | + |
33 | +def io_write_patterns(img, patterns): | 27 | /** The ID of the job. May be NULL for internal jobs. */ |
34 | + for pattern in patterns: | 28 | char *id; |
35 | + iotests.qemu_io('-c', 'write -P%s %s %s' % pattern, img) | 29 | |
30 | - /** The type of this job. */ | ||
31 | + /** | ||
32 | + * The type of this job. | ||
33 | + * All callbacks are called with job_mutex *not* held. | ||
34 | + */ | ||
35 | const JobDriver *driver; | ||
36 | |||
37 | - /** Reference count of the block job */ | ||
38 | - int refcnt; | ||
39 | - | ||
40 | - /** Current state; See @JobStatus for details. */ | ||
41 | - JobStatus status; | ||
42 | - | ||
43 | - /** AioContext to run the job coroutine in */ | ||
44 | - AioContext *aio_context; | ||
45 | - | ||
46 | /** | ||
47 | * The coroutine that executes the job. If not NULL, it is reentered when | ||
48 | * busy is false and the job is cancelled. | ||
49 | + * Initialized in job_start() | ||
50 | */ | ||
51 | Coroutine *co; | ||
52 | |||
53 | + /** True if this job should automatically finalize itself */ | ||
54 | + bool auto_finalize; | ||
55 | + | ||
56 | + /** True if this job should automatically dismiss itself */ | ||
57 | + bool auto_dismiss; | ||
58 | + | ||
59 | + /** The completion function that will be called when the job completes. */ | ||
60 | + BlockCompletionFunc *cb; | ||
61 | + | ||
62 | + /** The opaque value that is passed to the completion function. */ | ||
63 | + void *opaque; | ||
64 | + | ||
65 | + /* ProgressMeter API is thread-safe */ | ||
66 | + ProgressMeter progress; | ||
36 | + | 67 | + |
37 | + | 68 | + |
38 | class TestSyncModesNoneAndTop(iotests.QMPTestCase): | 69 | + /** Protected by AioContext lock */ |
39 | image_len = 64 * 1024 * 1024 # MB | ||
40 | |||
41 | @@ -XXX,XX +XXX,XX @@ class TestBeforeWriteNotifier(iotests.QMPTestCase): | ||
42 | event = self.cancel_and_wait() | ||
43 | self.assert_qmp(event, 'data/type', 'backup') | ||
44 | |||
45 | +class BackupTest(iotests.QMPTestCase): | ||
46 | + def setUp(self): | ||
47 | + self.vm = iotests.VM() | ||
48 | + self.test_img = img_create('test') | ||
49 | + self.dest_img = img_create('dest') | ||
50 | + self.vm.add_drive(self.test_img) | ||
51 | + self.vm.launch() | ||
52 | + | 70 | + |
53 | + def tearDown(self): | 71 | + /** AioContext to run the job coroutine in */ |
54 | + self.vm.shutdown() | 72 | + AioContext *aio_context; |
55 | + try_remove(self.test_img) | ||
56 | + try_remove(self.dest_img) | ||
57 | + | 73 | + |
58 | + def hmp_io_writes(self, drive, patterns): | 74 | + /** Reference count of the block job */ |
59 | + for pattern in patterns: | 75 | + int refcnt; |
60 | + self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern) | ||
61 | + self.vm.hmp_qemu_io(drive, 'flush') | ||
62 | + | 76 | + |
63 | + def qmp_backup_and_wait(self, cmd='drive-backup', serror=None, | 77 | + /** Current state; See @JobStatus for details. */ |
64 | + aerror=None, **kwargs): | 78 | + JobStatus status; |
65 | + if not self.qmp_backup(cmd, serror, **kwargs): | ||
66 | + return False | ||
67 | + return self.qmp_backup_wait(kwargs['device'], aerror) | ||
68 | + | 79 | + |
69 | + def qmp_backup(self, cmd='drive-backup', | 80 | /** |
70 | + error=None, **kwargs): | 81 | * Timer that is used by @job_sleep_ns. Accessed under job_mutex (in |
71 | + self.assertTrue('device' in kwargs) | 82 | * job.c). |
72 | + res = self.vm.qmp(cmd, **kwargs) | 83 | @@ -XXX,XX +XXX,XX @@ typedef struct Job { |
73 | + if error: | 84 | /** Set to true when the job has deferred work to the main loop. */ |
74 | + self.assert_qmp(res, 'error/desc', error) | 85 | bool deferred_to_main_loop; |
75 | + return False | 86 | |
76 | + self.assert_qmp(res, 'return', {}) | 87 | - /** True if this job should automatically finalize itself */ |
77 | + return True | 88 | - bool auto_finalize; |
78 | + | 89 | - |
79 | + def qmp_backup_wait(self, device, error=None): | 90 | - /** True if this job should automatically dismiss itself */ |
80 | + event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED", | 91 | - bool auto_dismiss; |
81 | + match={'data': {'device': device}}) | 92 | - |
82 | + self.assertNotEqual(event, None) | 93 | - ProgressMeter progress; |
83 | + try: | 94 | - |
84 | + failure = self.dictpath(event, 'data/error') | 95 | /** |
85 | + except AssertionError: | 96 | * Return code from @run and/or @prepare callback(s). |
86 | + # Backup succeeded. | 97 | * Not final until the job has reached the CONCLUDED status. |
87 | + self.assert_qmp(event, 'data/offset', event['data']['len']) | 98 | @@ -XXX,XX +XXX,XX @@ typedef struct Job { |
88 | + return True | 99 | */ |
89 | + else: | 100 | Error *err; |
90 | + # Failure. | 101 | |
91 | + self.assert_qmp(event, 'data/error', qerror) | 102 | - /** The completion function that will be called when the job completes. */ |
92 | + return False | 103 | - BlockCompletionFunc *cb; |
93 | + | 104 | - |
94 | + def test_dismiss_false(self): | 105 | - /** The opaque value that is passed to the completion function. */ |
95 | + res = self.vm.qmp('query-block-jobs') | 106 | - void *opaque; |
96 | + self.assert_qmp(res, 'return', []) | 107 | - |
97 | + self.qmp_backup_and_wait(device='drive0', format=iotests.imgfmt, | 108 | /** Notifiers called when a cancelled job is finalised */ |
98 | + sync='full', target=self.dest_img, | 109 | NotifierList on_finalize_cancelled; |
99 | + auto_dismiss=True) | 110 | |
100 | + res = self.vm.qmp('query-block-jobs') | 111 | @@ -XXX,XX +XXX,XX @@ typedef struct Job { |
101 | + self.assert_qmp(res, 'return', []) | 112 | |
102 | + | 113 | /** |
103 | + def test_dismiss_true(self): | 114 | * Callbacks and other information about a Job driver. |
104 | + res = self.vm.qmp('query-block-jobs') | 115 | + * All callbacks are invoked with job_mutex *not* held. |
105 | + self.assert_qmp(res, 'return', []) | 116 | */ |
106 | + self.qmp_backup_and_wait(device='drive0', format=iotests.imgfmt, | 117 | struct JobDriver { |
107 | + sync='full', target=self.dest_img, | 118 | |
108 | + auto_dismiss=False) | 119 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn job_yield(Job *job); |
109 | + res = self.vm.qmp('query-block-jobs') | 120 | */ |
110 | + self.assert_qmp(res, 'return[0]/status', 'concluded') | 121 | void coroutine_fn job_sleep_ns(Job *job, int64_t ns); |
111 | + res = self.vm.qmp('block-job-dismiss', id='drive0') | 122 | |
112 | + self.assert_qmp(res, 'return', {}) | 123 | - |
113 | + res = self.vm.qmp('query-block-jobs') | 124 | /** Returns the JobType of a given Job. */ |
114 | + self.assert_qmp(res, 'return', []) | 125 | JobType job_type(const Job *job); |
115 | + | 126 | |
116 | + def test_dismiss_bad_id(self): | ||
117 | + res = self.vm.qmp('query-block-jobs') | ||
118 | + self.assert_qmp(res, 'return', []) | ||
119 | + res = self.vm.qmp('block-job-dismiss', id='foobar') | ||
120 | + self.assert_qmp(res, 'error/class', 'DeviceNotActive') | ||
121 | + | ||
122 | + def test_dismiss_collision(self): | ||
123 | + res = self.vm.qmp('query-block-jobs') | ||
124 | + self.assert_qmp(res, 'return', []) | ||
125 | + self.qmp_backup_and_wait(device='drive0', format=iotests.imgfmt, | ||
126 | + sync='full', target=self.dest_img, | ||
127 | + auto_dismiss=False) | ||
128 | + res = self.vm.qmp('query-block-jobs') | ||
129 | + self.assert_qmp(res, 'return[0]/status', 'concluded') | ||
130 | + # Leave zombie job un-dismissed, observe a failure: | ||
131 | + res = self.qmp_backup_and_wait(serror='Need a root block node', | ||
132 | + device='drive0', format=iotests.imgfmt, | ||
133 | + sync='full', target=self.dest_img, | ||
134 | + auto_dismiss=False) | ||
135 | + self.assertEqual(res, False) | ||
136 | + # OK, dismiss the zombie. | ||
137 | + res = self.vm.qmp('block-job-dismiss', id='drive0') | ||
138 | + self.assert_qmp(res, 'return', {}) | ||
139 | + res = self.vm.qmp('query-block-jobs') | ||
140 | + self.assert_qmp(res, 'return', []) | ||
141 | + # Ensure it's really gone. | ||
142 | + self.qmp_backup_and_wait(device='drive0', format=iotests.imgfmt, | ||
143 | + sync='full', target=self.dest_img, | ||
144 | + auto_dismiss=False) | ||
145 | + | ||
146 | + def dismissal_failure(self, dismissal_opt): | ||
147 | + res = self.vm.qmp('query-block-jobs') | ||
148 | + self.assert_qmp(res, 'return', []) | ||
149 | + # Give blkdebug something to chew on | ||
150 | + self.hmp_io_writes('drive0', | ||
151 | + (('0x9a', 0, 512), | ||
152 | + ('0x55', '8M', '352k'), | ||
153 | + ('0x78', '15872k', '1M'))) | ||
154 | + # Add destination node via blkdebug | ||
155 | + res = self.vm.qmp('blockdev-add', | ||
156 | + node_name='target0', | ||
157 | + driver=iotests.imgfmt, | ||
158 | + file={ | ||
159 | + 'driver': 'blkdebug', | ||
160 | + 'image': { | ||
161 | + 'driver': 'file', | ||
162 | + 'filename': self.dest_img | ||
163 | + }, | ||
164 | + 'inject-error': [{ | ||
165 | + 'event': 'write_aio', | ||
166 | + 'errno': 5, | ||
167 | + 'immediately': False, | ||
168 | + 'once': True | ||
169 | + }], | ||
170 | + }) | ||
171 | + self.assert_qmp(res, 'return', {}) | ||
172 | + | ||
173 | + res = self.qmp_backup(cmd='blockdev-backup', | ||
174 | + device='drive0', target='target0', | ||
175 | + on_target_error='stop', | ||
176 | + sync='full', | ||
177 | + auto_dismiss=dismissal_opt) | ||
178 | + self.assertTrue(res) | ||
179 | + event = self.vm.event_wait(name="BLOCK_JOB_ERROR", | ||
180 | + match={'data': {'device': 'drive0'}}) | ||
181 | + self.assertNotEqual(event, None) | ||
182 | + # OK, job should be wedged | ||
183 | + res = self.vm.qmp('query-block-jobs') | ||
184 | + self.assert_qmp(res, 'return[0]/status', 'paused') | ||
185 | + res = self.vm.qmp('block-job-dismiss', id='drive0') | ||
186 | + self.assert_qmp(res, 'error/desc', | ||
187 | + "Job 'drive0' in state 'paused' cannot accept" | ||
188 | + " command verb 'dismiss'") | ||
189 | + res = self.vm.qmp('query-block-jobs') | ||
190 | + self.assert_qmp(res, 'return[0]/status', 'paused') | ||
191 | + # OK, unstick job and move forward. | ||
192 | + res = self.vm.qmp('block-job-resume', device='drive0') | ||
193 | + self.assert_qmp(res, 'return', {}) | ||
194 | + # And now we need to wait for it to conclude; | ||
195 | + res = self.qmp_backup_wait(device='drive0') | ||
196 | + self.assertTrue(res) | ||
197 | + if not dismissal_opt: | ||
198 | + # Job should now be languishing: | ||
199 | + res = self.vm.qmp('query-block-jobs') | ||
200 | + self.assert_qmp(res, 'return[0]/status', 'concluded') | ||
201 | + res = self.vm.qmp('block-job-dismiss', id='drive0') | ||
202 | + self.assert_qmp(res, 'return', {}) | ||
203 | + res = self.vm.qmp('query-block-jobs') | ||
204 | + self.assert_qmp(res, 'return', []) | ||
205 | + | ||
206 | + def test_dismiss_premature(self): | ||
207 | + self.dismissal_failure(False) | ||
208 | + | ||
209 | + def test_dismiss_erroneous(self): | ||
210 | + self.dismissal_failure(True) | ||
211 | + | ||
212 | if __name__ == '__main__': | ||
213 | iotests.main(supported_fmts=['qcow2', 'qed']) | ||
214 | diff --git a/tests/qemu-iotests/056.out b/tests/qemu-iotests/056.out | ||
215 | index XXXXXXX..XXXXXXX 100644 | ||
216 | --- a/tests/qemu-iotests/056.out | ||
217 | +++ b/tests/qemu-iotests/056.out | ||
218 | @@ -XXX,XX +XXX,XX @@ | ||
219 | -... | ||
220 | +......... | ||
221 | ---------------------------------------------------------------------- | ||
222 | -Ran 3 tests | ||
223 | +Ran 9 tests | ||
224 | |||
225 | OK | ||
226 | -- | 127 | -- |
227 | 2.13.6 | 128 | 2.37.3 |
228 | |||
229 | diff view generated by jsdifflib |
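
The dismissal tests above all revolve around one gate: a command verb such as 'dismiss' is only accepted in certain job states, which is why the QMP reply reads "Job 'drive0' in state 'paused' cannot accept command verb 'dismiss'" while the job is wedged, and why the same command succeeds once the job has reached 'concluded'. The following is a minimal, self-contained C sketch of that kind of verb/state permission check; the enum values, table contents and message text are invented for illustration and are not the actual QEMU definitions.

    #include <stdbool.h>
    #include <stdio.h>

    enum status { ST_CREATED, ST_RUNNING, ST_PAUSED, ST_READY, ST_CONCLUDED, ST__MAX };
    enum verb   { VB_CANCEL, VB_PAUSE, VB_RESUME, VB_COMPLETE, VB_DISMISS, VB__MAX };

    /* verb_allowed[verb][status]: may this verb be applied in this state?
     * (Illustrative contents only.) */
    static const bool verb_allowed[VB__MAX][ST__MAX] = {
        /*               CREATED RUNNING PAUSED READY  CONCLUDED */
        [VB_CANCEL]   = { true,   true,   true,  true,  false },
        [VB_PAUSE]    = { false,  true,   true,  true,  false },
        [VB_RESUME]   = { false,  false,  true,  false, false },
        [VB_COMPLETE] = { false,  false,  false, true,  false },
        [VB_DISMISS]  = { false,  false,  false, false, true  },
    };

    static const char *status_name[ST__MAX] = {
        "created", "running", "paused", "ready", "concluded",
    };

    /* Returns 0 if the verb is allowed, -1 (and prints a diagnostic) if not. */
    static int apply_verb(enum verb v, enum status s)
    {
        if (verb_allowed[v][s]) {
            return 0;
        }
        fprintf(stderr, "job in state '%s' cannot accept this verb\n",
                status_name[s]);
        return -1;
    }

    int main(void)
    {
        /* Mirrors the test flow: 'dismiss' is refused while paused... */
        apply_verb(VB_DISMISS, ST_PAUSED);
        /* ...and accepted once the job has concluded. */
        return apply_verb(VB_DISMISS, ST_CONCLUDED);
    }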
1 | From: John Snow <jsnow@redhat.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Whatever the state a blockjob is in, it should be able to be canceled | 3 | job_event_* functions can all be static, as they are not used |
4 | by the block layer. | 4 | outside job.c. |
5 | 5 | ||
6 | Signed-off-by: John Snow <jsnow@redhat.com> | 6 | Same applies for job_txn_add_job(). |
7 | |||
8 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> | ||
9 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
10 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
12 | Message-Id: <20220926093214.506243-4-eesposit@redhat.com> | ||
7 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 13 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
8 | --- | 14 | --- |
9 | tests/test-blockjob.c | 233 +++++++++++++++++++++++++++++++++++++++++++++++++- | 15 | include/qemu/job.h | 18 ------------------ |
10 | 1 file changed, 229 insertions(+), 4 deletions(-) | 16 | job.c | 22 +++++++++++++++++++--- |
17 | 2 files changed, 19 insertions(+), 21 deletions(-) | ||
11 | 18 | ||
12 | diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c | 19 | diff --git a/include/qemu/job.h b/include/qemu/job.h |
13 | index XXXXXXX..XXXXXXX 100644 | 20 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/tests/test-blockjob.c | 21 | --- a/include/qemu/job.h |
15 | +++ b/tests/test-blockjob.c | 22 | +++ b/include/qemu/job.h |
16 | @@ -XXX,XX +XXX,XX @@ static void block_job_cb(void *opaque, int ret) | 23 | @@ -XXX,XX +XXX,XX @@ JobTxn *job_txn_new(void); |
24 | */ | ||
25 | void job_txn_unref(JobTxn *txn); | ||
26 | |||
27 | -/** | ||
28 | - * @txn: The transaction (may be NULL) | ||
29 | - * @job: Job to add to the transaction | ||
30 | - * | ||
31 | - * Add @job to the transaction. The @job must not already be in a transaction. | ||
32 | - * The caller must call either job_txn_unref() or job_completed() to release | ||
33 | - * the reference that is automatically grabbed here. | ||
34 | - * | ||
35 | - * If @txn is NULL, the function does nothing. | ||
36 | - */ | ||
37 | -void job_txn_add_job(JobTxn *txn, Job *job); | ||
38 | - | ||
39 | /** | ||
40 | * Create a new long-running job and return it. | ||
41 | * | ||
42 | @@ -XXX,XX +XXX,XX @@ void job_progress_set_remaining(Job *job, uint64_t remaining); | ||
43 | */ | ||
44 | void job_progress_increase_remaining(Job *job, uint64_t delta); | ||
45 | |||
46 | -/** To be called when a cancelled job is finalised. */ | ||
47 | -void job_event_cancelled(Job *job); | ||
48 | - | ||
49 | -/** To be called when a successfully completed job is finalised. */ | ||
50 | -void job_event_completed(Job *job); | ||
51 | - | ||
52 | /** | ||
53 | * Conditionally enter the job coroutine if the job is ready to run, not | ||
54 | * already busy and fn() returns true. fn() is called while under the job_lock | ||
55 | diff --git a/job.c b/job.c | ||
56 | index XXXXXXX..XXXXXXX 100644 | ||
57 | --- a/job.c | ||
58 | +++ b/job.c | ||
59 | @@ -XXX,XX +XXX,XX @@ void job_txn_unref(JobTxn *txn) | ||
60 | } | ||
61 | } | ||
62 | |||
63 | -void job_txn_add_job(JobTxn *txn, Job *job) | ||
64 | +/** | ||
65 | + * @txn: The transaction (may be NULL) | ||
66 | + * @job: Job to add to the transaction | ||
67 | + * | ||
68 | + * Add @job to the transaction. The @job must not already be in a transaction. | ||
69 | + * The caller must call either job_txn_unref() or job_completed() to release | ||
70 | + * the reference that is automatically grabbed here. | ||
71 | + * | ||
72 | + * If @txn is NULL, the function does nothing. | ||
73 | + */ | ||
74 | +static void job_txn_add_job(JobTxn *txn, Job *job) | ||
17 | { | 75 | { |
76 | if (!txn) { | ||
77 | return; | ||
78 | @@ -XXX,XX +XXX,XX @@ void job_progress_increase_remaining(Job *job, uint64_t delta) | ||
79 | progress_increase_remaining(&job->progress, delta); | ||
18 | } | 80 | } |
19 | 81 | ||
20 | -static BlockJob *do_test_id(BlockBackend *blk, const char *id, | 82 | -void job_event_cancelled(Job *job) |
21 | - bool should_succeed) | 83 | +/** |
22 | +static BlockJob *mk_job(BlockBackend *blk, const char *id, | 84 | + * To be called when a cancelled job is finalised. |
23 | + const BlockJobDriver *drv, bool should_succeed, | 85 | + */ |
24 | + int flags) | 86 | +static void job_event_cancelled(Job *job) |
25 | { | 87 | { |
26 | BlockJob *job; | 88 | notifier_list_notify(&job->on_finalize_cancelled, job); |
27 | Error *errp = NULL; | ||
28 | |||
29 | - job = block_job_create(id, &test_block_job_driver, NULL, blk_bs(blk), | ||
30 | - 0, BLK_PERM_ALL, 0, BLOCK_JOB_DEFAULT, block_job_cb, | ||
31 | + job = block_job_create(id, drv, NULL, blk_bs(blk), | ||
32 | + 0, BLK_PERM_ALL, 0, flags, block_job_cb, | ||
33 | NULL, &errp); | ||
34 | if (should_succeed) { | ||
35 | g_assert_null(errp); | ||
36 | @@ -XXX,XX +XXX,XX @@ static BlockJob *do_test_id(BlockBackend *blk, const char *id, | ||
37 | return job; | ||
38 | } | 89 | } |
39 | 90 | ||
40 | +static BlockJob *do_test_id(BlockBackend *blk, const char *id, | 91 | -void job_event_completed(Job *job) |
41 | + bool should_succeed) | 92 | +/** |
42 | +{ | 93 | + * To be called when a successfully completed job is finalised. |
43 | + return mk_job(blk, id, &test_block_job_driver, | 94 | + */ |
44 | + should_succeed, BLOCK_JOB_DEFAULT); | 95 | +static void job_event_completed(Job *job) |
45 | +} | ||
46 | + | ||
47 | /* This creates a BlockBackend (optionally with a name) with a | ||
48 | * BlockDriverState inserted. */ | ||
49 | static BlockBackend *create_blk(const char *name) | ||
50 | @@ -XXX,XX +XXX,XX @@ static void test_job_ids(void) | ||
51 | destroy_blk(blk[2]); | ||
52 | } | ||
53 | |||
54 | +typedef struct CancelJob { | ||
55 | + BlockJob common; | ||
56 | + BlockBackend *blk; | ||
57 | + bool should_converge; | ||
58 | + bool should_complete; | ||
59 | + bool completed; | ||
60 | +} CancelJob; | ||
61 | + | ||
62 | +static void cancel_job_completed(BlockJob *job, void *opaque) | ||
63 | +{ | ||
64 | + CancelJob *s = opaque; | ||
65 | + s->completed = true; | ||
66 | + block_job_completed(job, 0); | ||
67 | +} | ||
68 | + | ||
69 | +static void cancel_job_complete(BlockJob *job, Error **errp) | ||
70 | +{ | ||
71 | + CancelJob *s = container_of(job, CancelJob, common); | ||
72 | + s->should_complete = true; | ||
73 | +} | ||
74 | + | ||
75 | +static void coroutine_fn cancel_job_start(void *opaque) | ||
76 | +{ | ||
77 | + CancelJob *s = opaque; | ||
78 | + | ||
79 | + while (!s->should_complete) { | ||
80 | + if (block_job_is_cancelled(&s->common)) { | ||
81 | + goto defer; | ||
82 | + } | ||
83 | + | ||
84 | + if (!s->common.ready && s->should_converge) { | ||
85 | + block_job_event_ready(&s->common); | ||
86 | + } | ||
87 | + | ||
88 | + block_job_sleep_ns(&s->common, 100000); | ||
89 | + } | ||
90 | + | ||
91 | + defer: | ||
92 | + block_job_defer_to_main_loop(&s->common, cancel_job_completed, s); | ||
93 | +} | ||
94 | + | ||
95 | +static const BlockJobDriver test_cancel_driver = { | ||
96 | + .instance_size = sizeof(CancelJob), | ||
97 | + .start = cancel_job_start, | ||
98 | + .complete = cancel_job_complete, | ||
99 | +}; | ||
100 | + | ||
101 | +static CancelJob *create_common(BlockJob **pjob) | ||
102 | +{ | ||
103 | + BlockBackend *blk; | ||
104 | + BlockJob *job; | ||
105 | + CancelJob *s; | ||
106 | + | ||
107 | + blk = create_blk(NULL); | ||
108 | + job = mk_job(blk, "Steve", &test_cancel_driver, true, | ||
109 | + BLOCK_JOB_MANUAL_FINALIZE | BLOCK_JOB_MANUAL_DISMISS); | ||
110 | + block_job_ref(job); | ||
111 | + assert(job->status == BLOCK_JOB_STATUS_CREATED); | ||
112 | + s = container_of(job, CancelJob, common); | ||
113 | + s->blk = blk; | ||
114 | + | ||
115 | + *pjob = job; | ||
116 | + return s; | ||
117 | +} | ||
118 | + | ||
119 | +static void cancel_common(CancelJob *s) | ||
120 | +{ | ||
121 | + BlockJob *job = &s->common; | ||
122 | + BlockBackend *blk = s->blk; | ||
123 | + BlockJobStatus sts = job->status; | ||
124 | + | ||
125 | + block_job_cancel_sync(job); | ||
126 | + if ((sts != BLOCK_JOB_STATUS_CREATED) && | ||
127 | + (sts != BLOCK_JOB_STATUS_CONCLUDED)) { | ||
128 | + BlockJob *dummy = job; | ||
129 | + block_job_dismiss(&dummy, &error_abort); | ||
130 | + } | ||
131 | + assert(job->status == BLOCK_JOB_STATUS_NULL); | ||
132 | + block_job_unref(job); | ||
133 | + destroy_blk(blk); | ||
134 | +} | ||
135 | + | ||
136 | +static void test_cancel_created(void) | ||
137 | +{ | ||
138 | + BlockJob *job; | ||
139 | + CancelJob *s; | ||
140 | + | ||
141 | + s = create_common(&job); | ||
142 | + cancel_common(s); | ||
143 | +} | ||
144 | + | ||
145 | +static void test_cancel_running(void) | ||
146 | +{ | ||
147 | + BlockJob *job; | ||
148 | + CancelJob *s; | ||
149 | + | ||
150 | + s = create_common(&job); | ||
151 | + | ||
152 | + block_job_start(job); | ||
153 | + assert(job->status == BLOCK_JOB_STATUS_RUNNING); | ||
154 | + | ||
155 | + cancel_common(s); | ||
156 | +} | ||
157 | + | ||
158 | +static void test_cancel_paused(void) | ||
159 | +{ | ||
160 | + BlockJob *job; | ||
161 | + CancelJob *s; | ||
162 | + | ||
163 | + s = create_common(&job); | ||
164 | + | ||
165 | + block_job_start(job); | ||
166 | + assert(job->status == BLOCK_JOB_STATUS_RUNNING); | ||
167 | + | ||
168 | + block_job_user_pause(job, &error_abort); | ||
169 | + block_job_enter(job); | ||
170 | + assert(job->status == BLOCK_JOB_STATUS_PAUSED); | ||
171 | + | ||
172 | + cancel_common(s); | ||
173 | +} | ||
174 | + | ||
175 | +static void test_cancel_ready(void) | ||
176 | +{ | ||
177 | + BlockJob *job; | ||
178 | + CancelJob *s; | ||
179 | + | ||
180 | + s = create_common(&job); | ||
181 | + | ||
182 | + block_job_start(job); | ||
183 | + assert(job->status == BLOCK_JOB_STATUS_RUNNING); | ||
184 | + | ||
185 | + s->should_converge = true; | ||
186 | + block_job_enter(job); | ||
187 | + assert(job->status == BLOCK_JOB_STATUS_READY); | ||
188 | + | ||
189 | + cancel_common(s); | ||
190 | +} | ||
191 | + | ||
192 | +static void test_cancel_standby(void) | ||
193 | +{ | ||
194 | + BlockJob *job; | ||
195 | + CancelJob *s; | ||
196 | + | ||
197 | + s = create_common(&job); | ||
198 | + | ||
199 | + block_job_start(job); | ||
200 | + assert(job->status == BLOCK_JOB_STATUS_RUNNING); | ||
201 | + | ||
202 | + s->should_converge = true; | ||
203 | + block_job_enter(job); | ||
204 | + assert(job->status == BLOCK_JOB_STATUS_READY); | ||
205 | + | ||
206 | + block_job_user_pause(job, &error_abort); | ||
207 | + block_job_enter(job); | ||
208 | + assert(job->status == BLOCK_JOB_STATUS_STANDBY); | ||
209 | + | ||
210 | + cancel_common(s); | ||
211 | +} | ||
212 | + | ||
213 | +static void test_cancel_pending(void) | ||
214 | +{ | ||
215 | + BlockJob *job; | ||
216 | + CancelJob *s; | ||
217 | + | ||
218 | + s = create_common(&job); | ||
219 | + | ||
220 | + block_job_start(job); | ||
221 | + assert(job->status == BLOCK_JOB_STATUS_RUNNING); | ||
222 | + | ||
223 | + s->should_converge = true; | ||
224 | + block_job_enter(job); | ||
225 | + assert(job->status == BLOCK_JOB_STATUS_READY); | ||
226 | + | ||
227 | + block_job_complete(job, &error_abort); | ||
228 | + block_job_enter(job); | ||
229 | + while (!s->completed) { | ||
230 | + aio_poll(qemu_get_aio_context(), true); | ||
231 | + } | ||
232 | + assert(job->status == BLOCK_JOB_STATUS_PENDING); | ||
233 | + | ||
234 | + cancel_common(s); | ||
235 | +} | ||
236 | + | ||
237 | +static void test_cancel_concluded(void) | ||
238 | +{ | ||
239 | + BlockJob *job; | ||
240 | + CancelJob *s; | ||
241 | + | ||
242 | + s = create_common(&job); | ||
243 | + | ||
244 | + block_job_start(job); | ||
245 | + assert(job->status == BLOCK_JOB_STATUS_RUNNING); | ||
246 | + | ||
247 | + s->should_converge = true; | ||
248 | + block_job_enter(job); | ||
249 | + assert(job->status == BLOCK_JOB_STATUS_READY); | ||
250 | + | ||
251 | + block_job_complete(job, &error_abort); | ||
252 | + block_job_enter(job); | ||
253 | + while (!s->completed) { | ||
254 | + aio_poll(qemu_get_aio_context(), true); | ||
255 | + } | ||
256 | + assert(job->status == BLOCK_JOB_STATUS_PENDING); | ||
257 | + | ||
258 | + block_job_finalize(job, &error_abort); | ||
259 | + assert(job->status == BLOCK_JOB_STATUS_CONCLUDED); | ||
260 | + | ||
261 | + cancel_common(s); | ||
262 | +} | ||
263 | + | ||
264 | int main(int argc, char **argv) | ||
265 | { | 96 | { |
266 | qemu_init_main_loop(&error_abort); | 97 | notifier_list_notify(&job->on_finalize_completed, job); |
267 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv) | ||
268 | |||
269 | g_test_init(&argc, &argv, NULL); | ||
270 | g_test_add_func("/blockjob/ids", test_job_ids); | ||
271 | + g_test_add_func("/blockjob/cancel/created", test_cancel_created); | ||
272 | + g_test_add_func("/blockjob/cancel/running", test_cancel_running); | ||
273 | + g_test_add_func("/blockjob/cancel/paused", test_cancel_paused); | ||
274 | + g_test_add_func("/blockjob/cancel/ready", test_cancel_ready); | ||
275 | + g_test_add_func("/blockjob/cancel/standby", test_cancel_standby); | ||
276 | + g_test_add_func("/blockjob/cancel/pending", test_cancel_pending); | ||
277 | + g_test_add_func("/blockjob/cancel/concluded", test_cancel_concluded); | ||
278 | return g_test_run(); | ||
279 | } | 98 | } |
280 | -- | 99 | -- |
281 | 2.13.6 | 100 | 2.37.3 |
282 | |||
283 | diff view generated by jsdifflib |
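
One detail worth spelling out from the CancelJob driver above: the driver-specific state embeds the generic job as its first member, and callbacks that only receive the generic pointer get back to the surrounding state with container_of(). The sketch below models just that embedding pattern; GenericJob, MyJob and complete_cb are stand-in names for the example, not QEMU types.

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *) ((char *) (ptr) - offsetof(type, member)))

    typedef struct GenericJob {
        const char *id;
    } GenericJob;

    typedef struct MyJob {
        GenericJob common;      /* embedded generic part, kept as first member */
        int should_complete;
    } MyJob;

    /* A callback that is only handed the generic part... */
    static void complete_cb(GenericJob *job)
    {
        /* ...recovers the driver-specific state wrapped around it. */
        MyJob *s = container_of(job, MyJob, common);
        s->should_complete = 1;
        printf("%s: should_complete=%d\n", job->id, s->should_complete);
    }

    int main(void)
    {
        MyJob job = { .common = { .id = "Steve" }, .should_complete = 0 };
        complete_cb(&job.common);
        return 0;
    }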
1 | From: John Snow <jsnow@redhat.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Split out the pause command into the actual pause and the wait. | 3 | Same as the AIO_WAIT_WHILE macro, but if we are in the main loop |
4 | Not every usage presently needs to resubmit a pause request. | 4 | it does not release and then re-acquire ctx_'s AioContext. |
5 | 5 | ||
6 | The intent with the next commit will be to explicitly disallow | 6 | Once all AioContext locks go away, this macro will replace |
7 | redundant or meaningless pause/resume requests, so the tests | 7 | AIO_WAIT_WHILE. |
8 | need to become more judicious to reflect that. | ||
9 | 8 | ||
10 | Signed-off-by: John Snow <jsnow@redhat.com> | 9 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
10 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
11 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
12 | Message-Id: <20220926093214.506243-5-eesposit@redhat.com> | ||
11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 13 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
12 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
13 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 14 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
14 | --- | 15 | --- |
15 | tests/qemu-iotests/030 | 6 ++---- | 16 | include/block/aio-wait.h | 17 +++++++++++++---- |
16 | tests/qemu-iotests/055 | 17 ++++++----------- | 17 | 1 file changed, 13 insertions(+), 4 deletions(-) |
17 | tests/qemu-iotests/iotests.py | 12 ++++++++---- | ||
18 | 3 files changed, 16 insertions(+), 19 deletions(-) | ||
19 | 18 | ||
20 | diff --git a/tests/qemu-iotests/030 b/tests/qemu-iotests/030 | 19 | diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h |
21 | index XXXXXXX..XXXXXXX 100755 | ||
22 | --- a/tests/qemu-iotests/030 | ||
23 | +++ b/tests/qemu-iotests/030 | ||
24 | @@ -XXX,XX +XXX,XX @@ class TestSingleDrive(iotests.QMPTestCase): | ||
25 | result = self.vm.qmp('block-stream', device='drive0') | ||
26 | self.assert_qmp(result, 'return', {}) | ||
27 | |||
28 | - result = self.vm.qmp('block-job-pause', device='drive0') | ||
29 | - self.assert_qmp(result, 'return', {}) | ||
30 | - | ||
31 | + self.pause_job('drive0', wait=False) | ||
32 | self.vm.resume_drive('drive0') | ||
33 | - self.pause_job('drive0') | ||
34 | + self.pause_wait('drive0') | ||
35 | |||
36 | result = self.vm.qmp('query-block-jobs') | ||
37 | offset = self.dictpath(result, 'return[0]/offset') | ||
38 | diff --git a/tests/qemu-iotests/055 b/tests/qemu-iotests/055 | ||
39 | index XXXXXXX..XXXXXXX 100755 | ||
40 | --- a/tests/qemu-iotests/055 | ||
41 | +++ b/tests/qemu-iotests/055 | ||
42 | @@ -XXX,XX +XXX,XX @@ class TestSingleDrive(iotests.QMPTestCase): | ||
43 | target=target, sync='full') | ||
44 | self.assert_qmp(result, 'return', {}) | ||
45 | |||
46 | - result = self.vm.qmp('block-job-pause', device='drive0') | ||
47 | - self.assert_qmp(result, 'return', {}) | ||
48 | - | ||
49 | + self.pause_job('drive0', wait=False) | ||
50 | self.vm.resume_drive('drive0') | ||
51 | - self.pause_job('drive0') | ||
52 | + self.pause_wait('drive0') | ||
53 | |||
54 | result = self.vm.qmp('query-block-jobs') | ||
55 | offset = self.dictpath(result, 'return[0]/offset') | ||
56 | @@ -XXX,XX +XXX,XX @@ class TestSingleTransaction(iotests.QMPTestCase): | ||
57 | ]) | ||
58 | self.assert_qmp(result, 'return', {}) | ||
59 | |||
60 | - result = self.vm.qmp('block-job-pause', device='drive0') | ||
61 | - self.assert_qmp(result, 'return', {}) | ||
62 | + self.pause_job('drive0', wait=False) | ||
63 | |||
64 | result = self.vm.qmp('block-job-set-speed', device='drive0', speed=0) | ||
65 | self.assert_qmp(result, 'return', {}) | ||
66 | |||
67 | - self.pause_job('drive0') | ||
68 | + self.pause_wait('drive0') | ||
69 | |||
70 | result = self.vm.qmp('query-block-jobs') | ||
71 | offset = self.dictpath(result, 'return[0]/offset') | ||
72 | @@ -XXX,XX +XXX,XX @@ class TestDriveCompression(iotests.QMPTestCase): | ||
73 | result = self.vm.qmp(cmd, device='drive0', sync='full', compress=True, **args) | ||
74 | self.assert_qmp(result, 'return', {}) | ||
75 | |||
76 | - result = self.vm.qmp('block-job-pause', device='drive0') | ||
77 | - self.assert_qmp(result, 'return', {}) | ||
78 | - | ||
79 | + self.pause_job('drive0', wait=False) | ||
80 | self.vm.resume_drive('drive0') | ||
81 | - self.pause_job('drive0') | ||
82 | + self.pause_wait('drive0') | ||
83 | |||
84 | result = self.vm.qmp('query-block-jobs') | ||
85 | offset = self.dictpath(result, 'return[0]/offset') | ||
86 | diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py | ||
87 | index XXXXXXX..XXXXXXX 100644 | 20 | index XXXXXXX..XXXXXXX 100644 |
88 | --- a/tests/qemu-iotests/iotests.py | 21 | --- a/include/block/aio-wait.h |
89 | +++ b/tests/qemu-iotests/iotests.py | 22 | +++ b/include/block/aio-wait.h |
90 | @@ -XXX,XX +XXX,XX @@ class QMPTestCase(unittest.TestCase): | 23 | @@ -XXX,XX +XXX,XX @@ typedef struct { |
91 | event = self.wait_until_completed(drive=drive) | 24 | extern AioWait global_aio_wait; |
92 | self.assert_qmp(event, 'data/type', 'mirror') | 25 | |
93 | 26 | /** | |
94 | - def pause_job(self, job_id='job0'): | 27 | - * AIO_WAIT_WHILE: |
95 | - result = self.vm.qmp('block-job-pause', device=job_id) | 28 | + * AIO_WAIT_WHILE_INTERNAL: |
96 | - self.assert_qmp(result, 'return', {}) | 29 | * @ctx: the aio context, or NULL if multiple aio contexts (for which the |
97 | - | 30 | * caller does not hold a lock) are involved in the polling condition. |
98 | + def pause_wait(self, job_id='job0'): | 31 | * @cond: wait while this conditional expression is true |
99 | with Timeout(1, "Timeout waiting for job to pause"): | 32 | + * @unlock: whether to unlock and then lock again @ctx. This apples |
100 | while True: | 33 | + * only when waiting for another AioContext from the main loop. |
101 | result = self.vm.qmp('query-block-jobs') | 34 | + * Otherwise it's ignored. |
102 | @@ -XXX,XX +XXX,XX @@ class QMPTestCase(unittest.TestCase): | 35 | * |
103 | if job['device'] == job_id and job['paused'] == True and job['busy'] == False: | 36 | * Wait while a condition is true. Use this to implement synchronous |
104 | return job | 37 | * operations that require event loop activity. |
105 | 38 | @@ -XXX,XX +XXX,XX @@ extern AioWait global_aio_wait; | |
106 | + def pause_job(self, job_id='job0', wait=True): | 39 | * wait on conditions between two IOThreads since that could lead to deadlock, |
107 | + result = self.vm.qmp('block-job-pause', device=job_id) | 40 | * go via the main loop instead. |
108 | + self.assert_qmp(result, 'return', {}) | 41 | */ |
109 | + if wait: | 42 | -#define AIO_WAIT_WHILE(ctx, cond) ({ \ |
110 | + return self.pause_wait(job_id) | 43 | +#define AIO_WAIT_WHILE_INTERNAL(ctx, cond, unlock) ({ \ |
111 | + return result | 44 | bool waited_ = false; \ |
45 | AioWait *wait_ = &global_aio_wait; \ | ||
46 | AioContext *ctx_ = (ctx); \ | ||
47 | @@ -XXX,XX +XXX,XX @@ extern AioWait global_aio_wait; | ||
48 | assert(qemu_get_current_aio_context() == \ | ||
49 | qemu_get_aio_context()); \ | ||
50 | while ((cond)) { \ | ||
51 | - if (ctx_) { \ | ||
52 | + if (unlock && ctx_) { \ | ||
53 | aio_context_release(ctx_); \ | ||
54 | } \ | ||
55 | aio_poll(qemu_get_aio_context(), true); \ | ||
56 | - if (ctx_) { \ | ||
57 | + if (unlock && ctx_) { \ | ||
58 | aio_context_acquire(ctx_); \ | ||
59 | } \ | ||
60 | waited_ = true; \ | ||
61 | @@ -XXX,XX +XXX,XX @@ extern AioWait global_aio_wait; | ||
62 | qatomic_dec(&wait_->num_waiters); \ | ||
63 | waited_; }) | ||
64 | |||
65 | +#define AIO_WAIT_WHILE(ctx, cond) \ | ||
66 | + AIO_WAIT_WHILE_INTERNAL(ctx, cond, true) | ||
112 | + | 67 | + |
113 | 68 | +#define AIO_WAIT_WHILE_UNLOCKED(ctx, cond) \ | |
114 | def notrun(reason): | 69 | + AIO_WAIT_WHILE_INTERNAL(ctx, cond, false) |
115 | '''Skip this test suite''' | 70 | + |
71 | /** | ||
72 | * aio_wait_kick: | ||
73 | * Wake up the main thread if it is waiting on AIO_WAIT_WHILE(). During | ||
116 | -- | 74 | -- |
117 | 2.13.6 | 75 | 2.37.3 |
118 | |||
119 | diff view generated by jsdifflib |
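
To picture what the new 'unlock' argument of AIO_WAIT_WHILE_INTERNAL changes, the sketch below restates the loop from the hunk above as an ordinary function, with a pthread mutex standing in for the AioContext lock and an empty fake_poll() standing in for aio_poll(); FakeCtx, wait_while() and always_false() are invented for this model and are not part of the QEMU API.

    #include <pthread.h>
    #include <stdbool.h>

    typedef struct FakeCtx {
        pthread_mutex_t lock;   /* stands in for the AioContext lock */
    } FakeCtx;

    /* Stand-in for one aio_poll() iteration of the main loop. */
    static void fake_poll(void)
    {
    }

    /* Model of the macro body: loop until cond() is false, optionally
     * dropping the context lock around each poll iteration. */
    static bool wait_while(FakeCtx *ctx, bool (*cond)(void *), void *opaque,
                           bool unlock)
    {
        bool waited = false;

        while (cond(opaque)) {
            if (unlock && ctx) {
                pthread_mutex_unlock(&ctx->lock);   /* AIO_WAIT_WHILE case */
            }
            fake_poll();
            if (unlock && ctx) {
                pthread_mutex_lock(&ctx->lock);     /* re-take before re-checking */
            }
            waited = true;
        }
        return waited;
    }

    static bool always_false(void *opaque)
    {
        (void)opaque;
        return false;
    }

    int main(void)
    {
        FakeCtx ctx = { .lock = PTHREAD_MUTEX_INITIALIZER };

        /* Caller holds the context lock: drop and re-take it around each poll. */
        pthread_mutex_lock(&ctx.lock);
        wait_while(&ctx, always_false, NULL, true);    /* like AIO_WAIT_WHILE */
        pthread_mutex_unlock(&ctx.lock);

        /* Caller does not hold it: the loop never touches the lock. */
        wait_while(&ctx, always_false, NULL, false);   /* like AIO_WAIT_WHILE_UNLOCKED */
        return 0;
    }

With unlock=true the context lock is dropped and re-taken around every poll iteration, as AIO_WAIT_WHILE does; with unlock=false the loop never touches it, matching AIO_WAIT_WHILE_UNLOCKED, where the caller is not holding that lock in the first place.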
1 | This makes the .bdrv_co_create(_opts) implementation of vdi look more | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | like the other recently converted block drivers. | ||
3 | 2 | ||
3 | With "intact" we mean that all job.h functions implicitly | ||
4 | take the lock. Therefore API callers are unmodified. | ||
5 | |||
6 | This means that: | ||
7 | - many static functions that will always be called with the job lock held | ||
8 | become _locked, and call _locked functions | ||
9 | - all public functions take the lock internally if needed, and call _locked | ||
10 | functions | ||
11 | - all public functions called internally by other functions in job.c will have a | ||
12 | _locked counterpart (sometimes public), to avoid deadlocks (job lock already taken). | ||
13 | These functions are not used for now. | ||
14 | - some public functions called only from external files (not job.c) do not | ||
15 | have a _locked() counterpart and take the lock inside. Others won't need | ||
16 | the lock at all because they only use fields set at initialization and | ||
17 | never modified. | ||
18 | |||
19 | job_{lock/unlock} is independent from real_job_{lock/unlock}. | ||
20 | |||
21 | Note: at this stage, job_{lock/unlock} and job lock guard macros | ||
22 | are *nop* | ||
23 | |||
24 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> | ||
25 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
26 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
27 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
28 | Message-Id: <20220926093214.506243-6-eesposit@redhat.com> | ||
4 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 29 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
5 | Reviewed-by: Max Reitz <mreitz@redhat.com> | ||
6 | --- | 30 | --- |
7 | block/vdi.c | 12 +++++++++--- | 31 | include/qemu/job.h | 138 +++++++++- |
8 | 1 file changed, 9 insertions(+), 3 deletions(-) | 32 | job.c | 610 ++++++++++++++++++++++++++++++++------------- |
33 | 2 files changed, 561 insertions(+), 187 deletions(-) | ||
9 | 34 | ||
10 | diff --git a/block/vdi.c b/block/vdi.c | 35 | diff --git a/include/qemu/job.h b/include/qemu/job.h |
11 | index XXXXXXX..XXXXXXX 100644 | 36 | index XXXXXXX..XXXXXXX 100644 |
12 | --- a/block/vdi.c | 37 | --- a/include/qemu/job.h |
13 | +++ b/block/vdi.c | 38 | +++ b/include/qemu/job.h |
14 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_do_create(BlockdevCreateOptions *create_options, | 39 | @@ -XXX,XX +XXX,XX @@ JobTxn *job_txn_new(void); |
15 | 40 | */ | |
16 | logout("\n"); | 41 | void job_txn_unref(JobTxn *txn); |
17 | 42 | ||
18 | - /* Read out options. */ | 43 | +/* |
19 | + /* Validate options and set default values */ | 44 | + * Same as job_txn_unref(), but called with job lock held. |
20 | bytes = vdi_opts->size; | 45 | + * Might release the lock temporarily. |
21 | if (vdi_opts->q_static) { | 46 | + */ |
22 | image_type = VDI_TYPE_STATIC; | 47 | +void job_txn_unref_locked(JobTxn *txn); |
23 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_do_create(BlockdevCreateOptions *create_options, | 48 | + |
24 | goto exit; | 49 | /** |
25 | } | 50 | * Create a new long-running job and return it. |
26 | 51 | + * Called with job_mutex *not* held. | |
27 | + /* Create BlockBackend to write to the image */ | 52 | * |
28 | bs_file = bdrv_open_blockdev_ref(vdi_opts->file, errp); | 53 | * @job_id: The id of the newly-created job, or %NULL for internal jobs |
29 | if (!bs_file) { | 54 | * @driver: The class object for the newly-created job. |
30 | ret = -EIO; | 55 | @@ -XXX,XX +XXX,XX @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn, |
31 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts, | 56 | */ |
57 | void job_ref(Job *job); | ||
58 | |||
59 | +/* Same as job_ref(), but called with job lock held. */ | ||
60 | +void job_ref_locked(Job *job); | ||
61 | + | ||
62 | /** | ||
63 | * Release a reference that was previously acquired with job_ref() or | ||
64 | * job_create(). If it's the last reference to the object, it will be freed. | ||
65 | */ | ||
66 | void job_unref(Job *job); | ||
67 | |||
68 | +/* Same as job_unref(), but called with job lock held. */ | ||
69 | +void job_unref_locked(Job *job); | ||
70 | + | ||
71 | /** | ||
72 | * @job: The job that has made progress | ||
73 | * @done: How much progress the job made since the last call | ||
74 | * | ||
75 | * Updates the progress counter of the job. | ||
76 | + * | ||
77 | + * May be called with mutex held or not held. | ||
78 | */ | ||
79 | void job_progress_update(Job *job, uint64_t done); | ||
80 | |||
81 | @@ -XXX,XX +XXX,XX @@ void job_progress_update(Job *job, uint64_t done); | ||
82 | * | ||
83 | * Sets the expected end value of the progress counter of a job so that a | ||
84 | * completion percentage can be calculated when the progress is updated. | ||
85 | + * | ||
86 | + * May be called with mutex held or not held. | ||
87 | */ | ||
88 | void job_progress_set_remaining(Job *job, uint64_t remaining); | ||
89 | |||
90 | @@ -XXX,XX +XXX,XX @@ void job_progress_set_remaining(Job *job, uint64_t remaining); | ||
91 | * length before, and job_progress_update() afterwards. | ||
92 | * (So the operation acts as a parenthesis in regards to the main job | ||
93 | * operation running in background.) | ||
94 | + * | ||
95 | + * May be called with mutex held or not held. | ||
96 | */ | ||
97 | void job_progress_increase_remaining(Job *job, uint64_t delta); | ||
98 | |||
99 | @@ -XXX,XX +XXX,XX @@ void job_progress_increase_remaining(Job *job, uint64_t delta); | ||
100 | */ | ||
101 | void job_enter_cond(Job *job, bool(*fn)(Job *job)); | ||
102 | |||
103 | +/* | ||
104 | + * Same as job_enter_cond(), but called with job lock held. | ||
105 | + * Might release the lock temporarily. | ||
106 | + */ | ||
107 | +void job_enter_cond_locked(Job *job, bool(*fn)(Job *job)); | ||
108 | + | ||
109 | /** | ||
110 | * @job: A job that has not yet been started. | ||
111 | * | ||
112 | * Begins execution of a job. | ||
113 | * Takes ownership of one reference to the job object. | ||
114 | + * | ||
115 | + * Called with job_mutex *not* held. | ||
116 | */ | ||
117 | void job_start(Job *job); | ||
118 | |||
119 | @@ -XXX,XX +XXX,XX @@ void job_start(Job *job); | ||
120 | * @job: The job to enter. | ||
121 | * | ||
122 | * Continue the specified job by entering the coroutine. | ||
123 | + * Called with job_mutex *not* held. | ||
124 | */ | ||
125 | void job_enter(Job *job); | ||
126 | |||
127 | @@ -XXX,XX +XXX,XX @@ void job_enter(Job *job); | ||
128 | * | ||
129 | * Pause now if job_pause() has been called. Jobs that perform lots of I/O | ||
130 | * must call this between requests so that the job can be paused. | ||
131 | + * | ||
132 | + * Called with job_mutex *not* held. | ||
133 | */ | ||
134 | void coroutine_fn job_pause_point(Job *job); | ||
135 | |||
136 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn job_pause_point(Job *job); | ||
137 | * @job: The job that calls the function. | ||
138 | * | ||
139 | * Yield the job coroutine. | ||
140 | + * Called with job_mutex *not* held. | ||
141 | */ | ||
142 | void coroutine_fn job_yield(Job *job); | ||
143 | |||
144 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn job_yield(Job *job); | ||
145 | * Put the job to sleep (assuming that it wasn't canceled) for @ns | ||
146 | * %QEMU_CLOCK_REALTIME nanoseconds. Canceling the job will immediately | ||
147 | * interrupt the wait. | ||
148 | + * | ||
149 | + * Called with job_mutex *not* held. | ||
150 | */ | ||
151 | void coroutine_fn job_sleep_ns(Job *job, int64_t ns); | ||
152 | |||
153 | @@ -XXX,XX +XXX,XX @@ const char *job_type_str(const Job *job); | ||
154 | /** Returns true if the job should not be visible to the management layer. */ | ||
155 | bool job_is_internal(Job *job); | ||
156 | |||
157 | -/** Returns whether the job is being cancelled. */ | ||
158 | +/** | ||
159 | + * Returns whether the job is being cancelled. | ||
160 | + * Called with job_mutex *not* held. | ||
161 | + */ | ||
162 | bool job_is_cancelled(Job *job); | ||
163 | |||
164 | +/* Same as job_is_cancelled(), but called with job lock held. */ | ||
165 | +bool job_is_cancelled_locked(Job *job); | ||
166 | + | ||
167 | /** | ||
168 | * Returns whether the job is scheduled for cancellation (at an | ||
169 | * indefinite point). | ||
170 | + * Called with job_mutex *not* held. | ||
171 | */ | ||
172 | bool job_cancel_requested(Job *job); | ||
173 | |||
174 | -/** Returns whether the job is in a completed state. */ | ||
175 | +/** | ||
176 | + * Returns whether the job is in a completed state. | ||
177 | + * Called with job_mutex *not* held. | ||
178 | + */ | ||
179 | bool job_is_completed(Job *job); | ||
180 | |||
181 | -/** Returns whether the job is ready to be completed. */ | ||
182 | +/* Same as job_is_completed(), but called with job lock held. */ | ||
183 | +bool job_is_completed_locked(Job *job); | ||
184 | + | ||
185 | +/** | ||
186 | + * Returns whether the job is ready to be completed. | ||
187 | + * Called with job_mutex *not* held. | ||
188 | + */ | ||
189 | bool job_is_ready(Job *job); | ||
190 | |||
191 | +/* Same as job_is_ready(), but called with job lock held. */ | ||
192 | +bool job_is_ready_locked(Job *job); | ||
193 | + | ||
194 | /** | ||
195 | * Request @job to pause at the next pause point. Must be paired with | ||
196 | * job_resume(). If the job is supposed to be resumed by user action, call | ||
197 | @@ -XXX,XX +XXX,XX @@ bool job_is_ready(Job *job); | ||
198 | */ | ||
199 | void job_pause(Job *job); | ||
200 | |||
201 | +/* Same as job_pause(), but called with job lock held. */ | ||
202 | +void job_pause_locked(Job *job); | ||
203 | + | ||
204 | /** Resumes a @job paused with job_pause. */ | ||
205 | void job_resume(Job *job); | ||
206 | |||
207 | +/* | ||
208 | + * Same as job_resume(), but called with job lock held. | ||
209 | + * Might release the lock temporarily. | ||
210 | + */ | ||
211 | +void job_resume_locked(Job *job); | ||
212 | + | ||
213 | /** | ||
214 | * Asynchronously pause the specified @job. | ||
215 | * Do not allow a resume until a matching call to job_user_resume. | ||
216 | */ | ||
217 | void job_user_pause(Job *job, Error **errp); | ||
218 | |||
219 | +/* Same as job_user_pause(), but called with job lock held. */ | ||
220 | +void job_user_pause_locked(Job *job, Error **errp); | ||
221 | + | ||
222 | /** Returns true if the job is user-paused. */ | ||
223 | bool job_user_paused(Job *job); | ||
224 | |||
225 | +/* Same as job_user_paused(), but called with job lock held. */ | ||
226 | +bool job_user_paused_locked(Job *job); | ||
227 | + | ||
228 | /** | ||
229 | * Resume the specified @job. | ||
230 | * Must be paired with a preceding job_user_pause. | ||
231 | */ | ||
232 | void job_user_resume(Job *job, Error **errp); | ||
233 | |||
234 | +/* | ||
235 | + * Same as job_user_resume(), but called with job lock held. | ||
236 | + * Might release the lock temporarily. | ||
237 | + */ | ||
238 | +void job_user_resume_locked(Job *job, Error **errp); | ||
239 | + | ||
240 | /** | ||
241 | * Get the next element from the list of block jobs after @job, or the | ||
242 | * first one if @job is %NULL. | ||
243 | @@ -XXX,XX +XXX,XX @@ void job_user_resume(Job *job, Error **errp); | ||
244 | */ | ||
245 | Job *job_next(Job *job); | ||
246 | |||
247 | +/* Same as job_next(), but called with job lock held. */ | ||
248 | +Job *job_next_locked(Job *job); | ||
249 | + | ||
250 | /** | ||
251 | * Get the job identified by @id (which must not be %NULL). | ||
252 | * | ||
253 | @@ -XXX,XX +XXX,XX @@ Job *job_next(Job *job); | ||
254 | */ | ||
255 | Job *job_get(const char *id); | ||
256 | |||
257 | +/* Same as job_get(), but called with job lock held. */ | ||
258 | +Job *job_get_locked(const char *id); | ||
259 | + | ||
260 | /** | ||
261 | * Check whether the verb @verb can be applied to @job in its current state. | ||
262 | * Returns 0 if the verb can be applied; otherwise errp is set and -EPERM | ||
263 | @@ -XXX,XX +XXX,XX @@ Job *job_get(const char *id); | ||
264 | */ | ||
265 | int job_apply_verb(Job *job, JobVerb verb, Error **errp); | ||
266 | |||
267 | -/** The @job could not be started, free it. */ | ||
268 | +/* Same as job_apply_verb, but called with job lock held. */ | ||
269 | +int job_apply_verb_locked(Job *job, JobVerb verb, Error **errp); | ||
270 | + | ||
271 | +/** | ||
272 | + * The @job could not be started, free it. | ||
273 | + * Called with job_mutex *not* held. | ||
274 | + */ | ||
275 | void job_early_fail(Job *job); | ||
276 | |||
277 | -/** Moves the @job from RUNNING to READY */ | ||
278 | +/** | ||
279 | + * Moves the @job from RUNNING to READY. | ||
280 | + * Called with job_mutex *not* held. | ||
281 | + */ | ||
282 | void job_transition_to_ready(Job *job); | ||
283 | |||
284 | /** Asynchronously complete the specified @job. */ | ||
285 | void job_complete(Job *job, Error **errp); | ||
286 | |||
287 | +/* | ||
288 | + * Same as job_complete(), but called with job lock held. | ||
289 | + * Might release the lock temporarily. | ||
290 | + */ | ||
291 | +void job_complete_locked(Job *job, Error **errp); | ||
292 | + | ||
293 | /** | ||
294 | * Asynchronously cancel the specified @job. If @force is true, the job should | ||
295 | * be cancelled immediately without waiting for a consistent state. | ||
296 | */ | ||
297 | void job_cancel(Job *job, bool force); | ||
298 | |||
299 | +/* Same as job_cancel(), but called with job lock held. */ | ||
300 | +void job_cancel_locked(Job *job, bool force); | ||
301 | + | ||
302 | /** | ||
303 | * Cancels the specified job like job_cancel(), but may refuse to do so if the | ||
304 | * operation isn't meaningful in the current state of the job. | ||
305 | */ | ||
306 | void job_user_cancel(Job *job, bool force, Error **errp); | ||
307 | |||
308 | +/* Same as job_user_cancel(), but called with job lock held. */ | ||
309 | +void job_user_cancel_locked(Job *job, bool force, Error **errp); | ||
310 | + | ||
311 | /** | ||
312 | * Synchronously cancel the @job. The completion callback is called | ||
313 | * before the function returns. If @force is false, the job may | ||
314 | @@ -XXX,XX +XXX,XX @@ void job_user_cancel(Job *job, bool force, Error **errp); | ||
315 | */ | ||
316 | int job_cancel_sync(Job *job, bool force); | ||
317 | |||
318 | -/** Synchronously force-cancels all jobs using job_cancel_sync(). */ | ||
319 | +/* Same as job_cancel_sync, but called with job lock held. */ | ||
320 | +int job_cancel_sync_locked(Job *job, bool force); | ||
321 | + | ||
322 | +/** | ||
323 | + * Synchronously force-cancels all jobs using job_cancel_sync_locked(). | ||
324 | + * | ||
325 | + * Called with job_lock *not* held. | ||
326 | + */ | ||
327 | void job_cancel_sync_all(void); | ||
328 | |||
329 | /** | ||
330 | @@ -XXX,XX +XXX,XX @@ void job_cancel_sync_all(void); | ||
331 | */ | ||
332 | int job_complete_sync(Job *job, Error **errp); | ||
333 | |||
334 | +/* Same as job_complete_sync, but called with job lock held. */ | ||
335 | +int job_complete_sync_locked(Job *job, Error **errp); | ||
336 | + | ||
337 | /** | ||
338 | * For a @job that has finished its work and is pending awaiting explicit | ||
339 | * acknowledgement to commit its work, this will commit that work. | ||
340 | @@ -XXX,XX +XXX,XX @@ int job_complete_sync(Job *job, Error **errp); | ||
341 | */ | ||
342 | void job_finalize(Job *job, Error **errp); | ||
343 | |||
344 | +/* Same as job_finalize(), but called with job lock held. */ | ||
345 | +void job_finalize_locked(Job *job, Error **errp); | ||
346 | + | ||
347 | /** | ||
348 | * Remove the concluded @job from the query list and resets the passed pointer | ||
349 | * to %NULL. Returns an error if the job is not actually concluded. | ||
350 | */ | ||
351 | void job_dismiss(Job **job, Error **errp); | ||
352 | |||
353 | +/* Same as job_dismiss(), but called with job lock held. */ | ||
354 | +void job_dismiss_locked(Job **job, Error **errp); | ||
355 | + | ||
356 | /** | ||
357 | * Synchronously finishes the given @job. If @finish is given, it is called to | ||
358 | * trigger completion or cancellation of the job. | ||
359 | @@ -XXX,XX +XXX,XX @@ void job_dismiss(Job **job, Error **errp); | ||
360 | * | ||
361 | * Callers must hold the AioContext lock of job->aio_context. | ||
362 | */ | ||
363 | -int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp); | ||
364 | +int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), | ||
365 | + Error **errp); | ||
366 | + | ||
367 | +/* | ||
368 | + * Same as job_finish_sync(), but called with job lock held. | ||
369 | + * Might release the lock temporarily. | ||
370 | + */ | ||
371 | +int job_finish_sync_locked(Job *job, void (*finish)(Job *, Error **errp), | ||
372 | + Error **errp); | ||
373 | |||
374 | #endif | ||
375 | diff --git a/job.c b/job.c | ||
376 | index XXXXXXX..XXXXXXX 100644 | ||
377 | --- a/job.c | ||
378 | +++ b/job.c | ||
379 | @@ -XXX,XX +XXX,XX @@ | ||
380 | */ | ||
381 | QemuMutex job_mutex; | ||
382 | |||
383 | +/* Protected by job_mutex */ | ||
384 | static QLIST_HEAD(, Job) jobs = QLIST_HEAD_INITIALIZER(jobs); | ||
385 | |||
386 | /* Job State Transition Table */ | ||
387 | @@ -XXX,XX +XXX,XX @@ JobTxn *job_txn_new(void) | ||
388 | return txn; | ||
389 | } | ||
390 | |||
391 | -static void job_txn_ref(JobTxn *txn) | ||
392 | +/* Called with job_mutex held. */ | ||
393 | +static void job_txn_ref_locked(JobTxn *txn) | ||
394 | { | ||
395 | txn->refcnt++; | ||
396 | } | ||
397 | |||
398 | -void job_txn_unref(JobTxn *txn) | ||
399 | +void job_txn_unref_locked(JobTxn *txn) | ||
400 | { | ||
401 | if (txn && --txn->refcnt == 0) { | ||
402 | g_free(txn); | ||
403 | } | ||
404 | } | ||
405 | |||
406 | +void job_txn_unref(JobTxn *txn) | ||
407 | +{ | ||
408 | + JOB_LOCK_GUARD(); | ||
409 | + job_txn_unref_locked(txn); | ||
410 | +} | ||
411 | + | ||
412 | /** | ||
413 | * @txn: The transaction (may be NULL) | ||
414 | * @job: Job to add to the transaction | ||
415 | @@ -XXX,XX +XXX,XX @@ void job_txn_unref(JobTxn *txn) | ||
416 | * the reference that is automatically grabbed here. | ||
417 | * | ||
418 | * If @txn is NULL, the function does nothing. | ||
419 | + * | ||
420 | + * Called with job_mutex held. | ||
421 | */ | ||
422 | -static void job_txn_add_job(JobTxn *txn, Job *job) | ||
423 | +static void job_txn_add_job_locked(JobTxn *txn, Job *job) | ||
424 | { | ||
425 | if (!txn) { | ||
426 | return; | ||
427 | @@ -XXX,XX +XXX,XX @@ static void job_txn_add_job(JobTxn *txn, Job *job) | ||
428 | job->txn = txn; | ||
429 | |||
430 | QLIST_INSERT_HEAD(&txn->jobs, job, txn_list); | ||
431 | - job_txn_ref(txn); | ||
432 | + job_txn_ref_locked(txn); | ||
433 | } | ||
434 | |||
435 | -static void job_txn_del_job(Job *job) | ||
436 | +/* Called with job_mutex held. */ | ||
437 | +static void job_txn_del_job_locked(Job *job) | ||
438 | { | ||
439 | if (job->txn) { | ||
440 | QLIST_REMOVE(job, txn_list); | ||
441 | - job_txn_unref(job->txn); | ||
442 | + job_txn_unref_locked(job->txn); | ||
443 | job->txn = NULL; | ||
444 | } | ||
445 | } | ||
446 | |||
447 | -static int job_txn_apply(Job *job, int fn(Job *)) | ||
448 | +/* Called with job_mutex held, but releases it temporarily. */ | ||
449 | +static int job_txn_apply_locked(Job *job, int fn(Job *)) | ||
450 | { | ||
451 | AioContext *inner_ctx; | ||
452 | Job *other_job, *next; | ||
453 | @@ -XXX,XX +XXX,XX @@ static int job_txn_apply(Job *job, int fn(Job *)) | ||
454 | * we need to release it here to avoid holding the lock twice - which would | ||
455 | * break AIO_WAIT_WHILE from within fn. | ||
456 | */ | ||
457 | - job_ref(job); | ||
458 | + job_ref_locked(job); | ||
459 | aio_context_release(job->aio_context); | ||
460 | |||
461 | QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) { | ||
462 | @@ -XXX,XX +XXX,XX @@ static int job_txn_apply(Job *job, int fn(Job *)) | ||
463 | * can't use a local variable to cache it. | ||
464 | */ | ||
465 | aio_context_acquire(job->aio_context); | ||
466 | - job_unref(job); | ||
467 | + job_unref_locked(job); | ||
468 | return rc; | ||
469 | } | ||
470 | |||
471 | @@ -XXX,XX +XXX,XX @@ bool job_is_internal(Job *job) | ||
472 | return (job->id == NULL); | ||
473 | } | ||
474 | |||
475 | -static void job_state_transition(Job *job, JobStatus s1) | ||
476 | +/* Called with job_mutex held. */ | ||
477 | +static void job_state_transition_locked(Job *job, JobStatus s1) | ||
478 | { | ||
479 | JobStatus s0 = job->status; | ||
480 | assert(s1 >= 0 && s1 < JOB_STATUS__MAX); | ||
481 | @@ -XXX,XX +XXX,XX @@ static void job_state_transition(Job *job, JobStatus s1) | ||
482 | } | ||
483 | } | ||
484 | |||
485 | -int job_apply_verb(Job *job, JobVerb verb, Error **errp) | ||
486 | +int job_apply_verb_locked(Job *job, JobVerb verb, Error **errp) | ||
487 | { | ||
488 | JobStatus s0 = job->status; | ||
489 | assert(verb >= 0 && verb < JOB_VERB__MAX); | ||
490 | @@ -XXX,XX +XXX,XX @@ int job_apply_verb(Job *job, JobVerb verb, Error **errp) | ||
491 | return -EPERM; | ||
492 | } | ||
493 | |||
494 | +int job_apply_verb(Job *job, JobVerb verb, Error **errp) | ||
495 | +{ | ||
496 | + JOB_LOCK_GUARD(); | ||
497 | + return job_apply_verb_locked(job, verb, errp); | ||
498 | +} | ||
499 | + | ||
500 | JobType job_type(const Job *job) | ||
501 | { | ||
502 | return job->driver->job_type; | ||
503 | @@ -XXX,XX +XXX,XX @@ const char *job_type_str(const Job *job) | ||
504 | return JobType_str(job_type(job)); | ||
505 | } | ||
506 | |||
507 | -bool job_is_cancelled(Job *job) | ||
508 | +bool job_is_cancelled_locked(Job *job) | ||
509 | { | ||
510 | /* force_cancel may be true only if cancelled is true, too */ | ||
511 | assert(job->cancelled || !job->force_cancel); | ||
512 | return job->force_cancel; | ||
513 | } | ||
514 | |||
515 | -bool job_cancel_requested(Job *job) | ||
516 | +bool job_is_cancelled(Job *job) | ||
517 | +{ | ||
518 | + JOB_LOCK_GUARD(); | ||
519 | + return job_is_cancelled_locked(job); | ||
520 | +} | ||
521 | + | ||
522 | +/* Called with job_mutex held. */ | ||
523 | +static bool job_cancel_requested_locked(Job *job) | ||
524 | { | ||
525 | return job->cancelled; | ||
526 | } | ||
527 | |||
528 | -bool job_is_ready(Job *job) | ||
529 | +bool job_cancel_requested(Job *job) | ||
530 | +{ | ||
531 | + JOB_LOCK_GUARD(); | ||
532 | + return job_cancel_requested_locked(job); | ||
533 | +} | ||
534 | + | ||
535 | +bool job_is_ready_locked(Job *job) | ||
536 | { | ||
537 | switch (job->status) { | ||
538 | case JOB_STATUS_UNDEFINED: | ||
539 | @@ -XXX,XX +XXX,XX @@ bool job_is_ready(Job *job) | ||
540 | return false; | ||
541 | } | ||
542 | |||
543 | -bool job_is_completed(Job *job) | ||
544 | +bool job_is_ready(Job *job) | ||
545 | +{ | ||
546 | + JOB_LOCK_GUARD(); | ||
547 | + return job_is_ready_locked(job); | ||
548 | +} | ||
549 | + | ||
550 | +bool job_is_completed_locked(Job *job) | ||
551 | { | ||
552 | switch (job->status) { | ||
553 | case JOB_STATUS_UNDEFINED: | ||
554 | @@ -XXX,XX +XXX,XX @@ bool job_is_completed(Job *job) | ||
555 | return false; | ||
556 | } | ||
557 | |||
558 | -static bool job_started(Job *job) | ||
559 | +bool job_is_completed(Job *job) | ||
560 | +{ | ||
561 | + JOB_LOCK_GUARD(); | ||
562 | + return job_is_completed_locked(job); | ||
563 | +} | ||
564 | + | ||
565 | +static bool job_started_locked(Job *job) | ||
566 | { | ||
567 | return job->co; | ||
568 | } | ||
569 | |||
570 | -static bool job_should_pause(Job *job) | ||
571 | +/* Called with job_mutex held. */ | ||
572 | +static bool job_should_pause_locked(Job *job) | ||
573 | { | ||
574 | return job->pause_count > 0; | ||
575 | } | ||
576 | |||
577 | -Job *job_next(Job *job) | ||
578 | +Job *job_next_locked(Job *job) | ||
579 | { | ||
580 | if (!job) { | ||
581 | return QLIST_FIRST(&jobs); | ||
582 | @@ -XXX,XX +XXX,XX @@ Job *job_next(Job *job) | ||
583 | return QLIST_NEXT(job, job_list); | ||
584 | } | ||
585 | |||
586 | -Job *job_get(const char *id) | ||
587 | +Job *job_next(Job *job) | ||
588 | +{ | ||
589 | + JOB_LOCK_GUARD(); | ||
590 | + return job_next_locked(job); | ||
591 | +} | ||
592 | + | ||
593 | +Job *job_get_locked(const char *id) | ||
594 | { | ||
595 | Job *job; | ||
596 | |||
597 | @@ -XXX,XX +XXX,XX @@ Job *job_get(const char *id) | ||
598 | return NULL; | ||
599 | } | ||
600 | |||
601 | +Job *job_get(const char *id) | ||
602 | +{ | ||
603 | + JOB_LOCK_GUARD(); | ||
604 | + return job_get_locked(id); | ||
605 | +} | ||
606 | + | ||
607 | +/* Called with job_mutex *not* held. */ | ||
608 | static void job_sleep_timer_cb(void *opaque) | ||
609 | { | ||
610 | Job *job = opaque; | ||
611 | @@ -XXX,XX +XXX,XX @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn, | ||
612 | { | ||
613 | Job *job; | ||
614 | |||
615 | + JOB_LOCK_GUARD(); | ||
616 | + | ||
617 | if (job_id) { | ||
618 | if (flags & JOB_INTERNAL) { | ||
619 | error_setg(errp, "Cannot specify job ID for internal job"); | ||
620 | @@ -XXX,XX +XXX,XX @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn, | ||
621 | error_setg(errp, "Invalid job ID '%s'", job_id); | ||
622 | return NULL; | ||
623 | } | ||
624 | - if (job_get(job_id)) { | ||
625 | + if (job_get_locked(job_id)) { | ||
626 | error_setg(errp, "Job ID '%s' already in use", job_id); | ||
627 | return NULL; | ||
628 | } | ||
629 | @@ -XXX,XX +XXX,XX @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn, | ||
630 | notifier_list_init(&job->on_ready); | ||
631 | notifier_list_init(&job->on_idle); | ||
632 | |||
633 | - job_state_transition(job, JOB_STATUS_CREATED); | ||
634 | + job_state_transition_locked(job, JOB_STATUS_CREATED); | ||
635 | aio_timer_init(qemu_get_aio_context(), &job->sleep_timer, | ||
636 | QEMU_CLOCK_REALTIME, SCALE_NS, | ||
637 | job_sleep_timer_cb, job); | ||
638 | @@ -XXX,XX +XXX,XX @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn, | ||
639 | * consolidating the job management logic */ | ||
640 | if (!txn) { | ||
641 | txn = job_txn_new(); | ||
642 | - job_txn_add_job(txn, job); | ||
643 | - job_txn_unref(txn); | ||
644 | + job_txn_add_job_locked(txn, job); | ||
645 | + job_txn_unref_locked(txn); | ||
646 | } else { | ||
647 | - job_txn_add_job(txn, job); | ||
648 | + job_txn_add_job_locked(txn, job); | ||
649 | } | ||
650 | |||
651 | return job; | ||
652 | } | ||
653 | |||
654 | -void job_ref(Job *job) | ||
655 | +void job_ref_locked(Job *job) | ||
656 | { | ||
657 | ++job->refcnt; | ||
658 | } | ||
659 | |||
660 | -void job_unref(Job *job) | ||
661 | +void job_ref(Job *job) | ||
662 | +{ | ||
663 | + JOB_LOCK_GUARD(); | ||
664 | + job_ref_locked(job); | ||
665 | +} | ||
666 | + | ||
667 | +void job_unref_locked(Job *job) | ||
668 | { | ||
669 | GLOBAL_STATE_CODE(); | ||
670 | |||
671 | @@ -XXX,XX +XXX,XX @@ void job_unref(Job *job) | ||
672 | assert(!job->txn); | ||
673 | |||
674 | if (job->driver->free) { | ||
675 | + job_unlock(); | ||
676 | job->driver->free(job); | ||
677 | + job_lock(); | ||
678 | } | ||
679 | |||
680 | QLIST_REMOVE(job, job_list); | ||
681 | @@ -XXX,XX +XXX,XX @@ void job_unref(Job *job) | ||
682 | } | ||
683 | } | ||
684 | |||
685 | +void job_unref(Job *job) | ||
686 | +{ | ||
687 | + JOB_LOCK_GUARD(); | ||
688 | + job_unref_locked(job); | ||
689 | +} | ||
690 | + | ||
691 | void job_progress_update(Job *job, uint64_t done) | ||
692 | { | ||
693 | progress_work_done(&job->progress, done); | ||
694 | @@ -XXX,XX +XXX,XX @@ void job_progress_increase_remaining(Job *job, uint64_t delta) | ||
695 | |||
696 | /** | ||
697 | * To be called when a cancelled job is finalised. | ||
698 | + * Called with job_mutex held. | ||
699 | */ | ||
700 | -static void job_event_cancelled(Job *job) | ||
701 | +static void job_event_cancelled_locked(Job *job) | ||
702 | { | ||
703 | notifier_list_notify(&job->on_finalize_cancelled, job); | ||
704 | } | ||
705 | |||
706 | /** | ||
707 | * To be called when a successfully completed job is finalised. | ||
708 | + * Called with job_mutex held. | ||
709 | */ | ||
710 | -static void job_event_completed(Job *job) | ||
711 | +static void job_event_completed_locked(Job *job) | ||
712 | { | ||
713 | notifier_list_notify(&job->on_finalize_completed, job); | ||
714 | } | ||
715 | |||
716 | -static void job_event_pending(Job *job) | ||
717 | +/* Called with job_mutex held. */ | ||
718 | +static void job_event_pending_locked(Job *job) | ||
719 | { | ||
720 | notifier_list_notify(&job->on_pending, job); | ||
721 | } | ||
722 | |||
723 | -static void job_event_ready(Job *job) | ||
724 | +/* Called with job_mutex held. */ | ||
725 | +static void job_event_ready_locked(Job *job) | ||
726 | { | ||
727 | notifier_list_notify(&job->on_ready, job); | ||
728 | } | ||
729 | |||
730 | -static void job_event_idle(Job *job) | ||
731 | +/* Called with job_mutex held. */ | ||
732 | +static void job_event_idle_locked(Job *job) | ||
733 | { | ||
734 | notifier_list_notify(&job->on_idle, job); | ||
735 | } | ||
736 | |||
737 | -void job_enter_cond(Job *job, bool(*fn)(Job *job)) | ||
738 | +void job_enter_cond_locked(Job *job, bool(*fn)(Job *job)) | ||
739 | { | ||
740 | - if (!job_started(job)) { | ||
741 | + if (!job_started_locked(job)) { | ||
742 | return; | ||
743 | } | ||
744 | if (job->deferred_to_main_loop) { | ||
745 | @@ -XXX,XX +XXX,XX @@ void job_enter_cond(Job *job, bool(*fn)(Job *job)) | ||
746 | timer_del(&job->sleep_timer); | ||
747 | job->busy = true; | ||
748 | real_job_unlock(); | ||
749 | + job_unlock(); | ||
750 | aio_co_enter(job->aio_context, job->co); | ||
751 | + job_lock(); | ||
752 | +} | ||
753 | + | ||
754 | +void job_enter_cond(Job *job, bool(*fn)(Job *job)) | ||
755 | +{ | ||
756 | + JOB_LOCK_GUARD(); | ||
757 | + job_enter_cond_locked(job, fn); | ||
758 | } | ||
759 | |||
760 | void job_enter(Job *job) | ||
761 | { | ||
762 | - job_enter_cond(job, NULL); | ||
763 | + JOB_LOCK_GUARD(); | ||
764 | + job_enter_cond_locked(job, NULL); | ||
765 | } | ||
766 | |||
767 | /* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds. | ||
768 | @@ -XXX,XX +XXX,XX @@ void job_enter(Job *job) | ||
769 | * is allowed and cancels the timer. | ||
770 | * | ||
771 | * If @ns is (uint64_t) -1, no timer is scheduled and job_enter() must be | ||
772 | - * called explicitly. */ | ||
773 | -static void coroutine_fn job_do_yield(Job *job, uint64_t ns) | ||
774 | + * called explicitly. | ||
775 | + * | ||
776 | + * Called with job_mutex held, but releases it temporarily. | ||
777 | + */ | ||
778 | +static void coroutine_fn job_do_yield_locked(Job *job, uint64_t ns) | ||
779 | { | ||
780 | real_job_lock(); | ||
781 | if (ns != -1) { | ||
782 | timer_mod(&job->sleep_timer, ns); | ||
783 | } | ||
784 | job->busy = false; | ||
785 | - job_event_idle(job); | ||
786 | + job_event_idle_locked(job); | ||
787 | real_job_unlock(); | ||
788 | + job_unlock(); | ||
789 | qemu_coroutine_yield(); | ||
790 | + job_lock(); | ||
791 | |||
792 | /* Set by job_enter_cond() before re-entering the coroutine. */ | ||
793 | assert(job->busy); | ||
794 | } | ||
795 | |||
796 | -void coroutine_fn job_pause_point(Job *job) | ||
797 | +/* Called with job_mutex held, but releases it temporarily. */ | ||
798 | +static void coroutine_fn job_pause_point_locked(Job *job) | ||
799 | { | ||
800 | - assert(job && job_started(job)); | ||
801 | + assert(job && job_started_locked(job)); | ||
802 | |||
803 | - if (!job_should_pause(job)) { | ||
804 | + if (!job_should_pause_locked(job)) { | ||
805 | return; | ||
806 | } | ||
807 | - if (job_is_cancelled(job)) { | ||
808 | + if (job_is_cancelled_locked(job)) { | ||
809 | return; | ||
810 | } | ||
811 | |||
812 | if (job->driver->pause) { | ||
813 | + job_unlock(); | ||
814 | job->driver->pause(job); | ||
815 | + job_lock(); | ||
816 | } | ||
817 | |||
818 | - if (job_should_pause(job) && !job_is_cancelled(job)) { | ||
819 | + if (job_should_pause_locked(job) && !job_is_cancelled_locked(job)) { | ||
820 | JobStatus status = job->status; | ||
821 | - job_state_transition(job, status == JOB_STATUS_READY | ||
822 | - ? JOB_STATUS_STANDBY | ||
823 | - : JOB_STATUS_PAUSED); | ||
824 | + job_state_transition_locked(job, status == JOB_STATUS_READY | ||
825 | + ? JOB_STATUS_STANDBY | ||
826 | + : JOB_STATUS_PAUSED); | ||
827 | job->paused = true; | ||
828 | - job_do_yield(job, -1); | ||
829 | + job_do_yield_locked(job, -1); | ||
830 | job->paused = false; | ||
831 | - job_state_transition(job, status); | ||
832 | + job_state_transition_locked(job, status); | ||
833 | } | ||
834 | |||
835 | if (job->driver->resume) { | ||
836 | + job_unlock(); | ||
837 | job->driver->resume(job); | ||
838 | + job_lock(); | ||
839 | } | ||
840 | } | ||
841 | |||
842 | -void coroutine_fn job_yield(Job *job) | ||
843 | +void coroutine_fn job_pause_point(Job *job) | ||
844 | +{ | ||
845 | + JOB_LOCK_GUARD(); | ||
846 | + job_pause_point_locked(job); | ||
847 | +} | ||
848 | + | ||
849 | +static void coroutine_fn job_yield_locked(Job *job) | ||
850 | { | ||
851 | assert(job->busy); | ||
852 | |||
853 | /* Check cancellation *before* setting busy = false, too! */ | ||
854 | - if (job_is_cancelled(job)) { | ||
855 | + if (job_is_cancelled_locked(job)) { | ||
856 | return; | ||
857 | } | ||
858 | |||
859 | - if (!job_should_pause(job)) { | ||
860 | - job_do_yield(job, -1); | ||
861 | + if (!job_should_pause_locked(job)) { | ||
862 | + job_do_yield_locked(job, -1); | ||
863 | } | ||
864 | |||
865 | - job_pause_point(job); | ||
866 | + job_pause_point_locked(job); | ||
867 | +} | ||
868 | + | ||
869 | +void coroutine_fn job_yield(Job *job) | ||
870 | +{ | ||
871 | + JOB_LOCK_GUARD(); | ||
872 | + job_yield_locked(job); | ||
873 | } | ||
874 | |||
875 | void coroutine_fn job_sleep_ns(Job *job, int64_t ns) | ||
876 | { | ||
877 | + JOB_LOCK_GUARD(); | ||
878 | assert(job->busy); | ||
879 | |||
880 | /* Check cancellation *before* setting busy = false, too! */ | ||
881 | - if (job_is_cancelled(job)) { | ||
882 | + if (job_is_cancelled_locked(job)) { | ||
883 | return; | ||
884 | } | ||
885 | |||
886 | - if (!job_should_pause(job)) { | ||
887 | - job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns); | ||
888 | + if (!job_should_pause_locked(job)) { | ||
889 | + job_do_yield_locked(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns); | ||
890 | } | ||
891 | |||
892 | - job_pause_point(job); | ||
893 | + job_pause_point_locked(job); | ||
894 | } | ||
895 | |||
896 | -/* Assumes the block_job_mutex is held */ | ||
897 | -static bool job_timer_not_pending(Job *job) | ||
898 | +/* Assumes the job_mutex is held */ | ||
899 | +static bool job_timer_not_pending_locked(Job *job) | ||
900 | { | ||
901 | return !timer_pending(&job->sleep_timer); | ||
902 | } | ||
903 | |||
904 | -void job_pause(Job *job) | ||
905 | +void job_pause_locked(Job *job) | ||
906 | { | ||
907 | job->pause_count++; | ||
908 | if (!job->paused) { | ||
909 | - job_enter(job); | ||
910 | + job_enter_cond_locked(job, NULL); | ||
911 | } | ||
912 | } | ||
913 | |||
914 | -void job_resume(Job *job) | ||
915 | +void job_pause(Job *job) | ||
916 | +{ | ||
917 | + JOB_LOCK_GUARD(); | ||
918 | + job_pause_locked(job); | ||
919 | +} | ||
920 | + | ||
921 | +void job_resume_locked(Job *job) | ||
922 | { | ||
923 | assert(job->pause_count > 0); | ||
924 | job->pause_count--; | ||
925 | @@ -XXX,XX +XXX,XX @@ void job_resume(Job *job) | ||
926 | } | ||
927 | |||
928 | /* kick only if no timer is pending */ | ||
929 | - job_enter_cond(job, job_timer_not_pending); | ||
930 | + job_enter_cond_locked(job, job_timer_not_pending_locked); | ||
931 | } | ||
932 | |||
933 | -void job_user_pause(Job *job, Error **errp) | ||
934 | +void job_resume(Job *job) | ||
935 | { | ||
936 | - if (job_apply_verb(job, JOB_VERB_PAUSE, errp)) { | ||
937 | + JOB_LOCK_GUARD(); | ||
938 | + job_resume_locked(job); | ||
939 | +} | ||
940 | + | ||
941 | +void job_user_pause_locked(Job *job, Error **errp) | ||
942 | +{ | ||
943 | + if (job_apply_verb_locked(job, JOB_VERB_PAUSE, errp)) { | ||
944 | return; | ||
945 | } | ||
946 | if (job->user_paused) { | ||
947 | @@ -XXX,XX +XXX,XX @@ void job_user_pause(Job *job, Error **errp) | ||
948 | return; | ||
949 | } | ||
950 | job->user_paused = true; | ||
951 | - job_pause(job); | ||
952 | + job_pause_locked(job); | ||
953 | } | ||
954 | |||
955 | -bool job_user_paused(Job *job) | ||
956 | +void job_user_pause(Job *job, Error **errp) | ||
957 | +{ | ||
958 | + JOB_LOCK_GUARD(); | ||
959 | + job_user_pause_locked(job, errp); | ||
960 | +} | ||
961 | + | ||
962 | +bool job_user_paused_locked(Job *job) | ||
963 | { | ||
964 | return job->user_paused; | ||
965 | } | ||
966 | |||
967 | -void job_user_resume(Job *job, Error **errp) | ||
968 | +bool job_user_paused(Job *job) | ||
969 | +{ | ||
970 | + JOB_LOCK_GUARD(); | ||
971 | + return job_user_paused_locked(job); | ||
972 | +} | ||
973 | + | ||
974 | +void job_user_resume_locked(Job *job, Error **errp) | ||
975 | { | ||
976 | assert(job); | ||
977 | GLOBAL_STATE_CODE(); | ||
978 | @@ -XXX,XX +XXX,XX @@ void job_user_resume(Job *job, Error **errp) | ||
979 | error_setg(errp, "Can't resume a job that was not paused"); | ||
980 | return; | ||
981 | } | ||
982 | - if (job_apply_verb(job, JOB_VERB_RESUME, errp)) { | ||
983 | + if (job_apply_verb_locked(job, JOB_VERB_RESUME, errp)) { | ||
984 | return; | ||
985 | } | ||
986 | if (job->driver->user_resume) { | ||
987 | + job_unlock(); | ||
988 | job->driver->user_resume(job); | ||
989 | + job_lock(); | ||
990 | } | ||
991 | job->user_paused = false; | ||
992 | - job_resume(job); | ||
993 | + job_resume_locked(job); | ||
994 | } | ||
995 | |||
996 | -static void job_do_dismiss(Job *job) | ||
997 | +void job_user_resume(Job *job, Error **errp) | ||
998 | +{ | ||
999 | + JOB_LOCK_GUARD(); | ||
1000 | + job_user_resume_locked(job, errp); | ||
1001 | +} | ||
1002 | + | ||
1003 | +/* Called with job_mutex held, but releases it temporarily. */ | ||
1004 | +static void job_do_dismiss_locked(Job *job) | ||
1005 | { | ||
1006 | assert(job); | ||
1007 | job->busy = false; | ||
1008 | job->paused = false; | ||
1009 | job->deferred_to_main_loop = true; | ||
1010 | |||
1011 | - job_txn_del_job(job); | ||
1012 | + job_txn_del_job_locked(job); | ||
1013 | |||
1014 | - job_state_transition(job, JOB_STATUS_NULL); | ||
1015 | - job_unref(job); | ||
1016 | + job_state_transition_locked(job, JOB_STATUS_NULL); | ||
1017 | + job_unref_locked(job); | ||
1018 | } | ||
1019 | |||
1020 | -void job_dismiss(Job **jobptr, Error **errp) | ||
1021 | +void job_dismiss_locked(Job **jobptr, Error **errp) | ||
1022 | { | ||
1023 | Job *job = *jobptr; | ||
1024 | /* similarly to _complete, this is QMP-interface only. */ | ||
1025 | assert(job->id); | ||
1026 | - if (job_apply_verb(job, JOB_VERB_DISMISS, errp)) { | ||
1027 | + if (job_apply_verb_locked(job, JOB_VERB_DISMISS, errp)) { | ||
1028 | return; | ||
1029 | } | ||
1030 | |||
1031 | - job_do_dismiss(job); | ||
1032 | + job_do_dismiss_locked(job); | ||
1033 | *jobptr = NULL; | ||
1034 | } | ||
1035 | |||
1036 | +void job_dismiss(Job **jobptr, Error **errp) | ||
1037 | +{ | ||
1038 | + JOB_LOCK_GUARD(); | ||
1039 | + job_dismiss_locked(jobptr, errp); | ||
1040 | +} | ||
1041 | + | ||
1042 | void job_early_fail(Job *job) | ||
1043 | { | ||
1044 | + JOB_LOCK_GUARD(); | ||
1045 | assert(job->status == JOB_STATUS_CREATED); | ||
1046 | - job_do_dismiss(job); | ||
1047 | + job_do_dismiss_locked(job); | ||
1048 | } | ||
1049 | |||
1050 | -static void job_conclude(Job *job) | ||
1051 | +/* Called with job_mutex held. */ | ||
1052 | +static void job_conclude_locked(Job *job) | ||
1053 | { | ||
1054 | - job_state_transition(job, JOB_STATUS_CONCLUDED); | ||
1055 | - if (job->auto_dismiss || !job_started(job)) { | ||
1056 | - job_do_dismiss(job); | ||
1057 | + job_state_transition_locked(job, JOB_STATUS_CONCLUDED); | ||
1058 | + if (job->auto_dismiss || !job_started_locked(job)) { | ||
1059 | + job_do_dismiss_locked(job); | ||
1060 | } | ||
1061 | } | ||
1062 | |||
1063 | -static void job_update_rc(Job *job) | ||
1064 | +/* Called with job_mutex held. */ | ||
1065 | +static void job_update_rc_locked(Job *job) | ||
1066 | { | ||
1067 | - if (!job->ret && job_is_cancelled(job)) { | ||
1068 | + if (!job->ret && job_is_cancelled_locked(job)) { | ||
1069 | job->ret = -ECANCELED; | ||
1070 | } | ||
1071 | if (job->ret) { | ||
1072 | if (!job->err) { | ||
1073 | error_setg(&job->err, "%s", strerror(-job->ret)); | ||
1074 | } | ||
1075 | - job_state_transition(job, JOB_STATUS_ABORTING); | ||
1076 | + job_state_transition_locked(job, JOB_STATUS_ABORTING); | ||
1077 | } | ||
1078 | } | ||
1079 | |||
1080 | @@ -XXX,XX +XXX,XX @@ static void job_clean(Job *job) | ||
1081 | } | ||
1082 | } | ||
1083 | |||
1084 | -static int job_finalize_single(Job *job) | ||
1085 | +/* Called with job_mutex held, but releases it temporarily */ | ||
1086 | +static int job_finalize_single_locked(Job *job) | ||
1087 | { | ||
1088 | - assert(job_is_completed(job)); | ||
1089 | + int job_ret; | ||
1090 | + | ||
1091 | + assert(job_is_completed_locked(job)); | ||
1092 | |||
1093 | /* Ensure abort is called for late-transactional failures */ | ||
1094 | - job_update_rc(job); | ||
1095 | + job_update_rc_locked(job); | ||
1096 | + | ||
1097 | + job_ret = job->ret; | ||
1098 | + job_unlock(); | ||
1099 | |||
1100 | - if (!job->ret) { | ||
1101 | + if (!job_ret) { | ||
1102 | job_commit(job); | ||
1103 | } else { | ||
1104 | job_abort(job); | ||
1105 | } | ||
1106 | job_clean(job); | ||
1107 | |||
1108 | + job_lock(); | ||
1109 | + | ||
1110 | if (job->cb) { | ||
1111 | - job->cb(job->opaque, job->ret); | ||
1112 | + job_ret = job->ret; | ||
1113 | + job_unlock(); | ||
1114 | + job->cb(job->opaque, job_ret); | ||
1115 | + job_lock(); | ||
1116 | } | ||
1117 | |||
1118 | /* Emit events only if we actually started */ | ||
1119 | - if (job_started(job)) { | ||
1120 | - if (job_is_cancelled(job)) { | ||
1121 | - job_event_cancelled(job); | ||
1122 | + if (job_started_locked(job)) { | ||
1123 | + if (job_is_cancelled_locked(job)) { | ||
1124 | + job_event_cancelled_locked(job); | ||
1125 | } else { | ||
1126 | - job_event_completed(job); | ||
1127 | + job_event_completed_locked(job); | ||
1128 | } | ||
1129 | } | ||
1130 | |||
1131 | - job_txn_del_job(job); | ||
1132 | - job_conclude(job); | ||
1133 | + job_txn_del_job_locked(job); | ||
1134 | + job_conclude_locked(job); | ||
1135 | return 0; | ||
1136 | } | ||
1137 | |||
1138 | -static void job_cancel_async(Job *job, bool force) | ||
1139 | +/* Called with job_mutex held, but releases it temporarily */ | ||
1140 | +static void job_cancel_async_locked(Job *job, bool force) | ||
1141 | { | ||
1142 | GLOBAL_STATE_CODE(); | ||
1143 | if (job->driver->cancel) { | ||
1144 | + job_unlock(); | ||
1145 | force = job->driver->cancel(job, force); | ||
1146 | + job_lock(); | ||
1147 | } else { | ||
1148 | /* No .cancel() means the job will behave as if force-cancelled */ | ||
1149 | force = true; | ||
1150 | @@ -XXX,XX +XXX,XX @@ static void job_cancel_async(Job *job, bool force) | ||
1151 | if (job->user_paused) { | ||
1152 | /* Do not call job_enter here, the caller will handle it. */ | ||
1153 | if (job->driver->user_resume) { | ||
1154 | + job_unlock(); | ||
1155 | job->driver->user_resume(job); | ||
1156 | + job_lock(); | ||
1157 | } | ||
1158 | job->user_paused = false; | ||
1159 | assert(job->pause_count > 0); | ||
1160 | @@ -XXX,XX +XXX,XX @@ static void job_cancel_async(Job *job, bool force) | ||
1161 | } | ||
1162 | } | ||
1163 | |||
1164 | -static void job_completed_txn_abort(Job *job) | ||
1165 | +/* Called with job_mutex held, but releases it temporarily. */ | ||
1166 | +static void job_completed_txn_abort_locked(Job *job) | ||
1167 | { | ||
1168 | AioContext *ctx; | ||
1169 | JobTxn *txn = job->txn; | ||
1170 | @@ -XXX,XX +XXX,XX @@ static void job_completed_txn_abort(Job *job) | ||
1171 | return; | ||
1172 | } | ||
1173 | txn->aborting = true; | ||
1174 | - job_txn_ref(txn); | ||
1175 | + job_txn_ref_locked(txn); | ||
1176 | |||
1177 | /* | ||
1178 | * We can only hold the single job's AioContext lock while calling | ||
1179 | @@ -XXX,XX +XXX,XX @@ static void job_completed_txn_abort(Job *job) | ||
1180 | * calls of AIO_WAIT_WHILE(), which could deadlock otherwise. | ||
1181 | * Note that the job's AioContext may change when it is finalized. | ||
1182 | */ | ||
1183 | - job_ref(job); | ||
1184 | + job_ref_locked(job); | ||
1185 | aio_context_release(job->aio_context); | ||
1186 | |||
1187 | /* Other jobs are effectively cancelled by us, set the status for | ||
1188 | @@ -XXX,XX +XXX,XX @@ static void job_completed_txn_abort(Job *job) | ||
1189 | * Therefore, pass force=true to terminate all other jobs as quickly | ||
1190 | * as possible. | ||
1191 | */ | ||
1192 | - job_cancel_async(other_job, true); | ||
1193 | + job_cancel_async_locked(other_job, true); | ||
1194 | aio_context_release(ctx); | ||
1195 | } | ||
1196 | } | ||
1197 | @@ -XXX,XX +XXX,XX @@ static void job_completed_txn_abort(Job *job) | ||
1198 | */ | ||
1199 | ctx = other_job->aio_context; | ||
1200 | aio_context_acquire(ctx); | ||
1201 | - if (!job_is_completed(other_job)) { | ||
1202 | - assert(job_cancel_requested(other_job)); | ||
1203 | - job_finish_sync(other_job, NULL, NULL); | ||
1204 | + if (!job_is_completed_locked(other_job)) { | ||
1205 | + assert(job_cancel_requested_locked(other_job)); | ||
1206 | + job_finish_sync_locked(other_job, NULL, NULL); | ||
1207 | } | ||
1208 | - job_finalize_single(other_job); | ||
1209 | + job_finalize_single_locked(other_job); | ||
1210 | aio_context_release(ctx); | ||
1211 | } | ||
1212 | |||
1213 | @@ -XXX,XX +XXX,XX @@ static void job_completed_txn_abort(Job *job) | ||
1214 | * even if the job went away during job_finalize_single(). | ||
1215 | */ | ||
1216 | aio_context_acquire(job->aio_context); | ||
1217 | - job_unref(job); | ||
1218 | + job_unref_locked(job); | ||
1219 | |||
1220 | - job_txn_unref(txn); | ||
1221 | + job_txn_unref_locked(txn); | ||
1222 | } | ||
1223 | |||
1224 | -static int job_prepare(Job *job) | ||
1225 | +/* Called with job_mutex held, but releases it temporarily */ | ||
1226 | +static int job_prepare_locked(Job *job) | ||
1227 | { | ||
1228 | + int ret; | ||
1229 | + | ||
1230 | GLOBAL_STATE_CODE(); | ||
1231 | if (job->ret == 0 && job->driver->prepare) { | ||
1232 | - job->ret = job->driver->prepare(job); | ||
1233 | - job_update_rc(job); | ||
1234 | + job_unlock(); | ||
1235 | + ret = job->driver->prepare(job); | ||
1236 | + job_lock(); | ||
1237 | + job->ret = ret; | ||
1238 | + job_update_rc_locked(job); | ||
1239 | } | ||
1240 | return job->ret; | ||
1241 | } | ||
1242 | |||
1243 | -static int job_needs_finalize(Job *job) | ||
1244 | +/* Called with job_mutex held */ | ||
1245 | +static int job_needs_finalize_locked(Job *job) | ||
1246 | { | ||
1247 | return !job->auto_finalize; | ||
1248 | } | ||
1249 | |||
1250 | -static void job_do_finalize(Job *job) | ||
1251 | +/* Called with job_mutex held */ | ||
1252 | +static void job_do_finalize_locked(Job *job) | ||
1253 | { | ||
1254 | int rc; | ||
1255 | assert(job && job->txn); | ||
1256 | |||
1257 | /* prepare the transaction to complete */ | ||
1258 | - rc = job_txn_apply(job, job_prepare); | ||
1259 | + rc = job_txn_apply_locked(job, job_prepare_locked); | ||
1260 | if (rc) { | ||
1261 | - job_completed_txn_abort(job); | ||
1262 | + job_completed_txn_abort_locked(job); | ||
1263 | } else { | ||
1264 | - job_txn_apply(job, job_finalize_single); | ||
1265 | + job_txn_apply_locked(job, job_finalize_single_locked); | ||
1266 | } | ||
1267 | } | ||
1268 | |||
1269 | -void job_finalize(Job *job, Error **errp) | ||
1270 | +void job_finalize_locked(Job *job, Error **errp) | ||
1271 | { | ||
1272 | assert(job && job->id); | ||
1273 | - if (job_apply_verb(job, JOB_VERB_FINALIZE, errp)) { | ||
1274 | + if (job_apply_verb_locked(job, JOB_VERB_FINALIZE, errp)) { | ||
1275 | return; | ||
1276 | } | ||
1277 | - job_do_finalize(job); | ||
1278 | + job_do_finalize_locked(job); | ||
1279 | } | ||
1280 | |||
1281 | -static int job_transition_to_pending(Job *job) | ||
1282 | +void job_finalize(Job *job, Error **errp) | ||
1283 | { | ||
1284 | - job_state_transition(job, JOB_STATUS_PENDING); | ||
1285 | + JOB_LOCK_GUARD(); | ||
1286 | + job_finalize_locked(job, errp); | ||
1287 | +} | ||
1288 | + | ||
1289 | +/* Called with job_mutex held. */ | ||
1290 | +static int job_transition_to_pending_locked(Job *job) | ||
1291 | +{ | ||
1292 | + job_state_transition_locked(job, JOB_STATUS_PENDING); | ||
1293 | if (!job->auto_finalize) { | ||
1294 | - job_event_pending(job); | ||
1295 | + job_event_pending_locked(job); | ||
1296 | } | ||
1297 | return 0; | ||
1298 | } | ||
1299 | |||
1300 | void job_transition_to_ready(Job *job) | ||
1301 | { | ||
1302 | - job_state_transition(job, JOB_STATUS_READY); | ||
1303 | - job_event_ready(job); | ||
1304 | + JOB_LOCK_GUARD(); | ||
1305 | + job_state_transition_locked(job, JOB_STATUS_READY); | ||
1306 | + job_event_ready_locked(job); | ||
1307 | } | ||
1308 | |||
1309 | -static void job_completed_txn_success(Job *job) | ||
1310 | +/* Called with job_mutex held. */ | ||
1311 | +static void job_completed_txn_success_locked(Job *job) | ||
1312 | { | ||
1313 | JobTxn *txn = job->txn; | ||
1314 | Job *other_job; | ||
1315 | |||
1316 | - job_state_transition(job, JOB_STATUS_WAITING); | ||
1317 | + job_state_transition_locked(job, JOB_STATUS_WAITING); | ||
1318 | |||
1319 | /* | ||
1320 | * Successful completion, see if there are other running jobs in this | ||
1321 | * txn. | ||
1322 | */ | ||
1323 | QLIST_FOREACH(other_job, &txn->jobs, txn_list) { | ||
1324 | - if (!job_is_completed(other_job)) { | ||
1325 | + if (!job_is_completed_locked(other_job)) { | ||
1326 | return; | ||
1327 | } | ||
1328 | assert(other_job->ret == 0); | ||
1329 | } | ||
1330 | |||
1331 | - job_txn_apply(job, job_transition_to_pending); | ||
1332 | + job_txn_apply_locked(job, job_transition_to_pending_locked); | ||
1333 | |||
1334 | /* If no jobs need manual finalization, automatically do so */ | ||
1335 | - if (job_txn_apply(job, job_needs_finalize) == 0) { | ||
1336 | - job_do_finalize(job); | ||
1337 | + if (job_txn_apply_locked(job, job_needs_finalize_locked) == 0) { | ||
1338 | + job_do_finalize_locked(job); | ||
1339 | } | ||
1340 | } | ||
1341 | |||
1342 | -static void job_completed(Job *job) | ||
1343 | +/* Called with job_mutex held. */ | ||
1344 | +static void job_completed_locked(Job *job) | ||
1345 | { | ||
1346 | - assert(job && job->txn && !job_is_completed(job)); | ||
1347 | + assert(job && job->txn && !job_is_completed_locked(job)); | ||
1348 | |||
1349 | - job_update_rc(job); | ||
1350 | + job_update_rc_locked(job); | ||
1351 | trace_job_completed(job, job->ret); | ||
1352 | if (job->ret) { | ||
1353 | - job_completed_txn_abort(job); | ||
1354 | + job_completed_txn_abort_locked(job); | ||
1355 | } else { | ||
1356 | - job_completed_txn_success(job); | ||
1357 | + job_completed_txn_success_locked(job); | ||
1358 | } | ||
1359 | } | ||
1360 | |||
1361 | -/** Useful only as a type shim for aio_bh_schedule_oneshot. */ | ||
1362 | +/** | ||
1363 | + * Useful only as a type shim for aio_bh_schedule_oneshot. | ||
1364 | + * Called with job_mutex *not* held. | ||
1365 | + */ | ||
1366 | static void job_exit(void *opaque) | ||
1367 | { | ||
1368 | Job *job = (Job *)opaque; | ||
1369 | AioContext *ctx; | ||
1370 | + JOB_LOCK_GUARD(); | ||
1371 | |||
1372 | - job_ref(job); | ||
1373 | + job_ref_locked(job); | ||
1374 | aio_context_acquire(job->aio_context); | ||
1375 | |||
1376 | /* This is a lie, we're not quiescent, but still doing the completion | ||
1377 | @@ -XXX,XX +XXX,XX @@ static void job_exit(void *opaque) | ||
1378 | * drain block nodes, and if .drained_poll still returned true, we would | ||
1379 | * deadlock. */ | ||
1380 | job->busy = false; | ||
1381 | - job_event_idle(job); | ||
1382 | + job_event_idle_locked(job); | ||
1383 | |||
1384 | - job_completed(job); | ||
1385 | + job_completed_locked(job); | ||
1386 | |||
1387 | /* | ||
1388 | * Note that calling job_completed can move the job to a different | ||
1389 | @@ -XXX,XX +XXX,XX @@ static void job_exit(void *opaque) | ||
1390 | * the job underneath us. | ||
1391 | */ | ||
1392 | ctx = job->aio_context; | ||
1393 | - job_unref(job); | ||
1394 | + job_unref_locked(job); | ||
1395 | aio_context_release(ctx); | ||
1396 | } | ||
1397 | |||
1398 | @@ -XXX,XX +XXX,XX @@ static void job_exit(void *opaque) | ||
1399 | static void coroutine_fn job_co_entry(void *opaque) | ||
1400 | { | ||
1401 | Job *job = opaque; | ||
1402 | + int ret; | ||
1403 | |||
1404 | assert(job && job->driver && job->driver->run); | ||
1405 | - assert(job->aio_context == qemu_get_current_aio_context()); | ||
1406 | - job_pause_point(job); | ||
1407 | - job->ret = job->driver->run(job, &job->err); | ||
1408 | - job->deferred_to_main_loop = true; | ||
1409 | - job->busy = true; | ||
1410 | + WITH_JOB_LOCK_GUARD() { | ||
1411 | + assert(job->aio_context == qemu_get_current_aio_context()); | ||
1412 | + job_pause_point_locked(job); | ||
1413 | + } | ||
1414 | + ret = job->driver->run(job, &job->err); | ||
1415 | + WITH_JOB_LOCK_GUARD() { | ||
1416 | + job->ret = ret; | ||
1417 | + job->deferred_to_main_loop = true; | ||
1418 | + job->busy = true; | ||
1419 | + } | ||
1420 | aio_bh_schedule_oneshot(qemu_get_aio_context(), job_exit, job); | ||
1421 | } | ||
1422 | |||
1423 | void job_start(Job *job) | ||
1424 | { | ||
1425 | - assert(job && !job_started(job) && job->paused && | ||
1426 | - job->driver && job->driver->run); | ||
1427 | - job->co = qemu_coroutine_create(job_co_entry, job); | ||
1428 | - job->pause_count--; | ||
1429 | - job->busy = true; | ||
1430 | - job->paused = false; | ||
1431 | - job_state_transition(job, JOB_STATUS_RUNNING); | ||
1432 | + assert(qemu_in_main_thread()); | ||
1433 | + | ||
1434 | + WITH_JOB_LOCK_GUARD() { | ||
1435 | + assert(job && !job_started_locked(job) && job->paused && | ||
1436 | + job->driver && job->driver->run); | ||
1437 | + job->co = qemu_coroutine_create(job_co_entry, job); | ||
1438 | + job->pause_count--; | ||
1439 | + job->busy = true; | ||
1440 | + job->paused = false; | ||
1441 | + job_state_transition_locked(job, JOB_STATUS_RUNNING); | ||
1442 | + } | ||
1443 | aio_co_enter(job->aio_context, job->co); | ||
1444 | } | ||
1445 | |||
1446 | -void job_cancel(Job *job, bool force) | ||
1447 | +void job_cancel_locked(Job *job, bool force) | ||
1448 | { | ||
1449 | if (job->status == JOB_STATUS_CONCLUDED) { | ||
1450 | - job_do_dismiss(job); | ||
1451 | + job_do_dismiss_locked(job); | ||
1452 | return; | ||
1453 | } | ||
1454 | - job_cancel_async(job, force); | ||
1455 | - if (!job_started(job)) { | ||
1456 | - job_completed(job); | ||
1457 | + job_cancel_async_locked(job, force); | ||
1458 | + if (!job_started_locked(job)) { | ||
1459 | + job_completed_locked(job); | ||
1460 | } else if (job->deferred_to_main_loop) { | ||
1461 | /* | ||
1462 | * job_cancel_async() ignores soft-cancel requests for jobs | ||
1463 | @@ -XXX,XX +XXX,XX @@ void job_cancel(Job *job, bool force) | ||
1464 | * choose to call job_is_cancelled() to show that we invoke | ||
1465 | * job_completed_txn_abort() only for force-cancelled jobs.) | ||
1466 | */ | ||
1467 | - if (job_is_cancelled(job)) { | ||
1468 | - job_completed_txn_abort(job); | ||
1469 | + if (job_is_cancelled_locked(job)) { | ||
1470 | + job_completed_txn_abort_locked(job); | ||
1471 | } | ||
1472 | } else { | ||
1473 | - job_enter(job); | ||
1474 | + job_enter_cond_locked(job, NULL); | ||
1475 | } | ||
1476 | } | ||
1477 | |||
1478 | -void job_user_cancel(Job *job, bool force, Error **errp) | ||
1479 | +void job_cancel(Job *job, bool force) | ||
1480 | { | ||
1481 | - if (job_apply_verb(job, JOB_VERB_CANCEL, errp)) { | ||
1482 | + JOB_LOCK_GUARD(); | ||
1483 | + job_cancel_locked(job, force); | ||
1484 | +} | ||
1485 | + | ||
1486 | +void job_user_cancel_locked(Job *job, bool force, Error **errp) | ||
1487 | +{ | ||
1488 | + if (job_apply_verb_locked(job, JOB_VERB_CANCEL, errp)) { | ||
1489 | return; | ||
1490 | } | ||
1491 | - job_cancel(job, force); | ||
1492 | + job_cancel_locked(job, force); | ||
1493 | +} | ||
1494 | + | ||
1495 | +void job_user_cancel(Job *job, bool force, Error **errp) | ||
1496 | +{ | ||
1497 | + JOB_LOCK_GUARD(); | ||
1498 | + job_user_cancel_locked(job, force, errp); | ||
1499 | } | ||
1500 | |||
1501 | /* A wrapper around job_cancel() taking an Error ** parameter so it may be | ||
1502 | * used with job_finish_sync() without the need for (rather nasty) function | ||
1503 | - * pointer casts there. */ | ||
1504 | -static void job_cancel_err(Job *job, Error **errp) | ||
1505 | + * pointer casts there. | ||
1506 | + * | ||
1507 | + * Called with job_mutex held. | ||
1508 | + */ | ||
1509 | +static void job_cancel_err_locked(Job *job, Error **errp) | ||
1510 | { | ||
1511 | - job_cancel(job, false); | ||
1512 | + job_cancel_locked(job, false); | ||
1513 | } | ||
1514 | |||
1515 | /** | ||
1516 | * Same as job_cancel_err(), but force-cancel. | ||
1517 | + * Called with job_mutex held. | ||
1518 | */ | ||
1519 | -static void job_force_cancel_err(Job *job, Error **errp) | ||
1520 | +static void job_force_cancel_err_locked(Job *job, Error **errp) | ||
1521 | { | ||
1522 | - job_cancel(job, true); | ||
1523 | + job_cancel_locked(job, true); | ||
1524 | } | ||
1525 | |||
1526 | -int job_cancel_sync(Job *job, bool force) | ||
1527 | +int job_cancel_sync_locked(Job *job, bool force) | ||
1528 | { | ||
1529 | if (force) { | ||
1530 | - return job_finish_sync(job, &job_force_cancel_err, NULL); | ||
1531 | + return job_finish_sync_locked(job, &job_force_cancel_err_locked, NULL); | ||
1532 | } else { | ||
1533 | - return job_finish_sync(job, &job_cancel_err, NULL); | ||
1534 | + return job_finish_sync_locked(job, &job_cancel_err_locked, NULL); | ||
1535 | } | ||
1536 | } | ||
1537 | |||
1538 | +int job_cancel_sync(Job *job, bool force) | ||
1539 | +{ | ||
1540 | + JOB_LOCK_GUARD(); | ||
1541 | + return job_cancel_sync_locked(job, force); | ||
1542 | +} | ||
1543 | + | ||
1544 | void job_cancel_sync_all(void) | ||
1545 | { | ||
1546 | Job *job; | ||
1547 | AioContext *aio_context; | ||
1548 | + JOB_LOCK_GUARD(); | ||
1549 | |||
1550 | - while ((job = job_next(NULL))) { | ||
1551 | + while ((job = job_next_locked(NULL))) { | ||
1552 | aio_context = job->aio_context; | ||
1553 | aio_context_acquire(aio_context); | ||
1554 | - job_cancel_sync(job, true); | ||
1555 | + job_cancel_sync_locked(job, true); | ||
1556 | aio_context_release(aio_context); | ||
1557 | } | ||
1558 | } | ||
1559 | |||
1560 | +int job_complete_sync_locked(Job *job, Error **errp) | ||
1561 | +{ | ||
1562 | + return job_finish_sync_locked(job, job_complete_locked, errp); | ||
1563 | +} | ||
1564 | + | ||
1565 | int job_complete_sync(Job *job, Error **errp) | ||
1566 | { | ||
1567 | - return job_finish_sync(job, job_complete, errp); | ||
1568 | + JOB_LOCK_GUARD(); | ||
1569 | + return job_complete_sync_locked(job, errp); | ||
1570 | } | ||
1571 | |||
1572 | -void job_complete(Job *job, Error **errp) | ||
1573 | +void job_complete_locked(Job *job, Error **errp) | ||
1574 | { | ||
1575 | /* Should not be reachable via external interface for internal jobs */ | ||
1576 | assert(job->id); | ||
1577 | GLOBAL_STATE_CODE(); | ||
1578 | - if (job_apply_verb(job, JOB_VERB_COMPLETE, errp)) { | ||
1579 | + if (job_apply_verb_locked(job, JOB_VERB_COMPLETE, errp)) { | ||
1580 | return; | ||
1581 | } | ||
1582 | - if (job_cancel_requested(job) || !job->driver->complete) { | ||
1583 | + if (job_cancel_requested_locked(job) || !job->driver->complete) { | ||
1584 | error_setg(errp, "The active block job '%s' cannot be completed", | ||
1585 | job->id); | ||
1586 | return; | ||
1587 | } | ||
1588 | |||
1589 | + job_unlock(); | ||
1590 | job->driver->complete(job, errp); | ||
1591 | + job_lock(); | ||
1592 | } | ||
1593 | |||
1594 | -int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp) | ||
1595 | +void job_complete(Job *job, Error **errp) | ||
1596 | +{ | ||
1597 | + JOB_LOCK_GUARD(); | ||
1598 | + job_complete_locked(job, errp); | ||
1599 | +} | ||
1600 | + | ||
1601 | +int job_finish_sync_locked(Job *job, | ||
1602 | + void (*finish)(Job *, Error **errp), | ||
1603 | + Error **errp) | ||
1604 | { | ||
32 | Error *local_err = NULL; | 1605 | Error *local_err = NULL; |
33 | int ret; | 1606 | int ret; |
34 | 1607 | ||
35 | - /* Since CONFIG_VDI_BLOCK_SIZE is disabled by default, | 1608 | - job_ref(job); |
36 | + /* Parse options and convert legacy syntax. | 1609 | + job_ref_locked(job); |
37 | + * | 1610 | |
38 | + * Since CONFIG_VDI_BLOCK_SIZE is disabled by default, | 1611 | if (finish) { |
39 | * cluster-size is not part of the QAPI schema; therefore we have | 1612 | finish(job, &local_err); |
40 | * to parse it before creating the QAPI object. */ | 1613 | } |
41 | #if defined(CONFIG_VDI_BLOCK_SIZE) | 1614 | if (local_err) { |
42 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts, | 1615 | error_propagate(errp, local_err); |
43 | 1616 | - job_unref(job); | |
44 | qdict = qemu_opts_to_qdict_filtered(opts, NULL, &vdi_create_opts, true); | 1617 | + job_unref_locked(job); |
45 | 1618 | return -EBUSY; | |
46 | + /* Create and open the file (protocol layer) */ | 1619 | } |
47 | ret = bdrv_create_file(filename, opts, errp); | 1620 | |
48 | if (ret < 0) { | 1621 | + job_unlock(); |
49 | goto done; | 1622 | AIO_WAIT_WHILE(job->aio_context, |
50 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts, | 1623 | (job_enter(job), !job_is_completed(job))); |
51 | goto done; | 1624 | + job_lock(); |
52 | } | 1625 | |
53 | 1626 | - ret = (job_is_cancelled(job) && job->ret == 0) ? -ECANCELED : job->ret; | |
54 | + /* Silently round up size */ | 1627 | - job_unref(job); |
55 | assert(create_options->driver == BLOCKDEV_DRIVER_VDI); | 1628 | + ret = (job_is_cancelled_locked(job) && job->ret == 0) |
56 | create_options->u.vdi.size = ROUND_UP(create_options->u.vdi.size, | 1629 | + ? -ECANCELED : job->ret; |
57 | BDRV_SECTOR_SIZE); | 1630 | + job_unref_locked(job); |
58 | 1631 | return ret; | |
59 | + /* Create the vdi image (format layer) */ | 1632 | } |
60 | ret = vdi_co_do_create(create_options, block_size, errp); | 1633 | + |
61 | done: | 1634 | +int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp) |
62 | QDECREF(qdict); | 1635 | +{ |
63 | @@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_vdi = { | 1636 | + JOB_LOCK_GUARD(); |
64 | .bdrv_close = vdi_close, | 1637 | + return job_finish_sync_locked(job, finish, errp); |
65 | .bdrv_reopen_prepare = vdi_reopen_prepare, | 1638 | +} |
66 | .bdrv_child_perm = bdrv_format_default_perms, | ||
67 | - .bdrv_co_create_opts = vdi_co_create_opts, | ||
68 | .bdrv_co_create = vdi_co_create, | ||
69 | + .bdrv_co_create_opts = vdi_co_create_opts, | ||
70 | .bdrv_has_zero_init = bdrv_has_zero_init_1, | ||
71 | .bdrv_co_block_status = vdi_co_block_status, | ||
72 | .bdrv_make_empty = vdi_make_empty, | ||
73 | -- | 1639 | -- |
74 | 2.13.6 | 1640 | 2.37.3 |
75 | |||
76 | diff view generated by jsdifflib |
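
The patch compared above converts the public job API into thin wrappers around `_locked()` variants and drops job_mutex around driver callbacks. The following sketch models that idiom as a self-contained program; it is an illustration only, with a plain pthread mutex standing in for QEMU's job_mutex and JOB_LOCK_GUARD, and with the Job, JobDriver and callback definitions reduced to hypothetical stand-ins rather than the real ones.

```c
/*
 * Standalone model of the wrapper pattern shown in the diff above:
 * a public entry point takes the job mutex and delegates to a
 * _locked() variant, and the mutex is dropped around driver callbacks.
 * The types and fields here are simplified stand-ins, not QEMU's.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t job_mutex = PTHREAD_MUTEX_INITIALIZER;

static void job_lock(void)   { pthread_mutex_lock(&job_mutex); }
static void job_unlock(void) { pthread_mutex_unlock(&job_mutex); }

typedef struct Job Job;

typedef struct JobDriver {
    /* Driver callback: runs without the job mutex held. */
    void (*user_resume)(Job *job);
} JobDriver;

struct Job {
    const JobDriver *driver;
    int pause_count;
    int user_paused;
};

/* Caller must hold job_mutex; only touches fields it protects. */
static void job_resume_locked(Job *job)
{
    if (job->pause_count > 0) {
        job->pause_count--;
    }
}

/* Caller must hold job_mutex, but it is dropped around the callback. */
static void job_user_resume_locked(Job *job)
{
    if (!job->user_paused) {
        return;
    }
    if (job->driver->user_resume) {
        job_unlock();                   /* never call drivers with the lock */
        job->driver->user_resume(job);
        job_lock();
    }
    job->user_paused = 0;
    job_resume_locked(job);
}

/* Public wrapper: takes the lock and delegates to the _locked variant. */
static void job_user_resume(Job *job)
{
    job_lock();
    job_user_resume_locked(job);
    job_unlock();
}

static void example_user_resume_cb(Job *job)
{
    (void)job;
    printf("driver callback invoked with job_mutex released\n");
}

int main(void)
{
    const JobDriver drv = { .user_resume = example_user_resume_cb };
    Job job = { .driver = &drv, .pause_count = 1, .user_paused = 1 };

    job_user_resume(&job);
    printf("pause_count=%d user_paused=%d\n", job.pause_count, job.user_paused);
    return 0;
}
```

Releasing the mutex around driver->user_resume() mirrors the job_unlock()/job_lock() pairs in the diff above: driver code may block or re-enter the job layer, so the mutex is held only while plain Job fields are touched.
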
1 | From: John Snow <jsnow@redhat.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | The state transition table has mostly been implied. We're about to make | 3 | This comment applies more to job; it was left in blockjob because, in the past, |
4 | it a bit more complex, so let's make the STM explicit instead. | 4 | the whole job logic was implemented there. |
5 | 5 | ||
6 | Perform state transitions with a function that for now just asserts the | 6 | Note: at this stage, job_{lock/unlock} and job lock guard macros |
7 | transition is appropriate. | 7 | are *nop*. |
8 | 8 | ||
9 | Transitions: | 9 | No functional change intended. |
10 | Undefined -> Created: During job initialization. | ||
11 | Created -> Running: Once the job is started. | ||
12 | Jobs cannot transition from "Created" to "Paused" | ||
13 | directly, but will instead synchronously transition | ||
14 | to running to paused immediately. | ||
15 | Running -> Paused: Normal workflow for pauses. | ||
16 | Running -> Ready: Normal workflow for jobs reaching their sync point. | ||
17 | (e.g. mirror) | ||
18 | Ready -> Standby: Normal workflow for pausing ready jobs. | ||
19 | Paused -> Running: Normal resume. | ||
20 | Standby -> Ready: Resume of a Standby job. | ||
21 | 10 | ||
22 | +---------+ | 11 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
23 | |UNDEFINED| | 12 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> |
24 | +--+------+ | 13 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> |
25 | | | 14 | Message-Id: <20220926093214.506243-7-eesposit@redhat.com> |
26 | +--v----+ | 15 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
27 | |CREATED| | ||
28 | +--+----+ | ||
29 | | | ||
30 | +--v----+ +------+ | ||
31 | |RUNNING<----->PAUSED| | ||
32 | +--+----+ +------+ | ||
33 | | | ||
34 | +--v--+ +-------+ | ||
35 | |READY<------->STANDBY| | ||
36 | +-----+ +-------+ | ||
37 | |||
38 | Notably, there is no state presently defined as of this commit that | ||
39 | deals with a job after the "running" or "ready" states, so this table | ||
40 | will be adjusted alongside the commits that introduce those states. | ||
41 | |||
42 | Signed-off-by: John Snow <jsnow@redhat.com> | ||
43 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 16 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
44 | --- | 17 | --- |
45 | blockjob.c | 40 +++++++++++++++++++++++++++++++++------- | 18 | blockjob.c | 20 -------------------- |
46 | block/trace-events | 3 +++ | 19 | job.c | 16 ++++++++++++++++ |
47 | 2 files changed, 36 insertions(+), 7 deletions(-) | 20 | 2 files changed, 16 insertions(+), 20 deletions(-) |
48 | 21 | ||
49 | diff --git a/blockjob.c b/blockjob.c | 22 | diff --git a/blockjob.c b/blockjob.c |
50 | index XXXXXXX..XXXXXXX 100644 | 23 | index XXXXXXX..XXXXXXX 100644 |
51 | --- a/blockjob.c | 24 | --- a/blockjob.c |
52 | +++ b/blockjob.c | 25 | +++ b/blockjob.c |
53 | @@ -XXX,XX +XXX,XX @@ | 26 | @@ -XXX,XX +XXX,XX @@ |
54 | #include "block/block.h" | 27 | #include "qemu/main-loop.h" |
55 | #include "block/blockjob_int.h" | 28 | #include "qemu/timer.h" |
56 | #include "block/block_int.h" | 29 | |
57 | +#include "block/trace.h" | 30 | -/* |
58 | #include "sysemu/block-backend.h" | 31 | - * The block job API is composed of two categories of functions. |
59 | #include "qapi/error.h" | 32 | - * |
60 | #include "qapi/qapi-events-block-core.h" | 33 | - * The first includes functions used by the monitor. The monitor is |
34 | - * peculiar in that it accesses the block job list with block_job_get, and | ||
35 | - * therefore needs consistency across block_job_get and the actual operation | ||
36 | - * (e.g. block_job_set_speed). The consistency is achieved with | ||
37 | - * aio_context_acquire/release. These functions are declared in blockjob.h. | ||
38 | - * | ||
39 | - * The second includes functions used by the block job drivers and sometimes | ||
40 | - * by the core block layer. These do not care about locking, because the | ||
41 | - * whole coroutine runs under the AioContext lock, and are declared in | ||
42 | - * blockjob_int.h. | ||
43 | - */ | ||
44 | - | ||
45 | static bool is_block_job(Job *job) | ||
46 | { | ||
47 | return job_type(job) == JOB_TYPE_BACKUP || | ||
48 | @@ -XXX,XX +XXX,XX @@ static void block_job_event_ready(Notifier *n, void *opaque) | ||
49 | } | ||
50 | |||
51 | |||
52 | -/* | ||
53 | - * API for block job drivers and the block layer. These functions are | ||
54 | - * declared in blockjob_int.h. | ||
55 | - */ | ||
56 | - | ||
57 | void *block_job_create(const char *job_id, const BlockJobDriver *driver, | ||
58 | JobTxn *txn, BlockDriverState *bs, uint64_t perm, | ||
59 | uint64_t shared_perm, int64_t speed, int flags, | ||
60 | diff --git a/job.c b/job.c | ||
61 | index XXXXXXX..XXXXXXX 100644 | ||
62 | --- a/job.c | ||
63 | +++ b/job.c | ||
61 | @@ -XXX,XX +XXX,XX @@ | 64 | @@ -XXX,XX +XXX,XX @@ |
62 | * block_job_enter. */ | 65 | #include "trace/trace-root.h" |
63 | static QemuMutex block_job_mutex; | 66 | #include "qapi/qapi-events-job.h" |
64 | 67 | ||
65 | +/* BlockJob State Transition Table */ | 68 | +/* |
66 | +bool BlockJobSTT[BLOCK_JOB_STATUS__MAX][BLOCK_JOB_STATUS__MAX] = { | 69 | + * The job API is composed of two categories of functions. |
67 | + /* U, C, R, P, Y, S */ | 70 | + * |
68 | + /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0}, | 71 | + * The first includes functions used by the monitor. The monitor is |
69 | + /* C: */ [BLOCK_JOB_STATUS_CREATED] = {0, 0, 1, 0, 0, 0}, | 72 | + * peculiar in that it accesses the job list with job_get, and |
70 | + /* R: */ [BLOCK_JOB_STATUS_RUNNING] = {0, 0, 0, 1, 1, 0}, | 73 | + * therefore needs consistency across job_get and the actual operation |
71 | + /* P: */ [BLOCK_JOB_STATUS_PAUSED] = {0, 0, 1, 0, 0, 0}, | 74 | + * (e.g. job_user_cancel). To achieve this consistency, the caller |
72 | + /* Y: */ [BLOCK_JOB_STATUS_READY] = {0, 0, 0, 0, 0, 1}, | 75 | + * calls job_lock/job_unlock itself around the whole operation. |
73 | + /* S: */ [BLOCK_JOB_STATUS_STANDBY] = {0, 0, 0, 0, 1, 0}, | 76 | + * |
74 | +}; | 77 | + * |
78 | + * The second includes functions used by the job drivers and sometimes | ||
79 | + * by the core block layer. These delegate the locking to the callee instead. | ||
80 | + * | ||
81 | + * TODO Actually make this true | ||
82 | + */ | ||
75 | + | 83 | + |
76 | +static void block_job_state_transition(BlockJob *job, BlockJobStatus s1) | 84 | /* |
77 | +{ | 85 | * job_mutex protects the jobs list, but also makes the |
78 | + BlockJobStatus s0 = job->status; | 86 | * struct job fields thread-safe. |
79 | + assert(s1 >= 0 && s1 <= BLOCK_JOB_STATUS__MAX); | ||
80 | + trace_block_job_state_transition(job, job->ret, BlockJobSTT[s0][s1] ? | ||
81 | + "allowed" : "disallowed", | ||
82 | + qapi_enum_lookup(&BlockJobStatus_lookup, | ||
83 | + s0), | ||
84 | + qapi_enum_lookup(&BlockJobStatus_lookup, | ||
85 | + s1)); | ||
86 | + assert(BlockJobSTT[s0][s1]); | ||
87 | + job->status = s1; | ||
88 | +} | ||
89 | + | ||
90 | static void block_job_lock(void) | ||
91 | { | ||
92 | qemu_mutex_lock(&block_job_mutex); | ||
93 | @@ -XXX,XX +XXX,XX @@ void block_job_start(BlockJob *job) | ||
94 | job->pause_count--; | ||
95 | job->busy = true; | ||
96 | job->paused = false; | ||
97 | - job->status = BLOCK_JOB_STATUS_RUNNING; | ||
98 | + block_job_state_transition(job, BLOCK_JOB_STATUS_RUNNING); | ||
99 | bdrv_coroutine_enter(blk_bs(job->blk), job->co); | ||
100 | } | ||
101 | |||
102 | @@ -XXX,XX +XXX,XX @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver, | ||
103 | job->paused = true; | ||
104 | job->pause_count = 1; | ||
105 | job->refcnt = 1; | ||
106 | - job->status = BLOCK_JOB_STATUS_CREATED; | ||
107 | + block_job_state_transition(job, BLOCK_JOB_STATUS_CREATED); | ||
108 | aio_timer_init(qemu_get_aio_context(), &job->sleep_timer, | ||
109 | QEMU_CLOCK_REALTIME, SCALE_NS, | ||
110 | block_job_sleep_timer_cb, job); | ||
111 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn block_job_pause_point(BlockJob *job) | ||
112 | |||
113 | if (block_job_should_pause(job) && !block_job_is_cancelled(job)) { | ||
114 | BlockJobStatus status = job->status; | ||
115 | - job->status = status == BLOCK_JOB_STATUS_READY ? \ | ||
116 | - BLOCK_JOB_STATUS_STANDBY : \ | ||
117 | - BLOCK_JOB_STATUS_PAUSED; | ||
118 | + block_job_state_transition(job, status == BLOCK_JOB_STATUS_READY ? \ | ||
119 | + BLOCK_JOB_STATUS_STANDBY : \ | ||
120 | + BLOCK_JOB_STATUS_PAUSED); | ||
121 | job->paused = true; | ||
122 | block_job_do_yield(job, -1); | ||
123 | job->paused = false; | ||
124 | - job->status = status; | ||
125 | + block_job_state_transition(job, status); | ||
126 | } | ||
127 | |||
128 | if (job->driver->resume) { | ||
129 | @@ -XXX,XX +XXX,XX @@ void block_job_iostatus_reset(BlockJob *job) | ||
130 | |||
131 | void block_job_event_ready(BlockJob *job) | ||
132 | { | ||
133 | - job->status = BLOCK_JOB_STATUS_READY; | ||
134 | + block_job_state_transition(job, BLOCK_JOB_STATUS_READY); | ||
135 | job->ready = true; | ||
136 | |||
137 | if (block_job_is_internal(job)) { | ||
138 | diff --git a/block/trace-events b/block/trace-events | ||
139 | index XXXXXXX..XXXXXXX 100644 | ||
140 | --- a/block/trace-events | ||
141 | +++ b/block/trace-events | ||
142 | @@ -XXX,XX +XXX,XX @@ | ||
143 | bdrv_open_common(void *bs, const char *filename, int flags, const char *format_name) "bs %p filename \"%s\" flags 0x%x format_name \"%s\"" | ||
144 | bdrv_lock_medium(void *bs, bool locked) "bs %p locked %d" | ||
145 | |||
146 | +# blockjob.c | ||
147 | +block_job_state_transition(void *job, int ret, const char *legal, const char *s0, const char *s1) "job %p (ret: %d) attempting %s transition (%s-->%s)" | ||
148 | + | ||
149 | # block/block-backend.c | ||
150 | blk_co_preadv(void *blk, void *bs, int64_t offset, unsigned int bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %u flags 0x%x" | ||
151 | blk_co_pwritev(void *blk, void *bs, int64_t offset, unsigned int bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %u flags 0x%x" | ||
152 | -- | 87 | -- |
153 | 2.13.6 | 88 | 2.37.3 |
154 | |||
155 | diff view generated by jsdifflib |
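
On the left-hand side of the comparison above, the older series introduces BlockJobSTT, a two-dimensional table that declares which status transitions are legal and asserts on every change. The sketch below is a minimal, self-contained model of that table-driven check; the enum values and function names are illustrative stand-ins, not the actual BlockJobStatus or block_job_state_transition definitions.

```c
/*
 * Standalone model of the state-transition-table idiom: allowed
 * transitions live in a 2D lookup table and every transition is
 * validated against it before the state is updated.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef enum {
    STATUS_UNDEFINED,
    STATUS_CREATED,
    STATUS_RUNNING,
    STATUS_PAUSED,
    STATUS_READY,
    STATUS_STANDBY,
    STATUS__MAX,
} Status;

/* Rows are the current state, columns the requested new state. */
static const bool transition_allowed[STATUS__MAX][STATUS__MAX] = {
    /*                    U  C  R  P  Y  S */
    [STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0},
    [STATUS_CREATED]   = {0, 0, 1, 0, 0, 0},
    [STATUS_RUNNING]   = {0, 0, 0, 1, 1, 0},
    [STATUS_PAUSED]    = {0, 0, 1, 0, 0, 0},
    [STATUS_READY]     = {0, 0, 0, 0, 0, 1},
    [STATUS_STANDBY]   = {0, 0, 0, 0, 1, 0},
};

/* Validate and apply a transition; aborts on an illegal one. */
static void state_transition(Status *state, Status next)
{
    assert(next < STATUS__MAX);
    assert(transition_allowed[*state][next]);
    *state = next;
}

int main(void)
{
    Status s = STATUS_UNDEFINED;

    state_transition(&s, STATUS_CREATED);
    state_transition(&s, STATUS_RUNNING);
    state_transition(&s, STATUS_READY);
    state_transition(&s, STATUS_STANDBY);
    printf("final state: %d\n", s);
    return 0;
}
```

Encoding the state machine as data keeps the legality check in one place; adding a new state later only means adding a row and a column instead of touching every transition site.
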
1 | From: John Snow <jsnow@redhat.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | We're about to add several new states, and booleans are becoming | 3 | Just as done with job.h, create _locked() functions in blockjob.h |
4 | unwieldly and difficult to reason about. It would help to have a | 4 | |
5 | more explicit bookkeeping of the state of blockjobs. To this end, | 5 | These functions will be later useful when caller has already taken |
6 | add a new "status" field and add our existing states in a redundant | 6 | the lock. All blockjob _locked functions call job _locked functions. |
7 | manner alongside the bools they are replacing: | 7 | |
8 | 8 | Note: at this stage, job_{lock/unlock} and job lock guard macros | |
9 | UNDEFINED: Placeholder, default state. Not currently visible to QMP | 9 | are *nop*. |
10 | unless changes occur in the future to allow creating jobs | 10 | |
11 | without starting them via QMP. | 11 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
12 | CREATED: replaces !!job->co && paused && !busy | 12 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> |
13 | RUNNING: replaces effectively (!paused && busy) | 13 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
14 | PAUSED: Nearly redundant with info->paused, which shows pause_count. | 14 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> |
15 | This reports the actual status of the job, which almost always | 15 | Message-Id: <20220926093214.506243-8-eesposit@redhat.com> |
16 | matches the paused request status. It differs in that it is | ||
17 | strictly only true when the job has actually gone dormant. | ||
18 | READY: replaces job->ready. | ||
19 | STANDBY: Paused, but job->ready is true. | ||
20 | |||
21 | New state additions in coming commits will not be quite so redundant: | ||
22 | |||
23 | WAITING: Waiting on transaction. This job has finished all the work | ||
24 | it can until the transaction converges, fails, or is canceled. | ||
25 | PENDING: Pending authorization from user. This job has finished all the | ||
26 | work it can until the job or transaction is finalized via | ||
27 | block_job_finalize. This implies the transaction has converged | ||
28 | and left the WAITING phase. | ||
29 | ABORTING: Job has encountered an error condition and is in the process | ||
30 | of aborting. | ||
31 | CONCLUDED: Job has ceased all operations and has a return code available | ||
32 | for query and may be dismissed via block_job_dismiss. | ||
33 | NULL: Job has been dismissed and (should) be destroyed. Should never | ||
34 | be visible to QMP. | ||
35 | |||
36 | Some of these states appear somewhat superfluous, but it helps define the | ||
37 | expected flow of a job; so some of the states wind up being synchronous | ||
38 | empty transitions. Importantly, jobs can be in only one of these states | ||
39 | at any given time, which helps code and external users alike reason about | ||
40 | the current condition of a job unambiguously. | ||
41 | |||
42 | Signed-off-by: John Snow <jsnow@redhat.com> | ||
43 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 16 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
44 | --- | 17 | --- |
45 | qapi/block-core.json | 31 ++++++++++++++++++++++++++++++- | 18 | include/block/blockjob.h | 18 ++++++++++++++ |
46 | include/block/blockjob.h | 3 +++ | 19 | blockjob.c | 52 ++++++++++++++++++++++++++++++++-------- |
47 | blockjob.c | 9 +++++++++ | 20 | 2 files changed, 60 insertions(+), 10 deletions(-) |
48 | tests/qemu-iotests/109.out | 24 ++++++++++++------------ | 21 | |
49 | 4 files changed, 54 insertions(+), 13 deletions(-) | ||
50 | |||
51 | diff --git a/qapi/block-core.json b/qapi/block-core.json | ||
52 | index XXXXXXX..XXXXXXX 100644 | ||
53 | --- a/qapi/block-core.json | ||
54 | +++ b/qapi/block-core.json | ||
55 | @@ -XXX,XX +XXX,XX @@ | ||
56 | 'data': ['commit', 'stream', 'mirror', 'backup'] } | ||
57 | |||
58 | ## | ||
59 | +# @BlockJobStatus: | ||
60 | +# | ||
61 | +# Indicates the present state of a given blockjob in its lifetime. | ||
62 | +# | ||
63 | +# @undefined: Erroneous, default state. Should not ever be visible. | ||
64 | +# | ||
65 | +# @created: The job has been created, but not yet started. | ||
66 | +# | ||
67 | +# @running: The job is currently running. | ||
68 | +# | ||
69 | +# @paused: The job is running, but paused. The pause may be requested by | ||
70 | +# either the QMP user or by internal processes. | ||
71 | +# | ||
72 | +# @ready: The job is running, but is ready for the user to signal completion. | ||
73 | +# This is used for long-running jobs like mirror that are designed to | ||
74 | +# run indefinitely. | ||
75 | +# | ||
76 | +# @standby: The job is ready, but paused. This is nearly identical to @paused. | ||
77 | +# The job may return to @ready or otherwise be canceled. | ||
78 | +# | ||
79 | +# Since: 2.12 | ||
80 | +## | ||
81 | +{ 'enum': 'BlockJobStatus', | ||
82 | + 'data': ['undefined', 'created', 'running', 'paused', 'ready', 'standby'] } | ||
83 | + | ||
84 | +## | ||
85 | # @BlockJobInfo: | ||
86 | # | ||
87 | # Information about a long-running block device operation. | ||
88 | @@ -XXX,XX +XXX,XX @@ | ||
89 | # | ||
90 | # @ready: true if the job may be completed (since 2.2) | ||
91 | # | ||
92 | +# @status: Current job state/status (since 2.12) | ||
93 | +# | ||
94 | # Since: 1.1 | ||
95 | ## | ||
96 | { 'struct': 'BlockJobInfo', | ||
97 | 'data': {'type': 'str', 'device': 'str', 'len': 'int', | ||
98 | 'offset': 'int', 'busy': 'bool', 'paused': 'bool', 'speed': 'int', | ||
99 | - 'io-status': 'BlockDeviceIoStatus', 'ready': 'bool'} } | ||
100 | + 'io-status': 'BlockDeviceIoStatus', 'ready': 'bool', | ||
101 | + 'status': 'BlockJobStatus' } } | ||
102 | |||
103 | ## | ||
104 | # @query-block-jobs: | ||
105 | diff --git a/include/block/blockjob.h b/include/block/blockjob.h | 22 | diff --git a/include/block/blockjob.h b/include/block/blockjob.h |
106 | index XXXXXXX..XXXXXXX 100644 | 23 | index XXXXXXX..XXXXXXX 100644 |
107 | --- a/include/block/blockjob.h | 24 | --- a/include/block/blockjob.h |
108 | +++ b/include/block/blockjob.h | 25 | +++ b/include/block/blockjob.h |
109 | @@ -XXX,XX +XXX,XX @@ typedef struct BlockJob { | 26 | @@ -XXX,XX +XXX,XX @@ typedef struct BlockJob { |
110 | */ | 27 | */ |
111 | QEMUTimer sleep_timer; | 28 | BlockJob *block_job_next(BlockJob *job); |
112 | 29 | ||
113 | + /** Current state; See @BlockJobStatus for details. */ | 30 | +/* Same as block_job_next(), but called with job lock held. */ |
114 | + BlockJobStatus status; | 31 | +BlockJob *block_job_next_locked(BlockJob *job); |
115 | + | 32 | + |
116 | BlockJobTxn *txn; | 33 | /** |
117 | QLIST_ENTRY(BlockJob) txn_list; | 34 | * block_job_get: |
118 | } BlockJob; | 35 | * @id: The id of the block job. |
36 | @@ -XXX,XX +XXX,XX @@ BlockJob *block_job_next(BlockJob *job); | ||
37 | */ | ||
38 | BlockJob *block_job_get(const char *id); | ||
39 | |||
40 | +/* Same as block_job_get(), but called with job lock held. */ | ||
41 | +BlockJob *block_job_get_locked(const char *id); | ||
42 | + | ||
43 | /** | ||
44 | * block_job_add_bdrv: | ||
45 | * @job: A block job | ||
46 | @@ -XXX,XX +XXX,XX @@ bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs); | ||
47 | */ | ||
48 | bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp); | ||
49 | |||
50 | +/* | ||
51 | + * Same as block_job_set_speed(), but called with job lock held. | ||
52 | + * Might release the lock temporarily. | ||
53 | + */ | ||
54 | +bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp); | ||
55 | + | ||
56 | /** | ||
57 | * block_job_query: | ||
58 | * @job: The job to get information about. | ||
59 | @@ -XXX,XX +XXX,XX @@ bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp); | ||
60 | */ | ||
61 | BlockJobInfo *block_job_query(BlockJob *job, Error **errp); | ||
62 | |||
63 | +/* Same as block_job_query(), but called with job lock held. */ | ||
64 | +BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp); | ||
65 | + | ||
66 | /** | ||
67 | * block_job_iostatus_reset: | ||
68 | * @job: The job whose I/O status should be reset. | ||
69 | @@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp); | ||
70 | */ | ||
71 | void block_job_iostatus_reset(BlockJob *job); | ||
72 | |||
73 | +/* Same as block_job_iostatus_reset(), but called with job lock held. */ | ||
74 | +void block_job_iostatus_reset_locked(BlockJob *job); | ||
75 | + | ||
76 | /* | ||
77 | * block_job_get_aio_context: | ||
78 | * | ||
119 | diff --git a/blockjob.c b/blockjob.c | 79 | diff --git a/blockjob.c b/blockjob.c |
120 | index XXXXXXX..XXXXXXX 100644 | 80 | index XXXXXXX..XXXXXXX 100644 |
121 | --- a/blockjob.c | 81 | --- a/blockjob.c |
122 | +++ b/blockjob.c | 82 | +++ b/blockjob.c |
123 | @@ -XXX,XX +XXX,XX @@ void block_job_start(BlockJob *job) | 83 | @@ -XXX,XX +XXX,XX @@ static bool is_block_job(Job *job) |
124 | job->pause_count--; | 84 | job_type(job) == JOB_TYPE_STREAM; |
125 | job->busy = true; | 85 | } |
126 | job->paused = false; | 86 | |
127 | + job->status = BLOCK_JOB_STATUS_RUNNING; | 87 | -BlockJob *block_job_next(BlockJob *bjob) |
128 | bdrv_coroutine_enter(blk_bs(job->blk), job->co); | 88 | +BlockJob *block_job_next_locked(BlockJob *bjob) |
129 | } | 89 | { |
130 | 90 | Job *job = bjob ? &bjob->job : NULL; | |
91 | GLOBAL_STATE_CODE(); | ||
92 | |||
93 | do { | ||
94 | - job = job_next(job); | ||
95 | + job = job_next_locked(job); | ||
96 | } while (job && !is_block_job(job)); | ||
97 | |||
98 | return job ? container_of(job, BlockJob, job) : NULL; | ||
99 | } | ||
100 | |||
101 | -BlockJob *block_job_get(const char *id) | ||
102 | +BlockJob *block_job_next(BlockJob *bjob) | ||
103 | { | ||
104 | - Job *job = job_get(id); | ||
105 | + JOB_LOCK_GUARD(); | ||
106 | + return block_job_next_locked(bjob); | ||
107 | +} | ||
108 | + | ||
109 | +BlockJob *block_job_get_locked(const char *id) | ||
110 | +{ | ||
111 | + Job *job = job_get_locked(id); | ||
112 | GLOBAL_STATE_CODE(); | ||
113 | |||
114 | if (job && is_block_job(job)) { | ||
115 | @@ -XXX,XX +XXX,XX @@ BlockJob *block_job_get(const char *id) | ||
116 | } | ||
117 | } | ||
118 | |||
119 | +BlockJob *block_job_get(const char *id) | ||
120 | +{ | ||
121 | + JOB_LOCK_GUARD(); | ||
122 | + return block_job_get_locked(id); | ||
123 | +} | ||
124 | + | ||
125 | void block_job_free(Job *job) | ||
126 | { | ||
127 | BlockJob *bjob = container_of(job, BlockJob, job); | ||
128 | @@ -XXX,XX +XXX,XX @@ static bool job_timer_pending(Job *job) | ||
129 | return timer_pending(&job->sleep_timer); | ||
130 | } | ||
131 | |||
132 | -bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) | ||
133 | +bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp) | ||
134 | { | ||
135 | const BlockJobDriver *drv = block_job_driver(job); | ||
136 | int64_t old_speed = job->speed; | ||
137 | |||
138 | GLOBAL_STATE_CODE(); | ||
139 | |||
140 | - if (job_apply_verb(&job->job, JOB_VERB_SET_SPEED, errp) < 0) { | ||
141 | + if (job_apply_verb_locked(&job->job, JOB_VERB_SET_SPEED, errp) < 0) { | ||
142 | return false; | ||
143 | } | ||
144 | if (speed < 0) { | ||
145 | @@ -XXX,XX +XXX,XX @@ bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) | ||
146 | job->speed = speed; | ||
147 | |||
148 | if (drv->set_speed) { | ||
149 | + job_unlock(); | ||
150 | drv->set_speed(job, speed); | ||
151 | + job_lock(); | ||
152 | } | ||
153 | |||
154 | if (speed && speed <= old_speed) { | ||
155 | @@ -XXX,XX +XXX,XX @@ bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) | ||
156 | } | ||
157 | |||
158 | /* kick only if a timer is pending */ | ||
159 | - job_enter_cond(&job->job, job_timer_pending); | ||
160 | + job_enter_cond_locked(&job->job, job_timer_pending); | ||
161 | |||
162 | return true; | ||
163 | } | ||
164 | |||
165 | +bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) | ||
166 | +{ | ||
167 | + JOB_LOCK_GUARD(); | ||
168 | + return block_job_set_speed_locked(job, speed, errp); | ||
169 | +} | ||
170 | + | ||
171 | int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n) | ||
172 | { | ||
173 | IO_CODE(); | ||
174 | return ratelimit_calculate_delay(&job->limit, n); | ||
175 | } | ||
176 | |||
177 | -BlockJobInfo *block_job_query(BlockJob *job, Error **errp) | ||
178 | +BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp) | ||
179 | { | ||
180 | BlockJobInfo *info; | ||
181 | uint64_t progress_current, progress_total; | ||
131 | @@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp) | 182 | @@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp) |
183 | info->len = progress_total; | ||
132 | info->speed = job->speed; | 184 | info->speed = job->speed; |
133 | info->io_status = job->iostatus; | 185 | info->io_status = job->iostatus; |
134 | info->ready = job->ready; | 186 | - info->ready = job_is_ready(&job->job), |
135 | + info->status = job->status; | 187 | + info->ready = job_is_ready_locked(&job->job), |
188 | info->status = job->job.status; | ||
189 | info->auto_finalize = job->job.auto_finalize; | ||
190 | info->auto_dismiss = job->job.auto_dismiss; | ||
191 | @@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp) | ||
136 | return info; | 192 | return info; |
137 | } | 193 | } |
138 | 194 | ||
139 | @@ -XXX,XX +XXX,XX @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver, | 195 | +BlockJobInfo *block_job_query(BlockJob *job, Error **errp) |
140 | job->paused = true; | 196 | +{ |
141 | job->pause_count = 1; | 197 | + JOB_LOCK_GUARD(); |
142 | job->refcnt = 1; | 198 | + return block_job_query_locked(job, errp); |
143 | + job->status = BLOCK_JOB_STATUS_CREATED; | 199 | +} |
144 | aio_timer_init(qemu_get_aio_context(), &job->sleep_timer, | 200 | + |
145 | QEMU_CLOCK_REALTIME, SCALE_NS, | 201 | static void block_job_iostatus_set_err(BlockJob *job, int error) |
146 | block_job_sleep_timer_cb, job); | 202 | { |
147 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn block_job_pause_point(BlockJob *job) | 203 | if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) { |
148 | } | 204 | @@ -XXX,XX +XXX,XX @@ fail: |
149 | 205 | return NULL; | |
150 | if (block_job_should_pause(job) && !block_job_is_cancelled(job)) { | 206 | } |
151 | + BlockJobStatus status = job->status; | 207 | |
152 | + job->status = status == BLOCK_JOB_STATUS_READY ? \ | 208 | -void block_job_iostatus_reset(BlockJob *job) |
153 | + BLOCK_JOB_STATUS_STANDBY : \ | 209 | +void block_job_iostatus_reset_locked(BlockJob *job) |
154 | + BLOCK_JOB_STATUS_PAUSED; | 210 | { |
155 | job->paused = true; | 211 | GLOBAL_STATE_CODE(); |
156 | block_job_do_yield(job, -1); | 212 | if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) { |
157 | job->paused = false; | ||
158 | + job->status = status; | ||
159 | } | ||
160 | |||
161 | if (job->driver->resume) { | ||
162 | @@ -XXX,XX +XXX,XX @@ void block_job_iostatus_reset(BlockJob *job) | 213 | @@ -XXX,XX +XXX,XX @@ void block_job_iostatus_reset(BlockJob *job) |
163 | 214 | job->iostatus = BLOCK_DEVICE_IO_STATUS_OK; | |
164 | void block_job_event_ready(BlockJob *job) | 215 | } |
165 | { | 216 | |
166 | + job->status = BLOCK_JOB_STATUS_READY; | 217 | +void block_job_iostatus_reset(BlockJob *job) |
167 | job->ready = true; | 218 | +{ |
168 | 219 | + JOB_LOCK_GUARD(); | |
169 | if (block_job_is_internal(job)) { | 220 | + block_job_iostatus_reset_locked(job); |
170 | diff --git a/tests/qemu-iotests/109.out b/tests/qemu-iotests/109.out | 221 | +} |
171 | index XXXXXXX..XXXXXXX 100644 | 222 | + |
172 | --- a/tests/qemu-iotests/109.out | 223 | void block_job_user_resume(Job *job) |
173 | +++ b/tests/qemu-iotests/109.out | 224 | { |
174 | @@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0 | 225 | BlockJob *bjob = container_of(job, BlockJob, job); |
175 | {"return": {}} | ||
176 | {"return": {}} | ||
177 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}} | ||
178 | -{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 1024, "offset": 1024, "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
179 | +{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 1024, "offset": 1024, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
180 | {"return": {}} | ||
181 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | ||
182 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}} | ||
183 | @@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0 | ||
184 | {"return": {}} | ||
185 | {"return": {}} | ||
186 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 197120, "offset": 197120, "speed": 0, "type": "mirror"}} | ||
187 | -{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 197120, "offset": 197120, "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
188 | +{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 197120, "offset": 197120, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
189 | {"return": {}} | ||
190 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | ||
191 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 197120, "offset": 197120, "speed": 0, "type": "mirror"}} | ||
192 | @@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0 | ||
193 | {"return": {}} | ||
194 | {"return": {}} | ||
195 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}} | ||
196 | -{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 327680, "offset": 327680, "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
197 | +{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 327680, "offset": 327680, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
198 | {"return": {}} | ||
199 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | ||
200 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}} | ||
201 | @@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0 | ||
202 | {"return": {}} | ||
203 | {"return": {}} | ||
204 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}} | ||
205 | -{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 1024, "offset": 1024, "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
206 | +{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 1024, "offset": 1024, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
207 | {"return": {}} | ||
208 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | ||
209 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}} | ||
210 | @@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0 | ||
211 | {"return": {}} | ||
212 | {"return": {}} | ||
213 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}} | ||
214 | -{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 65536, "offset": 65536, "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
215 | +{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 65536, "offset": 65536, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
216 | {"return": {}} | ||
217 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | ||
218 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}} | ||
219 | @@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0 | ||
220 | {"return": {}} | ||
221 | {"return": {}} | ||
222 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}} | ||
223 | -{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 2560, "offset": 2560, "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
224 | +{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 2560, "offset": 2560, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
225 | {"return": {}} | ||
226 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | ||
227 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}} | ||
228 | @@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0 | ||
229 | {"return": {}} | ||
230 | {"return": {}} | ||
231 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}} | ||
232 | -{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 2560, "offset": 2560, "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
233 | +{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 2560, "offset": 2560, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
234 | {"return": {}} | ||
235 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | ||
236 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}} | ||
237 | @@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0 | ||
238 | {"return": {}} | ||
239 | {"return": {}} | ||
240 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 31457280, "offset": 31457280, "speed": 0, "type": "mirror"}} | ||
241 | -{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 31457280, "offset": 31457280, "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
242 | +{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 31457280, "offset": 31457280, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
243 | {"return": {}} | ||
244 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | ||
245 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 31457280, "offset": 31457280, "speed": 0, "type": "mirror"}} | ||
246 | @@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0 | ||
247 | {"return": {}} | ||
248 | {"return": {}} | ||
249 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}} | ||
250 | -{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 327680, "offset": 327680, "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
251 | +{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 327680, "offset": 327680, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
252 | {"return": {}} | ||
253 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | ||
254 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}} | ||
255 | @@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0 | ||
256 | {"return": {}} | ||
257 | {"return": {}} | ||
258 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 2048, "offset": 2048, "speed": 0, "type": "mirror"}} | ||
259 | -{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 2048, "offset": 2048, "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
260 | +{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 2048, "offset": 2048, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
261 | {"return": {}} | ||
262 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | ||
263 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2048, "offset": 2048, "speed": 0, "type": "mirror"}} | ||
264 | @@ -XXX,XX +XXX,XX @@ Automatically detecting the format is dangerous for raw images, write operations | ||
265 | Specify the 'raw' format explicitly to remove the restrictions. | ||
266 | {"return": {}} | ||
267 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}} | ||
268 | -{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 512, "offset": 512, "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
269 | +{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 512, "offset": 512, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
270 | {"return": {}} | ||
271 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | ||
272 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}} | ||
273 | @@ -XXX,XX +XXX,XX @@ Images are identical. | ||
274 | {"return": {}} | ||
275 | {"return": {}} | ||
276 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}} | ||
277 | -{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 512, "offset": 512, "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
278 | +{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 512, "offset": 512, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
279 | {"return": {}} | ||
280 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | ||
281 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}} | ||
282 | -- | 226 | -- |
283 | 2.13.6 | 227 | 2.37.3 |
284 | |||
1 | From: John Snow <jsnow@redhat.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Keeping track of which commands ("verbs") are appropriate for a job in | 3 | Both blockdev.c and job-qmp.c have TOC/TOU conditions, because |
4 | a given state is also somewhat burdensome. | 4 | they first search for the job and then perform an action on it. |
5 | Therefore, we need to do the search + action under the same | ||
6 | job mutex critical section. | ||
5 | 7 | ||
6 | As of this commit, it looks rather useless, but begins to look more | 8 | Note: at this stage, job_{lock/unlock} and job lock guard macros |
7 | interesting the more states we add to the STM table. | 9 | are *nop*. |
8 | 10 | ||
9 | A recurring theme is that no verb will apply to an 'undefined' job. | 11 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
10 | 12 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | |
11 | Further, it's not presently possible to restrict the "pause" or "resume" | 13 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> |
12 | verbs any more than they are in this commit because of the asynchronous | ||
13 | nature of how jobs enter the PAUSED state; justifications for some | ||
14 | seemingly erroneous applications are given below. | ||
15 | |||
16 | ===== | ||
17 | Verbs | ||
18 | ===== | ||
19 | |||
20 | Cancel: Any state except undefined. | ||
21 | Pause: Any state except undefined; | ||
22 | 'created': Requests that the job pauses as it starts. | ||
23 | 'running': Normal usage. (PAUSED) | ||
24 | 'paused': The job may be paused for internal reasons, | ||
25 | but the user may wish to force an indefinite | ||
26 | user-pause, so this is allowed. | ||
27 | 'ready': Normal usage. (STANDBY) | ||
28 | 'standby': Same logic as above. | ||
29 | Resume: Any state except undefined; | ||
30 | 'created': Will lift a user's pause-on-start request. | ||
31 | 'running': Will lift a pause request before it takes effect. | ||
32 | 'paused': Normal usage. | ||
33 | 'ready': Will lift a pause request before it takes effect. | ||
34 | 'standby': Normal usage. | ||
35 | Set-speed: Any state except undefined, though ready may not be meaningful. | ||
36 | Complete: Only a 'ready' job may accept a complete request. | ||
37 | |||
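As a rough illustration of the table-driven check sketched above, here is a minimal,
self-contained example with made-up verb and state names (not QEMU's actual enums or
API; the real table, BlockJobVerbTable, appears in the diff below):

    /* verb_table.c: standalone sketch of a verb/state permission table */
    #include <stdio.h>
    #include <stdbool.h>

    enum state { ST_UNDEFINED, ST_CREATED, ST_RUNNING, ST_READY, ST__MAX };
    enum verb  { VB_CANCEL, VB_PAUSE, VB_COMPLETE, VB__MAX };

    /* true means the verb may be applied while the job is in that state */
    static const bool allowed[VB__MAX][ST__MAX] = {
        /*               UND CRE RUN RDY */
        [VB_CANCEL]   = { 0,  1,  1,  1 },
        [VB_PAUSE]    = { 0,  1,  1,  1 },
        [VB_COMPLETE] = { 0,  0,  0,  1 },
    };

    /* Return 0 if the verb is allowed, -1 (standing in for -EPERM) if not */
    static int apply_verb(enum verb v, enum state s)
    {
        return allowed[v][s] ? 0 : -1;
    }

    int main(void)
    {
        printf("complete while running: %d\n", apply_verb(VB_COMPLETE, ST_RUNNING));
        printf("complete while ready:   %d\n", apply_verb(VB_COMPLETE, ST_READY));
        return 0;
    }

The real table in the patch is indexed the same way: one row per verb, one column per
job state, consulted before the verb is carried out.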
38 | ======= | ||
39 | Changes | ||
40 | ======= | ||
41 | |||
42 | (1) | ||
43 | |||
44 | To facilitate "nice" error checking, all five major block-job verb | ||
45 | interfaces in blockjob.c now support an errp parameter: | ||
46 | |||
47 | - block_job_user_cancel is added as a new interface. | ||
48 | - block_job_user_pause gains an errp parameter | ||
49 | - block_job_user_resume gains an errp parameter | ||
50 | - block_job_set_speed already had an errp parameter. | ||
51 | - block_job_complete already had an errp parameter. | ||
52 | |||
53 | (2) | ||
54 | |||
55 | block-job-pause and block-job-resume will no longer no-op when trying | ||
56 | to pause an already paused job, or trying to resume a job that isn't | ||
57 | paused. These functions will now report that they did not perform the | ||
58 | action requested because it was not possible. | ||
59 | |||
60 | iotests have been adjusted to address this new behavior. | ||
61 | |||
62 | (3) | ||
63 | |||
64 | block-job-complete doesn't worry about checking !block_job_started, | ||
65 | because the permission table guards against this. | ||
66 | |||
67 | (4) | ||
68 | |||
69 | test-bdrv-drain's job implementation needs to announce that it is | ||
70 | 'ready' now, in order to be completed. | ||
71 | |||
72 | Signed-off-by: John Snow <jsnow@redhat.com> | ||
73 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 14 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
74 | Reviewed-by: Eric Blake <eblake@redhat.com> | 15 | Message-Id: <20220926093214.506243-9-eesposit@redhat.com> |
75 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 16 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
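To make the rule above concrete (look the object up and act on it without dropping the
lock in between), here is a minimal, generic pthread sketch. The shape mirrors the
find_*_locked() pattern added in the diff below, but the types and names are
illustrative only, not QEMU's job API:

    /* find_and_act.c: search + action inside one critical section.
     * Build with: cc -pthread find_and_act.c */
    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    struct job { const char *id; int paused; };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct job jobs[] = { { "job0", 0 }, { "job1", 0 } };

    /* Caller must hold table_lock; the result is only valid while it is held. */
    static struct job *find_job_locked(const char *id)
    {
        for (size_t i = 0; i < sizeof(jobs) / sizeof(jobs[0]); i++) {
            if (strcmp(jobs[i].id, id) == 0) {
                return &jobs[i];
            }
        }
        return NULL;
    }

    /* Look the job up and modify it under the same lock, so it cannot change
     * (or disappear) between the check and the use. */
    static int pause_job(const char *id)
    {
        int ret = -1;

        pthread_mutex_lock(&table_lock);
        struct job *j = find_job_locked(id);
        if (j) {
            j->paused = 1;
            ret = 0;
        }
        pthread_mutex_unlock(&table_lock);
        return ret;
    }

    int main(void)
    {
        printf("pause job0:    %d\n", pause_job("job0"));
        printf("pause missing: %d\n", pause_job("missing"));
        return 0;
    }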
76 | --- | 17 | --- |
77 | qapi/block-core.json | 20 ++++++++++++++ | 18 | blockdev.c | 67 +++++++++++++++++++++++++++++++++++++----------------- |
78 | include/block/blockjob.h | 13 +++++++-- | 19 | job-qmp.c | 57 ++++++++++++++++++++++++++++++++-------------- |
79 | blockdev.c | 10 +++---- | 20 | 2 files changed, 86 insertions(+), 38 deletions(-) |
80 | blockjob.c | 71 ++++++++++++++++++++++++++++++++++++++++++------ | ||
81 | tests/test-bdrv-drain.c | 1 + | ||
82 | block/trace-events | 1 + | ||
83 | 6 files changed, 100 insertions(+), 16 deletions(-) | ||
84 | 21 | ||
85 | diff --git a/qapi/block-core.json b/qapi/block-core.json | ||
86 | index XXXXXXX..XXXXXXX 100644 | ||
87 | --- a/qapi/block-core.json | ||
88 | +++ b/qapi/block-core.json | ||
89 | @@ -XXX,XX +XXX,XX @@ | ||
90 | 'data': ['commit', 'stream', 'mirror', 'backup'] } | ||
91 | |||
92 | ## | ||
93 | +# @BlockJobVerb: | ||
94 | +# | ||
95 | +# Represents command verbs that can be applied to a blockjob. | ||
96 | +# | ||
97 | +# @cancel: see @block-job-cancel | ||
98 | +# | ||
99 | +# @pause: see @block-job-pause | ||
100 | +# | ||
101 | +# @resume: see @block-job-resume | ||
102 | +# | ||
103 | +# @set-speed: see @block-job-set-speed | ||
104 | +# | ||
105 | +# @complete: see @block-job-complete | ||
106 | +# | ||
107 | +# Since: 2.12 | ||
108 | +## | ||
109 | +{ 'enum': 'BlockJobVerb', | ||
110 | + 'data': ['cancel', 'pause', 'resume', 'set-speed', 'complete' ] } | ||
111 | + | ||
112 | +## | ||
113 | # @BlockJobStatus: | ||
114 | # | ||
115 | # Indicates the present state of a given blockjob in its lifetime. | ||
116 | diff --git a/include/block/blockjob.h b/include/block/blockjob.h | ||
117 | index XXXXXXX..XXXXXXX 100644 | ||
118 | --- a/include/block/blockjob.h | ||
119 | +++ b/include/block/blockjob.h | ||
120 | @@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp); | ||
121 | * Asynchronously pause the specified job. | ||
122 | * Do not allow a resume until a matching call to block_job_user_resume. | ||
123 | */ | ||
124 | -void block_job_user_pause(BlockJob *job); | ||
125 | +void block_job_user_pause(BlockJob *job, Error **errp); | ||
126 | |||
127 | /** | ||
128 | * block_job_paused: | ||
129 | @@ -XXX,XX +XXX,XX @@ bool block_job_user_paused(BlockJob *job); | ||
130 | * Resume the specified job. | ||
131 | * Must be paired with a preceding block_job_user_pause. | ||
132 | */ | ||
133 | -void block_job_user_resume(BlockJob *job); | ||
134 | +void block_job_user_resume(BlockJob *job, Error **errp); | ||
135 | + | ||
136 | +/** | ||
137 | + * block_job_user_cancel: | ||
138 | + * @job: The job to be cancelled. | ||
139 | + * | ||
140 | + * Cancels the specified job, but may refuse to do so if the | ||
141 | + * operation isn't currently meaningful. | ||
142 | + */ | ||
143 | +void block_job_user_cancel(BlockJob *job, Error **errp); | ||
144 | |||
145 | /** | ||
146 | * block_job_cancel_sync: | ||
147 | diff --git a/blockdev.c b/blockdev.c | 22 | diff --git a/blockdev.c b/blockdev.c |
148 | index XXXXXXX..XXXXXXX 100644 | 23 | index XXXXXXX..XXXXXXX 100644 |
149 | --- a/blockdev.c | 24 | --- a/blockdev.c |
150 | +++ b/blockdev.c | 25 | +++ b/blockdev.c |
26 | @@ -XXX,XX +XXX,XX @@ out: | ||
27 | aio_context_release(aio_context); | ||
28 | } | ||
29 | |||
30 | -/* Get a block job using its ID and acquire its AioContext */ | ||
31 | -static BlockJob *find_block_job(const char *id, AioContext **aio_context, | ||
32 | - Error **errp) | ||
33 | +/* | ||
34 | + * Get a block job using its ID and acquire its AioContext. | ||
35 | + * Called with job_mutex held. | ||
36 | + */ | ||
37 | +static BlockJob *find_block_job_locked(const char *id, | ||
38 | + AioContext **aio_context, | ||
39 | + Error **errp) | ||
40 | { | ||
41 | BlockJob *job; | ||
42 | |||
43 | @@ -XXX,XX +XXX,XX @@ static BlockJob *find_block_job(const char *id, AioContext **aio_context, | ||
44 | |||
45 | *aio_context = NULL; | ||
46 | |||
47 | - job = block_job_get(id); | ||
48 | + job = block_job_get_locked(id); | ||
49 | |||
50 | if (!job) { | ||
51 | error_set(errp, ERROR_CLASS_DEVICE_NOT_ACTIVE, | ||
52 | @@ -XXX,XX +XXX,XX @@ static BlockJob *find_block_job(const char *id, AioContext **aio_context, | ||
53 | void qmp_block_job_set_speed(const char *device, int64_t speed, Error **errp) | ||
54 | { | ||
55 | AioContext *aio_context; | ||
56 | - BlockJob *job = find_block_job(device, &aio_context, errp); | ||
57 | + BlockJob *job; | ||
58 | + | ||
59 | + JOB_LOCK_GUARD(); | ||
60 | + job = find_block_job_locked(device, &aio_context, errp); | ||
61 | |||
62 | if (!job) { | ||
63 | return; | ||
64 | } | ||
65 | |||
66 | - block_job_set_speed(job, speed, errp); | ||
67 | + block_job_set_speed_locked(job, speed, errp); | ||
68 | aio_context_release(aio_context); | ||
69 | } | ||
70 | |||
151 | @@ -XXX,XX +XXX,XX @@ void qmp_block_job_cancel(const char *device, | 71 | @@ -XXX,XX +XXX,XX @@ void qmp_block_job_cancel(const char *device, |
152 | } | 72 | bool has_force, bool force, Error **errp) |
73 | { | ||
74 | AioContext *aio_context; | ||
75 | - BlockJob *job = find_block_job(device, &aio_context, errp); | ||
76 | + BlockJob *job; | ||
77 | + | ||
78 | + JOB_LOCK_GUARD(); | ||
79 | + job = find_block_job_locked(device, &aio_context, errp); | ||
80 | |||
81 | if (!job) { | ||
82 | return; | ||
83 | @@ -XXX,XX +XXX,XX @@ void qmp_block_job_cancel(const char *device, | ||
84 | force = false; | ||
85 | } | ||
86 | |||
87 | - if (job_user_paused(&job->job) && !force) { | ||
88 | + if (job_user_paused_locked(&job->job) && !force) { | ||
89 | error_setg(errp, "The block job for device '%s' is currently paused", | ||
90 | device); | ||
91 | goto out; | ||
92 | } | ||
153 | 93 | ||
154 | trace_qmp_block_job_cancel(job); | 94 | trace_qmp_block_job_cancel(job); |
155 | - block_job_cancel(job); | 95 | - job_user_cancel(&job->job, force, errp); |
156 | + block_job_user_cancel(job, errp); | 96 | + job_user_cancel_locked(&job->job, force, errp); |
157 | out: | 97 | out: |
158 | aio_context_release(aio_context); | 98 | aio_context_release(aio_context); |
159 | } | 99 | } |
160 | @@ -XXX,XX +XXX,XX @@ void qmp_block_job_pause(const char *device, Error **errp) | 100 | @@ -XXX,XX +XXX,XX @@ out: |
161 | AioContext *aio_context; | 101 | void qmp_block_job_pause(const char *device, Error **errp) |
162 | BlockJob *job = find_block_job(device, &aio_context, errp); | 102 | { |
163 | 103 | AioContext *aio_context; | |
164 | - if (!job || block_job_user_paused(job)) { | 104 | - BlockJob *job = find_block_job(device, &aio_context, errp); |
165 | + if (!job) { | 105 | + BlockJob *job; |
166 | return; | 106 | + |
167 | } | 107 | + JOB_LOCK_GUARD(); |
108 | + job = find_block_job_locked(device, &aio_context, errp); | ||
109 | |||
110 | if (!job) { | ||
111 | return; | ||
112 | } | ||
168 | 113 | ||
169 | trace_qmp_block_job_pause(job); | 114 | trace_qmp_block_job_pause(job); |
170 | - block_job_user_pause(job); | 115 | - job_user_pause(&job->job, errp); |
171 | + block_job_user_pause(job, errp); | 116 | + job_user_pause_locked(&job->job, errp); |
172 | aio_context_release(aio_context); | 117 | aio_context_release(aio_context); |
173 | } | 118 | } |
174 | 119 | ||
175 | @@ -XXX,XX +XXX,XX @@ void qmp_block_job_resume(const char *device, Error **errp) | 120 | void qmp_block_job_resume(const char *device, Error **errp) |
176 | AioContext *aio_context; | 121 | { |
177 | BlockJob *job = find_block_job(device, &aio_context, errp); | 122 | AioContext *aio_context; |
178 | 123 | - BlockJob *job = find_block_job(device, &aio_context, errp); | |
179 | - if (!job || !block_job_user_paused(job)) { | 124 | + BlockJob *job; |
180 | + if (!job) { | 125 | + |
181 | return; | 126 | + JOB_LOCK_GUARD(); |
182 | } | 127 | + job = find_block_job_locked(device, &aio_context, errp); |
128 | |||
129 | if (!job) { | ||
130 | return; | ||
131 | } | ||
183 | 132 | ||
184 | trace_qmp_block_job_resume(job); | 133 | trace_qmp_block_job_resume(job); |
185 | - block_job_user_resume(job); | 134 | - job_user_resume(&job->job, errp); |
186 | + block_job_user_resume(job, errp); | 135 | + job_user_resume_locked(&job->job, errp); |
187 | aio_context_release(aio_context); | 136 | aio_context_release(aio_context); |
188 | } | 137 | } |
189 | 138 | ||
190 | diff --git a/blockjob.c b/blockjob.c | 139 | void qmp_block_job_complete(const char *device, Error **errp) |
140 | { | ||
141 | AioContext *aio_context; | ||
142 | - BlockJob *job = find_block_job(device, &aio_context, errp); | ||
143 | + BlockJob *job; | ||
144 | + | ||
145 | + JOB_LOCK_GUARD(); | ||
146 | + job = find_block_job_locked(device, &aio_context, errp); | ||
147 | |||
148 | if (!job) { | ||
149 | return; | ||
150 | } | ||
151 | |||
152 | trace_qmp_block_job_complete(job); | ||
153 | - job_complete(&job->job, errp); | ||
154 | + job_complete_locked(&job->job, errp); | ||
155 | aio_context_release(aio_context); | ||
156 | } | ||
157 | |||
158 | void qmp_block_job_finalize(const char *id, Error **errp) | ||
159 | { | ||
160 | AioContext *aio_context; | ||
161 | - BlockJob *job = find_block_job(id, &aio_context, errp); | ||
162 | + BlockJob *job; | ||
163 | + | ||
164 | + JOB_LOCK_GUARD(); | ||
165 | + job = find_block_job_locked(id, &aio_context, errp); | ||
166 | |||
167 | if (!job) { | ||
168 | return; | ||
169 | } | ||
170 | |||
171 | trace_qmp_block_job_finalize(job); | ||
172 | - job_ref(&job->job); | ||
173 | - job_finalize(&job->job, errp); | ||
174 | + job_ref_locked(&job->job); | ||
175 | + job_finalize_locked(&job->job, errp); | ||
176 | |||
177 | /* | ||
178 | * Job's context might have changed via job_finalize (and job_txn_apply | ||
179 | @@ -XXX,XX +XXX,XX @@ void qmp_block_job_finalize(const char *id, Error **errp) | ||
180 | * one. | ||
181 | */ | ||
182 | aio_context = block_job_get_aio_context(job); | ||
183 | - job_unref(&job->job); | ||
184 | + job_unref_locked(&job->job); | ||
185 | aio_context_release(aio_context); | ||
186 | } | ||
187 | |||
188 | void qmp_block_job_dismiss(const char *id, Error **errp) | ||
189 | { | ||
190 | AioContext *aio_context; | ||
191 | - BlockJob *bjob = find_block_job(id, &aio_context, errp); | ||
192 | + BlockJob *bjob; | ||
193 | Job *job; | ||
194 | |||
195 | + JOB_LOCK_GUARD(); | ||
196 | + bjob = find_block_job_locked(id, &aio_context, errp); | ||
197 | + | ||
198 | if (!bjob) { | ||
199 | return; | ||
200 | } | ||
201 | |||
202 | trace_qmp_block_job_dismiss(bjob); | ||
203 | job = &bjob->job; | ||
204 | - job_dismiss(&job, errp); | ||
205 | + job_dismiss_locked(&job, errp); | ||
206 | aio_context_release(aio_context); | ||
207 | } | ||
208 | |||
209 | diff --git a/job-qmp.c b/job-qmp.c | ||
191 | index XXXXXXX..XXXXXXX 100644 | 210 | index XXXXXXX..XXXXXXX 100644 |
192 | --- a/blockjob.c | 211 | --- a/job-qmp.c |
193 | +++ b/blockjob.c | 212 | +++ b/job-qmp.c |
194 | @@ -XXX,XX +XXX,XX @@ bool BlockJobSTT[BLOCK_JOB_STATUS__MAX][BLOCK_JOB_STATUS__MAX] = { | 213 | @@ -XXX,XX +XXX,XX @@ |
195 | /* S: */ [BLOCK_JOB_STATUS_STANDBY] = {0, 0, 0, 0, 1, 0}, | 214 | #include "qapi/error.h" |
196 | }; | 215 | #include "trace/trace-root.h" |
197 | 216 | ||
198 | +bool BlockJobVerbTable[BLOCK_JOB_VERB__MAX][BLOCK_JOB_STATUS__MAX] = { | 217 | -/* Get a job using its ID and acquire its AioContext */ |
199 | + /* U, C, R, P, Y, S */ | 218 | -static Job *find_job(const char *id, AioContext **aio_context, Error **errp) |
200 | + [BLOCK_JOB_VERB_CANCEL] = {0, 1, 1, 1, 1, 1}, | 219 | +/* |
201 | + [BLOCK_JOB_VERB_PAUSE] = {0, 1, 1, 1, 1, 1}, | 220 | + * Get a job using its ID and acquire its AioContext. |
202 | + [BLOCK_JOB_VERB_RESUME] = {0, 1, 1, 1, 1, 1}, | 221 | + * Called with job_mutex held. |
203 | + [BLOCK_JOB_VERB_SET_SPEED] = {0, 1, 1, 1, 1, 1}, | 222 | + */ |
204 | + [BLOCK_JOB_VERB_COMPLETE] = {0, 0, 0, 0, 1, 0}, | 223 | +static Job *find_job_locked(const char *id, |
205 | +}; | 224 | + AioContext **aio_context, |
206 | + | 225 | + Error **errp) |
207 | static void block_job_state_transition(BlockJob *job, BlockJobStatus s1) | 226 | { |
208 | { | 227 | Job *job; |
209 | BlockJobStatus s0 = job->status; | 228 | |
210 | @@ -XXX,XX +XXX,XX @@ static void block_job_state_transition(BlockJob *job, BlockJobStatus s1) | 229 | *aio_context = NULL; |
211 | job->status = s1; | 230 | |
212 | } | 231 | - job = job_get(id); |
213 | 232 | + job = job_get_locked(id); | |
214 | +static int block_job_apply_verb(BlockJob *job, BlockJobVerb bv, Error **errp) | 233 | if (!job) { |
215 | +{ | 234 | error_setg(errp, "Job not found"); |
216 | + assert(bv >= 0 && bv <= BLOCK_JOB_VERB__MAX); | 235 | return NULL; |
217 | + trace_block_job_apply_verb(job, qapi_enum_lookup(&BlockJobStatus_lookup, | 236 | @@ -XXX,XX +XXX,XX @@ static Job *find_job(const char *id, AioContext **aio_context, Error **errp) |
218 | + job->status), | 237 | void qmp_job_cancel(const char *id, Error **errp) |
219 | + qapi_enum_lookup(&BlockJobVerb_lookup, bv), | 238 | { |
220 | + BlockJobVerbTable[bv][job->status] ? | 239 | AioContext *aio_context; |
221 | + "allowed" : "prohibited"); | 240 | - Job *job = find_job(id, &aio_context, errp); |
222 | + if (BlockJobVerbTable[bv][job->status]) { | 241 | + Job *job; |
223 | + return 0; | 242 | + |
224 | + } | 243 | + JOB_LOCK_GUARD(); |
225 | + error_setg(errp, "Job '%s' in state '%s' cannot accept command verb '%s'", | 244 | + job = find_job_locked(id, &aio_context, errp); |
226 | + job->id, qapi_enum_lookup(&BlockJobStatus_lookup, job->status), | 245 | |
227 | + qapi_enum_lookup(&BlockJobVerb_lookup, bv)); | 246 | if (!job) { |
228 | + return -EPERM; | 247 | return; |
229 | +} | 248 | } |
230 | + | 249 | |
231 | static void block_job_lock(void) | 250 | trace_qmp_job_cancel(job); |
232 | { | 251 | - job_user_cancel(job, true, errp); |
233 | qemu_mutex_lock(&block_job_mutex); | 252 | + job_user_cancel_locked(job, true, errp); |
234 | @@ -XXX,XX +XXX,XX @@ void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) | 253 | aio_context_release(aio_context); |
235 | error_setg(errp, QERR_UNSUPPORTED); | 254 | } |
236 | return; | 255 | |
237 | } | 256 | void qmp_job_pause(const char *id, Error **errp) |
238 | + if (block_job_apply_verb(job, BLOCK_JOB_VERB_SET_SPEED, errp)) { | 257 | { |
239 | + return; | 258 | AioContext *aio_context; |
240 | + } | 259 | - Job *job = find_job(id, &aio_context, errp); |
241 | job->driver->set_speed(job, speed, &local_err); | 260 | + Job *job; |
242 | if (local_err) { | 261 | + |
243 | error_propagate(errp, local_err); | 262 | + JOB_LOCK_GUARD(); |
244 | @@ -XXX,XX +XXX,XX @@ void block_job_complete(BlockJob *job, Error **errp) | 263 | + job = find_job_locked(id, &aio_context, errp); |
245 | { | 264 | |
246 | /* Should not be reachable via external interface for internal jobs */ | 265 | if (!job) { |
247 | assert(job->id); | 266 | return; |
248 | - if (job->pause_count || job->cancelled || | 267 | } |
249 | - !block_job_started(job) || !job->driver->complete) { | 268 | |
250 | + if (block_job_apply_verb(job, BLOCK_JOB_VERB_COMPLETE, errp)) { | 269 | trace_qmp_job_pause(job); |
251 | + return; | 270 | - job_user_pause(job, errp); |
252 | + } | 271 | + job_user_pause_locked(job, errp); |
253 | + if (job->pause_count || job->cancelled || !job->driver->complete) { | 272 | aio_context_release(aio_context); |
254 | error_setg(errp, "The active block job '%s' cannot be completed", | 273 | } |
255 | job->id); | 274 | |
256 | return; | 275 | void qmp_job_resume(const char *id, Error **errp) |
257 | @@ -XXX,XX +XXX,XX @@ void block_job_complete(BlockJob *job, Error **errp) | 276 | { |
258 | job->driver->complete(job, errp); | 277 | AioContext *aio_context; |
259 | } | 278 | - Job *job = find_job(id, &aio_context, errp); |
260 | 279 | + Job *job; | |
261 | -void block_job_user_pause(BlockJob *job) | 280 | + |
262 | +void block_job_user_pause(BlockJob *job, Error **errp) | 281 | + JOB_LOCK_GUARD(); |
263 | { | 282 | + job = find_job_locked(id, &aio_context, errp); |
264 | + if (block_job_apply_verb(job, BLOCK_JOB_VERB_PAUSE, errp)) { | 283 | |
265 | + return; | 284 | if (!job) { |
266 | + } | 285 | return; |
267 | + if (job->user_paused) { | 286 | } |
268 | + error_setg(errp, "Job is already paused"); | 287 | |
269 | + return; | 288 | trace_qmp_job_resume(job); |
270 | + } | 289 | - job_user_resume(job, errp); |
271 | job->user_paused = true; | 290 | + job_user_resume_locked(job, errp); |
272 | block_job_pause(job); | 291 | aio_context_release(aio_context); |
273 | } | 292 | } |
274 | @@ -XXX,XX +XXX,XX @@ bool block_job_user_paused(BlockJob *job) | 293 | |
275 | return job->user_paused; | 294 | void qmp_job_complete(const char *id, Error **errp) |
276 | } | 295 | { |
277 | 296 | AioContext *aio_context; | |
278 | -void block_job_user_resume(BlockJob *job) | 297 | - Job *job = find_job(id, &aio_context, errp); |
279 | +void block_job_user_resume(BlockJob *job, Error **errp) | 298 | + Job *job; |
280 | { | 299 | + |
281 | - if (job && job->user_paused && job->pause_count > 0) { | 300 | + JOB_LOCK_GUARD(); |
282 | - block_job_iostatus_reset(job); | 301 | + job = find_job_locked(id, &aio_context, errp); |
283 | - job->user_paused = false; | 302 | |
284 | - block_job_resume(job); | 303 | if (!job) { |
285 | + assert(job); | 304 | return; |
286 | + if (!job->user_paused || job->pause_count <= 0) { | 305 | } |
287 | + error_setg(errp, "Can't resume a job that was not paused"); | 306 | |
288 | + return; | 307 | trace_qmp_job_complete(job); |
289 | + } | 308 | - job_complete(job, errp); |
290 | + if (block_job_apply_verb(job, BLOCK_JOB_VERB_RESUME, errp)) { | 309 | + job_complete_locked(job, errp); |
291 | + return; | 310 | aio_context_release(aio_context); |
292 | } | 311 | } |
293 | + block_job_iostatus_reset(job); | 312 | |
294 | + job->user_paused = false; | 313 | void qmp_job_finalize(const char *id, Error **errp) |
295 | + block_job_resume(job); | 314 | { |
296 | } | 315 | AioContext *aio_context; |
297 | 316 | - Job *job = find_job(id, &aio_context, errp); | |
298 | void block_job_cancel(BlockJob *job) | 317 | + Job *job; |
299 | @@ -XXX,XX +XXX,XX @@ void block_job_cancel(BlockJob *job) | 318 | + |
300 | } | 319 | + JOB_LOCK_GUARD(); |
301 | } | 320 | + job = find_job_locked(id, &aio_context, errp); |
302 | 321 | ||
303 | +void block_job_user_cancel(BlockJob *job, Error **errp) | 322 | if (!job) { |
304 | +{ | 323 | return; |
305 | + if (block_job_apply_verb(job, BLOCK_JOB_VERB_CANCEL, errp)) { | 324 | } |
306 | + return; | 325 | |
307 | + } | 326 | trace_qmp_job_finalize(job); |
308 | + block_job_cancel(job); | 327 | - job_ref(job); |
309 | +} | 328 | - job_finalize(job, errp); |
310 | + | 329 | + job_ref_locked(job); |
311 | /* A wrapper around block_job_cancel() taking an Error ** parameter so it may be | 330 | + job_finalize_locked(job, errp); |
312 | * used with block_job_finish_sync() without the need for (rather nasty) | 331 | |
313 | * function pointer casts there. */ | 332 | /* |
314 | @@ -XXX,XX +XXX,XX @@ BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err, | 333 | * Job's context might have changed via job_finalize (and job_txn_apply |
315 | action, &error_abort); | 334 | @@ -XXX,XX +XXX,XX @@ void qmp_job_finalize(const char *id, Error **errp) |
316 | } | 335 | * one. |
317 | if (action == BLOCK_ERROR_ACTION_STOP) { | 336 | */ |
318 | + block_job_pause(job); | 337 | aio_context = job->aio_context; |
319 | /* make the pause user visible, which will be resumed from QMP. */ | 338 | - job_unref(job); |
320 | - block_job_user_pause(job); | 339 | + job_unref_locked(job); |
321 | + job->user_paused = true; | 340 | aio_context_release(aio_context); |
322 | block_job_iostatus_set_err(job, error); | 341 | } |
323 | } | 342 | |
324 | return action; | 343 | void qmp_job_dismiss(const char *id, Error **errp) |
325 | diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c | 344 | { |
326 | index XXXXXXX..XXXXXXX 100644 | 345 | AioContext *aio_context; |
327 | --- a/tests/test-bdrv-drain.c | 346 | - Job *job = find_job(id, &aio_context, errp); |
328 | +++ b/tests/test-bdrv-drain.c | 347 | + Job *job; |
329 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn test_job_start(void *opaque) | 348 | + |
330 | { | 349 | + JOB_LOCK_GUARD(); |
331 | TestBlockJob *s = opaque; | 350 | + job = find_job_locked(id, &aio_context, errp); |
332 | 351 | ||
333 | + block_job_event_ready(&s->common); | 352 | if (!job) { |
334 | while (!s->should_complete) { | 353 | return; |
335 | block_job_sleep_ns(&s->common, 100000); | 354 | } |
336 | } | 355 | |
337 | diff --git a/block/trace-events b/block/trace-events | 356 | trace_qmp_job_dismiss(job); |
338 | index XXXXXXX..XXXXXXX 100644 | 357 | - job_dismiss(&job, errp); |
339 | --- a/block/trace-events | 358 | + job_dismiss_locked(&job, errp); |
340 | +++ b/block/trace-events | 359 | aio_context_release(aio_context); |
341 | @@ -XXX,XX +XXX,XX @@ bdrv_lock_medium(void *bs, bool locked) "bs %p locked %d" | 360 | } |
342 | 361 | ||
343 | # blockjob.c | ||
344 | block_job_state_transition(void *job, int ret, const char *legal, const char *s0, const char *s1) "job %p (ret: %d) attempting %s transition (%s-->%s)" | ||
345 | +block_job_apply_verb(void *job, const char *state, const char *verb, const char *legal) "job %p in state %s; applying verb %s (%s)" | ||
346 | |||
347 | # block/block-backend.c | ||
348 | blk_co_preadv(void *blk, void *bs, int64_t offset, unsigned int bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %u flags 0x%x" | ||
349 | -- | 362 | -- |
350 | 2.13.6 | 363 | 2.37.3 |
351 | |||
1 | From: John Snow <jsnow@redhat.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Simply apply a function transaction-wide. | 3 | Add missing job synchronization in the unit tests, with |
4 | A few more uses of this in forthcoming patches. | 4 | explicit locks. |
5 | 5 | ||
6 | Signed-off-by: John Snow <jsnow@redhat.com> | 6 | We are deliberately using _locked functions wrapped by a guard |
7 | instead of a normal call because the normal call will be removed | ||
8 | in future, as the only usage is limited to the tests. | ||
9 | |||
10 | In other words, if a function like job_pause() is/will be only used | ||
11 | in tests to avoid: | ||
12 | |||
13 | WITH_JOB_LOCK_GUARD(){ | ||
14 | job_pause_locked(); | ||
15 | } | ||
16 | |||
17 | then it is not worth keeping job_pause(), and just use the guard. | ||
18 | |||
19 | Note: at this stage, job_{lock/unlock} and job lock guard macros | ||
20 | are *nop*. | ||
21 | |||
22 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> | ||
23 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
24 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
25 | Message-Id: <20220926093214.506243-10-eesposit@redhat.com> | ||
7 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 26 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
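For readers unfamiliar with the guard idiom used in the hunks below, here is a rough
standalone sketch of a scope-based lock guard built on the GCC/Clang cleanup attribute.
QEMU's WITH_JOB_LOCK_GUARD() is implemented differently and, as noted above, is still a
no-op at this point in the series, so treat this only as an illustration of the
"guard plus _locked() helper" shape the tests now follow:

    /* lock_guard.c: scope-based unlock, in the spirit of a lock guard.
     * Requires GCC or Clang for __attribute__((cleanup)).
     * Build with: cc -pthread lock_guard.c */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t job_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int job_paused;

    static void unlock_cleanup(pthread_mutex_t **m)
    {
        pthread_mutex_unlock(*m);
    }

    /* Helper that expects the caller to already hold job_mutex. */
    static void job_pause_locked(void)
    {
        job_paused = 1;
    }

    static void test_pause(void)
    {
        {   /* guarded scope: the mutex is released when the block exits */
            __attribute__((cleanup(unlock_cleanup))) pthread_mutex_t *guard = &job_mutex;
            pthread_mutex_lock(guard);
            job_pause_locked();
        }
        printf("paused: %d\n", job_paused);
    }

    int main(void)
    {
        test_pause();
        return 0;
    }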
8 | --- | 27 | --- |
9 | blockjob.c | 25 ++++++++++++++++--------- | 28 | tests/unit/test-bdrv-drain.c | 76 ++++++++++++-------- |
10 | 1 file changed, 16 insertions(+), 9 deletions(-) | 29 | tests/unit/test-block-iothread.c | 8 ++- |
11 | 30 | tests/unit/test-blockjob-txn.c | 24 ++++--- | |
12 | diff --git a/blockjob.c b/blockjob.c | 31 | tests/unit/test-blockjob.c | 115 +++++++++++++++++++------------ |
32 | 4 files changed, 140 insertions(+), 83 deletions(-) | ||
33 | |||
34 | diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c | ||
13 | index XXXXXXX..XXXXXXX 100644 | 35 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/blockjob.c | 36 | --- a/tests/unit/test-bdrv-drain.c |
15 | +++ b/blockjob.c | 37 | +++ b/tests/unit/test-bdrv-drain.c |
16 | @@ -XXX,XX +XXX,XX @@ static void block_job_cancel_async(BlockJob *job) | 38 | @@ -XXX,XX +XXX,XX @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, |
17 | job->cancelled = true; | 39 | } |
18 | } | 40 | } |
19 | 41 | ||
20 | +static void block_job_txn_apply(BlockJobTxn *txn, void fn(BlockJob *)) | 42 | - g_assert_cmpint(job->job.pause_count, ==, 0); |
43 | - g_assert_false(job->job.paused); | ||
44 | - g_assert_true(tjob->running); | ||
45 | - g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */ | ||
46 | + WITH_JOB_LOCK_GUARD() { | ||
47 | + g_assert_cmpint(job->job.pause_count, ==, 0); | ||
48 | + g_assert_false(job->job.paused); | ||
49 | + g_assert_true(tjob->running); | ||
50 | + g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */ | ||
51 | + } | ||
52 | |||
53 | do_drain_begin_unlocked(drain_type, drain_bs); | ||
54 | |||
55 | - if (drain_type == BDRV_DRAIN_ALL) { | ||
56 | - /* bdrv_drain_all() drains both src and target */ | ||
57 | - g_assert_cmpint(job->job.pause_count, ==, 2); | ||
58 | - } else { | ||
59 | - g_assert_cmpint(job->job.pause_count, ==, 1); | ||
60 | + WITH_JOB_LOCK_GUARD() { | ||
61 | + if (drain_type == BDRV_DRAIN_ALL) { | ||
62 | + /* bdrv_drain_all() drains both src and target */ | ||
63 | + g_assert_cmpint(job->job.pause_count, ==, 2); | ||
64 | + } else { | ||
65 | + g_assert_cmpint(job->job.pause_count, ==, 1); | ||
66 | + } | ||
67 | + g_assert_true(job->job.paused); | ||
68 | + g_assert_false(job->job.busy); /* The job is paused */ | ||
69 | } | ||
70 | - g_assert_true(job->job.paused); | ||
71 | - g_assert_false(job->job.busy); /* The job is paused */ | ||
72 | |||
73 | do_drain_end_unlocked(drain_type, drain_bs); | ||
74 | |||
75 | if (use_iothread) { | ||
76 | - /* paused is reset in the I/O thread, wait for it */ | ||
77 | + /* | ||
78 | + * Here we are waiting for the paused status to change, | ||
79 | + * so don't bother protecting the read every time. | ||
80 | + * | ||
81 | + * paused is reset in the I/O thread, wait for it | ||
82 | + */ | ||
83 | while (job->job.paused) { | ||
84 | aio_poll(qemu_get_aio_context(), false); | ||
85 | } | ||
86 | } | ||
87 | |||
88 | - g_assert_cmpint(job->job.pause_count, ==, 0); | ||
89 | - g_assert_false(job->job.paused); | ||
90 | - g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */ | ||
91 | + WITH_JOB_LOCK_GUARD() { | ||
92 | + g_assert_cmpint(job->job.pause_count, ==, 0); | ||
93 | + g_assert_false(job->job.paused); | ||
94 | + g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */ | ||
95 | + } | ||
96 | |||
97 | do_drain_begin_unlocked(drain_type, target); | ||
98 | |||
99 | - if (drain_type == BDRV_DRAIN_ALL) { | ||
100 | - /* bdrv_drain_all() drains both src and target */ | ||
101 | - g_assert_cmpint(job->job.pause_count, ==, 2); | ||
102 | - } else { | ||
103 | - g_assert_cmpint(job->job.pause_count, ==, 1); | ||
104 | + WITH_JOB_LOCK_GUARD() { | ||
105 | + if (drain_type == BDRV_DRAIN_ALL) { | ||
106 | + /* bdrv_drain_all() drains both src and target */ | ||
107 | + g_assert_cmpint(job->job.pause_count, ==, 2); | ||
108 | + } else { | ||
109 | + g_assert_cmpint(job->job.pause_count, ==, 1); | ||
110 | + } | ||
111 | + g_assert_true(job->job.paused); | ||
112 | + g_assert_false(job->job.busy); /* The job is paused */ | ||
113 | } | ||
114 | - g_assert_true(job->job.paused); | ||
115 | - g_assert_false(job->job.busy); /* The job is paused */ | ||
116 | |||
117 | do_drain_end_unlocked(drain_type, target); | ||
118 | |||
119 | if (use_iothread) { | ||
120 | - /* paused is reset in the I/O thread, wait for it */ | ||
121 | + /* | ||
122 | + * Here we are waiting for the paused status to change, | ||
123 | + * so don't bother protecting the read every time. | ||
124 | + * | ||
125 | + * paused is reset in the I/O thread, wait for it | ||
126 | + */ | ||
127 | while (job->job.paused) { | ||
128 | aio_poll(qemu_get_aio_context(), false); | ||
129 | } | ||
130 | } | ||
131 | |||
132 | - g_assert_cmpint(job->job.pause_count, ==, 0); | ||
133 | - g_assert_false(job->job.paused); | ||
134 | - g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */ | ||
135 | + WITH_JOB_LOCK_GUARD() { | ||
136 | + g_assert_cmpint(job->job.pause_count, ==, 0); | ||
137 | + g_assert_false(job->job.paused); | ||
138 | + g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */ | ||
139 | + } | ||
140 | |||
141 | aio_context_acquire(ctx); | ||
142 | - ret = job_complete_sync(&job->job, &error_abort); | ||
143 | + WITH_JOB_LOCK_GUARD() { | ||
144 | + ret = job_complete_sync_locked(&job->job, &error_abort); | ||
145 | + } | ||
146 | g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO)); | ||
147 | |||
148 | if (use_iothread) { | ||
149 | diff --git a/tests/unit/test-block-iothread.c b/tests/unit/test-block-iothread.c | ||
150 | index XXXXXXX..XXXXXXX 100644 | ||
151 | --- a/tests/unit/test-block-iothread.c | ||
152 | +++ b/tests/unit/test-block-iothread.c | ||
153 | @@ -XXX,XX +XXX,XX @@ static void test_attach_blockjob(void) | ||
154 | } | ||
155 | |||
156 | aio_context_acquire(ctx); | ||
157 | - job_complete_sync(&tjob->common.job, &error_abort); | ||
158 | + WITH_JOB_LOCK_GUARD() { | ||
159 | + job_complete_sync_locked(&tjob->common.job, &error_abort); | ||
160 | + } | ||
161 | blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort); | ||
162 | aio_context_release(ctx); | ||
163 | |||
164 | @@ -XXX,XX +XXX,XX @@ static void test_propagate_mirror(void) | ||
165 | BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT, | ||
166 | false, "filter_node", MIRROR_COPY_MODE_BACKGROUND, | ||
167 | &error_abort); | ||
168 | - job = job_get("job0"); | ||
169 | + WITH_JOB_LOCK_GUARD() { | ||
170 | + job = job_get_locked("job0"); | ||
171 | + } | ||
172 | filter = bdrv_find_node("filter_node"); | ||
173 | |||
174 | /* Change the AioContext of src */ | ||
175 | diff --git a/tests/unit/test-blockjob-txn.c b/tests/unit/test-blockjob-txn.c | ||
176 | index XXXXXXX..XXXXXXX 100644 | ||
177 | --- a/tests/unit/test-blockjob-txn.c | ||
178 | +++ b/tests/unit/test-blockjob-txn.c | ||
179 | @@ -XXX,XX +XXX,XX @@ static void test_single_job(int expected) | ||
180 | job = test_block_job_start(1, true, expected, &result, txn); | ||
181 | job_start(&job->job); | ||
182 | |||
183 | - if (expected == -ECANCELED) { | ||
184 | - job_cancel(&job->job, false); | ||
185 | + WITH_JOB_LOCK_GUARD() { | ||
186 | + if (expected == -ECANCELED) { | ||
187 | + job_cancel_locked(&job->job, false); | ||
188 | + } | ||
189 | } | ||
190 | |||
191 | while (result == -EINPROGRESS) { | ||
192 | @@ -XXX,XX +XXX,XX @@ static void test_pair_jobs(int expected1, int expected2) | ||
193 | /* Release our reference now to trigger as many nice | ||
194 | * use-after-free bugs as possible. | ||
195 | */ | ||
196 | - job_txn_unref(txn); | ||
197 | + WITH_JOB_LOCK_GUARD() { | ||
198 | + job_txn_unref_locked(txn); | ||
199 | |||
200 | - if (expected1 == -ECANCELED) { | ||
201 | - job_cancel(&job1->job, false); | ||
202 | - } | ||
203 | - if (expected2 == -ECANCELED) { | ||
204 | - job_cancel(&job2->job, false); | ||
205 | + if (expected1 == -ECANCELED) { | ||
206 | + job_cancel_locked(&job1->job, false); | ||
207 | + } | ||
208 | + if (expected2 == -ECANCELED) { | ||
209 | + job_cancel_locked(&job2->job, false); | ||
210 | + } | ||
211 | } | ||
212 | |||
213 | while (result1 == -EINPROGRESS || result2 == -EINPROGRESS) { | ||
214 | @@ -XXX,XX +XXX,XX @@ static void test_pair_jobs_fail_cancel_race(void) | ||
215 | job_start(&job1->job); | ||
216 | job_start(&job2->job); | ||
217 | |||
218 | - job_cancel(&job1->job, false); | ||
219 | + WITH_JOB_LOCK_GUARD() { | ||
220 | + job_cancel_locked(&job1->job, false); | ||
221 | + } | ||
222 | |||
223 | /* Now make job2 finish before the main loop kicks jobs. This simulates | ||
224 | * the race between a pending kick and another job completing. | ||
225 | diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c | ||
226 | index XXXXXXX..XXXXXXX 100644 | ||
227 | --- a/tests/unit/test-blockjob.c | ||
228 | +++ b/tests/unit/test-blockjob.c | ||
229 | @@ -XXX,XX +XXX,XX @@ static CancelJob *create_common(Job **pjob) | ||
230 | bjob = mk_job(blk, "Steve", &test_cancel_driver, true, | ||
231 | JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS); | ||
232 | job = &bjob->job; | ||
233 | - job_ref(job); | ||
234 | - assert(job->status == JOB_STATUS_CREATED); | ||
235 | + WITH_JOB_LOCK_GUARD() { | ||
236 | + job_ref_locked(job); | ||
237 | + assert(job->status == JOB_STATUS_CREATED); | ||
238 | + } | ||
239 | + | ||
240 | s = container_of(bjob, CancelJob, common); | ||
241 | s->blk = blk; | ||
242 | |||
243 | @@ -XXX,XX +XXX,XX @@ static void cancel_common(CancelJob *s) | ||
244 | aio_context_acquire(ctx); | ||
245 | |||
246 | job_cancel_sync(&job->job, true); | ||
247 | - if (sts != JOB_STATUS_CREATED && sts != JOB_STATUS_CONCLUDED) { | ||
248 | - Job *dummy = &job->job; | ||
249 | - job_dismiss(&dummy, &error_abort); | ||
250 | + WITH_JOB_LOCK_GUARD() { | ||
251 | + if (sts != JOB_STATUS_CREATED && sts != JOB_STATUS_CONCLUDED) { | ||
252 | + Job *dummy = &job->job; | ||
253 | + job_dismiss_locked(&dummy, &error_abort); | ||
254 | + } | ||
255 | + assert(job->job.status == JOB_STATUS_NULL); | ||
256 | + job_unref_locked(&job->job); | ||
257 | } | ||
258 | - assert(job->job.status == JOB_STATUS_NULL); | ||
259 | - job_unref(&job->job); | ||
260 | destroy_blk(blk); | ||
261 | |||
262 | aio_context_release(ctx); | ||
263 | @@ -XXX,XX +XXX,XX @@ static void test_cancel_created(void) | ||
264 | cancel_common(s); | ||
265 | } | ||
266 | |||
267 | +static void assert_job_status_is(Job *job, int status) | ||
21 | +{ | 268 | +{ |
22 | + AioContext *ctx; | 269 | + WITH_JOB_LOCK_GUARD() { |
23 | + BlockJob *job, *next; | 270 | + assert(job->status == status); |
24 | + | ||
25 | + QLIST_FOREACH_SAFE(job, &txn->jobs, txn_list, next) { | ||
26 | + ctx = blk_get_aio_context(job->blk); | ||
27 | + aio_context_acquire(ctx); | ||
28 | + fn(job); | ||
29 | + aio_context_release(ctx); | ||
30 | + } | 271 | + } |
31 | +} | 272 | +} |
32 | + | 273 | + |
33 | static int block_job_finish_sync(BlockJob *job, | 274 | static void test_cancel_running(void) |
34 | void (*finish)(BlockJob *, Error **errp), | ||
35 | Error **errp) | ||
36 | @@ -XXX,XX +XXX,XX @@ static void block_job_completed_txn_abort(BlockJob *job) | ||
37 | |||
38 | static void block_job_completed_txn_success(BlockJob *job) | ||
39 | { | 275 | { |
40 | - AioContext *ctx; | 276 | Job *job; |
41 | BlockJobTxn *txn = job->txn; | 277 | @@ -XXX,XX +XXX,XX @@ static void test_cancel_running(void) |
42 | - BlockJob *other_job, *next; | 278 | s = create_common(&job); |
43 | + BlockJob *other_job; | 279 | |
44 | /* | 280 | job_start(job); |
45 | * Successful completion, see if there are other running jobs in this | 281 | - assert(job->status == JOB_STATUS_RUNNING); |
46 | * txn. | 282 | + assert_job_status_is(job, JOB_STATUS_RUNNING); |
47 | @@ -XXX,XX +XXX,XX @@ static void block_job_completed_txn_success(BlockJob *job) | 283 | |
48 | if (!other_job->completed) { | 284 | cancel_common(s); |
49 | return; | 285 | } |
50 | } | 286 | @@ -XXX,XX +XXX,XX @@ static void test_cancel_paused(void) |
51 | - } | 287 | s = create_common(&job); |
52 | - /* We are the last completed job, commit the transaction. */ | 288 | |
53 | - QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) { | 289 | job_start(job); |
54 | - ctx = blk_get_aio_context(other_job->blk); | 290 | - assert(job->status == JOB_STATUS_RUNNING); |
55 | - aio_context_acquire(ctx); | 291 | - |
56 | assert(other_job->ret == 0); | 292 | - job_user_pause(job, &error_abort); |
57 | - block_job_completed_single(other_job); | 293 | + WITH_JOB_LOCK_GUARD() { |
58 | - aio_context_release(ctx); | 294 | + assert(job->status == JOB_STATUS_RUNNING); |
59 | } | 295 | + job_user_pause_locked(job, &error_abort); |
60 | + /* We are the last completed job, commit the transaction. */ | 296 | + } |
61 | + block_job_txn_apply(txn, block_job_completed_single); | 297 | job_enter(job); |
62 | } | 298 | - assert(job->status == JOB_STATUS_PAUSED); |
63 | 299 | + assert_job_status_is(job, JOB_STATUS_PAUSED); | |
64 | /* Assumes the block_job_mutex is held */ | 300 | |
301 | cancel_common(s); | ||
302 | } | ||
303 | @@ -XXX,XX +XXX,XX @@ static void test_cancel_ready(void) | ||
304 | s = create_common(&job); | ||
305 | |||
306 | job_start(job); | ||
307 | - assert(job->status == JOB_STATUS_RUNNING); | ||
308 | + assert_job_status_is(job, JOB_STATUS_RUNNING); | ||
309 | |||
310 | s->should_converge = true; | ||
311 | job_enter(job); | ||
312 | - assert(job->status == JOB_STATUS_READY); | ||
313 | + assert_job_status_is(job, JOB_STATUS_READY); | ||
314 | |||
315 | cancel_common(s); | ||
316 | } | ||
317 | @@ -XXX,XX +XXX,XX @@ static void test_cancel_standby(void) | ||
318 | s = create_common(&job); | ||
319 | |||
320 | job_start(job); | ||
321 | - assert(job->status == JOB_STATUS_RUNNING); | ||
322 | + assert_job_status_is(job, JOB_STATUS_RUNNING); | ||
323 | |||
324 | s->should_converge = true; | ||
325 | job_enter(job); | ||
326 | - assert(job->status == JOB_STATUS_READY); | ||
327 | - | ||
328 | - job_user_pause(job, &error_abort); | ||
329 | + WITH_JOB_LOCK_GUARD() { | ||
330 | + assert(job->status == JOB_STATUS_READY); | ||
331 | + job_user_pause_locked(job, &error_abort); | ||
332 | + } | ||
333 | job_enter(job); | ||
334 | - assert(job->status == JOB_STATUS_STANDBY); | ||
335 | + assert_job_status_is(job, JOB_STATUS_STANDBY); | ||
336 | |||
337 | cancel_common(s); | ||
338 | } | ||
339 | @@ -XXX,XX +XXX,XX @@ static void test_cancel_pending(void) | ||
340 | s = create_common(&job); | ||
341 | |||
342 | job_start(job); | ||
343 | - assert(job->status == JOB_STATUS_RUNNING); | ||
344 | + assert_job_status_is(job, JOB_STATUS_RUNNING); | ||
345 | |||
346 | s->should_converge = true; | ||
347 | job_enter(job); | ||
348 | - assert(job->status == JOB_STATUS_READY); | ||
349 | - | ||
350 | - job_complete(job, &error_abort); | ||
351 | + WITH_JOB_LOCK_GUARD() { | ||
352 | + assert(job->status == JOB_STATUS_READY); | ||
353 | + job_complete_locked(job, &error_abort); | ||
354 | + } | ||
355 | job_enter(job); | ||
356 | while (!job->deferred_to_main_loop) { | ||
357 | aio_poll(qemu_get_aio_context(), true); | ||
358 | } | ||
359 | - assert(job->status == JOB_STATUS_READY); | ||
360 | + assert_job_status_is(job, JOB_STATUS_READY); | ||
361 | aio_poll(qemu_get_aio_context(), true); | ||
362 | - assert(job->status == JOB_STATUS_PENDING); | ||
363 | + assert_job_status_is(job, JOB_STATUS_PENDING); | ||
364 | |||
365 | cancel_common(s); | ||
366 | } | ||
367 | @@ -XXX,XX +XXX,XX @@ static void test_cancel_concluded(void) | ||
368 | s = create_common(&job); | ||
369 | |||
370 | job_start(job); | ||
371 | - assert(job->status == JOB_STATUS_RUNNING); | ||
372 | + assert_job_status_is(job, JOB_STATUS_RUNNING); | ||
373 | |||
374 | s->should_converge = true; | ||
375 | job_enter(job); | ||
376 | - assert(job->status == JOB_STATUS_READY); | ||
377 | - | ||
378 | - job_complete(job, &error_abort); | ||
379 | + WITH_JOB_LOCK_GUARD() { | ||
380 | + assert(job->status == JOB_STATUS_READY); | ||
381 | + job_complete_locked(job, &error_abort); | ||
382 | + } | ||
383 | job_enter(job); | ||
384 | while (!job->deferred_to_main_loop) { | ||
385 | aio_poll(qemu_get_aio_context(), true); | ||
386 | } | ||
387 | - assert(job->status == JOB_STATUS_READY); | ||
388 | + assert_job_status_is(job, JOB_STATUS_READY); | ||
389 | aio_poll(qemu_get_aio_context(), true); | ||
390 | - assert(job->status == JOB_STATUS_PENDING); | ||
391 | + assert_job_status_is(job, JOB_STATUS_PENDING); | ||
392 | |||
393 | aio_context_acquire(job->aio_context); | ||
394 | - job_finalize(job, &error_abort); | ||
395 | + WITH_JOB_LOCK_GUARD() { | ||
396 | + job_finalize_locked(job, &error_abort); | ||
397 | + } | ||
398 | aio_context_release(job->aio_context); | ||
399 | - assert(job->status == JOB_STATUS_CONCLUDED); | ||
400 | + assert_job_status_is(job, JOB_STATUS_CONCLUDED); | ||
401 | |||
402 | cancel_common(s); | ||
403 | } | ||
404 | @@ -XXX,XX +XXX,XX @@ static void test_complete_in_standby(void) | ||
405 | bjob = mk_job(blk, "job", &test_yielding_driver, true, | ||
406 | JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS); | ||
407 | job = &bjob->job; | ||
408 | - assert(job->status == JOB_STATUS_CREATED); | ||
409 | + assert_job_status_is(job, JOB_STATUS_CREATED); | ||
410 | |||
411 | /* Wait for the job to become READY */ | ||
412 | job_start(job); | ||
413 | aio_context_acquire(ctx); | ||
414 | + /* | ||
415 | + * Here we are waiting for the status to change, so don't bother | ||
416 | + * protecting the read every time. | ||
417 | + */ | ||
418 | AIO_WAIT_WHILE(ctx, job->status != JOB_STATUS_READY); | ||
419 | aio_context_release(ctx); | ||
420 | |||
421 | /* Begin the drained section, pausing the job */ | ||
422 | bdrv_drain_all_begin(); | ||
423 | - assert(job->status == JOB_STATUS_STANDBY); | ||
424 | + assert_job_status_is(job, JOB_STATUS_STANDBY); | ||
425 | + | ||
426 | /* Lock the IO thread to prevent the job from being run */ | ||
427 | aio_context_acquire(ctx); | ||
428 | /* This will schedule the job to resume it */ | ||
429 | bdrv_drain_all_end(); | ||
430 | |||
431 | - /* But the job cannot run, so it will remain on standby */ | ||
432 | - assert(job->status == JOB_STATUS_STANDBY); | ||
433 | + WITH_JOB_LOCK_GUARD() { | ||
434 | + /* But the job cannot run, so it will remain on standby */ | ||
435 | + assert(job->status == JOB_STATUS_STANDBY); | ||
436 | |||
437 | - /* Even though the job is on standby, this should work */ | ||
438 | - job_complete(job, &error_abort); | ||
439 | + /* Even though the job is on standby, this should work */ | ||
440 | + job_complete_locked(job, &error_abort); | ||
441 | |||
442 | - /* The test is done now, clean up. */ | ||
443 | - job_finish_sync(job, NULL, &error_abort); | ||
444 | - assert(job->status == JOB_STATUS_PENDING); | ||
445 | + /* The test is done now, clean up. */ | ||
446 | + job_finish_sync_locked(job, NULL, &error_abort); | ||
447 | + assert(job->status == JOB_STATUS_PENDING); | ||
448 | |||
449 | - job_finalize(job, &error_abort); | ||
450 | - assert(job->status == JOB_STATUS_CONCLUDED); | ||
451 | + job_finalize_locked(job, &error_abort); | ||
452 | + assert(job->status == JOB_STATUS_CONCLUDED); | ||
453 | |||
454 | - job_dismiss(&job, &error_abort); | ||
455 | + job_dismiss_locked(&job, &error_abort); | ||
456 | + } | ||
457 | |||
458 | destroy_blk(blk); | ||
459 | aio_context_release(ctx); | ||
65 | -- | 460 | -- |
66 | 2.13.6 | 461 | 2.37.3 |
67 | |||
1 | From: John Snow <jsnow@redhat.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Model all independent jobs as single-job transactions. | 3 | Once job lock is used and aiocontext is removed, mirror has |
4 | to perform job operations under the same critical section. |
5 | Note: at this stage, job_{lock/unlock} and job lock guard macros | ||
6 | are *nop*. | ||
4 | 7 | ||
5 | It's one less case we have to worry about when we add more states to the | 8 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
6 | transition machine. This way, we can just treat all job lifetimes exactly | 9 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> |
7 | the same. This helps tighten assertions of the STM graph and removes some | 10 | Message-Id: <20220926093214.506243-11-eesposit@redhat.com> |
8 | conditionals that would have been needed in the coming commits adding a | ||
9 | more explicit job lifetime management API. | ||
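The refactoring described above can be illustrated outside QEMU. The sketch below is a self-contained toy in C, not the blockjob code itself: all toy_* names are invented, and only the refcounting idea matches the block_job_create() hunk further down (create a fresh transaction for a standalone job, attach the job, then drop the creator's reference so the job holds the only one).

```c
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy model: every job always belongs to a transaction, possibly of size 1. */
struct toy_txn {
    int refcnt;
    int jobs;                       /* number of jobs attached */
};

struct toy_job {
    struct toy_txn *txn;
};

static struct toy_txn *toy_txn_new(void)
{
    struct toy_txn *t = calloc(1, sizeof(*t));
    t->refcnt = 1;
    return t;
}

static void toy_txn_unref(struct toy_txn *t)
{
    if (t && --t->refcnt == 0) {
        free(t);
    }
}

static void toy_txn_add_job(struct toy_txn *t, struct toy_job *j)
{
    j->txn = t;
    t->jobs++;
    t->refcnt++;                    /* the job keeps the transaction alive */
}

/* Same idea as the block_job_create() hunk: no transaction given means
 * "wrap the job in a fresh single-job transaction". */
static struct toy_job *toy_job_create(struct toy_txn *txn)
{
    struct toy_job *j = calloc(1, sizeof(*j));
    if (!txn) {
        txn = toy_txn_new();
        toy_txn_add_job(txn, j);
        toy_txn_unref(txn);         /* drop the creator's reference */
    } else {
        toy_txn_add_job(txn, j);
    }
    return j;
}

int main(void)
{
    struct toy_job *standalone = toy_job_create(NULL);
    assert(standalone->txn != NULL && standalone->txn->jobs == 1);
    printf("standalone job lives in a %d-job transaction\n",
           standalone->txn->jobs);
    toy_txn_unref(standalone->txn); /* job completed: release its reference */
    free(standalone);
    return 0;
}
```

With every job inside some transaction, the completion path can always go through the transaction machinery instead of special-casing a NULL transaction.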
10 | |||
11 | Signed-off-by: John Snow <jsnow@redhat.com> | ||
12 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
13 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
14 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
15 | --- | 13 | --- |
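For the mirror change on the right, the point is that checking job->paused and waking the job must happen inside one critical section, so the check and the wake-up cannot race with a concurrent pause. A minimal pthread-based sketch of that check-then-act pattern follows; WITH_TOY_JOB_LOCK_GUARD and the toy_* types are invented stand-ins, not QEMU's WITH_JOB_LOCK_GUARD, which is built on QEMU's own lock-guard helpers.

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for job_mutex and the fields mirror_complete() touches. */
static pthread_mutex_t toy_job_mutex = PTHREAD_MUTEX_INITIALIZER;

struct toy_job {
    bool paused;
    bool should_complete;
};

/*
 * Run the following block exactly once with toy_job_mutex held.
 * Note: unlike QEMU's real guard, a break or goto out of the block
 * would skip the unlock here; this only shows the scoping idea.
 */
#define WITH_TOY_JOB_LOCK_GUARD() \
    for (pthread_mutex_t *guard_ = \
             (pthread_mutex_lock(&toy_job_mutex), &toy_job_mutex); \
         guard_ != NULL; \
         pthread_mutex_unlock(guard_), guard_ = NULL)

/* Check-then-act under one critical section, like the mirror_complete() hunk. */
static void toy_complete(struct toy_job *job)
{
    WITH_TOY_JOB_LOCK_GUARD() {
        job->should_complete = true;
        if (!job->paused) {
            /* stands in for job_enter_cond_locked(job, NULL) */
            printf("waking the job coroutine\n");
        }
    }
}

int main(void)
{
    struct toy_job job = { .paused = false, .should_complete = false };
    toy_complete(&job);
    return 0;
}
```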
16 | include/block/blockjob.h | 1 - | 14 | block/mirror.c | 13 +++++++++---- |
17 | include/block/blockjob_int.h | 3 ++- | 15 | 1 file changed, 9 insertions(+), 4 deletions(-) |
18 | block/backup.c | 3 +-- | ||
19 | block/commit.c | 2 +- | ||
20 | block/mirror.c | 2 +- | ||
21 | block/stream.c | 2 +- | ||
22 | blockjob.c | 25 ++++++++++++++++--------- | ||
23 | tests/test-bdrv-drain.c | 4 ++-- | ||
24 | tests/test-blockjob-txn.c | 19 +++++++------------ | ||
25 | tests/test-blockjob.c | 2 +- | ||
26 | 10 files changed, 32 insertions(+), 31 deletions(-) | ||
27 | 16 | ||
28 | diff --git a/include/block/blockjob.h b/include/block/blockjob.h | ||
29 | index XXXXXXX..XXXXXXX 100644 | ||
30 | --- a/include/block/blockjob.h | ||
31 | +++ b/include/block/blockjob.h | ||
32 | @@ -XXX,XX +XXX,XX @@ typedef struct BlockJob { | ||
33 | */ | ||
34 | QEMUTimer sleep_timer; | ||
35 | |||
36 | - /** Non-NULL if this job is part of a transaction */ | ||
37 | BlockJobTxn *txn; | ||
38 | QLIST_ENTRY(BlockJob) txn_list; | ||
39 | } BlockJob; | ||
40 | diff --git a/include/block/blockjob_int.h b/include/block/blockjob_int.h | ||
41 | index XXXXXXX..XXXXXXX 100644 | ||
42 | --- a/include/block/blockjob_int.h | ||
43 | +++ b/include/block/blockjob_int.h | ||
44 | @@ -XXX,XX +XXX,XX @@ struct BlockJobDriver { | ||
45 | * @job_id: The id of the newly-created job, or %NULL to have one | ||
46 | * generated automatically. | ||
47 | * @job_type: The class object for the newly-created job. | ||
48 | + * @txn: The transaction this job belongs to, if any. %NULL otherwise. | ||
49 | * @bs: The block | ||
50 | * @perm, @shared_perm: Permissions to request for @bs | ||
51 | * @speed: The maximum speed, in bytes per second, or 0 for unlimited. | ||
52 | @@ -XXX,XX +XXX,XX @@ struct BlockJobDriver { | ||
53 | * called from a wrapper that is specific to the job type. | ||
54 | */ | ||
55 | void *block_job_create(const char *job_id, const BlockJobDriver *driver, | ||
56 | - BlockDriverState *bs, uint64_t perm, | ||
57 | + BlockJobTxn *txn, BlockDriverState *bs, uint64_t perm, | ||
58 | uint64_t shared_perm, int64_t speed, int flags, | ||
59 | BlockCompletionFunc *cb, void *opaque, Error **errp); | ||
60 | |||
61 | diff --git a/block/backup.c b/block/backup.c | ||
62 | index XXXXXXX..XXXXXXX 100644 | ||
63 | --- a/block/backup.c | ||
64 | +++ b/block/backup.c | ||
65 | @@ -XXX,XX +XXX,XX @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs, | ||
66 | } | ||
67 | |||
68 | /* job->common.len is fixed, so we can't allow resize */ | ||
69 | - job = block_job_create(job_id, &backup_job_driver, bs, | ||
70 | + job = block_job_create(job_id, &backup_job_driver, txn, bs, | ||
71 | BLK_PERM_CONSISTENT_READ, | ||
72 | BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE | | ||
73 | BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD, | ||
74 | @@ -XXX,XX +XXX,XX @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs, | ||
75 | block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL, | ||
76 | &error_abort); | ||
77 | job->common.len = len; | ||
78 | - block_job_txn_add_job(txn, &job->common); | ||
79 | |||
80 | return &job->common; | ||
81 | |||
82 | diff --git a/block/commit.c b/block/commit.c | ||
83 | index XXXXXXX..XXXXXXX 100644 | ||
84 | --- a/block/commit.c | ||
85 | +++ b/block/commit.c | ||
86 | @@ -XXX,XX +XXX,XX @@ void commit_start(const char *job_id, BlockDriverState *bs, | ||
87 | return; | ||
88 | } | ||
89 | |||
90 | - s = block_job_create(job_id, &commit_job_driver, bs, 0, BLK_PERM_ALL, | ||
91 | + s = block_job_create(job_id, &commit_job_driver, NULL, bs, 0, BLK_PERM_ALL, | ||
92 | speed, BLOCK_JOB_DEFAULT, NULL, NULL, errp); | ||
93 | if (!s) { | ||
94 | return; | ||
95 | diff --git a/block/mirror.c b/block/mirror.c | 17 | diff --git a/block/mirror.c b/block/mirror.c |
96 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
97 | --- a/block/mirror.c | 19 | --- a/block/mirror.c |
98 | +++ b/block/mirror.c | 20 | +++ b/block/mirror.c |
99 | @@ -XXX,XX +XXX,XX @@ static void mirror_start_job(const char *job_id, BlockDriverState *bs, | 21 | @@ -XXX,XX +XXX,XX @@ static void mirror_complete(Job *job, Error **errp) |
22 | s->should_complete = true; | ||
23 | |||
24 | /* If the job is paused, it will be re-entered when it is resumed */ | ||
25 | - if (!job->paused) { | ||
26 | - job_enter(job); | ||
27 | + WITH_JOB_LOCK_GUARD() { | ||
28 | + if (!job->paused) { | ||
29 | + job_enter_cond_locked(job, NULL); | ||
30 | + } | ||
100 | } | 31 | } |
101 | 32 | } | |
102 | /* Make sure that the source is not resized while the job is running */ | 33 | |
103 | - s = block_job_create(job_id, driver, mirror_top_bs, | 34 | @@ -XXX,XX +XXX,XX @@ static bool mirror_drained_poll(BlockJob *job) |
104 | + s = block_job_create(job_id, driver, NULL, mirror_top_bs, | 35 | * from one of our own drain sections, to avoid a deadlock waiting for |
105 | BLK_PERM_CONSISTENT_READ, | 36 | * ourselves. |
106 | BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED | | 37 | */ |
107 | BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed, | 38 | - if (!s->common.job.paused && !job_is_cancelled(&job->job) && !s->in_drain) { |
108 | diff --git a/block/stream.c b/block/stream.c | 39 | - return true; |
109 | index XXXXXXX..XXXXXXX 100644 | 40 | + WITH_JOB_LOCK_GUARD() { |
110 | --- a/block/stream.c | 41 | + if (!s->common.job.paused && !job_is_cancelled_locked(&job->job) |
111 | +++ b/block/stream.c | 42 | + && !s->in_drain) { |
112 | @@ -XXX,XX +XXX,XX @@ void stream_start(const char *job_id, BlockDriverState *bs, | 43 | + return true; |
113 | /* Prevent concurrent jobs trying to modify the graph structure here, we | 44 | + } |
114 | * already have our own plans. Also don't allow resize as the image size is | ||
115 | * queried only at the job start and then cached. */ | ||
116 | - s = block_job_create(job_id, &stream_job_driver, bs, | ||
117 | + s = block_job_create(job_id, &stream_job_driver, NULL, bs, | ||
118 | BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED | | ||
119 | BLK_PERM_GRAPH_MOD, | ||
120 | BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED | | ||
121 | diff --git a/blockjob.c b/blockjob.c | ||
122 | index XXXXXXX..XXXXXXX 100644 | ||
123 | --- a/blockjob.c | ||
124 | +++ b/blockjob.c | ||
125 | @@ -XXX,XX +XXX,XX @@ static void block_job_completed_single(BlockJob *job) | ||
126 | } | ||
127 | } | 45 | } |
128 | 46 | ||
129 | - if (job->txn) { | 47 | return !!s->in_flight; |
130 | - QLIST_REMOVE(job, txn_list); | ||
131 | - block_job_txn_unref(job->txn); | ||
132 | - } | ||
133 | + QLIST_REMOVE(job, txn_list); | ||
134 | + block_job_txn_unref(job->txn); | ||
135 | block_job_unref(job); | ||
136 | } | ||
137 | |||
138 | @@ -XXX,XX +XXX,XX @@ static void block_job_event_completed(BlockJob *job, const char *msg) | ||
139 | */ | ||
140 | |||
141 | void *block_job_create(const char *job_id, const BlockJobDriver *driver, | ||
142 | - BlockDriverState *bs, uint64_t perm, | ||
143 | + BlockJobTxn *txn, BlockDriverState *bs, uint64_t perm, | ||
144 | uint64_t shared_perm, int64_t speed, int flags, | ||
145 | BlockCompletionFunc *cb, void *opaque, Error **errp) | ||
146 | { | ||
147 | @@ -XXX,XX +XXX,XX @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver, | ||
148 | return NULL; | ||
149 | } | ||
150 | } | ||
151 | + | ||
152 | + /* Single jobs are modeled as single-job transactions for sake of | ||
153 | + * consolidating the job management logic */ | ||
154 | + if (!txn) { | ||
155 | + txn = block_job_txn_new(); | ||
156 | + block_job_txn_add_job(txn, job); | ||
157 | + block_job_txn_unref(txn); | ||
158 | + } else { | ||
159 | + block_job_txn_add_job(txn, job); | ||
160 | + } | ||
161 | + | ||
162 | return job; | ||
163 | } | ||
164 | |||
165 | @@ -XXX,XX +XXX,XX @@ void block_job_early_fail(BlockJob *job) | ||
166 | |||
167 | void block_job_completed(BlockJob *job, int ret) | ||
168 | { | ||
169 | + assert(job && job->txn && !job->completed); | ||
170 | assert(blk_bs(job->blk)->job == job); | ||
171 | - assert(!job->completed); | ||
172 | job->completed = true; | ||
173 | job->ret = ret; | ||
174 | - if (!job->txn) { | ||
175 | - block_job_completed_single(job); | ||
176 | - } else if (ret < 0 || block_job_is_cancelled(job)) { | ||
177 | + if (ret < 0 || block_job_is_cancelled(job)) { | ||
178 | block_job_completed_txn_abort(job); | ||
179 | } else { | ||
180 | block_job_completed_txn_success(job); | ||
181 | diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c | ||
182 | index XXXXXXX..XXXXXXX 100644 | ||
183 | --- a/tests/test-bdrv-drain.c | ||
184 | +++ b/tests/test-bdrv-drain.c | ||
185 | @@ -XXX,XX +XXX,XX @@ static void test_blockjob_common(enum drain_type drain_type) | ||
186 | blk_target = blk_new(BLK_PERM_ALL, BLK_PERM_ALL); | ||
187 | blk_insert_bs(blk_target, target, &error_abort); | ||
188 | |||
189 | - job = block_job_create("job0", &test_job_driver, src, 0, BLK_PERM_ALL, 0, | ||
190 | - 0, NULL, NULL, &error_abort); | ||
191 | + job = block_job_create("job0", &test_job_driver, NULL, src, 0, BLK_PERM_ALL, | ||
192 | + 0, 0, NULL, NULL, &error_abort); | ||
193 | block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort); | ||
194 | block_job_start(job); | ||
195 | |||
196 | diff --git a/tests/test-blockjob-txn.c b/tests/test-blockjob-txn.c | ||
197 | index XXXXXXX..XXXXXXX 100644 | ||
198 | --- a/tests/test-blockjob-txn.c | ||
199 | +++ b/tests/test-blockjob-txn.c | ||
200 | @@ -XXX,XX +XXX,XX @@ static const BlockJobDriver test_block_job_driver = { | ||
201 | */ | ||
202 | static BlockJob *test_block_job_start(unsigned int iterations, | ||
203 | bool use_timer, | ||
204 | - int rc, int *result) | ||
205 | + int rc, int *result, BlockJobTxn *txn) | ||
206 | { | ||
207 | BlockDriverState *bs; | ||
208 | TestBlockJob *s; | ||
209 | @@ -XXX,XX +XXX,XX @@ static BlockJob *test_block_job_start(unsigned int iterations, | ||
210 | g_assert_nonnull(bs); | ||
211 | |||
212 | snprintf(job_id, sizeof(job_id), "job%u", counter++); | ||
213 | - s = block_job_create(job_id, &test_block_job_driver, bs, | ||
214 | + s = block_job_create(job_id, &test_block_job_driver, txn, bs, | ||
215 | 0, BLK_PERM_ALL, 0, BLOCK_JOB_DEFAULT, | ||
216 | test_block_job_cb, data, &error_abort); | ||
217 | s->iterations = iterations; | ||
218 | @@ -XXX,XX +XXX,XX @@ static void test_single_job(int expected) | ||
219 | int result = -EINPROGRESS; | ||
220 | |||
221 | txn = block_job_txn_new(); | ||
222 | - job = test_block_job_start(1, true, expected, &result); | ||
223 | - block_job_txn_add_job(txn, job); | ||
224 | + job = test_block_job_start(1, true, expected, &result, txn); | ||
225 | block_job_start(job); | ||
226 | |||
227 | if (expected == -ECANCELED) { | ||
228 | @@ -XXX,XX +XXX,XX @@ static void test_pair_jobs(int expected1, int expected2) | ||
229 | int result2 = -EINPROGRESS; | ||
230 | |||
231 | txn = block_job_txn_new(); | ||
232 | - job1 = test_block_job_start(1, true, expected1, &result1); | ||
233 | - block_job_txn_add_job(txn, job1); | ||
234 | - job2 = test_block_job_start(2, true, expected2, &result2); | ||
235 | - block_job_txn_add_job(txn, job2); | ||
236 | + job1 = test_block_job_start(1, true, expected1, &result1, txn); | ||
237 | + job2 = test_block_job_start(2, true, expected2, &result2, txn); | ||
238 | block_job_start(job1); | ||
239 | block_job_start(job2); | ||
240 | |||
241 | @@ -XXX,XX +XXX,XX @@ static void test_pair_jobs_fail_cancel_race(void) | ||
242 | int result2 = -EINPROGRESS; | ||
243 | |||
244 | txn = block_job_txn_new(); | ||
245 | - job1 = test_block_job_start(1, true, -ECANCELED, &result1); | ||
246 | - block_job_txn_add_job(txn, job1); | ||
247 | - job2 = test_block_job_start(2, false, 0, &result2); | ||
248 | - block_job_txn_add_job(txn, job2); | ||
249 | + job1 = test_block_job_start(1, true, -ECANCELED, &result1, txn); | ||
250 | + job2 = test_block_job_start(2, false, 0, &result2, txn); | ||
251 | block_job_start(job1); | ||
252 | block_job_start(job2); | ||
253 | |||
254 | diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c | ||
255 | index XXXXXXX..XXXXXXX 100644 | ||
256 | --- a/tests/test-blockjob.c | ||
257 | +++ b/tests/test-blockjob.c | ||
258 | @@ -XXX,XX +XXX,XX @@ static BlockJob *do_test_id(BlockBackend *blk, const char *id, | ||
259 | BlockJob *job; | ||
260 | Error *errp = NULL; | ||
261 | |||
262 | - job = block_job_create(id, &test_block_job_driver, blk_bs(blk), | ||
263 | + job = block_job_create(id, &test_block_job_driver, NULL, blk_bs(blk), | ||
264 | 0, BLK_PERM_ALL, 0, BLOCK_JOB_DEFAULT, block_job_cb, | ||
265 | NULL, &errp); | ||
266 | if (should_succeed) { | ||
267 | -- | 48 | -- |
268 | 2.13.6 | 49 | 2.37.3 |
269 | |||
1 | From: John Snow <jsnow@redhat.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Expose the "manual" property via QAPI for the backup-related jobs. | 3 | Now that the API offers also _locked() functions, take advantage |
4 | As of this commit, this allows the management API to request the | 4 | of it and give also the caller control to take the lock and call |
5 | "concluded" and "dismiss" semantics for backup jobs. | 5 | _locked functions. |
6 | 6 | ||
7 | Signed-off-by: John Snow <jsnow@redhat.com> | 7 | This matters especially in for loops, because it |
8 | makes no sense to have: | ||
9 | |||
10 | for(job = job_next(); ...) | ||
11 | |||
12 | where each job_next() takes the lock internally. | ||
13 | Instead we want | ||
14 | |||
15 | JOB_LOCK_GUARD(); | ||
16 | for(job = job_next_locked(); ...) | ||
17 | |||
18 | In addition, also protect direct field accesses, either by creating a | ||
19 | new critical section or by widening the existing ones. | ||
20 | |||
21 | Note: at this stage, job_{lock/unlock} and job lock guard macros | ||
22 | are *nop*. | ||
23 | |||
24 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> | ||
25 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
26 | Message-Id: <20220926093214.506243-12-eesposit@redhat.com> | ||
27 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
8 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 28 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
9 | --- | 29 | --- |
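To make the loop pattern from the commit message on the right concrete, here is a small self-contained C sketch (pthread mutex, invented toy_* names, not the QEMU job API): a wrapper that locks on every call versus a _locked variant that the caller iterates under a single critical section.

```c
#include <pthread.h>
#include <stdio.h>

/* A toy job list protected by one global mutex, in the spirit of job_mutex. */
static pthread_mutex_t toy_list_mutex = PTHREAD_MUTEX_INITIALIZER;

struct toy_job {
    const char *id;
    struct toy_job *next;
};

static struct toy_job toy_job_b = { "b", NULL };
static struct toy_job toy_job_a = { "a", &toy_job_b };
static struct toy_job *toy_job_list = &toy_job_a;

/* Convenience wrapper: one lock/unlock per call. */
static struct toy_job *toy_job_next(struct toy_job *j)
{
    pthread_mutex_lock(&toy_list_mutex);
    struct toy_job *next = j ? j->next : toy_job_list;
    pthread_mutex_unlock(&toy_list_mutex);
    return next;
}

/* _locked variant: the caller already holds toy_list_mutex. */
static struct toy_job *toy_job_next_locked(struct toy_job *j)
{
    return j ? j->next : toy_job_list;
}

int main(void)
{
    /* Walking with the wrapper takes and drops the mutex once per element. */
    for (struct toy_job *j = toy_job_next(NULL); j; j = toy_job_next(j)) {
        printf("per-call locking: %s\n", j->id);
    }

    /* One critical section covers the whole walk instead. */
    pthread_mutex_lock(&toy_list_mutex);
    for (struct toy_job *j = toy_job_next_locked(NULL); j;
         j = toy_job_next_locked(j)) {
        printf("single critical section: %s\n", j->id);
    }
    pthread_mutex_unlock(&toy_list_mutex);
    return 0;
}
```

This is the shape the hunks below give to blockdev_mark_auto_del(), qmp_query_block_jobs(), qmp_query_jobs(), qmp_cont() and qemu-img's run_block_job(): one JOB_LOCK_GUARD() or job_lock()/job_unlock() pair around the whole walk, with *_locked() calls inside.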
10 | qapi/block-core.json | 48 ++++++++++++++++++++++++++++++++++++++-------- | 30 | block.c | 17 ++++++++++------- |
11 | blockdev.c | 31 +++++++++++++++++++++++++++--- | 31 | blockdev.c | 14 ++++++++++---- |
12 | blockjob.c | 2 ++ | 32 | blockjob.c | 35 ++++++++++++++++++++++------------- |
13 | tests/qemu-iotests/109.out | 24 +++++++++++------------ | 33 | job-qmp.c | 9 ++++++--- |
14 | 4 files changed, 82 insertions(+), 23 deletions(-) | 34 | monitor/qmp-cmds.c | 7 +++++-- |
15 | 35 | qemu-img.c | 15 ++++++++++----- | |
16 | diff --git a/qapi/block-core.json b/qapi/block-core.json | 36 | 6 files changed, 63 insertions(+), 34 deletions(-) |
17 | index XXXXXXX..XXXXXXX 100644 | 37 | |
18 | --- a/qapi/block-core.json | 38 | diff --git a/block.c b/block.c |
19 | +++ b/qapi/block-core.json | 39 | index XXXXXXX..XXXXXXX 100644 |
20 | @@ -XXX,XX +XXX,XX @@ | 40 | --- a/block.c |
21 | # | 41 | +++ b/block.c |
22 | # @status: Current job state/status (since 2.12) | 42 | @@ -XXX,XX +XXX,XX @@ static void bdrv_close(BlockDriverState *bs) |
23 | # | 43 | |
24 | +# @auto-finalize: Job will finalize itself when PENDING, moving to | 44 | void bdrv_close_all(void) |
25 | +# the CONCLUDED state. (since 2.12) | 45 | { |
26 | +# | 46 | - assert(job_next(NULL) == NULL); |
27 | +# @auto-dismiss: Job will dismiss itself when CONCLUDED, moving to the NULL | 47 | GLOBAL_STATE_CODE(); |
28 | +# state and disappearing from the query list. (since 2.12) | 48 | + assert(job_next(NULL) == NULL); |
29 | +# | 49 | |
30 | # Since: 1.1 | 50 | /* Drop references from requests still in flight, such as canceled block |
31 | ## | 51 | * jobs whose AIO context has not been polled yet */ |
32 | { 'struct': 'BlockJobInfo', | 52 | @@ -XXX,XX +XXX,XX @@ XDbgBlockGraph *bdrv_get_xdbg_block_graph(Error **errp) |
33 | 'data': {'type': 'str', 'device': 'str', 'len': 'int', | 53 | } |
34 | 'offset': 'int', 'busy': 'bool', 'paused': 'bool', 'speed': 'int', | 54 | } |
35 | 'io-status': 'BlockDeviceIoStatus', 'ready': 'bool', | 55 | |
36 | - 'status': 'BlockJobStatus' } } | 56 | - for (job = block_job_next(NULL); job; job = block_job_next(job)) { |
37 | + 'status': 'BlockJobStatus', | 57 | - GSList *el; |
38 | + 'auto-finalize': 'bool', 'auto-dismiss': 'bool' } } | 58 | + WITH_JOB_LOCK_GUARD() { |
39 | 59 | + for (job = block_job_next_locked(NULL); job; | |
40 | ## | 60 | + job = block_job_next_locked(job)) { |
41 | # @query-block-jobs: | 61 | + GSList *el; |
42 | @@ -XXX,XX +XXX,XX @@ | 62 | |
43 | # default 'report' (no limitations, since this applies to | 63 | - xdbg_graph_add_node(gr, job, X_DBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_JOB, |
44 | # a different block device than @device). | 64 | - job->job.id); |
45 | # | 65 | - for (el = job->nodes; el; el = el->next) { |
46 | +# @auto-finalize: When false, this job will wait in a PENDING state after it has | 66 | - xdbg_graph_add_edge(gr, job, (BdrvChild *)el->data); |
47 | +# finished its work, waiting for @block-job-finalize. | 67 | + xdbg_graph_add_node(gr, job, X_DBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_JOB, |
48 | +# When true, this job will automatically perform its abort or | 68 | + job->job.id); |
49 | +# commit actions. | 69 | + for (el = job->nodes; el; el = el->next) { |
50 | +# Defaults to true. (Since 2.12) | 70 | + xdbg_graph_add_edge(gr, job, (BdrvChild *)el->data); |
51 | +# | 71 | + } |
52 | +# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it | 72 | } |
53 | +# has completed ceased all work, and wait for @block-job-dismiss. | 73 | } |
54 | +# When true, this job will automatically disappear from the query | 74 | |
55 | +# list without user intervention. | ||
56 | +# Defaults to true. (Since 2.12) | ||
57 | +# | ||
58 | # Note: @on-source-error and @on-target-error only affect background | ||
59 | # I/O. If an error occurs during a guest write request, the device's | ||
60 | # rerror/werror actions will be used. | ||
61 | @@ -XXX,XX +XXX,XX @@ | ||
62 | ## | ||
63 | { 'struct': 'DriveBackup', | ||
64 | 'data': { '*job-id': 'str', 'device': 'str', 'target': 'str', | ||
65 | - '*format': 'str', 'sync': 'MirrorSyncMode', '*mode': 'NewImageMode', | ||
66 | - '*speed': 'int', '*bitmap': 'str', '*compress': 'bool', | ||
67 | + '*format': 'str', 'sync': 'MirrorSyncMode', | ||
68 | + '*mode': 'NewImageMode', '*speed': 'int', | ||
69 | + '*bitmap': 'str', '*compress': 'bool', | ||
70 | '*on-source-error': 'BlockdevOnError', | ||
71 | - '*on-target-error': 'BlockdevOnError' } } | ||
72 | + '*on-target-error': 'BlockdevOnError', | ||
73 | + '*auto-finalize': 'bool', '*auto-dismiss': 'bool' } } | ||
74 | |||
75 | ## | ||
76 | # @BlockdevBackup: | ||
77 | @@ -XXX,XX +XXX,XX @@ | ||
78 | # default 'report' (no limitations, since this applies to | ||
79 | # a different block device than @device). | ||
80 | # | ||
81 | +# @auto-finalize: When false, this job will wait in a PENDING state after it has | ||
82 | +# finished its work, waiting for @block-job-finalize. | ||
83 | +# When true, this job will automatically perform its abort or | ||
84 | +# commit actions. | ||
85 | +# Defaults to true. (Since 2.12) | ||
86 | +# | ||
87 | +# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it | ||
88 | +# has completed ceased all work, and wait for @block-job-dismiss. | ||
89 | +# When true, this job will automatically disappear from the query | ||
90 | +# list without user intervention. | ||
91 | +# Defaults to true. (Since 2.12) | ||
92 | +# | ||
93 | # Note: @on-source-error and @on-target-error only affect background | ||
94 | # I/O. If an error occurs during a guest write request, the device's | ||
95 | # rerror/werror actions will be used. | ||
96 | @@ -XXX,XX +XXX,XX @@ | ||
97 | ## | ||
98 | { 'struct': 'BlockdevBackup', | ||
99 | 'data': { '*job-id': 'str', 'device': 'str', 'target': 'str', | ||
100 | - 'sync': 'MirrorSyncMode', | ||
101 | - '*speed': 'int', | ||
102 | - '*compress': 'bool', | ||
103 | + 'sync': 'MirrorSyncMode', '*speed': 'int', '*compress': 'bool', | ||
104 | '*on-source-error': 'BlockdevOnError', | ||
105 | - '*on-target-error': 'BlockdevOnError' } } | ||
106 | + '*on-target-error': 'BlockdevOnError', | ||
107 | + '*auto-finalize': 'bool', '*auto-dismiss': 'bool' } } | ||
108 | |||
109 | ## | ||
110 | # @blockdev-snapshot-sync: | ||
111 | diff --git a/blockdev.c b/blockdev.c | 75 | diff --git a/blockdev.c b/blockdev.c |
112 | index XXXXXXX..XXXXXXX 100644 | 76 | index XXXXXXX..XXXXXXX 100644 |
113 | --- a/blockdev.c | 77 | --- a/blockdev.c |
114 | +++ b/blockdev.c | 78 | +++ b/blockdev.c |
115 | @@ -XXX,XX +XXX,XX @@ static BlockJob *do_drive_backup(DriveBackup *backup, BlockJobTxn *txn, | 79 | @@ -XXX,XX +XXX,XX @@ void blockdev_mark_auto_del(BlockBackend *blk) |
116 | AioContext *aio_context; | 80 | return; |
117 | QDict *options = NULL; | 81 | } |
118 | Error *local_err = NULL; | 82 | |
119 | - int flags; | 83 | - for (job = block_job_next(NULL); job; job = block_job_next(job)) { |
120 | + int flags, job_flags = BLOCK_JOB_DEFAULT; | 84 | + JOB_LOCK_GUARD(); |
121 | int64_t size; | 85 | + |
122 | bool set_backing_hd = false; | 86 | + for (job = block_job_next_locked(NULL); job; |
123 | 87 | + job = block_job_next_locked(job)) { | |
124 | @@ -XXX,XX +XXX,XX @@ static BlockJob *do_drive_backup(DriveBackup *backup, BlockJobTxn *txn, | 88 | if (block_job_has_bdrv(job, blk_bs(blk))) { |
125 | if (!backup->has_job_id) { | 89 | AioContext *aio_context = job->job.aio_context; |
126 | backup->job_id = NULL; | 90 | aio_context_acquire(aio_context); |
127 | } | 91 | |
128 | + if (!backup->has_auto_finalize) { | 92 | - job_cancel(&job->job, false); |
129 | + backup->auto_finalize = true; | 93 | + job_cancel_locked(&job->job, false); |
130 | + } | 94 | |
131 | + if (!backup->has_auto_dismiss) { | 95 | aio_context_release(aio_context); |
132 | + backup->auto_dismiss = true; | 96 | } |
133 | + } | 97 | @@ -XXX,XX +XXX,XX @@ BlockJobInfoList *qmp_query_block_jobs(Error **errp) |
134 | if (!backup->has_compress) { | 98 | BlockJobInfoList *head = NULL, **tail = &head; |
135 | backup->compress = false; | 99 | BlockJob *job; |
136 | } | 100 | |
137 | @@ -XXX,XX +XXX,XX @@ static BlockJob *do_drive_backup(DriveBackup *backup, BlockJobTxn *txn, | 101 | - for (job = block_job_next(NULL); job; job = block_job_next(job)) { |
138 | goto out; | 102 | + JOB_LOCK_GUARD(); |
139 | } | 103 | + |
140 | } | 104 | + for (job = block_job_next_locked(NULL); job; |
141 | + if (!backup->auto_finalize) { | 105 | + job = block_job_next_locked(job)) { |
142 | + job_flags |= BLOCK_JOB_MANUAL_FINALIZE; | 106 | BlockJobInfo *value; |
143 | + } | 107 | AioContext *aio_context; |
144 | + if (!backup->auto_dismiss) { | 108 | |
145 | + job_flags |= BLOCK_JOB_MANUAL_DISMISS; | 109 | @@ -XXX,XX +XXX,XX @@ BlockJobInfoList *qmp_query_block_jobs(Error **errp) |
146 | + } | 110 | } |
147 | 111 | aio_context = block_job_get_aio_context(job); | |
148 | job = backup_job_create(backup->job_id, bs, target_bs, backup->speed, | 112 | aio_context_acquire(aio_context); |
149 | backup->sync, bmap, backup->compress, | 113 | - value = block_job_query(job, errp); |
150 | backup->on_source_error, backup->on_target_error, | 114 | + value = block_job_query_locked(job, errp); |
151 | - BLOCK_JOB_DEFAULT, NULL, NULL, txn, &local_err); | 115 | aio_context_release(aio_context); |
152 | + job_flags, NULL, NULL, txn, &local_err); | 116 | if (!value) { |
153 | bdrv_unref(target_bs); | 117 | qapi_free_BlockJobInfoList(head); |
154 | if (local_err != NULL) { | ||
155 | error_propagate(errp, local_err); | ||
156 | @@ -XXX,XX +XXX,XX @@ BlockJob *do_blockdev_backup(BlockdevBackup *backup, BlockJobTxn *txn, | ||
157 | Error *local_err = NULL; | ||
158 | AioContext *aio_context; | ||
159 | BlockJob *job = NULL; | ||
160 | + int job_flags = BLOCK_JOB_DEFAULT; | ||
161 | |||
162 | if (!backup->has_speed) { | ||
163 | backup->speed = 0; | ||
164 | @@ -XXX,XX +XXX,XX @@ BlockJob *do_blockdev_backup(BlockdevBackup *backup, BlockJobTxn *txn, | ||
165 | if (!backup->has_job_id) { | ||
166 | backup->job_id = NULL; | ||
167 | } | ||
168 | + if (!backup->has_auto_finalize) { | ||
169 | + backup->auto_finalize = true; | ||
170 | + } | ||
171 | + if (!backup->has_auto_dismiss) { | ||
172 | + backup->auto_dismiss = true; | ||
173 | + } | ||
174 | if (!backup->has_compress) { | ||
175 | backup->compress = false; | ||
176 | } | ||
177 | @@ -XXX,XX +XXX,XX @@ BlockJob *do_blockdev_backup(BlockdevBackup *backup, BlockJobTxn *txn, | ||
178 | goto out; | ||
179 | } | ||
180 | } | ||
181 | + if (!backup->auto_finalize) { | ||
182 | + job_flags |= BLOCK_JOB_MANUAL_FINALIZE; | ||
183 | + } | ||
184 | + if (!backup->auto_dismiss) { | ||
185 | + job_flags |= BLOCK_JOB_MANUAL_DISMISS; | ||
186 | + } | ||
187 | job = backup_job_create(backup->job_id, bs, target_bs, backup->speed, | ||
188 | backup->sync, NULL, backup->compress, | ||
189 | backup->on_source_error, backup->on_target_error, | ||
190 | - BLOCK_JOB_DEFAULT, NULL, NULL, txn, &local_err); | ||
191 | + job_flags, NULL, NULL, txn, &local_err); | ||
192 | if (local_err != NULL) { | ||
193 | error_propagate(errp, local_err); | ||
194 | } | ||
195 | diff --git a/blockjob.c b/blockjob.c | 118 | diff --git a/blockjob.c b/blockjob.c |
196 | index XXXXXXX..XXXXXXX 100644 | 119 | index XXXXXXX..XXXXXXX 100644 |
197 | --- a/blockjob.c | 120 | --- a/blockjob.c |
198 | +++ b/blockjob.c | 121 | +++ b/blockjob.c |
199 | @@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp) | 122 | @@ -XXX,XX +XXX,XX @@ static bool child_job_drained_poll(BdrvChild *c) |
200 | info->io_status = job->iostatus; | 123 | /* An inactive or completed job doesn't have any pending requests. Jobs |
201 | info->ready = job->ready; | 124 | * with !job->busy are either already paused or have a pause point after |
202 | info->status = job->status; | 125 | * being reentered, so no job driver code will run before they pause. */ |
203 | + info->auto_finalize = job->auto_finalize; | 126 | - if (!job->busy || job_is_completed(job)) { |
204 | + info->auto_dismiss = job->auto_dismiss; | 127 | - return false; |
205 | return info; | 128 | + WITH_JOB_LOCK_GUARD() { |
129 | + if (!job->busy || job_is_completed_locked(job)) { | ||
130 | + return false; | ||
131 | + } | ||
132 | } | ||
133 | |||
134 | /* Otherwise, assume that it isn't fully stopped yet, but allow the job to | ||
135 | @@ -XXX,XX +XXX,XX @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver, | ||
136 | job->ready_notifier.notify = block_job_event_ready; | ||
137 | job->idle_notifier.notify = block_job_on_idle; | ||
138 | |||
139 | - notifier_list_add(&job->job.on_finalize_cancelled, | ||
140 | - &job->finalize_cancelled_notifier); | ||
141 | - notifier_list_add(&job->job.on_finalize_completed, | ||
142 | - &job->finalize_completed_notifier); | ||
143 | - notifier_list_add(&job->job.on_pending, &job->pending_notifier); | ||
144 | - notifier_list_add(&job->job.on_ready, &job->ready_notifier); | ||
145 | - notifier_list_add(&job->job.on_idle, &job->idle_notifier); | ||
146 | + WITH_JOB_LOCK_GUARD() { | ||
147 | + notifier_list_add(&job->job.on_finalize_cancelled, | ||
148 | + &job->finalize_cancelled_notifier); | ||
149 | + notifier_list_add(&job->job.on_finalize_completed, | ||
150 | + &job->finalize_completed_notifier); | ||
151 | + notifier_list_add(&job->job.on_pending, &job->pending_notifier); | ||
152 | + notifier_list_add(&job->job.on_ready, &job->ready_notifier); | ||
153 | + notifier_list_add(&job->job.on_idle, &job->idle_notifier); | ||
154 | + } | ||
155 | |||
156 | error_setg(&job->blocker, "block device is in use by block job: %s", | ||
157 | job_type_str(&job->job)); | ||
158 | @@ -XXX,XX +XXX,XX @@ BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err, | ||
159 | action); | ||
160 | } | ||
161 | if (action == BLOCK_ERROR_ACTION_STOP) { | ||
162 | - if (!job->job.user_paused) { | ||
163 | - job_pause(&job->job); | ||
164 | - /* make the pause user visible, which will be resumed from QMP. */ | ||
165 | - job->job.user_paused = true; | ||
166 | + WITH_JOB_LOCK_GUARD() { | ||
167 | + if (!job->job.user_paused) { | ||
168 | + job_pause_locked(&job->job); | ||
169 | + /* | ||
170 | + * make the pause user visible, which will be | ||
171 | + * resumed from QMP. | ||
172 | + */ | ||
173 | + job->job.user_paused = true; | ||
174 | + } | ||
175 | } | ||
176 | block_job_iostatus_set_err(job, error); | ||
177 | } | ||
178 | diff --git a/job-qmp.c b/job-qmp.c | ||
179 | index XXXXXXX..XXXXXXX 100644 | ||
180 | --- a/job-qmp.c | ||
181 | +++ b/job-qmp.c | ||
182 | @@ -XXX,XX +XXX,XX @@ void qmp_job_dismiss(const char *id, Error **errp) | ||
183 | aio_context_release(aio_context); | ||
206 | } | 184 | } |
207 | 185 | ||
208 | diff --git a/tests/qemu-iotests/109.out b/tests/qemu-iotests/109.out | 186 | -static JobInfo *job_query_single(Job *job, Error **errp) |
209 | index XXXXXXX..XXXXXXX 100644 | 187 | +/* Called with job_mutex held. */ |
210 | --- a/tests/qemu-iotests/109.out | 188 | +static JobInfo *job_query_single_locked(Job *job, Error **errp) |
211 | +++ b/tests/qemu-iotests/109.out | 189 | { |
212 | @@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0 | 190 | JobInfo *info; |
213 | {"return": {}} | 191 | uint64_t progress_current; |
214 | {"return": {}} | 192 | @@ -XXX,XX +XXX,XX @@ JobInfoList *qmp_query_jobs(Error **errp) |
215 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}} | 193 | JobInfoList *head = NULL, **tail = &head; |
216 | -{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 1024, "offset": 1024, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | 194 | Job *job; |
217 | +{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 1024, "offset": 1024, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | 195 | |
218 | {"return": {}} | 196 | - for (job = job_next(NULL); job; job = job_next(job)) { |
219 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | 197 | + JOB_LOCK_GUARD(); |
220 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}} | 198 | + |
221 | @@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0 | 199 | + for (job = job_next_locked(NULL); job; job = job_next_locked(job)) { |
222 | {"return": {}} | 200 | JobInfo *value; |
223 | {"return": {}} | 201 | AioContext *aio_context; |
224 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 197120, "offset": 197120, "speed": 0, "type": "mirror"}} | 202 | |
225 | -{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 197120, "offset": 197120, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | 203 | @@ -XXX,XX +XXX,XX @@ JobInfoList *qmp_query_jobs(Error **errp) |
226 | +{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 197120, "offset": 197120, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | 204 | } |
227 | {"return": {}} | 205 | aio_context = job->aio_context; |
228 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | 206 | aio_context_acquire(aio_context); |
229 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 197120, "offset": 197120, "speed": 0, "type": "mirror"}} | 207 | - value = job_query_single(job, errp); |
230 | @@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0 | 208 | + value = job_query_single_locked(job, errp); |
231 | {"return": {}} | 209 | aio_context_release(aio_context); |
232 | {"return": {}} | 210 | if (!value) { |
233 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}} | 211 | qapi_free_JobInfoList(head); |
234 | -{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 327680, "offset": 327680, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | 212 | diff --git a/monitor/qmp-cmds.c b/monitor/qmp-cmds.c |
235 | +{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 327680, "offset": 327680, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | 213 | index XXXXXXX..XXXXXXX 100644 |
236 | {"return": {}} | 214 | --- a/monitor/qmp-cmds.c |
237 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | 215 | +++ b/monitor/qmp-cmds.c |
238 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}} | 216 | @@ -XXX,XX +XXX,XX @@ void qmp_cont(Error **errp) |
239 | @@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0 | 217 | blk_iostatus_reset(blk); |
240 | {"return": {}} | 218 | } |
241 | {"return": {}} | 219 | |
242 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}} | 220 | - for (job = block_job_next(NULL); job; job = block_job_next(job)) { |
243 | -{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 1024, "offset": 1024, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | 221 | - block_job_iostatus_reset(job); |
244 | +{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 1024, "offset": 1024, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | 222 | + WITH_JOB_LOCK_GUARD() { |
245 | {"return": {}} | 223 | + for (job = block_job_next_locked(NULL); job; |
246 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | 224 | + job = block_job_next_locked(job)) { |
247 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}} | 225 | + block_job_iostatus_reset_locked(job); |
248 | @@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0 | 226 | + } |
249 | {"return": {}} | 227 | } |
250 | {"return": {}} | 228 | |
251 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}} | 229 | /* Continuing after completed migration. Images have been inactivated to |
252 | -{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 65536, "offset": 65536, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | 230 | diff --git a/qemu-img.c b/qemu-img.c |
253 | +{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 65536, "offset": 65536, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | 231 | index XXXXXXX..XXXXXXX 100644 |
254 | {"return": {}} | 232 | --- a/qemu-img.c |
255 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | 233 | +++ b/qemu-img.c |
256 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}} | 234 | @@ -XXX,XX +XXX,XX @@ static void run_block_job(BlockJob *job, Error **errp) |
257 | @@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0 | 235 | int ret = 0; |
258 | {"return": {}} | 236 | |
259 | {"return": {}} | 237 | aio_context_acquire(aio_context); |
260 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}} | 238 | - job_ref(&job->job); |
261 | -{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 2560, "offset": 2560, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | 239 | + job_lock(); |
262 | +{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 2560, "offset": 2560, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | 240 | + job_ref_locked(&job->job); |
263 | {"return": {}} | 241 | do { |
264 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | 242 | float progress = 0.0f; |
265 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}} | 243 | + job_unlock(); |
266 | @@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0 | 244 | aio_poll(aio_context, true); |
267 | {"return": {}} | 245 | |
268 | {"return": {}} | 246 | progress_get_snapshot(&job->job.progress, &progress_current, |
269 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}} | 247 | @@ -XXX,XX +XXX,XX @@ static void run_block_job(BlockJob *job, Error **errp) |
270 | -{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 2560, "offset": 2560, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | 248 | progress = (float)progress_current / progress_total * 100.f; |
271 | +{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 2560, "offset": 2560, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | 249 | } |
272 | {"return": {}} | 250 | qemu_progress_print(progress, 0); |
273 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | 251 | - } while (!job_is_ready(&job->job) && !job_is_completed(&job->job)); |
274 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}} | 252 | + job_lock(); |
275 | @@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0 | 253 | + } while (!job_is_ready_locked(&job->job) && |
276 | {"return": {}} | 254 | + !job_is_completed_locked(&job->job)); |
277 | {"return": {}} | 255 | |
278 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 31457280, "offset": 31457280, "speed": 0, "type": "mirror"}} | 256 | - if (!job_is_completed(&job->job)) { |
279 | -{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 31457280, "offset": 31457280, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | 257 | - ret = job_complete_sync(&job->job, errp); |
280 | +{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 31457280, "offset": 31457280, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | 258 | + if (!job_is_completed_locked(&job->job)) { |
281 | {"return": {}} | 259 | + ret = job_complete_sync_locked(&job->job, errp); |
282 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | 260 | } else { |
283 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 31457280, "offset": 31457280, "speed": 0, "type": "mirror"}} | 261 | ret = job->job.ret; |
284 | @@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0 | 262 | } |
285 | {"return": {}} | 263 | - job_unref(&job->job); |
286 | {"return": {}} | 264 | + job_unref_locked(&job->job); |
287 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}} | 265 | + job_unlock(); |
288 | -{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 327680, "offset": 327680, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | 266 | aio_context_release(aio_context); |
289 | +{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 327680, "offset": 327680, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | 267 | |
290 | {"return": {}} | 268 | /* publish completion progress only when success */ |
291 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | ||
292 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}} | ||
293 | @@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0 | ||
294 | {"return": {}} | ||
295 | {"return": {}} | ||
296 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 2048, "offset": 2048, "speed": 0, "type": "mirror"}} | ||
297 | -{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 2048, "offset": 2048, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
298 | +{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 2048, "offset": 2048, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
299 | {"return": {}} | ||
300 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | ||
301 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2048, "offset": 2048, "speed": 0, "type": "mirror"}} | ||
302 | @@ -XXX,XX +XXX,XX @@ Automatically detecting the format is dangerous for raw images, write operations | ||
303 | Specify the 'raw' format explicitly to remove the restrictions. | ||
304 | {"return": {}} | ||
305 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}} | ||
306 | -{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 512, "offset": 512, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
307 | +{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 512, "offset": 512, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
308 | {"return": {}} | ||
309 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | ||
310 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}} | ||
311 | @@ -XXX,XX +XXX,XX @@ Images are identical. | ||
312 | {"return": {}} | ||
313 | {"return": {}} | ||
314 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}} | ||
315 | -{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 512, "offset": 512, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
316 | +{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 512, "offset": 512, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]} | ||
317 | {"return": {}} | ||
318 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}} | ||
319 | {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}} | ||
320 | -- | 269 | -- |
321 | 2.13.6 | 270 | 2.37.3 |
322 | |||
1 | From: Max Reitz <mreitz@redhat.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Signed-off-by: Max Reitz <mreitz@redhat.com> | 3 | We want to make sure access of job->aio_context is always done |
4 | under either BQL or job_mutex. The problem is that using | ||
5 | aio_co_enter(job->aiocontext, job->co) in job_start and job_enter_cond | ||
6 | makes the coroutine immediately resume, so we can't hold the job lock. | ||
7 | And caching it is not safe either, as it might change. | ||
8 | |||
9 | job_start is under BQL, so it can freely read job->aiocontext, but | ||
10 | job_enter_cond is not. | ||
11 | We want to avoid reading job->aio_context in job_enter_cond, therefore: | ||
12 | 1) use aio_co_wake(), since it doesn't want an aiocontext as argument | ||
13 | but uses job->co->ctx | ||
14 | 2) detect possible discrepancy between job->co->ctx and job->aio_context | ||
15 | by checking right after the coroutine resumes back from yielding if | ||
16 | job->aio_context has changed. If so, reschedule the coroutine to the | ||
17 | new context. | ||
18 | |||
19 | Calling bdrv_try_set_aio_context() will issue the following calls | ||
20 | (simplified): | ||
21 | * in terms of bdrv callbacks: | ||
22 | .drained_begin -> .set_aio_context -> .drained_end | ||
23 | * in terms of child_job functions: | ||
24 | child_job_drained_begin -> child_job_set_aio_context -> child_job_drained_end | ||
25 | * in terms of job functions: | ||
26 | job_pause_locked -> job_set_aio_context -> job_resume_locked | ||
27 | |||
28 | We can see that after setting the new aio_context, job_resume_locked | ||
29 | calls job_enter_cond again, which then invokes aio_co_wake(). But | ||
30 | while job->aiocontext has been set in job_set_aio_context, | ||
31 | job->co->ctx has not changed, so the coroutine would be entering in | ||
32 | the wrong aiocontext. | ||
33 | |||
34 | Using aio_co_schedule in job_resume_locked() might seem like a valid | ||
35 | alternative, but the problem is that the bh resuming the coroutine | ||
36 | is not scheduled immediately, and if in the meanwhile another | ||
37 | bdrv_try_set_aio_context() is run (see test_propagate_mirror() in | ||
38 | test-block-iothread.c), we would have the first schedule in the | ||
39 | wrong aiocontext, and the second set of drains won't even manage | ||
40 | to schedule the coroutine, as job->busy would still be true from | ||
41 | the previous job_resume_locked(). | ||
42 | |||
43 | The solution is to stick with aio_co_wake() and detect every time | ||
44 | the coroutine resumes back from yielding if job->aio_context | ||
45 | has changed. If so, we can reschedule it to the new context. | ||
46 | |||
47 | Check for the aiocontext change in job_do_yield_locked because: | ||
48 | 1) aio_co_reschedule_self must be called from the running coroutine | ||
49 | 2) since child_job_set_aio_context allows changing the aiocontext only | ||
50 | while the job is paused, this is the exact place where the coroutine | ||
51 | resumes, before running JobDriver's code. | ||
52 | |||
53 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
54 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
55 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
56 | Message-Id: <20220926093214.506243-13-eesposit@redhat.com> | ||
57 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
4 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 58 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
5 | --- | 59 | --- |
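The commit message on the right describes re-checking the job's AioContext each time the coroutine comes back from yielding, and rescheduling until the coroutine's context and job->aio_context agree. The following single-threaded toy model (invented toy_* names; the real code uses aio_co_reschedule_self() and drops the job lock around it) only illustrates the shape of that loop:

```c
#include <stdio.h>

/* Single-threaded toy model of "reschedule until the contexts agree". */
struct toy_ctx {
    const char *name;
};

struct toy_job {
    struct toy_ctx *aio_context;    /* where the job is supposed to run */
    struct toy_ctx *co_ctx;         /* where the coroutine currently is */
};

/* Stands in for aio_co_reschedule_self(): move the coroutine over. */
static void toy_reschedule_self(struct toy_job *job, struct toy_ctx *target)
{
    printf("rescheduling coroutine: %s to %s\n",
           job->co_ctx->name, target->name);
    job->co_ctx = target;
}

/* Stands in for the new loop in job_do_yield_locked() after the yield. */
static void toy_resume_after_yield(struct toy_job *job)
{
    struct toy_ctx *next = job->aio_context;
    while (job->co_ctx != next) {
        toy_reschedule_self(job, next);
        next = job->aio_context;    /* re-read: it may have moved again */
    }
    printf("running driver code in %s\n", job->co_ctx->name);
}

int main(void)
{
    struct toy_ctx main_loop = { "main-loop" };
    struct toy_ctx iothread = { "iothread" };
    struct toy_job job = { .aio_context = &main_loop, .co_ctx = &main_loop };

    /* While the job was paused, bdrv_try_set_aio_context() moved it. */
    job.aio_context = &iothread;
    toy_resume_after_yield(&job);
    return 0;
}
```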
6 | block/vdi.c | 46 ++++++++++++++++++++++++++++------------------ | 60 | job.c | 19 +++++++++++++++++-- |
7 | 1 file changed, 28 insertions(+), 18 deletions(-) | 61 | 1 file changed, 17 insertions(+), 2 deletions(-) |
8 | 62 | ||
9 | diff --git a/block/vdi.c b/block/vdi.c | 63 | diff --git a/job.c b/job.c |
10 | index XXXXXXX..XXXXXXX 100644 | 64 | index XXXXXXX..XXXXXXX 100644 |
11 | --- a/block/vdi.c | 65 | --- a/job.c |
12 | +++ b/block/vdi.c | 66 | +++ b/job.c |
13 | @@ -XXX,XX +XXX,XX @@ nonallocating_write: | 67 | @@ -XXX,XX +XXX,XX @@ void job_enter_cond_locked(Job *job, bool(*fn)(Job *job)) |
14 | return ret; | 68 | job->busy = true; |
69 | real_job_unlock(); | ||
70 | job_unlock(); | ||
71 | - aio_co_enter(job->aio_context, job->co); | ||
72 | + aio_co_wake(job->co); | ||
73 | job_lock(); | ||
15 | } | 74 | } |
16 | 75 | ||
17 | -static int coroutine_fn vdi_co_do_create(const char *filename, | 76 | @@ -XXX,XX +XXX,XX @@ void job_enter(Job *job) |
18 | - QemuOpts *file_opts, | 77 | */ |
19 | - BlockdevCreateOptionsVdi *vdi_opts, | 78 | static void coroutine_fn job_do_yield_locked(Job *job, uint64_t ns) |
20 | +static int coroutine_fn vdi_co_do_create(BlockdevCreateOptionsVdi *vdi_opts, | ||
21 | size_t block_size, Error **errp) | ||
22 | { | 79 | { |
23 | int ret = 0; | 80 | + AioContext *next_aio_context; |
24 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_do_create(const char *filename, | 81 | + |
25 | size_t i; | 82 | real_job_lock(); |
26 | size_t bmap_size; | 83 | if (ns != -1) { |
27 | int64_t offset = 0; | 84 | timer_mod(&job->sleep_timer, ns); |
28 | - Error *local_err = NULL; | 85 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn job_do_yield_locked(Job *job, uint64_t ns) |
29 | + BlockDriverState *bs_file = NULL; | 86 | qemu_coroutine_yield(); |
30 | BlockBackend *blk = NULL; | 87 | job_lock(); |
31 | uint32_t *bmap = NULL; | 88 | |
32 | 89 | - /* Set by job_enter_cond() before re-entering the coroutine. */ | |
33 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_do_create(const char *filename, | 90 | + next_aio_context = job->aio_context; |
34 | goto exit; | 91 | + /* |
35 | } | 92 | + * Coroutine has resumed, but in the meanwhile the job AioContext |
36 | 93 | + * might have changed via bdrv_try_set_aio_context(), so we need to move | |
37 | - ret = bdrv_create_file(filename, file_opts, &local_err); | 94 | + * the coroutine too in the new aiocontext. |
38 | - if (ret < 0) { | 95 | + */ |
39 | - error_propagate(errp, local_err); | 96 | + while (qemu_get_current_aio_context() != next_aio_context) { |
40 | + bs_file = bdrv_open_blockdev_ref(vdi_opts->file, errp); | 97 | + job_unlock(); |
41 | + if (!bs_file) { | 98 | + aio_co_reschedule_self(next_aio_context); |
42 | + ret = -EIO; | 99 | + job_lock(); |
43 | goto exit; | 100 | + next_aio_context = job->aio_context; |
44 | } | ||
45 | |||
46 | - blk = blk_new_open(filename, NULL, NULL, | ||
47 | - BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, | ||
48 | - &local_err); | ||
49 | - if (blk == NULL) { | ||
50 | - error_propagate(errp, local_err); | ||
51 | - ret = -EIO; | ||
52 | + blk = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL); | ||
53 | + ret = blk_insert_bs(blk, bs_file, errp); | ||
54 | + if (ret < 0) { | ||
55 | goto exit; | ||
56 | } | ||
57 | |||
58 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_do_create(const char *filename, | ||
59 | vdi_header_to_le(&header); | ||
60 | ret = blk_pwrite(blk, offset, &header, sizeof(header), 0); | ||
61 | if (ret < 0) { | ||
62 | - error_setg(errp, "Error writing header to %s", filename); | ||
63 | + error_setg(errp, "Error writing header"); | ||
64 | goto exit; | ||
65 | } | ||
66 | offset += sizeof(header); | ||
67 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_do_create(const char *filename, | ||
68 | } | ||
69 | ret = blk_pwrite(blk, offset, bmap, bmap_size, 0); | ||
70 | if (ret < 0) { | ||
71 | - error_setg(errp, "Error writing bmap to %s", filename); | ||
72 | + error_setg(errp, "Error writing bmap"); | ||
73 | goto exit; | ||
74 | } | ||
75 | offset += bmap_size; | ||
76 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_do_create(const char *filename, | ||
77 | ret = blk_truncate(blk, offset + blocks * block_size, | ||
78 | PREALLOC_MODE_OFF, errp); | ||
79 | if (ret < 0) { | ||
80 | - error_prepend(errp, "Failed to statically allocate %s", filename); | ||
81 | + error_prepend(errp, "Failed to statically allocate file"); | ||
82 | goto exit; | ||
83 | } | ||
84 | } | ||
85 | |||
86 | exit: | ||
87 | blk_unref(blk); | ||
88 | + bdrv_unref(bs_file); | ||
89 | g_free(bmap); | ||
90 | return ret; | ||
91 | } | ||
92 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts, | ||
93 | { | ||
94 | QDict *qdict = NULL; | ||
95 | BlockdevCreateOptionsVdi *create_options = NULL; | ||
96 | + BlockDriverState *bs_file = NULL; | ||
97 | uint64_t block_size = DEFAULT_CLUSTER_SIZE; | ||
98 | Visitor *v; | ||
99 | Error *local_err = NULL; | ||
100 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts, | ||
101 | |||
102 | qdict = qemu_opts_to_qdict_filtered(opts, NULL, &vdi_create_opts, true); | ||
103 | |||
104 | - qdict_put_str(qdict, "file", ""); /* FIXME */ | ||
105 | + ret = bdrv_create_file(filename, opts, errp); | ||
106 | + if (ret < 0) { | ||
107 | + goto done; | ||
108 | + } | 101 | + } |
109 | + | 102 | + |
110 | + bs_file = bdrv_open(filename, NULL, NULL, | 103 | + /* Set by job_enter_cond_locked() before re-entering the coroutine. */ |
111 | + BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp); | 104 | assert(job->busy); |
112 | + if (!bs_file) { | ||
113 | + ret = -EIO; | ||
114 | + goto done; | ||
115 | + } | ||
116 | + | ||
117 | + qdict_put_str(qdict, "file", bs_file->node_name); | ||
118 | |||
119 | /* Get the QAPI object */ | ||
120 | v = qobject_input_visitor_new_keyval(QOBJECT(qdict)); | ||
121 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts, | ||
122 | |||
123 | create_options->size = ROUND_UP(create_options->size, BDRV_SECTOR_SIZE); | ||
124 | |||
125 | - ret = vdi_co_do_create(filename, opts, create_options, block_size, errp); | ||
126 | + ret = vdi_co_do_create(create_options, block_size, errp); | ||
127 | done: | ||
128 | QDECREF(qdict); | ||
129 | qapi_free_BlockdevCreateOptionsVdi(create_options); | ||
130 | + bdrv_unref(bs_file); | ||
131 | return ret; | ||
132 | } | 105 | } |
133 | 106 | ||
134 | -- | 107 | -- |
135 | 2.13.6 | 108 | 2.37.3 |
136 | |||
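Condensed into one place, the resume path added by the job.c hunk above looks like the sketch below. It is illustrative only: the sketch_ function name is invented, the header list is approximate, and it assumes the QEMU-internal job and coroutine APIs used throughout this series.

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"
    #include "block/aio.h"
    #include "qemu/job.h"

    /* Sketch of the yield/resume path; called with job_mutex held. */
    static void coroutine_fn sketch_yield_and_follow_context(Job *job)
    {
        AioContext *next_aio_context;

        job_unlock();
        qemu_coroutine_yield();  /* re-entered later via aio_co_wake(job->co) */
        job_lock();

        /*
         * aio_co_wake() resumes the coroutine in the AioContext it last ran
         * in. If job->aio_context changed while we were yielded (e.g. via
         * bdrv_try_set_aio_context() during a drained section), migrate the
         * coroutine until the two agree again.
         */
        next_aio_context = job->aio_context;
        while (qemu_get_current_aio_context() != next_aio_context) {
            job_unlock();
            aio_co_reschedule_self(next_aio_context);
            job_lock();
            next_aio_context = job->aio_context;  /* may have changed again */
        }
    }

The switch from aio_co_enter(job->aio_context, job->co) to aio_co_wake(job->co) matters here: the waker no longer has to guess the right context, because the coroutine itself re-checks job->aio_context after every wakeup.
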
1 | From: John Snow <jsnow@redhat.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | For jobs utilizing the new manual workflow, we intend to prohibit | 3 | In order to make it thread safe, implement a "fake rwlock", |
4 | them from modifying the block graph until the management layer provides | 4 | where we allow reads under BQL *or* job_mutex held, but |
5 | an explicit ACK via block-job-finalize to move the process forward. | 5 | writes only under BQL *and* job_mutex. |
6 | 6 | ||
7 | To distinguish this runstate from "ready" or "waiting," we add a new | 7 | The only write we have is in child_job_set_aio_ctx, which always |
8 | "pending" event and status. | 8 | happens under drain (so the job is paused). |
9 | For this reason, introduce job_set_aio_context and make sure that | ||
10 | the context is set under BQL, job_mutex and drain. | ||
11 | Also make sure all other places where the aiocontext is read | ||
12 | are protected. | ||
9 | 13 | ||
10 | For now, the transition from PENDING to CONCLUDED/ABORTING is automatic, | 14 | The reads in commit.c and mirror.c are actually safe, because always |
11 | but a future commit will add the explicit block-job-finalize step. | 15 | done under BQL. |
12 | 16 | ||
13 | Transitions: | 17 | Note: at this stage, job_{lock/unlock} and job lock guard macros |
14 | Waiting -> Pending: Normal transition. | 18 | are *nop*. |
15 | Pending -> Concluded: Normal transition. | ||
16 | Pending -> Aborting: Late transactional failures and cancellations. | ||
17 | 19 | ||
18 | Removed Transitions: | 20 | Suggested-by: Paolo Bonzini <pbonzini@redhat.com> |
19 | Waiting -> Concluded: Jobs must go to PENDING first. | 21 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
20 | 22 | Message-Id: <20220926093214.506243-14-eesposit@redhat.com> | |
21 | Verbs: | 23 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
22 | Cancel: Can be applied to a pending job. | 24 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> |
23 | |||
24 | +---------+ | ||
25 | |UNDEFINED| | ||
26 | +--+------+ | ||
27 | | | ||
28 | +--v----+ | ||
29 | +---------+CREATED+-----------------+ | ||
30 | | +--+----+ | | ||
31 | | | | | ||
32 | | +--+----+ +------+ | | ||
33 | +---------+RUNNING<----->PAUSED| | | ||
34 | | +--+-+--+ +------+ | | ||
35 | | | | | | ||
36 | | | +------------------+ | | ||
37 | | | | | | ||
38 | | +--v--+ +-------+ | | | ||
39 | +---------+READY<------->STANDBY| | | | ||
40 | | +--+--+ +-------+ | | | ||
41 | | | | | | ||
42 | | +--v----+ | | | ||
43 | +---------+WAITING<---------------+ | | ||
44 | | +--+----+ | | ||
45 | | | | | ||
46 | | +--v----+ | | ||
47 | +---------+PENDING| | | ||
48 | | +--+----+ | | ||
49 | | | | | ||
50 | +--v-----+ +--v------+ | | ||
51 | |ABORTING+--->CONCLUDED| | | ||
52 | +--------+ +--+------+ | | ||
53 | | | | ||
54 | +--v-+ | | ||
55 | |NULL<--------------------+ | ||
56 | +----+ | ||
57 | |||
58 | Signed-off-by: John Snow <jsnow@redhat.com> | ||
59 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 25 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
60 | --- | 26 | --- |
61 | qapi/block-core.json | 31 +++++++++++++++++++++- | 27 | include/qemu/job.h | 23 ++++++++++++++++++++--- |
62 | include/block/blockjob.h | 5 ++++ | 28 | block/replication.c | 1 + |
63 | blockjob.c | 67 +++++++++++++++++++++++++++++++----------------- | 29 | blockjob.c | 3 ++- |
64 | 3 files changed, 78 insertions(+), 25 deletions(-) | 30 | job.c | 12 ++++++++++++ |
31 | 4 files changed, 35 insertions(+), 4 deletions(-) | ||
65 | 32 | ||
66 | diff --git a/qapi/block-core.json b/qapi/block-core.json | 33 | diff --git a/include/qemu/job.h b/include/qemu/job.h |
67 | index XXXXXXX..XXXXXXX 100644 | 34 | index XXXXXXX..XXXXXXX 100644 |
68 | --- a/qapi/block-core.json | 35 | --- a/include/qemu/job.h |
69 | +++ b/qapi/block-core.json | 36 | +++ b/include/qemu/job.h |
70 | @@ -XXX,XX +XXX,XX @@ | 37 | @@ -XXX,XX +XXX,XX @@ typedef struct Job { |
71 | # to the waiting state. This status will likely not be visible for | 38 | /* ProgressMeter API is thread-safe */ |
72 | # the last job in a transaction. | 39 | ProgressMeter progress; |
73 | # | 40 | |
74 | +# @pending: The job has finished its work, but has finalization steps that it | 41 | + /** |
75 | +# needs to make prior to completing. These changes may require | 42 | + * AioContext to run the job coroutine in. |
76 | +# manual intervention by the management process if manual was set | 43 | + * The job AioContext can be read when holding *either* |
77 | +# to true. These changes may still fail. | 44 | + * the BQL (so we are in the main loop) or the job_mutex. |
78 | +# | 45 | + * It can only be written when we hold *both* BQL |
79 | # @aborting: The job is in the process of being aborted, and will finish with | 46 | + * and the job_mutex. |
80 | # an error. The job will afterwards report that it is @concluded. | 47 | + */ |
81 | # This status may not be visible to the management process. | 48 | + AioContext *aio_context; |
82 | @@ -XXX,XX +XXX,XX @@ | 49 | |
83 | ## | 50 | - /** Protected by AioContext lock */ |
84 | { 'enum': 'BlockJobStatus', | 51 | |
85 | 'data': ['undefined', 'created', 'running', 'paused', 'ready', 'standby', | 52 | - /** AioContext to run the job coroutine in */ |
86 | - 'waiting', 'aborting', 'concluded', 'null' ] } | 53 | - AioContext *aio_context; |
87 | + 'waiting', 'pending', 'aborting', 'concluded', 'null' ] } | 54 | + /** Protected by AioContext lock */ |
88 | 55 | ||
89 | ## | 56 | /** Reference count of the block job */ |
90 | # @BlockJobInfo: | 57 | int refcnt; |
91 | @@ -XXX,XX +XXX,XX @@ | 58 | @@ -XXX,XX +XXX,XX @@ int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), |
92 | 'speed' : 'int' } } | 59 | int job_finish_sync_locked(Job *job, void (*finish)(Job *, Error **errp), |
93 | 60 | Error **errp); | |
94 | ## | 61 | |
95 | +# @BLOCK_JOB_PENDING: | 62 | +/** |
96 | +# | 63 | + * Sets the @job->aio_context. |
97 | +# Emitted when a block job is awaiting explicit authorization to finalize graph | 64 | + * Called with job_mutex *not* held. |
98 | +# changes via @block-job-finalize. If this job is part of a transaction, it will | 65 | + * |
99 | +# not emit this event until the transaction has converged first. | 66 | + * This function must run in the main thread to protect against |
100 | +# | 67 | + * concurrent read in job_finish_sync_locked(), takes the job_mutex |
101 | +# @type: job type | 68 | + * lock to protect against the read in job_do_yield_locked(), and must |
102 | +# | 69 | + * be called when the job is quiescent. |
103 | +# @id: The job identifier. | 70 | + */ |
104 | +# | 71 | +void job_set_aio_context(Job *job, AioContext *ctx); |
105 | +# Since: 2.12 | ||
106 | +# | ||
107 | +# Example: | ||
108 | +# | ||
109 | +# <- { "event": "BLOCK_JOB_WAITING", | ||
110 | +# "data": { "device": "drive0", "type": "mirror" }, | ||
111 | +# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } | ||
112 | +# | ||
113 | +## | ||
114 | +{ 'event': 'BLOCK_JOB_PENDING', | ||
115 | + 'data': { 'type' : 'BlockJobType', | ||
116 | + 'id' : 'str' } } | ||
117 | + | 72 | + |
118 | +## | 73 | #endif |
119 | # @PreallocMode: | 74 | diff --git a/block/replication.c b/block/replication.c |
120 | # | ||
121 | # Preallocation mode of QEMU image file | ||
122 | diff --git a/include/block/blockjob.h b/include/block/blockjob.h | ||
123 | index XXXXXXX..XXXXXXX 100644 | 75 | index XXXXXXX..XXXXXXX 100644 |
124 | --- a/include/block/blockjob.h | 76 | --- a/block/replication.c |
125 | +++ b/include/block/blockjob.h | 77 | +++ b/block/replication.c |
126 | @@ -XXX,XX +XXX,XX @@ typedef struct BlockJob { | 78 | @@ -XXX,XX +XXX,XX @@ static void replication_close(BlockDriverState *bs) |
127 | /** Current state; See @BlockJobStatus for details. */ | 79 | { |
128 | BlockJobStatus status; | 80 | BDRVReplicationState *s = bs->opaque; |
129 | 81 | Job *commit_job; | |
130 | + /** True if this job should automatically finalize itself */ | 82 | + GLOBAL_STATE_CODE(); |
131 | + bool auto_finalize; | 83 | |
132 | + | 84 | if (s->stage == BLOCK_REPLICATION_RUNNING) { |
133 | /** True if this job should automatically dismiss itself */ | 85 | replication_stop(s->rs, false, NULL); |
134 | bool auto_dismiss; | ||
135 | |||
136 | @@ -XXX,XX +XXX,XX @@ typedef enum BlockJobCreateFlags { | ||
137 | BLOCK_JOB_DEFAULT = 0x00, | ||
138 | /* BlockJob is not QMP-created and should not send QMP events */ | ||
139 | BLOCK_JOB_INTERNAL = 0x01, | ||
140 | + /* BlockJob requires manual finalize step */ | ||
141 | + BLOCK_JOB_MANUAL_FINALIZE = 0x02, | ||
142 | /* BlockJob requires manual dismiss step */ | ||
143 | BLOCK_JOB_MANUAL_DISMISS = 0x04, | ||
144 | } BlockJobCreateFlags; | ||
145 | diff --git a/blockjob.c b/blockjob.c | 86 | diff --git a/blockjob.c b/blockjob.c |
146 | index XXXXXXX..XXXXXXX 100644 | 87 | index XXXXXXX..XXXXXXX 100644 |
147 | --- a/blockjob.c | 88 | --- a/blockjob.c |
148 | +++ b/blockjob.c | 89 | +++ b/blockjob.c |
149 | @@ -XXX,XX +XXX,XX @@ static QemuMutex block_job_mutex; | 90 | @@ -XXX,XX +XXX,XX @@ static void child_job_set_aio_ctx(BdrvChild *c, AioContext *ctx, |
150 | 91 | bdrv_set_aio_context_ignore(sibling->bs, ctx, ignore); | |
151 | /* BlockJob State Transition Table */ | 92 | } |
152 | bool BlockJobSTT[BLOCK_JOB_STATUS__MAX][BLOCK_JOB_STATUS__MAX] = { | 93 | |
153 | - /* U, C, R, P, Y, S, W, X, E, N */ | 94 | - job->job.aio_context = ctx; |
154 | - /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0, 0, 0, 0}, | 95 | + job_set_aio_context(&job->job, ctx); |
155 | - /* C: */ [BLOCK_JOB_STATUS_CREATED] = {0, 0, 1, 0, 0, 0, 0, 1, 0, 1}, | ||
156 | - /* R: */ [BLOCK_JOB_STATUS_RUNNING] = {0, 0, 0, 1, 1, 0, 1, 1, 0, 0}, | ||
157 | - /* P: */ [BLOCK_JOB_STATUS_PAUSED] = {0, 0, 1, 0, 0, 0, 0, 0, 0, 0}, | ||
158 | - /* Y: */ [BLOCK_JOB_STATUS_READY] = {0, 0, 0, 0, 0, 1, 1, 1, 0, 0}, | ||
159 | - /* S: */ [BLOCK_JOB_STATUS_STANDBY] = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0}, | ||
160 | - /* W: */ [BLOCK_JOB_STATUS_WAITING] = {0, 0, 0, 0, 0, 0, 0, 1, 1, 0}, | ||
161 | - /* X: */ [BLOCK_JOB_STATUS_ABORTING] = {0, 0, 0, 0, 0, 0, 0, 1, 1, 0}, | ||
162 | - /* E: */ [BLOCK_JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, | ||
163 | - /* N: */ [BLOCK_JOB_STATUS_NULL] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, | ||
164 | + /* U, C, R, P, Y, S, W, D, X, E, N */ | ||
165 | + /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, | ||
166 | + /* C: */ [BLOCK_JOB_STATUS_CREATED] = {0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1}, | ||
167 | + /* R: */ [BLOCK_JOB_STATUS_RUNNING] = {0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0}, | ||
168 | + /* P: */ [BLOCK_JOB_STATUS_PAUSED] = {0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}, | ||
169 | + /* Y: */ [BLOCK_JOB_STATUS_READY] = {0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0}, | ||
170 | + /* S: */ [BLOCK_JOB_STATUS_STANDBY] = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0}, | ||
171 | + /* W: */ [BLOCK_JOB_STATUS_WAITING] = {0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0}, | ||
172 | + /* D: */ [BLOCK_JOB_STATUS_PENDING] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0}, | ||
173 | + /* X: */ [BLOCK_JOB_STATUS_ABORTING] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0}, | ||
174 | + /* E: */ [BLOCK_JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, | ||
175 | + /* N: */ [BLOCK_JOB_STATUS_NULL] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, | ||
176 | }; | ||
177 | |||
178 | bool BlockJobVerbTable[BLOCK_JOB_VERB__MAX][BLOCK_JOB_STATUS__MAX] = { | ||
179 | - /* U, C, R, P, Y, S, W, X, E, N */ | ||
180 | - [BLOCK_JOB_VERB_CANCEL] = {0, 1, 1, 1, 1, 1, 1, 0, 0, 0}, | ||
181 | - [BLOCK_JOB_VERB_PAUSE] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0}, | ||
182 | - [BLOCK_JOB_VERB_RESUME] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0}, | ||
183 | - [BLOCK_JOB_VERB_SET_SPEED] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0}, | ||
184 | - [BLOCK_JOB_VERB_COMPLETE] = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0}, | ||
185 | - [BLOCK_JOB_VERB_DISMISS] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 0}, | ||
186 | + /* U, C, R, P, Y, S, W, D, X, E, N */ | ||
187 | + [BLOCK_JOB_VERB_CANCEL] = {0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0}, | ||
188 | + [BLOCK_JOB_VERB_PAUSE] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}, | ||
189 | + [BLOCK_JOB_VERB_RESUME] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}, | ||
190 | + [BLOCK_JOB_VERB_SET_SPEED] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}, | ||
191 | + [BLOCK_JOB_VERB_COMPLETE] = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0}, | ||
192 | + [BLOCK_JOB_VERB_DISMISS] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0}, | ||
193 | }; | ||
194 | |||
195 | static void block_job_state_transition(BlockJob *job, BlockJobStatus s1) | ||
196 | @@ -XXX,XX +XXX,XX @@ static void __attribute__((__constructor__)) block_job_init(void) | ||
197 | |||
198 | static void block_job_event_cancelled(BlockJob *job); | ||
199 | static void block_job_event_completed(BlockJob *job, const char *msg); | ||
200 | +static int block_job_event_pending(BlockJob *job); | ||
201 | static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job)); | ||
202 | |||
203 | /* Transactional group of block jobs */ | ||
204 | @@ -XXX,XX +XXX,XX @@ static void block_job_cancel_async(BlockJob *job) | ||
205 | job->cancelled = true; | ||
206 | } | 96 | } |
207 | 97 | ||
208 | -static int block_job_txn_apply(BlockJobTxn *txn, int fn(BlockJob *)) | 98 | static AioContext *child_job_get_parent_aio_context(BdrvChild *c) |
209 | +static int block_job_txn_apply(BlockJobTxn *txn, int fn(BlockJob *), bool lock) | ||
210 | { | 99 | { |
211 | AioContext *ctx; | 100 | BlockJob *job = c->opaque; |
212 | BlockJob *job, *next; | 101 | + GLOBAL_STATE_CODE(); |
213 | int rc; | 102 | |
214 | 103 | return job->job.aio_context; | |
215 | QLIST_FOREACH_SAFE(job, &txn->jobs, txn_list, next) { | ||
216 | - ctx = blk_get_aio_context(job->blk); | ||
217 | - aio_context_acquire(ctx); | ||
218 | + if (lock) { | ||
219 | + ctx = blk_get_aio_context(job->blk); | ||
220 | + aio_context_acquire(ctx); | ||
221 | + } | ||
222 | rc = fn(job); | ||
223 | - aio_context_release(ctx); | ||
224 | + if (lock) { | ||
225 | + aio_context_release(ctx); | ||
226 | + } | ||
227 | if (rc) { | ||
228 | break; | ||
229 | } | ||
230 | @@ -XXX,XX +XXX,XX @@ static void block_job_completed_txn_success(BlockJob *job) | ||
231 | } | ||
232 | |||
233 | /* Jobs may require some prep-work to complete without failure */ | ||
234 | - rc = block_job_txn_apply(txn, block_job_prepare); | ||
235 | + rc = block_job_txn_apply(txn, block_job_prepare, true); | ||
236 | if (rc) { | ||
237 | block_job_completed_txn_abort(job); | ||
238 | return; | ||
239 | } | ||
240 | |||
241 | /* We are the last completed job, commit the transaction. */ | ||
242 | - block_job_txn_apply(txn, block_job_completed_single); | ||
243 | + block_job_txn_apply(txn, block_job_event_pending, false); | ||
244 | + block_job_txn_apply(txn, block_job_completed_single, true); | ||
245 | } | 104 | } |
246 | 105 | diff --git a/job.c b/job.c | |
247 | /* Assumes the block_job_mutex is held */ | 106 | index XXXXXXX..XXXXXXX 100644 |
248 | @@ -XXX,XX +XXX,XX @@ static void block_job_event_completed(BlockJob *job, const char *msg) | 107 | --- a/job.c |
249 | &error_abort); | 108 | +++ b/job.c |
109 | @@ -XXX,XX +XXX,XX @@ Job *job_get(const char *id) | ||
110 | return job_get_locked(id); | ||
250 | } | 111 | } |
251 | 112 | ||
252 | +static int block_job_event_pending(BlockJob *job) | 113 | +void job_set_aio_context(Job *job, AioContext *ctx) |
253 | +{ | 114 | +{ |
254 | + block_job_state_transition(job, BLOCK_JOB_STATUS_PENDING); | 115 | + /* protect against read in job_finish_sync_locked and job_start */ |
255 | + if (!job->auto_finalize && !block_job_is_internal(job)) { | 116 | + GLOBAL_STATE_CODE(); |
256 | + qapi_event_send_block_job_pending(job->driver->job_type, | 117 | + /* protect against read in job_do_yield_locked */ |
257 | + job->id, | 118 | + JOB_LOCK_GUARD(); |
258 | + &error_abort); | 119 | + /* ensure the job is quiescent while the AioContext is changed */ |
259 | + } | 120 | + assert(job->paused || job_is_completed_locked(job)); |
260 | + return 0; | 121 | + job->aio_context = ctx; |
261 | +} | 122 | +} |
262 | + | 123 | + |
263 | /* | 124 | /* Called with job_mutex *not* held. */ |
264 | * API for block job drivers and the block layer. These functions are | 125 | static void job_sleep_timer_cb(void *opaque) |
265 | * declared in blockjob_int.h. | 126 | { |
266 | @@ -XXX,XX +XXX,XX @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver, | 127 | @@ -XXX,XX +XXX,XX @@ int job_finish_sync_locked(Job *job, |
267 | job->paused = true; | 128 | { |
268 | job->pause_count = 1; | 129 | Error *local_err = NULL; |
269 | job->refcnt = 1; | 130 | int ret; |
270 | + job->auto_finalize = !(flags & BLOCK_JOB_MANUAL_FINALIZE); | 131 | + GLOBAL_STATE_CODE(); |
271 | job->auto_dismiss = !(flags & BLOCK_JOB_MANUAL_DISMISS); | 132 | |
272 | block_job_state_transition(job, BLOCK_JOB_STATUS_CREATED); | 133 | job_ref_locked(job); |
273 | aio_timer_init(qemu_get_aio_context(), &job->sleep_timer, | 134 | |
274 | -- | 135 | -- |
275 | 2.13.6 | 136 | 2.37.3 |
276 | |||
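Seen from the callers, the "fake rwlock" rule for job->aio_context boils down to two patterns. The sketch below is illustrative only (the sketch_ names are invented and the header list is approximate); the writer side is roughly what child_job_set_aio_ctx() ends up doing through job_set_aio_context() while the node is drained.

    #include "qemu/osdep.h"
    #include "qemu/main-loop.h"
    #include "block/block.h"
    #include "qemu/job.h"

    /* Reader: holding *either* the BQL or job_mutex is enough. */
    static AioContext *sketch_read_job_context(Job *job)
    {
        AioContext *ctx;

        WITH_JOB_LOCK_GUARD() {
            ctx = job->aio_context;
        }
        return ctx;
    }

    /* Writer: main-loop code only, with the job quiescent (drained). */
    static void sketch_move_job(BlockDriverState *bs, Job *job,
                                AioContext *new_ctx)
    {
        GLOBAL_STATE_CODE();                /* BQL held, main thread */
        bdrv_drained_begin(bs);             /* pauses the job */
        job_set_aio_context(job, new_ctx);  /* takes job_mutex internally */
        bdrv_drained_end(bs);
    }

This is why no real rwlock is needed: every writer runs in the main loop with the job paused, so a reader holding either lock never observes an in-flight update.
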
1 | From: John Snow <jsnow@redhat.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Trivial; Document what the job creation flags do, | 3 | The same job lock is also used to protect some of the BlockJob fields. |
4 | and some general tidying. | 4 | Categorize them just as done in job.h. |
5 | 5 | ||
6 | Signed-off-by: John Snow <jsnow@redhat.com> | 6 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> |
7 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> | ||
8 | Message-Id: <20220926093214.506243-15-eesposit@redhat.com> | ||
9 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
7 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
8 | --- | 11 | --- |
9 | include/block/blockjob.h | 8 ++++---- | 12 | include/block/blockjob.h | 32 ++++++++++++++++++++++++++------ |
10 | include/block/blockjob_int.h | 4 +++- | 13 | 1 file changed, 26 insertions(+), 6 deletions(-) |
11 | 2 files changed, 7 insertions(+), 5 deletions(-) | ||
12 | 14 | ||
13 | diff --git a/include/block/blockjob.h b/include/block/blockjob.h | 15 | diff --git a/include/block/blockjob.h b/include/block/blockjob.h |
14 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
15 | --- a/include/block/blockjob.h | 17 | --- a/include/block/blockjob.h |
16 | +++ b/include/block/blockjob.h | 18 | +++ b/include/block/blockjob.h |
19 | @@ -XXX,XX +XXX,XX @@ typedef struct BlockJobDriver BlockJobDriver; | ||
20 | * Long-running operation on a BlockDriverState. | ||
21 | */ | ||
22 | typedef struct BlockJob { | ||
23 | - /** Data belonging to the generic Job infrastructure */ | ||
24 | + /** | ||
25 | + * Data belonging to the generic Job infrastructure. | ||
26 | + * Protected by job mutex. | ||
27 | + */ | ||
28 | Job job; | ||
29 | |||
30 | - /** Status that is published by the query-block-jobs QMP API */ | ||
31 | + /** | ||
32 | + * Status that is published by the query-block-jobs QMP API. | ||
33 | + * Protected by job mutex. | ||
34 | + */ | ||
35 | BlockDeviceIoStatus iostatus; | ||
36 | |||
37 | - /** Speed that was set with @block_job_set_speed. */ | ||
38 | + /** | ||
39 | + * Speed that was set with @block_job_set_speed. | ||
40 | + * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE). | ||
41 | + */ | ||
42 | int64_t speed; | ||
43 | |||
44 | - /** Rate limiting data structure for implementing @speed. */ | ||
45 | + /** | ||
46 | + * Rate limiting data structure for implementing @speed. | ||
47 | + * RateLimit API is thread-safe. | ||
48 | + */ | ||
49 | RateLimit limit; | ||
50 | |||
51 | - /** Block other operations when block job is running */ | ||
52 | + /** | ||
53 | + * Block other operations when block job is running. | ||
54 | + * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE). | ||
55 | + */ | ||
56 | Error *blocker; | ||
57 | |||
58 | + /** All notifiers are set once in block_job_create() and never modified. */ | ||
59 | + | ||
60 | /** Called when a cancelled job is finalised. */ | ||
61 | Notifier finalize_cancelled_notifier; | ||
62 | |||
17 | @@ -XXX,XX +XXX,XX @@ typedef struct BlockJob { | 63 | @@ -XXX,XX +XXX,XX @@ typedef struct BlockJob { |
18 | /** Reference count of the block job */ | 64 | /** Called when the job coroutine yields or terminates */ |
19 | int refcnt; | 65 | Notifier idle_notifier; |
20 | 66 | ||
21 | - /* True if this job has reported completion by calling block_job_completed. | 67 | - /** BlockDriverStates that are involved in this block job */ |
22 | - */ | 68 | + /** |
23 | + /** True when job has reported completion by calling block_job_completed. */ | 69 | + * BlockDriverStates that are involved in this block job. |
24 | bool completed; | 70 | + * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE). |
25 | 71 | + */ | |
26 | - /* ret code passed to block_job_completed. | 72 | GSList *nodes; |
27 | - */ | ||
28 | + /** ret code passed to block_job_completed. */ | ||
29 | int ret; | ||
30 | |||
31 | /** | ||
32 | @@ -XXX,XX +XXX,XX @@ typedef struct BlockJob { | ||
33 | } BlockJob; | 73 | } BlockJob; |
34 | 74 | ||
35 | typedef enum BlockJobCreateFlags { | ||
36 | + /* Default behavior */ | ||
37 | BLOCK_JOB_DEFAULT = 0x00, | ||
38 | + /* BlockJob is not QMP-created and should not send QMP events */ | ||
39 | BLOCK_JOB_INTERNAL = 0x01, | ||
40 | } BlockJobCreateFlags; | ||
41 | |||
42 | diff --git a/include/block/blockjob_int.h b/include/block/blockjob_int.h | ||
43 | index XXXXXXX..XXXXXXX 100644 | ||
44 | --- a/include/block/blockjob_int.h | ||
45 | +++ b/include/block/blockjob_int.h | ||
46 | @@ -XXX,XX +XXX,XX @@ struct BlockJobDriver { | ||
47 | * block_job_create: | ||
48 | * @job_id: The id of the newly-created job, or %NULL to have one | ||
49 | * generated automatically. | ||
50 | - * @job_type: The class object for the newly-created job. | ||
51 | + * @driver: The class object for the newly-created job. | ||
52 | * @txn: The transaction this job belongs to, if any. %NULL otherwise. | ||
53 | * @bs: The block | ||
54 | * @perm, @shared_perm: Permissions to request for @bs | ||
55 | * @speed: The maximum speed, in bytes per second, or 0 for unlimited. | ||
56 | + * @flags: Creation flags for the Block Job. | ||
57 | + * See @BlockJobCreateFlags | ||
58 | * @cb: Completion function for the job. | ||
59 | * @opaque: Opaque pointer value passed to @cb. | ||
60 | * @errp: Error object. | ||
61 | -- | 75 | -- |
62 | 2.13.6 | 76 | 2.37.3 |
63 | |||
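In practice the categories documented above translate into two access patterns. A hypothetical pair of accessors (sketch only; names invented, headers approximate) would look like this:

    #include "qemu/osdep.h"
    #include "qemu/main-loop.h"
    #include "block/blockjob.h"
    #include "qemu/job.h"

    /* GLOBAL_STATE fields (speed, blocker, nodes): main loop only. */
    static int64_t sketch_get_speed(BlockJob *bjob)
    {
        GLOBAL_STATE_CODE();   /* asserts we run in the main thread */
        return bjob->speed;
    }

    /* job_mutex fields (job, iostatus): any thread, under the job lock. */
    static BlockDeviceIoStatus sketch_get_iostatus(BlockJob *bjob)
    {
        BlockDeviceIoStatus iostatus;

        WITH_JOB_LOCK_GUARD() {
            iostatus = bjob->iostatus;
        }
        return iostatus;
    }
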
1 | From: John Snow <jsnow@redhat.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Some jobs upon finalization may need to perform some work that can | 3 | They are all called with job_lock held, in job_event_*_locked() |
4 | still fail. If these jobs are part of a transaction, it's important | ||
5 | that these callbacks fail the entire transaction. | ||
6 | 4 | ||
7 | We allow for a new callback in addition to commit/abort/clean that | 5 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
8 | allows us the opportunity to have fairly late-breaking failures | 6 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> |
9 | in the transactional process. | 7 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> |
10 | 8 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | |
11 | The expected flow is: | 9 | Message-Id: <20220926093214.506243-16-eesposit@redhat.com> |
12 | |||
13 | - All jobs in a transaction converge to the PENDING state, | ||
14 | added in a forthcoming commit. | ||
15 | - Upon being finalized, either automatically or explicitly | ||
16 | by the user, jobs prepare to complete. | ||
17 | - If any job fails preparation, all jobs call .abort. | ||
18 | - Otherwise, they succeed and call .commit. | ||
19 | |||
20 | Signed-off-by: John Snow <jsnow@redhat.com> | ||
21 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
22 | --- | 11 | --- |
23 | include/block/blockjob_int.h | 10 ++++++++++ | 12 | blockjob.c | 25 +++++++++++++++---------- |
24 | blockjob.c | 30 +++++++++++++++++++++++++++--- | 13 | 1 file changed, 15 insertions(+), 10 deletions(-) |
25 | 2 files changed, 37 insertions(+), 3 deletions(-) | ||
26 | 14 | ||
27 | diff --git a/include/block/blockjob_int.h b/include/block/blockjob_int.h | ||
28 | index XXXXXXX..XXXXXXX 100644 | ||
29 | --- a/include/block/blockjob_int.h | ||
30 | +++ b/include/block/blockjob_int.h | ||
31 | @@ -XXX,XX +XXX,XX @@ struct BlockJobDriver { | ||
32 | void (*complete)(BlockJob *job, Error **errp); | ||
33 | |||
34 | /** | ||
35 | + * If the callback is not NULL, prepare will be invoked when all the jobs | ||
36 | + * belonging to the same transaction complete; or upon this job's completion | ||
37 | + * if it is not in a transaction. | ||
38 | + * | ||
39 | + * This callback will not be invoked if the job has already failed. | ||
40 | + * If it fails, abort and then clean will be called. | ||
41 | + */ | ||
42 | + int (*prepare)(BlockJob *job); | ||
43 | + | ||
44 | + /** | ||
45 | * If the callback is not NULL, it will be invoked when all the jobs | ||
46 | * belonging to the same transaction complete; or upon this job's | ||
47 | * completion if it is not in a transaction. Skipped if NULL. | ||
48 | diff --git a/blockjob.c b/blockjob.c | 15 | diff --git a/blockjob.c b/blockjob.c |
49 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
50 | --- a/blockjob.c | 17 | --- a/blockjob.c |
51 | +++ b/blockjob.c | 18 | +++ b/blockjob.c |
52 | @@ -XXX,XX +XXX,XX @@ static void block_job_update_rc(BlockJob *job) | 19 | @@ -XXX,XX +XXX,XX @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs, |
20 | return 0; | ||
21 | } | ||
22 | |||
23 | -static void block_job_on_idle(Notifier *n, void *opaque) | ||
24 | +/* Called with job_mutex lock held. */ | ||
25 | +static void block_job_on_idle_locked(Notifier *n, void *opaque) | ||
26 | { | ||
27 | aio_wait_kick(); | ||
28 | } | ||
29 | @@ -XXX,XX +XXX,XX @@ static void block_job_iostatus_set_err(BlockJob *job, int error) | ||
53 | } | 30 | } |
54 | } | 31 | } |
55 | 32 | ||
56 | +static int block_job_prepare(BlockJob *job) | 33 | -static void block_job_event_cancelled(Notifier *n, void *opaque) |
57 | +{ | 34 | +/* Called with job_mutex lock held. */ |
58 | + if (job->ret == 0 && job->driver->prepare) { | 35 | +static void block_job_event_cancelled_locked(Notifier *n, void *opaque) |
59 | + job->ret = job->driver->prepare(job); | ||
60 | + } | ||
61 | + return job->ret; | ||
62 | +} | ||
63 | + | ||
64 | static void block_job_commit(BlockJob *job) | ||
65 | { | 36 | { |
66 | assert(!job->ret); | 37 | BlockJob *job = opaque; |
67 | @@ -XXX,XX +XXX,XX @@ static void block_job_clean(BlockJob *job) | 38 | uint64_t progress_current, progress_total; |
68 | } | 39 | @@ -XXX,XX +XXX,XX @@ static void block_job_event_cancelled(Notifier *n, void *opaque) |
40 | job->speed); | ||
69 | } | 41 | } |
70 | 42 | ||
71 | -static void block_job_completed_single(BlockJob *job) | 43 | -static void block_job_event_completed(Notifier *n, void *opaque) |
72 | +static int block_job_completed_single(BlockJob *job) | 44 | +/* Called with job_mutex lock held. */ |
45 | +static void block_job_event_completed_locked(Notifier *n, void *opaque) | ||
73 | { | 46 | { |
74 | assert(job->completed); | 47 | BlockJob *job = opaque; |
75 | 48 | const char *msg = NULL; | |
76 | @@ -XXX,XX +XXX,XX @@ static void block_job_completed_single(BlockJob *job) | 49 | @@ -XXX,XX +XXX,XX @@ static void block_job_event_completed(Notifier *n, void *opaque) |
77 | QLIST_REMOVE(job, txn_list); | 50 | msg); |
78 | block_job_txn_unref(job->txn); | ||
79 | block_job_conclude(job); | ||
80 | + return 0; | ||
81 | } | 51 | } |
82 | 52 | ||
83 | static void block_job_cancel_async(BlockJob *job) | 53 | -static void block_job_event_pending(Notifier *n, void *opaque) |
84 | @@ -XXX,XX +XXX,XX @@ static void block_job_cancel_async(BlockJob *job) | 54 | +/* Called with job_mutex lock held. */ |
85 | job->cancelled = true; | 55 | +static void block_job_event_pending_locked(Notifier *n, void *opaque) |
56 | { | ||
57 | BlockJob *job = opaque; | ||
58 | |||
59 | @@ -XXX,XX +XXX,XX @@ static void block_job_event_pending(Notifier *n, void *opaque) | ||
60 | job->job.id); | ||
86 | } | 61 | } |
87 | 62 | ||
88 | -static void block_job_txn_apply(BlockJobTxn *txn, void fn(BlockJob *)) | 63 | -static void block_job_event_ready(Notifier *n, void *opaque) |
89 | +static int block_job_txn_apply(BlockJobTxn *txn, int fn(BlockJob *)) | 64 | +/* Called with job_mutex lock held. */ |
65 | +static void block_job_event_ready_locked(Notifier *n, void *opaque) | ||
90 | { | 66 | { |
91 | AioContext *ctx; | 67 | BlockJob *job = opaque; |
92 | BlockJob *job, *next; | 68 | uint64_t progress_current, progress_total; |
93 | + int rc; | 69 | @@ -XXX,XX +XXX,XX @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver, |
94 | 70 | ||
95 | QLIST_FOREACH_SAFE(job, &txn->jobs, txn_list, next) { | 71 | ratelimit_init(&job->limit); |
96 | ctx = blk_get_aio_context(job->blk); | 72 | |
97 | aio_context_acquire(ctx); | 73 | - job->finalize_cancelled_notifier.notify = block_job_event_cancelled; |
98 | - fn(job); | 74 | - job->finalize_completed_notifier.notify = block_job_event_completed; |
99 | + rc = fn(job); | 75 | - job->pending_notifier.notify = block_job_event_pending; |
100 | aio_context_release(ctx); | 76 | - job->ready_notifier.notify = block_job_event_ready; |
101 | + if (rc) { | 77 | - job->idle_notifier.notify = block_job_on_idle; |
102 | + break; | 78 | + job->finalize_cancelled_notifier.notify = block_job_event_cancelled_locked; |
103 | + } | 79 | + job->finalize_completed_notifier.notify = block_job_event_completed_locked; |
104 | } | 80 | + job->pending_notifier.notify = block_job_event_pending_locked; |
105 | + return rc; | 81 | + job->ready_notifier.notify = block_job_event_ready_locked; |
106 | } | 82 | + job->idle_notifier.notify = block_job_on_idle_locked; |
107 | 83 | ||
108 | static int block_job_finish_sync(BlockJob *job, | 84 | WITH_JOB_LOCK_GUARD() { |
109 | @@ -XXX,XX +XXX,XX @@ static void block_job_completed_txn_success(BlockJob *job) | 85 | notifier_list_add(&job->job.on_finalize_cancelled, |
110 | { | ||
111 | BlockJobTxn *txn = job->txn; | ||
112 | BlockJob *other_job; | ||
113 | + int rc = 0; | ||
114 | + | ||
115 | /* | ||
116 | * Successful completion, see if there are other running jobs in this | ||
117 | * txn. | ||
118 | @@ -XXX,XX +XXX,XX @@ static void block_job_completed_txn_success(BlockJob *job) | ||
119 | } | ||
120 | assert(other_job->ret == 0); | ||
121 | } | ||
122 | + | ||
123 | + /* Jobs may require some prep-work to complete without failure */ | ||
124 | + rc = block_job_txn_apply(txn, block_job_prepare); | ||
125 | + if (rc) { | ||
126 | + block_job_completed_txn_abort(job); | ||
127 | + return; | ||
128 | + } | ||
129 | + | ||
130 | /* We are the last completed job, commit the transaction. */ | ||
131 | block_job_txn_apply(txn, block_job_completed_single); | ||
132 | } | ||
133 | -- | 86 | -- |
134 | 2.13.6 | 87 | 2.37.3 |
135 | |||
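The renaming above encodes a calling convention rather than new behaviour: a _locked suffix means the caller already holds job_mutex, so the callback must not take it again. A minimal sketch of that convention (invented names, not the actual blockjob.c code):

    #include "qemu/osdep.h"
    #include "qemu/notify.h"
    #include "qemu/job.h"

    /* "_locked" here means: invoked with job_mutex already held. */
    static void sketch_on_event_locked(Notifier *n, void *opaque)
    {
        Job *job = opaque;
        (void)job;   /* may read job_mutex-protected fields directly */
    }

    static Notifier sketch_notifier = { .notify = sketch_on_event_locked };

    static void sketch_setup(NotifierList *list)
    {
        notifier_list_add(list, &sketch_notifier);
    }

    static void sketch_emit_event(NotifierList *list, Job *job)
    {
        WITH_JOB_LOCK_GUARD() {
            notifier_list_notify(list, job);  /* runs the *_locked callbacks */
        }
    }
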
1 | From: John Snow <jsnow@redhat.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | add a new state "CONCLUDED" that identifies a job that has ceased all | 3 | iostatus is the only field (together with .job) that needs |
4 | operations. The wording was chosen to avoid any phrasing that might | 4 | protection using the job mutex. |
5 | imply success, error, or cancellation. The task has simply ceased all | ||
6 | operation and can never again perform any work. | ||
7 | 5 | ||
8 | ("finished", "done", and "completed" might all imply success.) | 6 | It is set in the main loop (GLOBAL_STATE functions) but read |
7 | in I/O code (block_job_error_action). | ||
9 | 8 | ||
10 | Transitions: | 9 | In order to protect it, change block_job_iostatus_set_err |
11 | Running -> Concluded: normal completion | 10 | to block_job_iostatus_set_err_locked(), always called under |
12 | Ready -> Concluded: normal completion | 11 | job lock. |
13 | Aborting -> Concluded: error and cancellations | ||
14 | 12 | ||
15 | Verbs: | 13 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
16 | None as of this commit. (a future commit adds 'dismiss') | 14 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
17 | 15 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | |
18 | +---------+ | 16 | Message-Id: <20220926093214.506243-17-eesposit@redhat.com> |
19 | |UNDEFINED| | 17 | [kwolf: Fixed up type of iostatus] |
20 | +--+------+ | ||
21 | | | ||
22 | +--v----+ | ||
23 | +---------+CREATED| | ||
24 | | +--+----+ | ||
25 | | | | ||
26 | | +--v----+ +------+ | ||
27 | +---------+RUNNING<----->PAUSED| | ||
28 | | +--+-+--+ +------+ | ||
29 | | | | | ||
30 | | | +------------------+ | ||
31 | | | | | ||
32 | | +--v--+ +-------+ | | ||
33 | +---------+READY<------->STANDBY| | | ||
34 | | +--+--+ +-------+ | | ||
35 | | | | | ||
36 | +--v-----+ +--v------+ | | ||
37 | |ABORTING+--->CONCLUDED<-------------+ | ||
38 | +--------+ +---------+ | ||
39 | |||
40 | Signed-off-by: John Snow <jsnow@redhat.com> | ||
41 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 18 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
42 | --- | 19 | --- |
43 | qapi/block-core.json | 7 +++++-- | 20 | block/mirror.c | 6 +++++- |
44 | blockjob.c | 39 ++++++++++++++++++++++++--------------- | 21 | blockjob.c | 5 +++-- |
45 | 2 files changed, 29 insertions(+), 17 deletions(-) | 22 | 2 files changed, 8 insertions(+), 3 deletions(-) |
46 | 23 | ||
47 | diff --git a/qapi/block-core.json b/qapi/block-core.json | 24 | diff --git a/block/mirror.c b/block/mirror.c |
48 | index XXXXXXX..XXXXXXX 100644 | 25 | index XXXXXXX..XXXXXXX 100644 |
49 | --- a/qapi/block-core.json | 26 | --- a/block/mirror.c |
50 | +++ b/qapi/block-core.json | 27 | +++ b/block/mirror.c |
51 | @@ -XXX,XX +XXX,XX @@ | 28 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn mirror_run(Job *job, Error **errp) |
52 | # The job may return to @ready or otherwise be canceled. | 29 | BlockDriverState *bs = s->mirror_top_bs->backing->bs; |
53 | # | 30 | BlockDriverState *target_bs = blk_bs(s->target); |
54 | # @aborting: The job is in the process of being aborted, and will finish with | 31 | bool need_drain = true; |
55 | -# an error. | 32 | + BlockDeviceIoStatus iostatus; |
56 | +# an error. The job will afterwards report that it is @concluded. | 33 | int64_t length; |
57 | # This status may not be visible to the management process. | 34 | int64_t target_length; |
58 | # | 35 | BlockDriverInfo bdi; |
59 | +# @concluded: The job has finished all work. If manual was set to true, the job | 36 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn mirror_run(Job *job, Error **errp) |
60 | +# will remain in the query list until it is dismissed. | 37 | * We do so every BLKOCK_JOB_SLICE_TIME nanoseconds, or when there is |
61 | +# | 38 | * an error, or when the source is clean, whichever comes first. */ |
62 | # Since: 2.12 | 39 | delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns; |
63 | ## | 40 | + WITH_JOB_LOCK_GUARD() { |
64 | { 'enum': 'BlockJobStatus', | 41 | + iostatus = s->common.iostatus; |
65 | 'data': ['undefined', 'created', 'running', 'paused', 'ready', 'standby', | 42 | + } |
66 | - 'aborting' ] } | 43 | if (delta < BLOCK_JOB_SLICE_TIME && |
67 | + 'aborting', 'concluded' ] } | 44 | - s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) { |
68 | 45 | + iostatus == BLOCK_DEVICE_IO_STATUS_OK) { | |
69 | ## | 46 | if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 || |
70 | # @BlockJobInfo: | 47 | (cnt == 0 && s->in_flight > 0)) { |
48 | trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight); | ||
71 | diff --git a/blockjob.c b/blockjob.c | 49 | diff --git a/blockjob.c b/blockjob.c |
72 | index XXXXXXX..XXXXXXX 100644 | 50 | index XXXXXXX..XXXXXXX 100644 |
73 | --- a/blockjob.c | 51 | --- a/blockjob.c |
74 | +++ b/blockjob.c | 52 | +++ b/blockjob.c |
75 | @@ -XXX,XX +XXX,XX @@ static QemuMutex block_job_mutex; | 53 | @@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp) |
76 | 54 | return block_job_query_locked(job, errp); | |
77 | /* BlockJob State Transition Table */ | ||
78 | bool BlockJobSTT[BLOCK_JOB_STATUS__MAX][BLOCK_JOB_STATUS__MAX] = { | ||
79 | - /* U, C, R, P, Y, S, X */ | ||
80 | - /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0}, | ||
81 | - /* C: */ [BLOCK_JOB_STATUS_CREATED] = {0, 0, 1, 0, 0, 0, 1}, | ||
82 | - /* R: */ [BLOCK_JOB_STATUS_RUNNING] = {0, 0, 0, 1, 1, 0, 1}, | ||
83 | - /* P: */ [BLOCK_JOB_STATUS_PAUSED] = {0, 0, 1, 0, 0, 0, 0}, | ||
84 | - /* Y: */ [BLOCK_JOB_STATUS_READY] = {0, 0, 0, 0, 0, 1, 1}, | ||
85 | - /* S: */ [BLOCK_JOB_STATUS_STANDBY] = {0, 0, 0, 0, 1, 0, 0}, | ||
86 | - /* X: */ [BLOCK_JOB_STATUS_ABORTING] = {0, 0, 0, 0, 0, 0, 0}, | ||
87 | + /* U, C, R, P, Y, S, X, E */ | ||
88 | + /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0, 0}, | ||
89 | + /* C: */ [BLOCK_JOB_STATUS_CREATED] = {0, 0, 1, 0, 0, 0, 1, 0}, | ||
90 | + /* R: */ [BLOCK_JOB_STATUS_RUNNING] = {0, 0, 0, 1, 1, 0, 1, 1}, | ||
91 | + /* P: */ [BLOCK_JOB_STATUS_PAUSED] = {0, 0, 1, 0, 0, 0, 0, 0}, | ||
92 | + /* Y: */ [BLOCK_JOB_STATUS_READY] = {0, 0, 0, 0, 0, 1, 1, 1}, | ||
93 | + /* S: */ [BLOCK_JOB_STATUS_STANDBY] = {0, 0, 0, 0, 1, 0, 0, 0}, | ||
94 | + /* X: */ [BLOCK_JOB_STATUS_ABORTING] = {0, 0, 0, 0, 0, 0, 0, 1}, | ||
95 | + /* E: */ [BLOCK_JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0}, | ||
96 | }; | ||
97 | |||
98 | bool BlockJobVerbTable[BLOCK_JOB_VERB__MAX][BLOCK_JOB_STATUS__MAX] = { | ||
99 | - /* U, C, R, P, Y, S, X */ | ||
100 | - [BLOCK_JOB_VERB_CANCEL] = {0, 1, 1, 1, 1, 1, 0}, | ||
101 | - [BLOCK_JOB_VERB_PAUSE] = {0, 1, 1, 1, 1, 1, 0}, | ||
102 | - [BLOCK_JOB_VERB_RESUME] = {0, 1, 1, 1, 1, 1, 0}, | ||
103 | - [BLOCK_JOB_VERB_SET_SPEED] = {0, 1, 1, 1, 1, 1, 0}, | ||
104 | - [BLOCK_JOB_VERB_COMPLETE] = {0, 0, 0, 0, 1, 0, 0}, | ||
105 | + /* U, C, R, P, Y, S, X, E */ | ||
106 | + [BLOCK_JOB_VERB_CANCEL] = {0, 1, 1, 1, 1, 1, 0, 0}, | ||
107 | + [BLOCK_JOB_VERB_PAUSE] = {0, 1, 1, 1, 1, 1, 0, 0}, | ||
108 | + [BLOCK_JOB_VERB_RESUME] = {0, 1, 1, 1, 1, 1, 0, 0}, | ||
109 | + [BLOCK_JOB_VERB_SET_SPEED] = {0, 1, 1, 1, 1, 1, 0, 0}, | ||
110 | + [BLOCK_JOB_VERB_COMPLETE] = {0, 0, 0, 0, 1, 0, 0, 0}, | ||
111 | }; | ||
112 | |||
113 | static void block_job_state_transition(BlockJob *job, BlockJobStatus s1) | ||
114 | @@ -XXX,XX +XXX,XX @@ void block_job_start(BlockJob *job) | ||
115 | bdrv_coroutine_enter(blk_bs(job->blk), job->co); | ||
116 | } | 55 | } |
117 | 56 | ||
118 | +static void block_job_conclude(BlockJob *job) | 57 | -static void block_job_iostatus_set_err(BlockJob *job, int error) |
119 | +{ | 58 | +/* Called with job lock held */ |
120 | + block_job_state_transition(job, BLOCK_JOB_STATUS_CONCLUDED); | 59 | +static void block_job_iostatus_set_err_locked(BlockJob *job, int error) |
121 | +} | ||
122 | + | ||
123 | static void block_job_completed_single(BlockJob *job) | ||
124 | { | 60 | { |
125 | assert(job->completed); | 61 | if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) { |
126 | @@ -XXX,XX +XXX,XX @@ static void block_job_completed_single(BlockJob *job) | 62 | job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE : |
127 | 63 | @@ -XXX,XX +XXX,XX @@ BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err, | |
128 | QLIST_REMOVE(job, txn_list); | 64 | */ |
129 | block_job_txn_unref(job->txn); | 65 | job->job.user_paused = true; |
130 | + block_job_conclude(job); | 66 | } |
131 | block_job_unref(job); | 67 | + block_job_iostatus_set_err_locked(job, error); |
68 | } | ||
69 | - block_job_iostatus_set_err(job, error); | ||
70 | } | ||
71 | return action; | ||
132 | } | 72 | } |
133 | |||
134 | @@ -XXX,XX +XXX,XX @@ void block_job_user_resume(BlockJob *job, Error **errp) | ||
135 | |||
136 | void block_job_cancel(BlockJob *job) | ||
137 | { | ||
138 | - if (block_job_started(job)) { | ||
139 | + if (job->status == BLOCK_JOB_STATUS_CONCLUDED) { | ||
140 | + return; | ||
141 | + } else if (block_job_started(job)) { | ||
142 | block_job_cancel_async(job); | ||
143 | block_job_enter(job); | ||
144 | } else { | ||
145 | -- | 73 | -- |
146 | 2.13.6 | 74 | 2.37.3 |
147 | |||
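The mirror_run() hunk above shows the general pattern for reading a job_mutex-protected field from I/O code: take a snapshot under the lock, then make the decision on the local copy. Reduced to its essentials (sketch only, with an invented name and simplified arguments):

    #include "qemu/osdep.h"
    #include "block/blockjob.h"
    #include "qemu/job.h"

    static bool sketch_io_ok_and_within_slice(BlockJob *bjob,
                                              int64_t delta_ns,
                                              int64_t slice_ns)
    {
        BlockDeviceIoStatus iostatus;

        /* Copy the shared field under job_mutex... */
        WITH_JOB_LOCK_GUARD() {
            iostatus = bjob->iostatus;
        }
        /* ...and use the stable local copy outside the lock. */
        return delta_ns < slice_ns && iostatus == BLOCK_DEVICE_IO_STATUS_OK;
    }

Keeping the comparison outside the locked section keeps the critical section as small as possible, which is the same guideline the later patches in this series apply to the job API at large.
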
1 | The .bdrv_getlength implementation of the crypto block driver asserted | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | that the payload offset isn't after EOF. This is an invalid assertion to | ||
3 | make as the image file could be corrupted. Instead, check it and return | ||
4 | -EIO if the file is too small for the payload offset. | ||
5 | 2 | ||
6 | Zero length images are fine, so trigger -EIO only on offset > len, not | 3 | Some callbacks implementation use bdrv_* APIs that assume the |
7 | on offset >= len as the assertion did before. | 4 | AioContext lock is held. Make sure this invariant is documented. |
8 | 5 | ||
6 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> | ||
7 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
8 | Message-Id: <20220926093214.506243-18-eesposit@redhat.com> | ||
9 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
10 | Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> | ||
11 | --- | 11 | --- |
12 | block/crypto.c | 5 ++++- | 12 | include/qemu/job.h | 27 +++++++++++++++++++++++++-- |
13 | 1 file changed, 4 insertions(+), 1 deletion(-) | 13 | 1 file changed, 25 insertions(+), 2 deletions(-) |
14 | 14 | ||
15 | diff --git a/block/crypto.c b/block/crypto.c | 15 | diff --git a/include/qemu/job.h b/include/qemu/job.h |
16 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/block/crypto.c | 17 | --- a/include/qemu/job.h |
18 | +++ b/block/crypto.c | 18 | +++ b/include/qemu/job.h |
19 | @@ -XXX,XX +XXX,XX @@ static int64_t block_crypto_getlength(BlockDriverState *bs) | 19 | @@ -XXX,XX +XXX,XX @@ typedef struct Job { |
20 | 20 | /** True if this job should automatically dismiss itself */ | |
21 | uint64_t offset = qcrypto_block_get_payload_offset(crypto->block); | 21 | bool auto_dismiss; |
22 | assert(offset < INT64_MAX); | 22 | |
23 | - assert(offset < len); | 23 | - /** The completion function that will be called when the job completes. */ |
24 | + | 24 | + /** |
25 | + if (offset > len) { | 25 | + * The completion function that will be called when the job completes. |
26 | + return -EIO; | 26 | + * Called with AioContext lock held, since many callback implementations |
27 | + } | 27 | + * use bdrv_* functions that require to hold the lock. |
28 | 28 | + */ | |
29 | len -= offset; | 29 | BlockCompletionFunc *cb; |
30 | |||
31 | /** The opaque value that is passed to the completion function. */ | ||
32 | @@ -XXX,XX +XXX,XX @@ struct JobDriver { | ||
33 | * | ||
34 | * This callback will not be invoked if the job has already failed. | ||
35 | * If it fails, abort and then clean will be called. | ||
36 | + * | ||
37 | + * Called with AioContext lock held, since many callbacs implementations | ||
38 | + * use bdrv_* functions that require to hold the lock. | ||
39 | */ | ||
40 | int (*prepare)(Job *job); | ||
41 | |||
42 | @@ -XXX,XX +XXX,XX @@ struct JobDriver { | ||
43 | * | ||
44 | * All jobs will complete with a call to either .commit() or .abort() but | ||
45 | * never both. | ||
46 | + * | ||
47 | + * Called with AioContext lock held, since many callback implementations | ||
48 | + * use bdrv_* functions that require to hold the lock. | ||
49 | */ | ||
50 | void (*commit)(Job *job); | ||
51 | |||
52 | @@ -XXX,XX +XXX,XX @@ struct JobDriver { | ||
53 | * | ||
54 | * All jobs will complete with a call to either .commit() or .abort() but | ||
55 | * never both. | ||
56 | + * | ||
57 | + * Called with AioContext lock held, since many callback implementations | ||
58 | + * use bdrv_* functions that require to hold the lock. | ||
59 | */ | ||
60 | void (*abort)(Job *job); | ||
61 | |||
62 | @@ -XXX,XX +XXX,XX @@ struct JobDriver { | ||
63 | * .commit() or .abort(). Regardless of which callback is invoked after | ||
64 | * completion, .clean() will always be called, even if the job does not | ||
65 | * belong to a transaction group. | ||
66 | + * | ||
67 | + * Called with AioContext lock held, since many callbacs implementations | ||
68 | + * use bdrv_* functions that require to hold the lock. | ||
69 | */ | ||
70 | void (*clean)(Job *job); | ||
71 | |||
72 | @@ -XXX,XX +XXX,XX @@ struct JobDriver { | ||
73 | * READY). | ||
74 | * (If the callback is NULL, the job is assumed to terminate | ||
75 | * without I/O.) | ||
76 | + * | ||
77 | + * Called with AioContext lock held, since many callback implementations | ||
78 | + * use bdrv_* functions that require to hold the lock. | ||
79 | */ | ||
80 | bool (*cancel)(Job *job, bool force); | ||
81 | |||
82 | |||
83 | - /** Called when the job is freed */ | ||
84 | + /** | ||
85 | + * Called when the job is freed. | ||
86 | + * Called with AioContext lock held, since many callback implementations | ||
87 | + * use bdrv_* functions that require to hold the lock. | ||
88 | + */ | ||
89 | void (*free)(Job *job); | ||
90 | }; | ||
30 | 91 | ||
31 | -- | 92 | -- |
32 | 2.13.6 | 93 | 2.37.3 |
33 | |||
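The caller-side shape of the invariant documented above is worth spelling out: the generic job layer acquires the job's AioContext before it invokes any of these JobDriver callbacks, precisely because their implementations may call bdrv_* APIs that expect that lock. An illustrative sketch (invented name, not the actual job.c code):

    #include "qemu/osdep.h"
    #include "qemu/main-loop.h"
    #include "block/aio.h"
    #include "qemu/job.h"

    static void sketch_call_clean(Job *job)
    {
        AioContext *ctx;

        GLOBAL_STATE_CODE();       /* main-loop code, BQL held */
        ctx = job->aio_context;    /* safe to read under the BQL */
        aio_context_acquire(ctx);
        if (job->driver->clean) {
            job->driver->clean(job);   /* may use bdrv_* internally */
        }
        aio_context_release(ctx);
    }

Only the invocation itself needs the AioContext lock here; nothing in the sketch touches job_mutex.
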
1 | From: Liang Li <liliang.opensource@gmail.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | When doing drive mirror to a low speed shared storage, if there was heavy | 3 | Change the job_{lock/unlock} functions and macros to use job_mutex. |
4 | BLK IO write workload in VM after the 'ready' event, drive mirror block job | ||
5 | can't be canceled immediately, it would keep running until the heavy BLK IO | ||
6 | workload stopped in the VM. | ||
7 | 4 | ||
8 | Libvirt depends on the current block-job-cancel semantics, which is that | 5 | Now that they are not nop anymore, remove the aiocontext |
9 | when used without a flag after the 'ready' event, the command blocks | 6 | to avoid deadlocks. |
10 | until data is in sync. However, these semantics are awkward in other | ||
11 | situations, for example, people may use drive mirror for realtime | ||
12 | backups while still wanting to use block live migration. Libvirt cannot | ||
13 | start a block live migration while another drive mirror is in progress, | ||
14 | but the user would rather abandon the backup attempt as broken and | ||
15 | proceed with the live migration than be stuck waiting for the current | ||
16 | drive mirror backup to finish. | ||
17 | 7 | ||
18 | The drive-mirror command already includes a 'force' flag, which libvirt | 8 | Therefore: |
19 | does not use, although it documented the flag as only being useful to | 9 | - when possible, remove completely the aiocontext lock/unlock pair |
20 | quit a job which is paused. However, since quitting a paused job has | 10 | - if it is used by some other function too, reduce the locking |
21 | the same effect as abandoning a backup in a non-paused job (namely, the | 11 | section as much as possible, leaving the job API outside. |
22 | destination file is not in sync, and the command completes immediately), | 12 | - change AIO_WAIT_WHILE in AIO_WAIT_WHILE_UNLOCKED, since we |
23 | we can just improve the documentation to make the force flag obviously | 13 | are not using the aiocontext lock anymore |
24 | useful. | ||
25 | 14 | ||
26 | Cc: Paolo Bonzini <pbonzini@redhat.com> | 15 | The only functions that still need the aiocontext lock are: |
27 | Cc: Jeff Cody <jcody@redhat.com> | 16 | - the JobDriver callbacks, already documented in job.h |
28 | Cc: Kevin Wolf <kwolf@redhat.com> | 17 | - job_cancel_sync() in replication.c is called with aio_context_lock |
29 | Cc: Max Reitz <mreitz@redhat.com> | 18 | taken, but now job is using AIO_WAIT_WHILE_UNLOCKED so we need to |
30 | Cc: Eric Blake <eblake@redhat.com> | 19 | release the lock. |
31 | Cc: John Snow <jsnow@redhat.com> | 20 | |
32 | Reported-by: Huaitong Han <huanhuaitong@didichuxing.com> | 21 | Reduce the locking section to only cover the callback invocation |
33 | Signed-off-by: Huaitong Han <huanhuaitong@didichuxing.com> | 22 | and document the functions that take the AioContext lock, |
34 | Signed-off-by: Liang Li <liliangleo@didichuxing.com> | 23 | to avoid taking it twice. |
35 | Signed-off-by: Jeff Cody <jcody@redhat.com> | 24 | |
25 | Also remove real_job_{lock/unlock}, as they are replaced by the | ||
26 | public functions. | ||
27 | |||
28 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> | ||
29 | Message-Id: <20220926093214.506243-19-eesposit@redhat.com> | ||
30 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
31 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
36 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 32 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
37 | --- | 33 | --- |
38 | qapi/block-core.json | 5 +++-- | 34 | include/qemu/job.h | 17 ++--- |
39 | include/block/blockjob.h | 12 ++++++++++-- | 35 | block/replication.c | 2 + |
40 | block/mirror.c | 10 ++++------ | 36 | blockdev.c | 72 +++----------------- |
41 | blockdev.c | 4 ++-- | 37 | job-qmp.c | 46 +++---------- |
42 | blockjob.c | 16 +++++++++------- | 38 | job.c | 111 +++++++++---------------------- |
43 | tests/test-blockjob-txn.c | 8 ++++---- | 39 | qemu-img.c | 2 - |
44 | hmp-commands.hx | 3 ++- | 40 | tests/unit/test-bdrv-drain.c | 4 +- |
45 | 7 files changed, 34 insertions(+), 24 deletions(-) | 41 | tests/unit/test-block-iothread.c | 2 +- |
42 | tests/unit/test-blockjob.c | 19 +++--- | ||
43 | 9 files changed, 72 insertions(+), 203 deletions(-) | ||
46 | 44 | ||
47 | diff --git a/qapi/block-core.json b/qapi/block-core.json | 45 | diff --git a/include/qemu/job.h b/include/qemu/job.h |
48 | index XXXXXXX..XXXXXXX 100644 | 46 | index XXXXXXX..XXXXXXX 100644 |
49 | --- a/qapi/block-core.json | 47 | --- a/include/qemu/job.h |
50 | +++ b/qapi/block-core.json | 48 | +++ b/include/qemu/job.h |
51 | @@ -XXX,XX +XXX,XX @@ | 49 | @@ -XXX,XX +XXX,XX @@ typedef struct Job { |
52 | # the name of the parameter), but since QEMU 2.7 it can have | 50 | AioContext *aio_context; |
53 | # other values. | 51 | |
54 | # | 52 | |
55 | -# @force: whether to allow cancellation of a paused job (default | 53 | - /** Protected by AioContext lock */ |
56 | -# false). Since 1.3. | 54 | + /** Protected by job_mutex */ |
57 | +# @force: If true, and the job has already emitted the event BLOCK_JOB_READY, | 55 | |
58 | +# abandon the job immediately (even if it is paused) instead of waiting | 56 | /** Reference count of the block job */ |
59 | +# for the destination to complete its final synchronization (since 1.3) | 57 | int refcnt; |
60 | # | 58 | @@ -XXX,XX +XXX,XX @@ typedef struct Job { |
61 | # Returns: Nothing on success | 59 | /** |
62 | # If no background operation is active on this device, DeviceNotActive | 60 | * Set to false by the job while the coroutine has yielded and may be |
63 | diff --git a/include/block/blockjob.h b/include/block/blockjob.h | 61 | * re-entered by job_enter(). There may still be I/O or event loop activity |
62 | - * pending. Accessed under block_job_mutex (in blockjob.c). | ||
63 | + * pending. Accessed under job_mutex. | ||
64 | * | ||
65 | * When the job is deferred to the main loop, busy is true as long as the | ||
66 | * bottom half is still pending. | ||
67 | @@ -XXX,XX +XXX,XX @@ typedef enum JobCreateFlags { | ||
68 | |||
69 | extern QemuMutex job_mutex; | ||
70 | |||
71 | -#define JOB_LOCK_GUARD() /* QEMU_LOCK_GUARD(&job_mutex) */ | ||
72 | +#define JOB_LOCK_GUARD() QEMU_LOCK_GUARD(&job_mutex) | ||
73 | |||
74 | -#define WITH_JOB_LOCK_GUARD() /* WITH_QEMU_LOCK_GUARD(&job_mutex) */ | ||
75 | +#define WITH_JOB_LOCK_GUARD() WITH_QEMU_LOCK_GUARD(&job_mutex) | ||
76 | |||
77 | /** | ||
78 | * job_lock: | ||
79 | @@ -XXX,XX +XXX,XX @@ void job_ref_locked(Job *job); | ||
80 | /** | ||
81 | * Release a reference that was previously acquired with job_ref() or | ||
82 | * job_create(). If it's the last reference to the object, it will be freed. | ||
83 | + * | ||
84 | + * Takes AioContext lock internally to invoke a job->driver callback. | ||
85 | */ | ||
86 | void job_unref(Job *job); | ||
87 | |||
88 | @@ -XXX,XX +XXX,XX @@ void job_user_cancel_locked(Job *job, bool force, Error **errp); | ||
89 | * Returns the return value from the job if the job actually completed | ||
90 | * during the call, or -ECANCELED if it was canceled. | ||
91 | * | ||
92 | - * Callers must hold the AioContext lock of job->aio_context. | ||
93 | + * Called with job_lock *not* held. | ||
94 | */ | ||
95 | int job_cancel_sync(Job *job, bool force); | ||
96 | |||
97 | @@ -XXX,XX +XXX,XX @@ void job_cancel_sync_all(void); | ||
98 | * function). | ||
99 | * | ||
100 | * Returns the return value from the job. | ||
101 | - * | ||
102 | - * Callers must hold the AioContext lock of job->aio_context. | ||
103 | + * Called with job_lock *not* held. | ||
104 | */ | ||
105 | int job_complete_sync(Job *job, Error **errp); | ||
106 | |||
107 | @@ -XXX,XX +XXX,XX @@ void job_dismiss_locked(Job **job, Error **errp); | ||
108 | * Returns 0 if the job is successfully completed, -ECANCELED if the job was | ||
109 | * cancelled before completing, and -errno in other error cases. | ||
110 | * | ||
111 | - * Callers must hold the AioContext lock of job->aio_context. | ||
112 | + * Called with job_lock *not* held. | ||
113 | */ | ||
114 | int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), | ||
115 | Error **errp); | ||
116 | diff --git a/block/replication.c b/block/replication.c | ||
64 | index XXXXXXX..XXXXXXX 100644 | 117 | index XXXXXXX..XXXXXXX 100644 |
65 | --- a/include/block/blockjob.h | 118 | --- a/block/replication.c |
66 | +++ b/include/block/blockjob.h | 119 | +++ b/block/replication.c |
67 | @@ -XXX,XX +XXX,XX @@ typedef struct BlockJob { | 120 | @@ -XXX,XX +XXX,XX @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp) |
68 | bool cancelled; | 121 | * disk, secondary disk in backup_job_completed(). |
69 | |||
70 | /** | ||
71 | + * Set to true if the job should abort immediately without waiting | ||
72 | + * for data to be in sync. | ||
73 | + */ | ||
74 | + bool force; | ||
75 | + | ||
76 | + /** | ||
77 | * Counter for pause request. If non-zero, the block job is either paused, | ||
78 | * or if busy == true will pause itself as soon as possible. | ||
79 | */ | ||
80 | @@ -XXX,XX +XXX,XX @@ void block_job_start(BlockJob *job); | ||
81 | /** | ||
82 | * block_job_cancel: | ||
83 | * @job: The job to be canceled. | ||
84 | + * @force: Quit a job without waiting for data to be in sync. | ||
85 | * | ||
86 | * Asynchronously cancel the specified job. | ||
87 | */ | ||
88 | -void block_job_cancel(BlockJob *job); | ||
89 | +void block_job_cancel(BlockJob *job, bool force); | ||
90 | |||
91 | /** | ||
92 | * block_job_complete: | ||
93 | @@ -XXX,XX +XXX,XX @@ void block_job_user_resume(BlockJob *job, Error **errp); | ||
94 | /** | ||
95 | * block_job_user_cancel: | ||
96 | * @job: The job to be cancelled. | ||
97 | + * @force: Quit a job without waiting for data to be in sync. | ||
98 | * | ||
99 | * Cancels the specified job, but may refuse to do so if the | ||
100 | * operation isn't currently meaningful. | ||
101 | */ | ||
102 | -void block_job_user_cancel(BlockJob *job, Error **errp); | ||
103 | +void block_job_user_cancel(BlockJob *job, bool force, Error **errp); | ||
104 | |||
105 | /** | ||
106 | * block_job_cancel_sync: | ||
107 | diff --git a/block/mirror.c b/block/mirror.c | ||
108 | index XXXXXXX..XXXXXXX 100644 | ||
109 | --- a/block/mirror.c | ||
110 | +++ b/block/mirror.c | ||
111 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn mirror_run(void *opaque) | ||
112 | |||
113 | ret = 0; | ||
114 | trace_mirror_before_sleep(s, cnt, s->synced, delay_ns); | ||
115 | - if (!s->synced) { | ||
116 | - block_job_sleep_ns(&s->common, delay_ns); | ||
117 | - if (block_job_is_cancelled(&s->common)) { | ||
118 | - break; | ||
119 | - } | ||
120 | + if (block_job_is_cancelled(&s->common) && s->common.force) { | ||
121 | + break; | ||
122 | } else if (!should_complete) { | ||
123 | delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0); | ||
124 | block_job_sleep_ns(&s->common, delay_ns); | ||
125 | @@ -XXX,XX +XXX,XX @@ immediate_exit: | ||
126 | * or it was cancelled prematurely so that we do not guarantee that | ||
127 | * the target is a copy of the source. | ||
128 | */ | 122 | */ |
129 | - assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common))); | 123 | if (s->backup_job) { |
130 | + assert(ret < 0 || ((s->common.force || !s->synced) && | 124 | + aio_context_release(aio_context); |
131 | + block_job_is_cancelled(&s->common))); | 125 | job_cancel_sync(&s->backup_job->job, true); |
132 | assert(need_drain); | 126 | + aio_context_acquire(aio_context); |
133 | mirror_wait_for_all_io(s); | 127 | } |
134 | } | 128 | |
129 | if (!failover) { | ||
135 | diff --git a/blockdev.c b/blockdev.c | 130 | diff --git a/blockdev.c b/blockdev.c |
136 | index XXXXXXX..XXXXXXX 100644 | 131 | index XXXXXXX..XXXXXXX 100644 |
137 | --- a/blockdev.c | 132 | --- a/blockdev.c |
138 | +++ b/blockdev.c | 133 | +++ b/blockdev.c |
139 | @@ -XXX,XX +XXX,XX @@ void blockdev_mark_auto_del(BlockBackend *blk) | 134 | @@ -XXX,XX +XXX,XX @@ void blockdev_mark_auto_del(BlockBackend *blk) |
140 | aio_context_acquire(aio_context); | 135 | for (job = block_job_next_locked(NULL); job; |
141 | 136 | job = block_job_next_locked(job)) { | |
142 | if (bs->job) { | 137 | if (block_job_has_bdrv(job, blk_bs(blk))) { |
143 | - block_job_cancel(bs->job); | 138 | - AioContext *aio_context = job->job.aio_context; |
144 | + block_job_cancel(bs->job, false); | 139 | - aio_context_acquire(aio_context); |
140 | - | ||
141 | job_cancel_locked(&job->job, false); | ||
142 | - | ||
143 | - aio_context_release(aio_context); | ||
145 | } | 144 | } |
146 | 145 | } | |
147 | aio_context_release(aio_context); | 146 | |
147 | @@ -XXX,XX +XXX,XX @@ static void drive_backup_abort(BlkActionState *common) | ||
148 | DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common); | ||
149 | |||
150 | if (state->job) { | ||
151 | - AioContext *aio_context; | ||
152 | - | ||
153 | - aio_context = bdrv_get_aio_context(state->bs); | ||
154 | - aio_context_acquire(aio_context); | ||
155 | - | ||
156 | job_cancel_sync(&state->job->job, true); | ||
157 | - | ||
158 | - aio_context_release(aio_context); | ||
159 | } | ||
160 | } | ||
161 | |||
162 | @@ -XXX,XX +XXX,XX @@ static void blockdev_backup_abort(BlkActionState *common) | ||
163 | BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common); | ||
164 | |||
165 | if (state->job) { | ||
166 | - AioContext *aio_context; | ||
167 | - | ||
168 | - aio_context = bdrv_get_aio_context(state->bs); | ||
169 | - aio_context_acquire(aio_context); | ||
170 | - | ||
171 | job_cancel_sync(&state->job->job, true); | ||
172 | - | ||
173 | - aio_context_release(aio_context); | ||
174 | } | ||
175 | } | ||
176 | |||
177 | @@ -XXX,XX +XXX,XX @@ out: | ||
178 | } | ||
179 | |||
180 | /* | ||
181 | - * Get a block job using its ID and acquire its AioContext. | ||
182 | - * Called with job_mutex held. | ||
183 | + * Get a block job using its ID. Called with job_mutex held. | ||
184 | */ | ||
185 | -static BlockJob *find_block_job_locked(const char *id, | ||
186 | - AioContext **aio_context, | ||
187 | - Error **errp) | ||
188 | +static BlockJob *find_block_job_locked(const char *id, Error **errp) | ||
189 | { | ||
190 | BlockJob *job; | ||
191 | |||
192 | assert(id != NULL); | ||
193 | |||
194 | - *aio_context = NULL; | ||
195 | - | ||
196 | job = block_job_get_locked(id); | ||
197 | |||
198 | if (!job) { | ||
199 | @@ -XXX,XX +XXX,XX @@ static BlockJob *find_block_job_locked(const char *id, | ||
200 | return NULL; | ||
201 | } | ||
202 | |||
203 | - *aio_context = block_job_get_aio_context(job); | ||
204 | - aio_context_acquire(*aio_context); | ||
205 | - | ||
206 | return job; | ||
207 | } | ||
208 | |||
209 | void qmp_block_job_set_speed(const char *device, int64_t speed, Error **errp) | ||
210 | { | ||
211 | - AioContext *aio_context; | ||
212 | BlockJob *job; | ||
213 | |||
214 | JOB_LOCK_GUARD(); | ||
215 | - job = find_block_job_locked(device, &aio_context, errp); | ||
216 | + job = find_block_job_locked(device, errp); | ||
217 | |||
218 | if (!job) { | ||
219 | return; | ||
220 | } | ||
221 | |||
222 | block_job_set_speed_locked(job, speed, errp); | ||
223 | - aio_context_release(aio_context); | ||
224 | } | ||
225 | |||
226 | void qmp_block_job_cancel(const char *device, | ||
227 | bool has_force, bool force, Error **errp) | ||
228 | { | ||
229 | - AioContext *aio_context; | ||
230 | BlockJob *job; | ||
231 | |||
232 | JOB_LOCK_GUARD(); | ||
233 | - job = find_block_job_locked(device, &aio_context, errp); | ||
234 | + job = find_block_job_locked(device, errp); | ||
235 | |||
236 | if (!job) { | ||
237 | return; | ||
148 | @@ -XXX,XX +XXX,XX @@ void qmp_block_job_cancel(const char *device, | 238 | @@ -XXX,XX +XXX,XX @@ void qmp_block_job_cancel(const char *device, |
149 | } | 239 | if (job_user_paused_locked(&job->job) && !force) { |
240 | error_setg(errp, "The block job for device '%s' is currently paused", | ||
241 | device); | ||
242 | - goto out; | ||
243 | + return; | ||
244 | } | ||
150 | 245 | ||
151 | trace_qmp_block_job_cancel(job); | 246 | trace_qmp_block_job_cancel(job); |
152 | - block_job_user_cancel(job, errp); | 247 | job_user_cancel_locked(&job->job, force, errp); |
153 | + block_job_user_cancel(job, force, errp); | 248 | -out: |
154 | out: | 249 | - aio_context_release(aio_context); |
155 | aio_context_release(aio_context); | 250 | } |
156 | } | 251 | |
157 | diff --git a/blockjob.c b/blockjob.c | 252 | void qmp_block_job_pause(const char *device, Error **errp) |
253 | { | ||
254 | - AioContext *aio_context; | ||
255 | BlockJob *job; | ||
256 | |||
257 | JOB_LOCK_GUARD(); | ||
258 | - job = find_block_job_locked(device, &aio_context, errp); | ||
259 | + job = find_block_job_locked(device, errp); | ||
260 | |||
261 | if (!job) { | ||
262 | return; | ||
263 | @@ -XXX,XX +XXX,XX @@ void qmp_block_job_pause(const char *device, Error **errp) | ||
264 | |||
265 | trace_qmp_block_job_pause(job); | ||
266 | job_user_pause_locked(&job->job, errp); | ||
267 | - aio_context_release(aio_context); | ||
268 | } | ||
269 | |||
270 | void qmp_block_job_resume(const char *device, Error **errp) | ||
271 | { | ||
272 | - AioContext *aio_context; | ||
273 | BlockJob *job; | ||
274 | |||
275 | JOB_LOCK_GUARD(); | ||
276 | - job = find_block_job_locked(device, &aio_context, errp); | ||
277 | + job = find_block_job_locked(device, errp); | ||
278 | |||
279 | if (!job) { | ||
280 | return; | ||
281 | @@ -XXX,XX +XXX,XX @@ void qmp_block_job_resume(const char *device, Error **errp) | ||
282 | |||
283 | trace_qmp_block_job_resume(job); | ||
284 | job_user_resume_locked(&job->job, errp); | ||
285 | - aio_context_release(aio_context); | ||
286 | } | ||
287 | |||
288 | void qmp_block_job_complete(const char *device, Error **errp) | ||
289 | { | ||
290 | - AioContext *aio_context; | ||
291 | BlockJob *job; | ||
292 | |||
293 | JOB_LOCK_GUARD(); | ||
294 | - job = find_block_job_locked(device, &aio_context, errp); | ||
295 | + job = find_block_job_locked(device, errp); | ||
296 | |||
297 | if (!job) { | ||
298 | return; | ||
299 | @@ -XXX,XX +XXX,XX @@ void qmp_block_job_complete(const char *device, Error **errp) | ||
300 | |||
301 | trace_qmp_block_job_complete(job); | ||
302 | job_complete_locked(&job->job, errp); | ||
303 | - aio_context_release(aio_context); | ||
304 | } | ||
305 | |||
306 | void qmp_block_job_finalize(const char *id, Error **errp) | ||
307 | { | ||
308 | - AioContext *aio_context; | ||
309 | BlockJob *job; | ||
310 | |||
311 | JOB_LOCK_GUARD(); | ||
312 | - job = find_block_job_locked(id, &aio_context, errp); | ||
313 | + job = find_block_job_locked(id, errp); | ||
314 | |||
315 | if (!job) { | ||
316 | return; | ||
317 | @@ -XXX,XX +XXX,XX @@ void qmp_block_job_finalize(const char *id, Error **errp) | ||
318 | job_ref_locked(&job->job); | ||
319 | job_finalize_locked(&job->job, errp); | ||
320 | |||
321 | - /* | ||
322 | - * Job's context might have changed via job_finalize (and job_txn_apply | ||
323 | - * automatically acquires the new one), so make sure we release the correct | ||
324 | - * one. | ||
325 | - */ | ||
326 | - aio_context = block_job_get_aio_context(job); | ||
327 | job_unref_locked(&job->job); | ||
328 | - aio_context_release(aio_context); | ||
329 | } | ||
330 | |||
331 | void qmp_block_job_dismiss(const char *id, Error **errp) | ||
332 | { | ||
333 | - AioContext *aio_context; | ||
334 | BlockJob *bjob; | ||
335 | Job *job; | ||
336 | |||
337 | JOB_LOCK_GUARD(); | ||
338 | - bjob = find_block_job_locked(id, &aio_context, errp); | ||
339 | + bjob = find_block_job_locked(id, errp); | ||
340 | |||
341 | if (!bjob) { | ||
342 | return; | ||
343 | @@ -XXX,XX +XXX,XX @@ void qmp_block_job_dismiss(const char *id, Error **errp) | ||
344 | trace_qmp_block_job_dismiss(bjob); | ||
345 | job = &bjob->job; | ||
346 | job_dismiss_locked(&job, errp); | ||
347 | - aio_context_release(aio_context); | ||
348 | } | ||
349 | |||
350 | void qmp_change_backing_file(const char *device, | ||
351 | @@ -XXX,XX +XXX,XX @@ BlockJobInfoList *qmp_query_block_jobs(Error **errp) | ||
352 | for (job = block_job_next_locked(NULL); job; | ||
353 | job = block_job_next_locked(job)) { | ||
354 | BlockJobInfo *value; | ||
355 | - AioContext *aio_context; | ||
356 | |||
357 | if (block_job_is_internal(job)) { | ||
358 | continue; | ||
359 | } | ||
360 | - aio_context = block_job_get_aio_context(job); | ||
361 | - aio_context_acquire(aio_context); | ||
362 | value = block_job_query_locked(job, errp); | ||
363 | - aio_context_release(aio_context); | ||
364 | if (!value) { | ||
365 | qapi_free_BlockJobInfoList(head); | ||
366 | return NULL; | ||
367 | diff --git a/job-qmp.c b/job-qmp.c | ||
158 | index XXXXXXX..XXXXXXX 100644 | 368 | index XXXXXXX..XXXXXXX 100644 |
159 | --- a/blockjob.c | 369 | --- a/job-qmp.c |
160 | +++ b/blockjob.c | 370 | +++ b/job-qmp.c |
161 | @@ -XXX,XX +XXX,XX @@ static int block_job_finalize_single(BlockJob *job) | 371 | @@ -XXX,XX +XXX,XX @@ |
372 | #include "trace/trace-root.h" | ||
373 | |||
374 | /* | ||
375 | - * Get a job using its ID and acquire its AioContext. | ||
376 | - * Called with job_mutex held. | ||
377 | + * Get a job using its ID. Called with job_mutex held. | ||
378 | */ | ||
379 | -static Job *find_job_locked(const char *id, | ||
380 | - AioContext **aio_context, | ||
381 | - Error **errp) | ||
382 | +static Job *find_job_locked(const char *id, Error **errp) | ||
383 | { | ||
384 | Job *job; | ||
385 | |||
386 | - *aio_context = NULL; | ||
387 | - | ||
388 | job = job_get_locked(id); | ||
389 | if (!job) { | ||
390 | error_setg(errp, "Job not found"); | ||
391 | return NULL; | ||
392 | } | ||
393 | |||
394 | - *aio_context = job->aio_context; | ||
395 | - aio_context_acquire(*aio_context); | ||
396 | - | ||
397 | return job; | ||
398 | } | ||
399 | |||
400 | void qmp_job_cancel(const char *id, Error **errp) | ||
401 | { | ||
402 | - AioContext *aio_context; | ||
403 | Job *job; | ||
404 | |||
405 | JOB_LOCK_GUARD(); | ||
406 | - job = find_job_locked(id, &aio_context, errp); | ||
407 | + job = find_job_locked(id, errp); | ||
408 | |||
409 | if (!job) { | ||
410 | return; | ||
411 | @@ -XXX,XX +XXX,XX @@ void qmp_job_cancel(const char *id, Error **errp) | ||
412 | |||
413 | trace_qmp_job_cancel(job); | ||
414 | job_user_cancel_locked(job, true, errp); | ||
415 | - aio_context_release(aio_context); | ||
416 | } | ||
417 | |||
418 | void qmp_job_pause(const char *id, Error **errp) | ||
419 | { | ||
420 | - AioContext *aio_context; | ||
421 | Job *job; | ||
422 | |||
423 | JOB_LOCK_GUARD(); | ||
424 | - job = find_job_locked(id, &aio_context, errp); | ||
425 | + job = find_job_locked(id, errp); | ||
426 | |||
427 | if (!job) { | ||
428 | return; | ||
429 | @@ -XXX,XX +XXX,XX @@ void qmp_job_pause(const char *id, Error **errp) | ||
430 | |||
431 | trace_qmp_job_pause(job); | ||
432 | job_user_pause_locked(job, errp); | ||
433 | - aio_context_release(aio_context); | ||
434 | } | ||
435 | |||
436 | void qmp_job_resume(const char *id, Error **errp) | ||
437 | { | ||
438 | - AioContext *aio_context; | ||
439 | Job *job; | ||
440 | |||
441 | JOB_LOCK_GUARD(); | ||
442 | - job = find_job_locked(id, &aio_context, errp); | ||
443 | + job = find_job_locked(id, errp); | ||
444 | |||
445 | if (!job) { | ||
446 | return; | ||
447 | @@ -XXX,XX +XXX,XX @@ void qmp_job_resume(const char *id, Error **errp) | ||
448 | |||
449 | trace_qmp_job_resume(job); | ||
450 | job_user_resume_locked(job, errp); | ||
451 | - aio_context_release(aio_context); | ||
452 | } | ||
453 | |||
454 | void qmp_job_complete(const char *id, Error **errp) | ||
455 | { | ||
456 | - AioContext *aio_context; | ||
457 | Job *job; | ||
458 | |||
459 | JOB_LOCK_GUARD(); | ||
460 | - job = find_job_locked(id, &aio_context, errp); | ||
461 | + job = find_job_locked(id, errp); | ||
462 | |||
463 | if (!job) { | ||
464 | return; | ||
465 | @@ -XXX,XX +XXX,XX @@ void qmp_job_complete(const char *id, Error **errp) | ||
466 | |||
467 | trace_qmp_job_complete(job); | ||
468 | job_complete_locked(job, errp); | ||
469 | - aio_context_release(aio_context); | ||
470 | } | ||
471 | |||
472 | void qmp_job_finalize(const char *id, Error **errp) | ||
473 | { | ||
474 | - AioContext *aio_context; | ||
475 | Job *job; | ||
476 | |||
477 | JOB_LOCK_GUARD(); | ||
478 | - job = find_job_locked(id, &aio_context, errp); | ||
479 | + job = find_job_locked(id, errp); | ||
480 | |||
481 | if (!job) { | ||
482 | return; | ||
483 | @@ -XXX,XX +XXX,XX @@ void qmp_job_finalize(const char *id, Error **errp) | ||
484 | job_ref_locked(job); | ||
485 | job_finalize_locked(job, errp); | ||
486 | |||
487 | - /* | ||
488 | - * Job's context might have changed via job_finalize (and job_txn_apply | ||
489 | - * automatically acquires the new one), so make sure we release the correct | ||
490 | - * one. | ||
491 | - */ | ||
492 | - aio_context = job->aio_context; | ||
493 | job_unref_locked(job); | ||
494 | - aio_context_release(aio_context); | ||
495 | } | ||
496 | |||
497 | void qmp_job_dismiss(const char *id, Error **errp) | ||
498 | { | ||
499 | - AioContext *aio_context; | ||
500 | Job *job; | ||
501 | |||
502 | JOB_LOCK_GUARD(); | ||
503 | - job = find_job_locked(id, &aio_context, errp); | ||
504 | + job = find_job_locked(id, errp); | ||
505 | |||
506 | if (!job) { | ||
507 | return; | ||
508 | @@ -XXX,XX +XXX,XX @@ void qmp_job_dismiss(const char *id, Error **errp) | ||
509 | |||
510 | trace_qmp_job_dismiss(job); | ||
511 | job_dismiss_locked(&job, errp); | ||
512 | - aio_context_release(aio_context); | ||
513 | } | ||
514 | |||
515 | /* Called with job_mutex held. */ | ||
516 | @@ -XXX,XX +XXX,XX @@ JobInfoList *qmp_query_jobs(Error **errp) | ||
517 | |||
518 | for (job = job_next_locked(NULL); job; job = job_next_locked(job)) { | ||
519 | JobInfo *value; | ||
520 | - AioContext *aio_context; | ||
521 | |||
522 | if (job_is_internal(job)) { | ||
523 | continue; | ||
524 | } | ||
525 | - aio_context = job->aio_context; | ||
526 | - aio_context_acquire(aio_context); | ||
527 | value = job_query_single_locked(job, errp); | ||
528 | - aio_context_release(aio_context); | ||
529 | if (!value) { | ||
530 | qapi_free_JobInfoList(head); | ||
531 | return NULL; | ||
532 | diff --git a/job.c b/job.c | ||
533 | index XXXXXXX..XXXXXXX 100644 | ||
534 | --- a/job.c | ||
535 | +++ b/job.c | ||
536 | @@ -XXX,XX +XXX,XX @@ | ||
537 | * | ||
538 | * The second includes functions used by the job drivers and sometimes | ||
539 | * by the core block layer. These delegate the locking to the callee instead. | ||
540 | - * | ||
541 | - * TODO Actually make this true | ||
542 | */ | ||
543 | |||
544 | /* | ||
545 | @@ -XXX,XX +XXX,XX @@ struct JobTxn { | ||
546 | }; | ||
547 | |||
548 | void job_lock(void) | ||
549 | -{ | ||
550 | - /* nop */ | ||
551 | -} | ||
552 | - | ||
553 | -void job_unlock(void) | ||
554 | -{ | ||
555 | - /* nop */ | ||
556 | -} | ||
557 | - | ||
558 | -static void real_job_lock(void) | ||
559 | { | ||
560 | qemu_mutex_lock(&job_mutex); | ||
561 | } | ||
562 | |||
563 | -static void real_job_unlock(void) | ||
564 | +void job_unlock(void) | ||
565 | { | ||
566 | qemu_mutex_unlock(&job_mutex); | ||
567 | } | ||
568 | @@ -XXX,XX +XXX,XX @@ static void job_txn_del_job_locked(Job *job) | ||
569 | /* Called with job_mutex held, but releases it temporarily. */ | ||
570 | static int job_txn_apply_locked(Job *job, int fn(Job *)) | ||
571 | { | ||
572 | - AioContext *inner_ctx; | ||
573 | Job *other_job, *next; | ||
574 | JobTxn *txn = job->txn; | ||
575 | int rc = 0; | ||
576 | @@ -XXX,XX +XXX,XX @@ static int job_txn_apply_locked(Job *job, int fn(Job *)) | ||
577 | * break AIO_WAIT_WHILE from within fn. | ||
578 | */ | ||
579 | job_ref_locked(job); | ||
580 | - aio_context_release(job->aio_context); | ||
581 | |||
582 | QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) { | ||
583 | - inner_ctx = other_job->aio_context; | ||
584 | - aio_context_acquire(inner_ctx); | ||
585 | rc = fn(other_job); | ||
586 | - aio_context_release(inner_ctx); | ||
587 | if (rc) { | ||
588 | break; | ||
589 | } | ||
590 | } | ||
591 | |||
592 | - /* | ||
593 | - * Note that job->aio_context might have been changed by calling fn, so we | ||
594 | - * can't use a local variable to cache it. | ||
595 | - */ | ||
596 | - aio_context_acquire(job->aio_context); | ||
597 | job_unref_locked(job); | ||
598 | return rc; | ||
599 | } | ||
600 | @@ -XXX,XX +XXX,XX @@ void job_unref_locked(Job *job) | ||
601 | assert(!job->txn); | ||
602 | |||
603 | if (job->driver->free) { | ||
604 | + AioContext *aio_context = job->aio_context; | ||
605 | job_unlock(); | ||
606 | + /* FIXME: aiocontext lock is required because cb calls blk_unref */ | ||
607 | + aio_context_acquire(aio_context); | ||
608 | job->driver->free(job); | ||
609 | + aio_context_release(aio_context); | ||
610 | job_lock(); | ||
611 | } | ||
612 | |||
613 | @@ -XXX,XX +XXX,XX @@ void job_enter_cond_locked(Job *job, bool(*fn)(Job *job)) | ||
614 | return; | ||
615 | } | ||
616 | |||
617 | - real_job_lock(); | ||
618 | if (job->busy) { | ||
619 | - real_job_unlock(); | ||
620 | return; | ||
621 | } | ||
622 | |||
623 | if (fn && !fn(job)) { | ||
624 | - real_job_unlock(); | ||
625 | return; | ||
626 | } | ||
627 | |||
628 | assert(!job->deferred_to_main_loop); | ||
629 | timer_del(&job->sleep_timer); | ||
630 | job->busy = true; | ||
631 | - real_job_unlock(); | ||
632 | job_unlock(); | ||
633 | aio_co_wake(job->co); | ||
634 | job_lock(); | ||
635 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn job_do_yield_locked(Job *job, uint64_t ns) | ||
636 | { | ||
637 | AioContext *next_aio_context; | ||
638 | |||
639 | - real_job_lock(); | ||
640 | if (ns != -1) { | ||
641 | timer_mod(&job->sleep_timer, ns); | ||
642 | } | ||
643 | job->busy = false; | ||
644 | job_event_idle_locked(job); | ||
645 | - real_job_unlock(); | ||
646 | job_unlock(); | ||
647 | qemu_coroutine_yield(); | ||
648 | job_lock(); | ||
649 | @@ -XXX,XX +XXX,XX @@ static void job_clean(Job *job) | ||
650 | } | ||
651 | } | ||
652 | |||
653 | -/* Called with job_mutex held, but releases it temporarily */ | ||
654 | +/* | ||
655 | + * Called with job_mutex held, but releases it temporarily. | ||
656 | + * Takes AioContext lock internally to invoke a job->driver callback. | ||
657 | + */ | ||
658 | static int job_finalize_single_locked(Job *job) | ||
659 | { | ||
660 | int job_ret; | ||
661 | + AioContext *ctx = job->aio_context; | ||
662 | |||
663 | assert(job_is_completed_locked(job)); | ||
664 | |||
665 | @@ -XXX,XX +XXX,XX @@ static int job_finalize_single_locked(Job *job) | ||
666 | |||
667 | job_ret = job->ret; | ||
668 | job_unlock(); | ||
669 | + aio_context_acquire(ctx); | ||
670 | |||
671 | if (!job_ret) { | ||
672 | job_commit(job); | ||
673 | @@ -XXX,XX +XXX,XX @@ static int job_finalize_single_locked(Job *job) | ||
674 | } | ||
675 | job_clean(job); | ||
676 | |||
677 | - job_lock(); | ||
678 | - | ||
679 | if (job->cb) { | ||
680 | - job_ret = job->ret; | ||
681 | - job_unlock(); | ||
682 | job->cb(job->opaque, job_ret); | ||
683 | - job_lock(); | ||
684 | } | ||
685 | |||
686 | + aio_context_release(ctx); | ||
687 | + job_lock(); | ||
688 | + | ||
689 | /* Emit events only if we actually started */ | ||
690 | if (job_started_locked(job)) { | ||
691 | if (job_is_cancelled_locked(job)) { | ||
692 | @@ -XXX,XX +XXX,XX @@ static int job_finalize_single_locked(Job *job) | ||
162 | return 0; | 693 | return 0; |
163 | } | 694 | } |
164 | 695 | ||
165 | -static void block_job_cancel_async(BlockJob *job) | 696 | -/* Called with job_mutex held, but releases it temporarily */ |
166 | +static void block_job_cancel_async(BlockJob *job, bool force) | 697 | +/* |
167 | { | 698 | + * Called with job_mutex held, but releases it temporarily. |
168 | if (job->iostatus != BLOCK_DEVICE_IO_STATUS_OK) { | 699 | + * Takes AioContext lock internally to invoke a job->driver callback. |
169 | block_job_iostatus_reset(job); | 700 | + */ |
170 | @@ -XXX,XX +XXX,XX @@ static void block_job_cancel_async(BlockJob *job) | 701 | static void job_cancel_async_locked(Job *job, bool force) |
171 | job->pause_count--; | 702 | { |
172 | } | 703 | + AioContext *ctx = job->aio_context; |
173 | job->cancelled = true; | 704 | GLOBAL_STATE_CODE(); |
174 | + /* To prevent 'force == false' overriding a previous 'force == true' */ | 705 | if (job->driver->cancel) { |
175 | + job->force |= force; | 706 | job_unlock(); |
176 | } | 707 | + aio_context_acquire(ctx); |
177 | 708 | force = job->driver->cancel(job, force); | |
178 | static int block_job_txn_apply(BlockJobTxn *txn, int fn(BlockJob *), bool lock) | 709 | + aio_context_release(ctx); |
179 | @@ -XXX,XX +XXX,XX @@ static void block_job_completed_txn_abort(BlockJob *job) | 710 | job_lock(); |
711 | } else { | ||
712 | /* No .cancel() means the job will behave as if force-cancelled */ | ||
713 | @@ -XXX,XX +XXX,XX @@ static void job_cancel_async_locked(Job *job, bool force) | ||
714 | } | ||
715 | } | ||
716 | |||
717 | -/* Called with job_mutex held, but releases it temporarily. */ | ||
718 | +/* | ||
719 | + * Called with job_mutex held, but releases it temporarily. | ||
720 | + * Takes AioContext lock internally to invoke a job->driver callback. | ||
721 | + */ | ||
722 | static void job_completed_txn_abort_locked(Job *job) | ||
723 | { | ||
724 | - AioContext *ctx; | ||
725 | JobTxn *txn = job->txn; | ||
726 | Job *other_job; | ||
727 | |||
728 | @@ -XXX,XX +XXX,XX @@ static void job_completed_txn_abort_locked(Job *job) | ||
729 | txn->aborting = true; | ||
730 | job_txn_ref_locked(txn); | ||
731 | |||
732 | - /* | ||
733 | - * We can only hold the single job's AioContext lock while calling | ||
734 | - * job_finalize_single() because the finalization callbacks can involve | ||
735 | - * calls of AIO_WAIT_WHILE(), which could deadlock otherwise. | ||
736 | - * Note that the job's AioContext may change when it is finalized. | ||
737 | - */ | ||
738 | job_ref_locked(job); | ||
739 | - aio_context_release(job->aio_context); | ||
740 | |||
741 | /* Other jobs are effectively cancelled by us, set the status for | ||
742 | * them; this job, however, may or may not be cancelled, depending | ||
180 | * on the caller, so leave it. */ | 743 | * on the caller, so leave it. */ |
181 | QLIST_FOREACH(other_job, &txn->jobs, txn_list) { | 744 | QLIST_FOREACH(other_job, &txn->jobs, txn_list) { |
182 | if (other_job != job) { | 745 | if (other_job != job) { |
183 | - block_job_cancel_async(other_job); | 746 | - ctx = other_job->aio_context; |
184 | + block_job_cancel_async(other_job, false); | 747 | - aio_context_acquire(ctx); |
748 | /* | ||
749 | * This is a transaction: If one job failed, no result will matter. | ||
750 | * Therefore, pass force=true to terminate all other jobs as quickly | ||
751 | * as possible. | ||
752 | */ | ||
753 | job_cancel_async_locked(other_job, true); | ||
754 | - aio_context_release(ctx); | ||
185 | } | 755 | } |
186 | } | 756 | } |
187 | while (!QLIST_EMPTY(&txn->jobs)) { | 757 | while (!QLIST_EMPTY(&txn->jobs)) { |
188 | @@ -XXX,XX +XXX,XX @@ void block_job_user_resume(BlockJob *job, Error **errp) | 758 | other_job = QLIST_FIRST(&txn->jobs); |
189 | block_job_resume(job); | 759 | - /* |
190 | } | 760 | - * The job's AioContext may change, so store it in @ctx so we |
191 | 761 | - * release the same context that we have acquired before. | |
192 | -void block_job_cancel(BlockJob *job) | 762 | - */ |
193 | +void block_job_cancel(BlockJob *job, bool force) | 763 | - ctx = other_job->aio_context; |
194 | { | 764 | - aio_context_acquire(ctx); |
195 | if (job->status == BLOCK_JOB_STATUS_CONCLUDED) { | 765 | if (!job_is_completed_locked(other_job)) { |
196 | block_job_do_dismiss(job); | 766 | assert(job_cancel_requested_locked(other_job)); |
197 | return; | 767 | job_finish_sync_locked(other_job, NULL, NULL); |
198 | } | 768 | } |
199 | - block_job_cancel_async(job); | 769 | job_finalize_single_locked(other_job); |
200 | + block_job_cancel_async(job, force); | 770 | - aio_context_release(ctx); |
201 | if (!block_job_started(job)) { | 771 | } |
202 | block_job_completed(job, -ECANCELED); | 772 | |
203 | } else if (job->deferred_to_main_loop) { | 773 | - /* |
204 | @@ -XXX,XX +XXX,XX @@ void block_job_cancel(BlockJob *job) | 774 | - * Use job_ref()/job_unref() so we can read the AioContext here |
205 | } | 775 | - * even if the job went away during job_finalize_single(). |
206 | } | 776 | - */ |
207 | 777 | - aio_context_acquire(job->aio_context); | |
208 | -void block_job_user_cancel(BlockJob *job, Error **errp) | 778 | job_unref_locked(job); |
209 | +void block_job_user_cancel(BlockJob *job, bool force, Error **errp) | 779 | - |
210 | { | 780 | job_txn_unref_locked(txn); |
211 | if (block_job_apply_verb(job, BLOCK_JOB_VERB_CANCEL, errp)) { | 781 | } |
212 | return; | 782 | |
213 | } | 783 | @@ -XXX,XX +XXX,XX @@ static void job_completed_txn_abort_locked(Job *job) |
214 | - block_job_cancel(job); | 784 | static int job_prepare_locked(Job *job) |
215 | + block_job_cancel(job, force); | 785 | { |
216 | } | 786 | int ret; |
217 | 787 | + AioContext *ctx = job->aio_context; | |
218 | /* A wrapper around block_job_cancel() taking an Error ** parameter so it may be | 788 | |
219 | @@ -XXX,XX +XXX,XX @@ void block_job_user_cancel(BlockJob *job, Error **errp) | 789 | GLOBAL_STATE_CODE(); |
220 | * function pointer casts there. */ | 790 | + |
221 | static void block_job_cancel_err(BlockJob *job, Error **errp) | 791 | if (job->ret == 0 && job->driver->prepare) { |
222 | { | 792 | job_unlock(); |
223 | - block_job_cancel(job); | 793 | + aio_context_acquire(ctx); |
224 | + block_job_cancel(job, false); | 794 | ret = job->driver->prepare(job); |
225 | } | 795 | + aio_context_release(ctx); |
226 | 796 | job_lock(); | |
227 | int block_job_cancel_sync(BlockJob *job) | 797 | job->ret = ret; |
228 | diff --git a/tests/test-blockjob-txn.c b/tests/test-blockjob-txn.c | 798 | job_update_rc_locked(job); |
799 | } | ||
800 | + | ||
801 | return job->ret; | ||
802 | } | ||
803 | |||
804 | @@ -XXX,XX +XXX,XX @@ static void job_completed_locked(Job *job) | ||
805 | static void job_exit(void *opaque) | ||
806 | { | ||
807 | Job *job = (Job *)opaque; | ||
808 | - AioContext *ctx; | ||
809 | JOB_LOCK_GUARD(); | ||
810 | - | ||
811 | job_ref_locked(job); | ||
812 | - aio_context_acquire(job->aio_context); | ||
813 | |||
814 | /* This is a lie, we're not quiescent, but still doing the completion | ||
815 | * callbacks. However, completion callbacks tend to involve operations that | ||
816 | @@ -XXX,XX +XXX,XX @@ static void job_exit(void *opaque) | ||
817 | job_event_idle_locked(job); | ||
818 | |||
819 | job_completed_locked(job); | ||
820 | - | ||
821 | - /* | ||
822 | - * Note that calling job_completed can move the job to a different | ||
823 | - * aio_context, so we cannot cache from above. job_txn_apply takes care of | ||
824 | - * acquiring the new lock, and we ref/unref to avoid job_completed freeing | ||
825 | - * the job underneath us. | ||
826 | - */ | ||
827 | - ctx = job->aio_context; | ||
828 | job_unref_locked(job); | ||
829 | - aio_context_release(ctx); | ||
830 | } | ||
831 | |||
832 | /** | ||
833 | @@ -XXX,XX +XXX,XX @@ int job_cancel_sync(Job *job, bool force) | ||
834 | void job_cancel_sync_all(void) | ||
835 | { | ||
836 | Job *job; | ||
837 | - AioContext *aio_context; | ||
838 | JOB_LOCK_GUARD(); | ||
839 | |||
840 | while ((job = job_next_locked(NULL))) { | ||
841 | - aio_context = job->aio_context; | ||
842 | - aio_context_acquire(aio_context); | ||
843 | job_cancel_sync_locked(job, true); | ||
844 | - aio_context_release(aio_context); | ||
845 | } | ||
846 | } | ||
847 | |||
848 | @@ -XXX,XX +XXX,XX @@ int job_finish_sync_locked(Job *job, | ||
849 | } | ||
850 | |||
851 | job_unlock(); | ||
852 | - AIO_WAIT_WHILE(job->aio_context, | ||
853 | - (job_enter(job), !job_is_completed(job))); | ||
854 | + AIO_WAIT_WHILE_UNLOCKED(job->aio_context, | ||
855 | + (job_enter(job), !job_is_completed(job))); | ||
856 | job_lock(); | ||
857 | |||
858 | ret = (job_is_cancelled_locked(job) && job->ret == 0) | ||
859 | diff --git a/qemu-img.c b/qemu-img.c | ||
229 | index XXXXXXX..XXXXXXX 100644 | 860 | index XXXXXXX..XXXXXXX 100644 |
230 | --- a/tests/test-blockjob-txn.c | 861 | --- a/qemu-img.c |
231 | +++ b/tests/test-blockjob-txn.c | 862 | +++ b/qemu-img.c |
232 | @@ -XXX,XX +XXX,XX @@ static void test_single_job(int expected) | 863 | @@ -XXX,XX +XXX,XX @@ static void run_block_job(BlockJob *job, Error **errp) |
233 | block_job_start(job); | 864 | AioContext *aio_context = block_job_get_aio_context(job); |
234 | 865 | int ret = 0; | |
235 | if (expected == -ECANCELED) { | 866 | |
236 | - block_job_cancel(job); | 867 | - aio_context_acquire(aio_context); |
237 | + block_job_cancel(job, false); | 868 | job_lock(); |
238 | } | 869 | job_ref_locked(&job->job); |
239 | 870 | do { | |
240 | while (result == -EINPROGRESS) { | 871 | @@ -XXX,XX +XXX,XX @@ static void run_block_job(BlockJob *job, Error **errp) |
241 | @@ -XXX,XX +XXX,XX @@ static void test_pair_jobs(int expected1, int expected2) | 872 | } |
242 | block_job_txn_unref(txn); | 873 | job_unref_locked(&job->job); |
243 | 874 | job_unlock(); | |
244 | if (expected1 == -ECANCELED) { | 875 | - aio_context_release(aio_context); |
245 | - block_job_cancel(job1); | 876 | |
246 | + block_job_cancel(job1, false); | 877 | /* publish completion progress only when success */ |
247 | } | 878 | if (!ret) { |
248 | if (expected2 == -ECANCELED) { | 879 | diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c |
249 | - block_job_cancel(job2); | ||
250 | + block_job_cancel(job2, false); | ||
251 | } | ||
252 | |||
253 | while (result1 == -EINPROGRESS || result2 == -EINPROGRESS) { | ||
254 | @@ -XXX,XX +XXX,XX @@ static void test_pair_jobs_fail_cancel_race(void) | ||
255 | block_job_start(job1); | ||
256 | block_job_start(job2); | ||
257 | |||
258 | - block_job_cancel(job1); | ||
259 | + block_job_cancel(job1, false); | ||
260 | |||
261 | /* Now make job2 finish before the main loop kicks jobs. This simulates | ||
262 | * the race between a pending kick and another job completing. | ||
263 | diff --git a/hmp-commands.hx b/hmp-commands.hx | ||
264 | index XXXXXXX..XXXXXXX 100644 | 880 | index XXXXXXX..XXXXXXX 100644 |
265 | --- a/hmp-commands.hx | 881 | --- a/tests/unit/test-bdrv-drain.c |
266 | +++ b/hmp-commands.hx | 882 | +++ b/tests/unit/test-bdrv-drain.c |
267 | @@ -XXX,XX +XXX,XX @@ ETEXI | 883 | @@ -XXX,XX +XXX,XX @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, |
268 | .args_type = "force:-f,device:B", | 884 | tjob->prepare_ret = -EIO; |
269 | .params = "[-f] device", | 885 | break; |
270 | .help = "stop an active background block operation (use -f" | 886 | } |
271 | - "\n\t\t\t if the operation is currently paused)", | 887 | + aio_context_release(ctx); |
272 | + "\n\t\t\t if you want to abort the operation immediately" | 888 | |
273 | + "\n\t\t\t instead of keep running until data is in sync)", | 889 | job_start(&job->job); |
274 | .cmd = hmp_block_job_cancel, | 890 | - aio_context_release(ctx); |
275 | }, | 891 | |
276 | 892 | if (use_iothread) { | |
893 | /* job_co_entry() is run in the I/O thread, wait for the actual job | ||
894 | @@ -XXX,XX +XXX,XX @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, | ||
895 | g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */ | ||
896 | } | ||
897 | |||
898 | - aio_context_acquire(ctx); | ||
899 | WITH_JOB_LOCK_GUARD() { | ||
900 | ret = job_complete_sync_locked(&job->job, &error_abort); | ||
901 | } | ||
902 | g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO)); | ||
903 | |||
904 | + aio_context_acquire(ctx); | ||
905 | if (use_iothread) { | ||
906 | blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort); | ||
907 | assert(blk_get_aio_context(blk_target) == qemu_get_aio_context()); | ||
908 | diff --git a/tests/unit/test-block-iothread.c b/tests/unit/test-block-iothread.c | ||
909 | index XXXXXXX..XXXXXXX 100644 | ||
910 | --- a/tests/unit/test-block-iothread.c | ||
911 | +++ b/tests/unit/test-block-iothread.c | ||
912 | @@ -XXX,XX +XXX,XX @@ static void test_attach_blockjob(void) | ||
913 | aio_poll(qemu_get_aio_context(), false); | ||
914 | } | ||
915 | |||
916 | - aio_context_acquire(ctx); | ||
917 | WITH_JOB_LOCK_GUARD() { | ||
918 | job_complete_sync_locked(&tjob->common.job, &error_abort); | ||
919 | } | ||
920 | + aio_context_acquire(ctx); | ||
921 | blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort); | ||
922 | aio_context_release(ctx); | ||
923 | |||
924 | diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c | ||
925 | index XXXXXXX..XXXXXXX 100644 | ||
926 | --- a/tests/unit/test-blockjob.c | ||
927 | +++ b/tests/unit/test-blockjob.c | ||
928 | @@ -XXX,XX +XXX,XX @@ static void cancel_common(CancelJob *s) | ||
929 | BlockJob *job = &s->common; | ||
930 | BlockBackend *blk = s->blk; | ||
931 | JobStatus sts = job->job.status; | ||
932 | - AioContext *ctx; | ||
933 | - | ||
934 | - ctx = job->job.aio_context; | ||
935 | - aio_context_acquire(ctx); | ||
936 | + AioContext *ctx = job->job.aio_context; | ||
937 | |||
938 | job_cancel_sync(&job->job, true); | ||
939 | WITH_JOB_LOCK_GUARD() { | ||
940 | @@ -XXX,XX +XXX,XX @@ static void cancel_common(CancelJob *s) | ||
941 | assert(job->job.status == JOB_STATUS_NULL); | ||
942 | job_unref_locked(&job->job); | ||
943 | } | ||
944 | - destroy_blk(blk); | ||
945 | |||
946 | + aio_context_acquire(ctx); | ||
947 | + destroy_blk(blk); | ||
948 | aio_context_release(ctx); | ||
949 | + | ||
950 | } | ||
951 | |||
952 | static void test_cancel_created(void) | ||
953 | @@ -XXX,XX +XXX,XX @@ static void test_cancel_concluded(void) | ||
954 | aio_poll(qemu_get_aio_context(), true); | ||
955 | assert_job_status_is(job, JOB_STATUS_PENDING); | ||
956 | |||
957 | - aio_context_acquire(job->aio_context); | ||
958 | WITH_JOB_LOCK_GUARD() { | ||
959 | job_finalize_locked(job, &error_abort); | ||
960 | + assert(job->status == JOB_STATUS_CONCLUDED); | ||
961 | } | ||
962 | - aio_context_release(job->aio_context); | ||
963 | - assert_job_status_is(job, JOB_STATUS_CONCLUDED); | ||
964 | |||
965 | cancel_common(s); | ||
966 | } | ||
967 | @@ -XXX,XX +XXX,XX @@ static void test_complete_in_standby(void) | ||
968 | |||
969 | /* Wait for the job to become READY */ | ||
970 | job_start(job); | ||
971 | - aio_context_acquire(ctx); | ||
972 | /* | ||
973 | * Here we are waiting for the status to change, so don't bother | ||
974 | * protecting the read every time. | ||
975 | */ | ||
976 | - AIO_WAIT_WHILE(ctx, job->status != JOB_STATUS_READY); | ||
977 | - aio_context_release(ctx); | ||
978 | + AIO_WAIT_WHILE_UNLOCKED(ctx, job->status != JOB_STATUS_READY); | ||
979 | |||
980 | /* Begin the drained section, pausing the job */ | ||
981 | bdrv_drain_all_begin(); | ||
982 | @@ -XXX,XX +XXX,XX @@ static void test_complete_in_standby(void) | ||
983 | aio_context_acquire(ctx); | ||
984 | /* This will schedule the job to resume it */ | ||
985 | bdrv_drain_all_end(); | ||
986 | + aio_context_release(ctx); | ||
987 | |||
988 | WITH_JOB_LOCK_GUARD() { | ||
989 | /* But the job cannot run, so it will remain on standby */ | ||
990 | @@ -XXX,XX +XXX,XX @@ static void test_complete_in_standby(void) | ||
991 | job_dismiss_locked(&job, &error_abort); | ||
992 | } | ||
993 | |||
994 | + aio_context_acquire(ctx); | ||
995 | destroy_blk(blk); | ||
996 | aio_context_release(ctx); | ||
997 | iothread_join(iothread); | ||
277 | -- | 998 | -- |
278 | 2.13.6 | 999 | 2.37.3 |
279 | |||
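The right-hand series converts every QMP job handler above to the same shape: take the global job mutex, look the job up, and call the _locked() helper, with no per-AioContext acquire/release pair left around the call. Below is a standalone sketch of that shape using a plain pthread mutex; the names (job_lock, find_job_locked, job_pause_locked, qmp_job_pause) mirror the QEMU ones, but the Job type and list handling are simplified stand-ins rather than the real structures, so this models the locking pattern only.

    /*
     * Standalone model of the pattern above: one global job_mutex protects
     * the job list and per-job state; handlers never touch an AioContext lock.
     * All types and helpers here are simplified stand-ins, not QEMU code.
     */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    typedef struct Job {
        const char *id;
        bool paused;
        struct Job *next;
    } Job;

    static pthread_mutex_t job_mutex = PTHREAD_MUTEX_INITIALIZER;
    static Job *job_list;            /* protected by job_mutex */

    static void job_lock(void)   { pthread_mutex_lock(&job_mutex); }
    static void job_unlock(void) { pthread_mutex_unlock(&job_mutex); }

    /* Called with job_mutex held. */
    static Job *find_job_locked(const char *id)
    {
        for (Job *j = job_list; j; j = j->next) {
            if (!strcmp(j->id, id)) {
                return j;
            }
        }
        return NULL;
    }

    /* Called with job_mutex held. */
    static void job_pause_locked(Job *job)
    {
        job->paused = true;
    }

    /* QMP-style handler: lookup and mutation happen under one lock. */
    static void qmp_job_pause(const char *id)
    {
        job_lock();
        Job *job = find_job_locked(id);
        if (job) {
            job_pause_locked(job);
        }
        job_unlock();
    }

    int main(void)
    {
        Job j = { .id = "job0", .paused = false, .next = NULL };
        job_list = &j;
        qmp_job_pause("job0");
        printf("job0 paused: %d\n", j.paused);
        return 0;
    }

Because lookup and mutation sit under one lock, a handler can no longer observe the job switching AioContext between finding it and operating on it, which is the kind of context-change bookkeeping the removed comments in blockdev.c and job-qmp.c were dealing with.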
1 | From: John Snow <jsnow@redhat.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | If speed is '0' it's not actually "less than" the previous speed. | 3 | Not sure what the atomic here was supposed to do, since job.busy |
4 | Kick the job in this case too. | 4 | is protected by the job lock. Since the whole function |
5 | is called under job_mutex, just remove the atomic. | ||
5 | 6 | ||
6 | Signed-off-by: John Snow <jsnow@redhat.com> | 7 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
7 | Reviewed-by: Eric Blake <eblake@redhat.com> | 8 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> |
9 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
8 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 10 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
11 | Message-Id: <20220926093214.506243-20-eesposit@redhat.com> | ||
9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
10 | --- | 13 | --- |
11 | blockjob.c | 2 +- | 14 | blockjob.c | 2 +- |
12 | 1 file changed, 1 insertion(+), 1 deletion(-) | 15 | 1 file changed, 1 insertion(+), 1 deletion(-) |
13 | 16 | ||
14 | diff --git a/blockjob.c b/blockjob.c | 17 | diff --git a/blockjob.c b/blockjob.c |
15 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/blockjob.c | 19 | --- a/blockjob.c |
17 | +++ b/blockjob.c | 20 | +++ b/blockjob.c |
18 | @@ -XXX,XX +XXX,XX @@ void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) | 21 | @@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp) |
19 | } | 22 | info = g_new0(BlockJobInfo, 1); |
20 | 23 | info->type = g_strdup(job_type_str(&job->job)); | |
21 | job->speed = speed; | 24 | info->device = g_strdup(job->job.id); |
22 | - if (speed <= old_speed) { | 25 | - info->busy = qatomic_read(&job->job.busy); |
23 | + if (speed && speed <= old_speed) { | 26 | + info->busy = job->job.busy; |
24 | return; | 27 | info->paused = job->job.pause_count > 0; |
25 | } | 28 | info->offset = progress_current; |
26 | 29 | info->len = progress_total; | |
27 | -- | 30 | -- |
28 | 2.13.6 | 31 | 2.37.3 |
29 | |||
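The set-speed fix on the left hinges on a speed of 0 meaning "no rate limit": with the old guard, a new speed of 0 always compared as not exceeding the previous value, so the sleeping job was never kicked to drop its throttling delay. A minimal illustration of the guard follows; the helper and the sample values are made up for this sketch, only the condition matches the patch.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * New guard from the patch: kick unless the new speed is a non-zero
     * value that does not exceed the old one.
     */
    static bool needs_kick(int64_t old_speed, int64_t new_speed)
    {
        return !(new_speed && new_speed <= old_speed);
    }

    int main(void)
    {
        printf("1024 -> 2048: kick=%d\n", needs_kick(1024, 2048)); /* faster: kick */
        printf("1024 ->  512: kick=%d\n", needs_kick(1024, 512));  /* slower: no kick */
        printf("1024 ->    0: kick=%d\n", needs_kick(1024, 0));    /* limit removed: kick (the fix) */
        return 0;
    }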
1 | From: John Snow <jsnow@redhat.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | For jobs that have reached their CONCLUDED state, prior to having their | 3 | These public functions are not used anywhere, thus can be dropped. |
4 | last reference put down (meaning jobs that have completed successfully, | ||
5 | unsuccessfully, or have been canceled), allow the user to dismiss the | ||
6 | job's lingering status report via block-job-dismiss. | ||
7 | 4 | ||
8 | This gives management APIs the chance to conclusively determine if a job | 5 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
9 | failed or succeeded, even if the event broadcast was missed. | 6 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> |
10 | 7 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | |
11 | Note: block_job_do_dismiss and block_job_decommission happen to do | 8 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> |
12 | exactly the same thing, but they're called from different semantic | 9 | Message-Id: <20220926093214.506243-21-eesposit@redhat.com> |
13 | contexts, so both aliases are kept to improve readability. | ||
14 | |||
15 | Note 2: Don't worry about the 0x04 flag definition for AUTO_DISMISS, she | ||
16 | has a friend coming in a future patch to fill the hole where 0x02 is. | ||
17 | |||
18 | Verbs: | ||
19 | Dismiss: operates on CONCLUDED jobs only. | ||
20 | Signed-off-by: John Snow <jsnow@redhat.com> | ||
21 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
22 | --- | 11 | --- |
23 | qapi/block-core.json | 24 +++++++++++++++++++++++- | 12 | include/block/blockjob.h | 31 ++++++++++++------------------- |
24 | include/block/blockjob.h | 14 ++++++++++++++ | 13 | blockjob.c | 16 ++-------------- |
25 | blockdev.c | 14 ++++++++++++++ | 14 | 2 files changed, 14 insertions(+), 33 deletions(-) |
26 | blockjob.c | 26 ++++++++++++++++++++++++-- | ||
27 | block/trace-events | 1 + | ||
28 | 5 files changed, 76 insertions(+), 3 deletions(-) | ||
29 | 15 | ||
30 | diff --git a/qapi/block-core.json b/qapi/block-core.json | ||
31 | index XXXXXXX..XXXXXXX 100644 | ||
32 | --- a/qapi/block-core.json | ||
33 | +++ b/qapi/block-core.json | ||
34 | @@ -XXX,XX +XXX,XX @@ | ||
35 | # | ||
36 | # @complete: see @block-job-complete | ||
37 | # | ||
38 | +# @dismiss: see @block-job-dismiss | ||
39 | +# | ||
40 | # Since: 2.12 | ||
41 | ## | ||
42 | { 'enum': 'BlockJobVerb', | ||
43 | - 'data': ['cancel', 'pause', 'resume', 'set-speed', 'complete' ] } | ||
44 | + 'data': ['cancel', 'pause', 'resume', 'set-speed', 'complete', 'dismiss' ] } | ||
45 | |||
46 | ## | ||
47 | # @BlockJobStatus: | ||
48 | @@ -XXX,XX +XXX,XX @@ | ||
49 | { 'command': 'block-job-complete', 'data': { 'device': 'str' } } | ||
50 | |||
51 | ## | ||
52 | +# @block-job-dismiss: | ||
53 | +# | ||
54 | +# For jobs that have already concluded, remove them from the block-job-query | ||
55 | +# list. This command only needs to be run for jobs which were started with | ||
56 | +# QEMU 2.12+ job lifetime management semantics. | ||
57 | +# | ||
58 | +# This command will refuse to operate on any job that has not yet reached | ||
59 | +# its terminal state, BLOCK_JOB_STATUS_CONCLUDED. For jobs that make use of | ||
60 | +# BLOCK_JOB_READY event, block-job-cancel or block-job-complete will still need | ||
61 | +# to be used as appropriate. | ||
62 | +# | ||
63 | +# @id: The job identifier. | ||
64 | +# | ||
65 | +# Returns: Nothing on success | ||
66 | +# | ||
67 | +# Since: 2.12 | ||
68 | +## | ||
69 | +{ 'command': 'block-job-dismiss', 'data': { 'id': 'str' } } | ||
70 | + | ||
71 | +## | ||
72 | # @BlockdevDiscardOptions: | ||
73 | # | ||
74 | # Determines how to handle discard requests. | ||
75 | diff --git a/include/block/blockjob.h b/include/block/blockjob.h | 16 | diff --git a/include/block/blockjob.h b/include/block/blockjob.h |
76 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
77 | --- a/include/block/blockjob.h | 18 | --- a/include/block/blockjob.h |
78 | +++ b/include/block/blockjob.h | 19 | +++ b/include/block/blockjob.h |
79 | @@ -XXX,XX +XXX,XX @@ typedef struct BlockJob { | 20 | @@ -XXX,XX +XXX,XX @@ typedef struct BlockJob { |
80 | /** Current state; See @BlockJobStatus for details. */ | 21 | */ |
81 | BlockJobStatus status; | ||
82 | |||
83 | + /** True if this job should automatically dismiss itself */ | ||
84 | + bool auto_dismiss; | ||
85 | + | ||
86 | BlockJobTxn *txn; | ||
87 | QLIST_ENTRY(BlockJob) txn_list; | ||
88 | } BlockJob; | ||
89 | @@ -XXX,XX +XXX,XX @@ typedef enum BlockJobCreateFlags { | ||
90 | BLOCK_JOB_DEFAULT = 0x00, | ||
91 | /* BlockJob is not QMP-created and should not send QMP events */ | ||
92 | BLOCK_JOB_INTERNAL = 0x01, | ||
93 | + /* BlockJob requires manual dismiss step */ | ||
94 | + BLOCK_JOB_MANUAL_DISMISS = 0x04, | ||
95 | } BlockJobCreateFlags; | ||
96 | 22 | ||
97 | /** | 23 | /** |
98 | @@ -XXX,XX +XXX,XX @@ void block_job_cancel(BlockJob *job); | 24 | - * block_job_next: |
99 | void block_job_complete(BlockJob *job, Error **errp); | 25 | + * block_job_next_locked: |
26 | * @job: A block job, or %NULL. | ||
27 | * | ||
28 | * Get the next element from the list of block jobs after @job, or the | ||
29 | * first one if @job is %NULL. | ||
30 | * | ||
31 | * Returns the requested job, or %NULL if there are no more jobs left. | ||
32 | + * Called with job lock held. | ||
33 | */ | ||
34 | -BlockJob *block_job_next(BlockJob *job); | ||
35 | - | ||
36 | -/* Same as block_job_next(), but called with job lock held. */ | ||
37 | BlockJob *block_job_next_locked(BlockJob *job); | ||
100 | 38 | ||
101 | /** | 39 | /** |
102 | + * block_job_dismiss: | 40 | @@ -XXX,XX +XXX,XX @@ BlockJob *block_job_next_locked(BlockJob *job); |
103 | + * @job: The job to be dismissed. | 41 | * Get the block job identified by @id (which must not be %NULL). |
104 | + * @errp: Error object. | 42 | * |
43 | * Returns the requested job, or %NULL if it doesn't exist. | ||
44 | + * Called with job lock *not* held. | ||
45 | */ | ||
46 | BlockJob *block_job_get(const char *id); | ||
47 | |||
48 | @@ -XXX,XX +XXX,XX @@ void block_job_remove_all_bdrv(BlockJob *job); | ||
49 | bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs); | ||
50 | |||
51 | /** | ||
52 | - * block_job_set_speed: | ||
53 | + * block_job_set_speed_locked: | ||
54 | * @job: The job to set the speed for. | ||
55 | * @speed: The new value | ||
56 | * @errp: Error object. | ||
57 | * | ||
58 | * Set a rate-limiting parameter for the job; the actual meaning may | ||
59 | * vary depending on the job type. | ||
60 | - */ | ||
61 | -bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp); | ||
62 | - | ||
63 | -/* | ||
64 | - * Same as block_job_set_speed(), but called with job lock held. | ||
65 | - * Might release the lock temporarily. | ||
105 | + * | 66 | + * |
106 | + * Remove a concluded job from the query list. | 67 | + * Called with job lock held, but might release it temporarily. |
107 | + */ | 68 | */ |
108 | +void block_job_dismiss(BlockJob **job, Error **errp); | 69 | bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp); |
109 | + | 70 | |
110 | +/** | 71 | /** |
111 | * block_job_query: | 72 | - * block_job_query: |
73 | + * block_job_query_locked: | ||
112 | * @job: The job to get information about. | 74 | * @job: The job to get information about. |
113 | * | 75 | * |
114 | diff --git a/blockdev.c b/blockdev.c | 76 | * Return information about a job. |
115 | index XXXXXXX..XXXXXXX 100644 | 77 | + * |
116 | --- a/blockdev.c | 78 | + * Called with job lock held. |
117 | +++ b/blockdev.c | 79 | */ |
118 | @@ -XXX,XX +XXX,XX @@ void qmp_block_job_complete(const char *device, Error **errp) | 80 | -BlockJobInfo *block_job_query(BlockJob *job, Error **errp); |
119 | aio_context_release(aio_context); | 81 | - |
120 | } | 82 | -/* Same as block_job_query(), but called with job lock held. */ |
121 | 83 | BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp); | |
122 | +void qmp_block_job_dismiss(const char *id, Error **errp) | 84 | |
123 | +{ | 85 | /** |
124 | + AioContext *aio_context; | 86 | - * block_job_iostatus_reset: |
125 | + BlockJob *job = find_block_job(id, &aio_context, errp); | 87 | + * block_job_iostatus_reset_locked: |
126 | + | 88 | * @job: The job whose I/O status should be reset. |
127 | + if (!job) { | 89 | * |
128 | + return; | 90 | * Reset I/O status on @job and on BlockDriverState objects it uses, |
129 | + } | 91 | * other than job->blk. |
130 | + | 92 | + * |
131 | + trace_qmp_block_job_dismiss(job); | 93 | + * Called with job lock held. |
132 | + block_job_dismiss(&job, errp); | 94 | */ |
133 | + aio_context_release(aio_context); | 95 | -void block_job_iostatus_reset(BlockJob *job); |
134 | +} | 96 | - |
135 | + | 97 | -/* Same as block_job_iostatus_reset(), but called with job lock held. */ |
136 | void qmp_change_backing_file(const char *device, | 98 | void block_job_iostatus_reset_locked(BlockJob *job); |
137 | const char *image_node_name, | 99 | |
138 | const char *backing_file, | 100 | /* |
139 | diff --git a/blockjob.c b/blockjob.c | 101 | diff --git a/blockjob.c b/blockjob.c |
140 | index XXXXXXX..XXXXXXX 100644 | 102 | index XXXXXXX..XXXXXXX 100644 |
141 | --- a/blockjob.c | 103 | --- a/blockjob.c |
142 | +++ b/blockjob.c | 104 | +++ b/blockjob.c |
143 | @@ -XXX,XX +XXX,XX @@ bool BlockJobVerbTable[BLOCK_JOB_VERB__MAX][BLOCK_JOB_STATUS__MAX] = { | 105 | @@ -XXX,XX +XXX,XX @@ BlockJob *block_job_next_locked(BlockJob *bjob) |
144 | [BLOCK_JOB_VERB_RESUME] = {0, 1, 1, 1, 1, 1, 0, 0, 0}, | 106 | return job ? container_of(job, BlockJob, job) : NULL; |
145 | [BLOCK_JOB_VERB_SET_SPEED] = {0, 1, 1, 1, 1, 1, 0, 0, 0}, | ||
146 | [BLOCK_JOB_VERB_COMPLETE] = {0, 0, 0, 0, 1, 0, 0, 0, 0}, | ||
147 | + [BLOCK_JOB_VERB_DISMISS] = {0, 0, 0, 0, 0, 0, 0, 1, 0}, | ||
148 | }; | ||
149 | |||
150 | static void block_job_state_transition(BlockJob *job, BlockJobStatus s1) | ||
151 | @@ -XXX,XX +XXX,XX @@ static void block_job_decommission(BlockJob *job) | ||
152 | block_job_unref(job); | ||
153 | } | 107 | } |
154 | 108 | ||
155 | +static void block_job_do_dismiss(BlockJob *job) | 109 | -BlockJob *block_job_next(BlockJob *bjob) |
156 | +{ | 110 | -{ |
157 | + block_job_decommission(job); | 111 | - JOB_LOCK_GUARD(); |
158 | +} | 112 | - return block_job_next_locked(bjob); |
159 | + | 113 | -} |
160 | static void block_job_conclude(BlockJob *job) | 114 | - |
115 | BlockJob *block_job_get_locked(const char *id) | ||
161 | { | 116 | { |
162 | block_job_state_transition(job, BLOCK_JOB_STATUS_CONCLUDED); | 117 | Job *job = job_get_locked(id); |
163 | + if (job->auto_dismiss || !block_job_started(job)) { | 118 | @@ -XXX,XX +XXX,XX @@ bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp) |
164 | + block_job_do_dismiss(job); | 119 | return true; |
165 | + } | ||
166 | } | 120 | } |
167 | 121 | ||
168 | static void block_job_completed_single(BlockJob *job) | 122 | -bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) |
169 | @@ -XXX,XX +XXX,XX @@ static void block_job_completed_single(BlockJob *job) | 123 | +static bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp) |
170 | QLIST_REMOVE(job, txn_list); | 124 | { |
171 | block_job_txn_unref(job->txn); | 125 | JOB_LOCK_GUARD(); |
172 | block_job_conclude(job); | 126 | return block_job_set_speed_locked(job, speed, errp); |
173 | - block_job_decommission(job); | 127 | @@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp) |
128 | return info; | ||
174 | } | 129 | } |
175 | 130 | ||
176 | static void block_job_cancel_async(BlockJob *job) | 131 | -BlockJobInfo *block_job_query(BlockJob *job, Error **errp) |
177 | @@ -XXX,XX +XXX,XX @@ void block_job_complete(BlockJob *job, Error **errp) | 132 | -{ |
178 | job->driver->complete(job, errp); | 133 | - JOB_LOCK_GUARD(); |
134 | - return block_job_query_locked(job, errp); | ||
135 | -} | ||
136 | - | ||
137 | /* Called with job lock held */ | ||
138 | static void block_job_iostatus_set_err_locked(BlockJob *job, int error) | ||
139 | { | ||
140 | @@ -XXX,XX +XXX,XX @@ void block_job_iostatus_reset_locked(BlockJob *job) | ||
141 | job->iostatus = BLOCK_DEVICE_IO_STATUS_OK; | ||
179 | } | 142 | } |
180 | 143 | ||
181 | +void block_job_dismiss(BlockJob **jobptr, Error **errp) | 144 | -void block_job_iostatus_reset(BlockJob *job) |
182 | +{ | 145 | +static void block_job_iostatus_reset(BlockJob *job) |
183 | + BlockJob *job = *jobptr; | ||
184 | + /* similarly to _complete, this is QMP-interface only. */ | ||
185 | + assert(job->id); | ||
186 | + if (block_job_apply_verb(job, BLOCK_JOB_VERB_DISMISS, errp)) { | ||
187 | + return; | ||
188 | + } | ||
189 | + | ||
190 | + block_job_do_dismiss(job); | ||
191 | + *jobptr = NULL; | ||
192 | +} | ||
193 | + | ||
194 | void block_job_user_pause(BlockJob *job, Error **errp) | ||
195 | { | 146 | { |
196 | if (block_job_apply_verb(job, BLOCK_JOB_VERB_PAUSE, errp)) { | 147 | JOB_LOCK_GUARD(); |
197 | @@ -XXX,XX +XXX,XX @@ void block_job_user_resume(BlockJob *job, Error **errp) | 148 | block_job_iostatus_reset_locked(job); |
198 | void block_job_cancel(BlockJob *job) | ||
199 | { | ||
200 | if (job->status == BLOCK_JOB_STATUS_CONCLUDED) { | ||
201 | - return; | ||
202 | + block_job_do_dismiss(job); | ||
203 | } else if (block_job_started(job)) { | ||
204 | block_job_cancel_async(job); | ||
205 | block_job_enter(job); | ||
206 | @@ -XXX,XX +XXX,XX @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver, | ||
207 | job->paused = true; | ||
208 | job->pause_count = 1; | ||
209 | job->refcnt = 1; | ||
210 | + job->auto_dismiss = !(flags & BLOCK_JOB_MANUAL_DISMISS); | ||
211 | block_job_state_transition(job, BLOCK_JOB_STATUS_CREATED); | ||
212 | aio_timer_init(qemu_get_aio_context(), &job->sleep_timer, | ||
213 | QEMU_CLOCK_REALTIME, SCALE_NS, | ||
214 | diff --git a/block/trace-events b/block/trace-events | ||
215 | index XXXXXXX..XXXXXXX 100644 | ||
216 | --- a/block/trace-events | ||
217 | +++ b/block/trace-events | ||
218 | @@ -XXX,XX +XXX,XX @@ qmp_block_job_cancel(void *job) "job %p" | ||
219 | qmp_block_job_pause(void *job) "job %p" | ||
220 | qmp_block_job_resume(void *job) "job %p" | ||
221 | qmp_block_job_complete(void *job) "job %p" | ||
222 | +qmp_block_job_dismiss(void *job) "job %p" | ||
223 | qmp_block_stream(void *bs, void *job) "bs %p job %p" | ||
224 | |||
225 | # block/file-win32.c | ||
226 | -- | 149 | -- |
227 | 2.13.6 | 150 | 2.37.3 |
228 | |||
1 | From: John Snow <jsnow@redhat.com> | 1 | From: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | For jobs that are stuck waiting on others in a transaction, it would | 3 | These public functions are not used anywhere, thus can be dropped. |
4 | be nice to know that they are no longer "running" in that sense, but | 4 | Also, since this is the final job API that doesn't use AioContext |
5 | instead are waiting on other jobs in the transaction. | 5 | lock and replaces it with job_lock, adjust all remaining function |
6 | documentation to clearly specify if the job lock is taken or not. | ||
6 | 7 | ||
7 | Jobs that are "waiting" in this sense cannot be meaningfully altered | 8 | Also document the locking requirements for a few functions |
8 | any longer as they have left their running loop. The only meaningful | 9 | where the second version is not removed. |
9 | user verb for jobs in this state is "cancel," which will cancel the | ||
10 | whole transaction, too. | ||
11 | 10 | ||
12 | Transitions: | 11 | Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com> |
13 | Running -> Waiting: Normal transition. | 12 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> |
14 | Ready -> Waiting: Normal transition. | 13 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
15 | Waiting -> Aborting: Transactional cancellation. | 14 | Message-Id: <20220926093214.506243-22-eesposit@redhat.com> |
16 | Waiting -> Concluded: Normal transition. | 15 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> |
17 | |||
18 | Removed Transitions: | ||
19 | Running -> Concluded: Jobs must go to WAITING first. | ||
20 | Ready -> Concluded: Jobs must go to WAITING first. | ||
21 | |||
22 | Verbs: | ||
23 | Cancel: Can be applied to WAITING jobs. | ||
24 | |||
25 | +---------+ | ||
26 | |UNDEFINED| | ||
27 | +--+------+ | ||
28 | | | ||
29 | +--v----+ | ||
30 | +---------+CREATED+-----------------+ | ||
31 | | +--+----+ | | ||
32 | | | | | ||
33 | | +--v----+ +------+ | | ||
34 | +---------+RUNNING<----->PAUSED| | | ||
35 | | +--+-+--+ +------+ | | ||
36 | | | | | | ||
37 | | | +------------------+ | | ||
38 | | | | | | ||
39 | | +--v--+ +-------+ | | | ||
40 | +---------+READY<------->STANDBY| | | | ||
41 | | +--+--+ +-------+ | | | ||
42 | | | | | | ||
43 | | +--v----+ | | | ||
44 | +---------+WAITING<---------------+ | | ||
45 | | +--+----+ | | ||
46 | | | | | ||
47 | +--v-----+ +--v------+ | | ||
48 | |ABORTING+--->CONCLUDED| | | ||
49 | +--------+ +--+------+ | | ||
50 | | | | ||
51 | +--v-+ | | ||
52 | |NULL<--------------------+ | ||
53 | +----+ | ||
54 | |||
55 | Signed-off-by: John Snow <jsnow@redhat.com> | ||
56 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 16 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
57 | --- | 17 | --- |
58 | qapi/block-core.json | 6 +++++- | 18 | include/qemu/job.h | 110 +++++++++++++------------------------ |
59 | blockjob.c | 37 ++++++++++++++++++++----------------- | 19 | job.c | 107 ++---------------------------------- |
60 | 2 files changed, 25 insertions(+), 18 deletions(-) | 20 | tests/unit/test-blockjob.c | 4 +- |
21 | 3 files changed, 46 insertions(+), 175 deletions(-) | ||
61 | 22 | ||
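With the unlocked wrappers gone, callers hold the job lock themselves and use
only the _locked variants inside the critical section. A minimal sketch of the
calling convention this implies, assuming a hypothetical caller
example_user_pause_cycle() that is not part of the series:

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "qemu/job.h"

    /* Hypothetical caller: hold job_mutex for the whole scope and use only
     * the _locked variants of the job API inside it. */
    static void example_user_pause_cycle(Job *job, Error **errp)
    {
        JOB_LOCK_GUARD();            /* takes job_mutex for this scope */

        job_ref_locked(job);         /* keep the job alive while we use it */
        job_user_pause_locked(job, errp);
        if (job_user_paused_locked(job)) {
            /* documented to possibly release the lock temporarily */
            job_user_resume_locked(job, errp);
        }
        job_unref_locked(job);
    }
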
62 | diff --git a/qapi/block-core.json b/qapi/block-core.json | 23 | diff --git a/include/qemu/job.h b/include/qemu/job.h |
63 | index XXXXXXX..XXXXXXX 100644 | 24 | index XXXXXXX..XXXXXXX 100644 |
64 | --- a/qapi/block-core.json | 25 | --- a/include/qemu/job.h |
65 | +++ b/qapi/block-core.json | 26 | +++ b/include/qemu/job.h |
66 | @@ -XXX,XX +XXX,XX @@ | 27 | @@ -XXX,XX +XXX,XX @@ JobTxn *job_txn_new(void); |
67 | # @standby: The job is ready, but paused. This is nearly identical to @paused. | 28 | /** |
68 | # The job may return to @ready or otherwise be canceled. | 29 | * Release a reference that was previously acquired with job_txn_add_job or |
69 | # | 30 | * job_txn_new. If it's the last reference to the object, it will be freed. |
70 | +# @waiting: The job is waiting for other jobs in the transaction to converge | 31 | + * |
71 | +# to the waiting state. This status will likely not be visible for | 32 | + * Called with job lock *not* held. |
72 | +# the last job in a transaction. | 33 | */ |
73 | +# | 34 | void job_txn_unref(JobTxn *txn); |
74 | # @aborting: The job is in the process of being aborted, and will finish with | 35 | |
75 | # an error. The job will afterwards report that it is @concluded. | 36 | @@ -XXX,XX +XXX,XX @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn, |
76 | # This status may not be visible to the management process. | 37 | /** |
77 | @@ -XXX,XX +XXX,XX @@ | 38 | * Add a reference to Job refcnt, it will be decreased with job_unref, and then |
78 | ## | 39 | * be freed if it comes to be the last reference. |
79 | { 'enum': 'BlockJobStatus', | 40 | + * |
80 | 'data': ['undefined', 'created', 'running', 'paused', 'ready', 'standby', | 41 | + * Called with job lock held. |
81 | - 'aborting', 'concluded', 'null' ] } | 42 | */ |
82 | + 'waiting', 'aborting', 'concluded', 'null' ] } | 43 | -void job_ref(Job *job); |
83 | 44 | - | |
84 | ## | 45 | -/* Same as job_ref(), but called with job lock held. */ |
85 | # @BlockJobInfo: | 46 | void job_ref_locked(Job *job); |
86 | diff --git a/blockjob.c b/blockjob.c | 47 | |
48 | /** | ||
49 | - * Release a reference that was previously acquired with job_ref() or | ||
50 | + * Release a reference that was previously acquired with job_ref_locked() or | ||
51 | * job_create(). If it's the last reference to the object, it will be freed. | ||
52 | * | ||
53 | * Takes AioContext lock internally to invoke a job->driver callback. | ||
54 | + * Called with job lock held. | ||
55 | */ | ||
56 | -void job_unref(Job *job); | ||
57 | - | ||
58 | -/* Same as job_unref(), but called with job lock held. */ | ||
59 | void job_unref_locked(Job *job); | ||
60 | |||
61 | /** | ||
62 | @@ -XXX,XX +XXX,XX @@ void job_progress_increase_remaining(Job *job, uint64_t delta); | ||
63 | * Conditionally enter the job coroutine if the job is ready to run, not | ||
64 | * already busy and fn() returns true. fn() is called while under the job_lock | ||
65 | * critical section. | ||
66 | - */ | ||
67 | -void job_enter_cond(Job *job, bool(*fn)(Job *job)); | ||
68 | - | ||
69 | -/* | ||
70 | - * Same as job_enter_cond(), but called with job lock held. | ||
71 | - * Might release the lock temporarily. | ||
72 | + * | ||
73 | + * Called with job lock held, but might release it temporarily. | ||
74 | */ | ||
75 | void job_enter_cond_locked(Job *job, bool(*fn)(Job *job)); | ||
76 | |||
77 | @@ -XXX,XX +XXX,XX @@ bool job_cancel_requested(Job *job); | ||
78 | |||
79 | /** | ||
80 | * Returns whether the job is in a completed state. | ||
81 | - * Called with job_mutex *not* held. | ||
82 | + * Called with job lock held. | ||
83 | */ | ||
84 | -bool job_is_completed(Job *job); | ||
85 | - | ||
86 | -/* Same as job_is_completed(), but called with job lock held. */ | ||
87 | bool job_is_completed_locked(Job *job); | ||
88 | |||
89 | /** | ||
90 | @@ -XXX,XX +XXX,XX @@ bool job_is_ready_locked(Job *job); | ||
91 | /** | ||
92 | * Request @job to pause at the next pause point. Must be paired with | ||
93 | * job_resume(). If the job is supposed to be resumed by user action, call | ||
94 | - * job_user_pause() instead. | ||
95 | + * job_user_pause_locked() instead. | ||
96 | + * | ||
97 | + * Called with job lock *not* held. | ||
98 | */ | ||
99 | void job_pause(Job *job); | ||
100 | |||
101 | /* Same as job_pause(), but called with job lock held. */ | ||
102 | void job_pause_locked(Job *job); | ||
103 | |||
104 | -/** Resumes a @job paused with job_pause. */ | ||
105 | +/** Resumes a @job paused with job_pause. Called with job lock *not* held. */ | ||
106 | void job_resume(Job *job); | ||
107 | |||
108 | /* | ||
109 | @@ -XXX,XX +XXX,XX @@ void job_resume_locked(Job *job); | ||
110 | /** | ||
111 | * Asynchronously pause the specified @job. | ||
112 | * Do not allow a resume until a matching call to job_user_resume. | ||
113 | + * Called with job lock held. | ||
114 | */ | ||
115 | -void job_user_pause(Job *job, Error **errp); | ||
116 | - | ||
117 | -/* Same as job_user_pause(), but called with job lock held. */ | ||
118 | void job_user_pause_locked(Job *job, Error **errp); | ||
119 | |||
120 | -/** Returns true if the job is user-paused. */ | ||
121 | -bool job_user_paused(Job *job); | ||
122 | - | ||
123 | -/* Same as job_user_paused(), but called with job lock held. */ | ||
124 | +/** | ||
125 | + * Returns true if the job is user-paused. | ||
126 | + * Called with job lock held. | ||
127 | + */ | ||
128 | bool job_user_paused_locked(Job *job); | ||
129 | |||
130 | /** | ||
131 | * Resume the specified @job. | ||
132 | - * Must be paired with a preceding job_user_pause. | ||
133 | - */ | ||
134 | -void job_user_resume(Job *job, Error **errp); | ||
135 | - | ||
136 | -/* | ||
137 | - * Same as job_user_resume(), but called with job lock held. | ||
138 | - * Might release the lock temporarily. | ||
139 | + * Must be paired with a preceding job_user_pause_locked. | ||
140 | + * Called with job lock held, but might release it temporarily. | ||
141 | */ | ||
142 | void job_user_resume_locked(Job *job, Error **errp); | ||
143 | |||
144 | @@ -XXX,XX +XXX,XX @@ void job_user_resume_locked(Job *job, Error **errp); | ||
145 | * first one if @job is %NULL. | ||
146 | * | ||
147 | * Returns the requested job, or %NULL if there are no more jobs left. | ||
148 | + * Called with job lock *not* held. | ||
149 | */ | ||
150 | Job *job_next(Job *job); | ||
151 | |||
152 | @@ -XXX,XX +XXX,XX @@ Job *job_next_locked(Job *job); | ||
153 | * Get the job identified by @id (which must not be %NULL). | ||
154 | * | ||
155 | * Returns the requested job, or %NULL if it doesn't exist. | ||
156 | + * Called with job lock held. | ||
157 | */ | ||
158 | -Job *job_get(const char *id); | ||
159 | - | ||
160 | -/* Same as job_get(), but called with job lock held. */ | ||
161 | Job *job_get_locked(const char *id); | ||
162 | |||
163 | /** | ||
164 | * Check whether the verb @verb can be applied to @job in its current state. | ||
165 | * Returns 0 if the verb can be applied; otherwise errp is set and -EPERM | ||
166 | * returned. | ||
167 | + * | ||
168 | + * Called with job lock held. | ||
169 | */ | ||
170 | -int job_apply_verb(Job *job, JobVerb verb, Error **errp); | ||
171 | - | ||
172 | -/* Same as job_apply_verb, but called with job lock held. */ | ||
173 | int job_apply_verb_locked(Job *job, JobVerb verb, Error **errp); | ||
174 | |||
175 | /** | ||
176 | @@ -XXX,XX +XXX,XX @@ void job_early_fail(Job *job); | ||
177 | */ | ||
178 | void job_transition_to_ready(Job *job); | ||
179 | |||
180 | -/** Asynchronously complete the specified @job. */ | ||
181 | -void job_complete(Job *job, Error **errp); | ||
182 | - | ||
183 | -/* | ||
184 | - * Same as job_complete(), but called with job lock held. | ||
185 | - * Might release the lock temporarily. | ||
186 | +/** | ||
187 | + * Asynchronously complete the specified @job. | ||
188 | + * Called with job lock held, but might release it temporarily. | ||
189 | */ | ||
190 | void job_complete_locked(Job *job, Error **errp); | ||
191 | |||
192 | /** | ||
193 | * Asynchronously cancel the specified @job. If @force is true, the job should | ||
194 | * be cancelled immediately without waiting for a consistent state. | ||
195 | + * Called with job lock held. | ||
196 | */ | ||
197 | -void job_cancel(Job *job, bool force); | ||
198 | - | ||
199 | -/* Same as job_cancel(), but called with job lock held. */ | ||
200 | void job_cancel_locked(Job *job, bool force); | ||
201 | |||
202 | /** | ||
203 | - * Cancels the specified job like job_cancel(), but may refuse to do so if the | ||
204 | - * operation isn't meaningful in the current state of the job. | ||
205 | + * Cancels the specified job like job_cancel_locked(), but may refuse | ||
206 | + * to do so if the operation isn't meaningful in the current state of the job. | ||
207 | + * Called with job lock held. | ||
208 | */ | ||
209 | -void job_user_cancel(Job *job, bool force, Error **errp); | ||
210 | - | ||
211 | -/* Same as job_user_cancel(), but called with job lock held. */ | ||
212 | void job_user_cancel_locked(Job *job, bool force, Error **errp); | ||
213 | |||
214 | /** | ||
215 | @@ -XXX,XX +XXX,XX @@ void job_cancel_sync_all(void); | ||
216 | |||
217 | /** | ||
218 | * @job: The job to be completed. | ||
219 | - * @errp: Error object which may be set by job_complete(); this is not | ||
220 | + * @errp: Error object which may be set by job_complete_locked(); this is not | ||
221 | * necessarily set on every error, the job return value has to be | ||
222 | * checked as well. | ||
223 | * | ||
224 | @@ -XXX,XX +XXX,XX @@ void job_cancel_sync_all(void); | ||
225 | * function). | ||
226 | * | ||
227 | * Returns the return value from the job. | ||
228 | - * Called with job_lock *not* held. | ||
229 | + * Called with job_lock held. | ||
230 | */ | ||
231 | -int job_complete_sync(Job *job, Error **errp); | ||
232 | - | ||
233 | -/* Same as job_complete_sync, but called with job lock held. */ | ||
234 | int job_complete_sync_locked(Job *job, Error **errp); | ||
235 | |||
236 | /** | ||
237 | @@ -XXX,XX +XXX,XX @@ int job_complete_sync_locked(Job *job, Error **errp); | ||
238 | * FIXME: Make the below statement universally true: | ||
239 | * For jobs that support the manual workflow mode, all graph changes that occur | ||
240 | * as a result will occur after this command and before a successful reply. | ||
241 | + * | ||
242 | + * Called with job lock held. | ||
243 | */ | ||
244 | -void job_finalize(Job *job, Error **errp); | ||
245 | - | ||
246 | -/* Same as job_finalize(), but called with job lock held. */ | ||
247 | void job_finalize_locked(Job *job, Error **errp); | ||
248 | |||
249 | /** | ||
250 | * Remove the concluded @job from the query list and resets the passed pointer | ||
251 | * to %NULL. Returns an error if the job is not actually concluded. | ||
252 | + * | ||
253 | + * Called with job lock held. | ||
254 | */ | ||
255 | -void job_dismiss(Job **job, Error **errp); | ||
256 | - | ||
257 | -/* Same as job_dismiss(), but called with job lock held. */ | ||
258 | void job_dismiss_locked(Job **job, Error **errp); | ||
259 | |||
260 | /** | ||
261 | @@ -XXX,XX +XXX,XX @@ void job_dismiss_locked(Job **job, Error **errp); | ||
262 | * Returns 0 if the job is successfully completed, -ECANCELED if the job was | ||
263 | * cancelled before completing, and -errno in other error cases. | ||
264 | * | ||
265 | - * Called with job_lock *not* held. | ||
266 | - */ | ||
267 | -int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), | ||
268 | - Error **errp); | ||
269 | - | ||
270 | -/* | ||
271 | - * Same as job_finish_sync(), but called with job lock held. | ||
272 | - * Might release the lock temporarily. | ||
273 | + * Called with job_lock held, but might release it temporarily. | ||
274 | */ | ||
275 | int job_finish_sync_locked(Job *job, void (*finish)(Job *, Error **errp), | ||
276 | Error **errp); | ||
277 | diff --git a/job.c b/job.c | ||
87 | index XXXXXXX..XXXXXXX 100644 | 278 | index XXXXXXX..XXXXXXX 100644 |
88 | --- a/blockjob.c | 279 | --- a/job.c |
89 | +++ b/blockjob.c | 280 | +++ b/job.c |
90 | @@ -XXX,XX +XXX,XX @@ static QemuMutex block_job_mutex; | 281 | @@ -XXX,XX +XXX,XX @@ int job_apply_verb_locked(Job *job, JobVerb verb, Error **errp) |
91 | 282 | return -EPERM; | |
92 | /* BlockJob State Transition Table */ | 283 | } |
93 | bool BlockJobSTT[BLOCK_JOB_STATUS__MAX][BLOCK_JOB_STATUS__MAX] = { | 284 | |
94 | - /* U, C, R, P, Y, S, X, E, N */ | 285 | -int job_apply_verb(Job *job, JobVerb verb, Error **errp) |
95 | - /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0, 0, 0}, | 286 | -{ |
96 | - /* C: */ [BLOCK_JOB_STATUS_CREATED] = {0, 0, 1, 0, 0, 0, 1, 0, 1}, | 287 | - JOB_LOCK_GUARD(); |
97 | - /* R: */ [BLOCK_JOB_STATUS_RUNNING] = {0, 0, 0, 1, 1, 0, 1, 1, 0}, | 288 | - return job_apply_verb_locked(job, verb, errp); |
98 | - /* P: */ [BLOCK_JOB_STATUS_PAUSED] = {0, 0, 1, 0, 0, 0, 0, 0, 0}, | 289 | -} |
99 | - /* Y: */ [BLOCK_JOB_STATUS_READY] = {0, 0, 0, 0, 0, 1, 1, 1, 0}, | 290 | - |
100 | - /* S: */ [BLOCK_JOB_STATUS_STANDBY] = {0, 0, 0, 0, 1, 0, 0, 0, 0}, | 291 | JobType job_type(const Job *job) |
101 | - /* X: */ [BLOCK_JOB_STATUS_ABORTING] = {0, 0, 0, 0, 0, 0, 1, 1, 0}, | 292 | { |
102 | - /* E: */ [BLOCK_JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0, 1}, | 293 | return job->driver->job_type; |
103 | - /* N: */ [BLOCK_JOB_STATUS_NULL] = {0, 0, 0, 0, 0, 0, 0, 0, 0}, | 294 | @@ -XXX,XX +XXX,XX @@ bool job_is_completed_locked(Job *job) |
104 | + /* U, C, R, P, Y, S, W, X, E, N */ | 295 | return false; |
105 | + /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0, 0, 0, 0}, | 296 | } |
106 | + /* C: */ [BLOCK_JOB_STATUS_CREATED] = {0, 0, 1, 0, 0, 0, 0, 1, 0, 1}, | 297 | |
107 | + /* R: */ [BLOCK_JOB_STATUS_RUNNING] = {0, 0, 0, 1, 1, 0, 1, 1, 0, 0}, | 298 | -bool job_is_completed(Job *job) |
108 | + /* P: */ [BLOCK_JOB_STATUS_PAUSED] = {0, 0, 1, 0, 0, 0, 0, 0, 0, 0}, | 299 | +static bool job_is_completed(Job *job) |
109 | + /* Y: */ [BLOCK_JOB_STATUS_READY] = {0, 0, 0, 0, 0, 1, 1, 1, 0, 0}, | 300 | { |
110 | + /* S: */ [BLOCK_JOB_STATUS_STANDBY] = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0}, | 301 | JOB_LOCK_GUARD(); |
111 | + /* W: */ [BLOCK_JOB_STATUS_WAITING] = {0, 0, 0, 0, 0, 0, 0, 1, 1, 0}, | 302 | return job_is_completed_locked(job); |
112 | + /* X: */ [BLOCK_JOB_STATUS_ABORTING] = {0, 0, 0, 0, 0, 0, 0, 1, 1, 0}, | 303 | @@ -XXX,XX +XXX,XX @@ Job *job_get_locked(const char *id) |
113 | + /* E: */ [BLOCK_JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, | 304 | return NULL; |
114 | + /* N: */ [BLOCK_JOB_STATUS_NULL] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, | 305 | } |
306 | |||
307 | -Job *job_get(const char *id) | ||
308 | -{ | ||
309 | - JOB_LOCK_GUARD(); | ||
310 | - return job_get_locked(id); | ||
311 | -} | ||
312 | - | ||
313 | void job_set_aio_context(Job *job, AioContext *ctx) | ||
314 | { | ||
315 | /* protect against read in job_finish_sync_locked and job_start */ | ||
316 | @@ -XXX,XX +XXX,XX @@ void job_ref_locked(Job *job) | ||
317 | ++job->refcnt; | ||
318 | } | ||
319 | |||
320 | -void job_ref(Job *job) | ||
321 | -{ | ||
322 | - JOB_LOCK_GUARD(); | ||
323 | - job_ref_locked(job); | ||
324 | -} | ||
325 | - | ||
326 | void job_unref_locked(Job *job) | ||
327 | { | ||
328 | GLOBAL_STATE_CODE(); | ||
329 | @@ -XXX,XX +XXX,XX @@ void job_unref_locked(Job *job) | ||
330 | } | ||
331 | } | ||
332 | |||
333 | -void job_unref(Job *job) | ||
334 | -{ | ||
335 | - JOB_LOCK_GUARD(); | ||
336 | - job_unref_locked(job); | ||
337 | -} | ||
338 | - | ||
339 | void job_progress_update(Job *job, uint64_t done) | ||
340 | { | ||
341 | progress_work_done(&job->progress, done); | ||
342 | @@ -XXX,XX +XXX,XX @@ void job_enter_cond_locked(Job *job, bool(*fn)(Job *job)) | ||
343 | job_lock(); | ||
344 | } | ||
345 | |||
346 | -void job_enter_cond(Job *job, bool(*fn)(Job *job)) | ||
347 | -{ | ||
348 | - JOB_LOCK_GUARD(); | ||
349 | - job_enter_cond_locked(job, fn); | ||
350 | -} | ||
351 | - | ||
352 | void job_enter(Job *job) | ||
353 | { | ||
354 | JOB_LOCK_GUARD(); | ||
355 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn job_pause_point(Job *job) | ||
356 | job_pause_point_locked(job); | ||
357 | } | ||
358 | |||
359 | -static void coroutine_fn job_yield_locked(Job *job) | ||
360 | +void coroutine_fn job_yield(Job *job) | ||
361 | { | ||
362 | + JOB_LOCK_GUARD(); | ||
363 | assert(job->busy); | ||
364 | |||
365 | /* Check cancellation *before* setting busy = false, too! */ | ||
366 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn job_yield_locked(Job *job) | ||
367 | job_pause_point_locked(job); | ||
368 | } | ||
369 | |||
370 | -void coroutine_fn job_yield(Job *job) | ||
371 | -{ | ||
372 | - JOB_LOCK_GUARD(); | ||
373 | - job_yield_locked(job); | ||
374 | -} | ||
375 | - | ||
376 | void coroutine_fn job_sleep_ns(Job *job, int64_t ns) | ||
377 | { | ||
378 | JOB_LOCK_GUARD(); | ||
379 | @@ -XXX,XX +XXX,XX @@ void job_user_pause_locked(Job *job, Error **errp) | ||
380 | job_pause_locked(job); | ||
381 | } | ||
382 | |||
383 | -void job_user_pause(Job *job, Error **errp) | ||
384 | -{ | ||
385 | - JOB_LOCK_GUARD(); | ||
386 | - job_user_pause_locked(job, errp); | ||
387 | -} | ||
388 | - | ||
389 | bool job_user_paused_locked(Job *job) | ||
390 | { | ||
391 | return job->user_paused; | ||
392 | } | ||
393 | |||
394 | -bool job_user_paused(Job *job) | ||
395 | -{ | ||
396 | - JOB_LOCK_GUARD(); | ||
397 | - return job_user_paused_locked(job); | ||
398 | -} | ||
399 | - | ||
400 | void job_user_resume_locked(Job *job, Error **errp) | ||
401 | { | ||
402 | assert(job); | ||
403 | @@ -XXX,XX +XXX,XX @@ void job_user_resume_locked(Job *job, Error **errp) | ||
404 | job_resume_locked(job); | ||
405 | } | ||
406 | |||
407 | -void job_user_resume(Job *job, Error **errp) | ||
408 | -{ | ||
409 | - JOB_LOCK_GUARD(); | ||
410 | - job_user_resume_locked(job, errp); | ||
411 | -} | ||
412 | - | ||
413 | /* Called with job_mutex held, but releases it temporarily. */ | ||
414 | static void job_do_dismiss_locked(Job *job) | ||
415 | { | ||
416 | @@ -XXX,XX +XXX,XX @@ void job_dismiss_locked(Job **jobptr, Error **errp) | ||
417 | *jobptr = NULL; | ||
418 | } | ||
419 | |||
420 | -void job_dismiss(Job **jobptr, Error **errp) | ||
421 | -{ | ||
422 | - JOB_LOCK_GUARD(); | ||
423 | - job_dismiss_locked(jobptr, errp); | ||
424 | -} | ||
425 | - | ||
426 | void job_early_fail(Job *job) | ||
427 | { | ||
428 | JOB_LOCK_GUARD(); | ||
429 | @@ -XXX,XX +XXX,XX @@ void job_finalize_locked(Job *job, Error **errp) | ||
430 | job_do_finalize_locked(job); | ||
431 | } | ||
432 | |||
433 | -void job_finalize(Job *job, Error **errp) | ||
434 | -{ | ||
435 | - JOB_LOCK_GUARD(); | ||
436 | - job_finalize_locked(job, errp); | ||
437 | -} | ||
438 | - | ||
439 | /* Called with job_mutex held. */ | ||
440 | static int job_transition_to_pending_locked(Job *job) | ||
441 | { | ||
442 | @@ -XXX,XX +XXX,XX @@ void job_cancel_locked(Job *job, bool force) | ||
443 | } | ||
444 | } | ||
445 | |||
446 | -void job_cancel(Job *job, bool force) | ||
447 | -{ | ||
448 | - JOB_LOCK_GUARD(); | ||
449 | - job_cancel_locked(job, force); | ||
450 | -} | ||
451 | - | ||
452 | void job_user_cancel_locked(Job *job, bool force, Error **errp) | ||
453 | { | ||
454 | if (job_apply_verb_locked(job, JOB_VERB_CANCEL, errp)) { | ||
455 | @@ -XXX,XX +XXX,XX @@ void job_user_cancel_locked(Job *job, bool force, Error **errp) | ||
456 | job_cancel_locked(job, force); | ||
457 | } | ||
458 | |||
459 | -void job_user_cancel(Job *job, bool force, Error **errp) | ||
460 | -{ | ||
461 | - JOB_LOCK_GUARD(); | ||
462 | - job_user_cancel_locked(job, force, errp); | ||
463 | -} | ||
464 | - | ||
465 | -/* A wrapper around job_cancel() taking an Error ** parameter so it may be | ||
466 | - * used with job_finish_sync() without the need for (rather nasty) function | ||
467 | - * pointer casts there. | ||
468 | +/* A wrapper around job_cancel_locked() taking an Error ** parameter so it may | ||
469 | + * be used with job_finish_sync_locked() without the need for (rather nasty) | ||
470 | + * function pointer casts there. | ||
471 | * | ||
472 | * Called with job_mutex held. | ||
473 | */ | ||
474 | @@ -XXX,XX +XXX,XX @@ int job_complete_sync_locked(Job *job, Error **errp) | ||
475 | return job_finish_sync_locked(job, job_complete_locked, errp); | ||
476 | } | ||
477 | |||
478 | -int job_complete_sync(Job *job, Error **errp) | ||
479 | -{ | ||
480 | - JOB_LOCK_GUARD(); | ||
481 | - return job_complete_sync_locked(job, errp); | ||
482 | -} | ||
483 | - | ||
484 | void job_complete_locked(Job *job, Error **errp) | ||
485 | { | ||
486 | /* Should not be reachable via external interface for internal jobs */ | ||
487 | @@ -XXX,XX +XXX,XX @@ void job_complete_locked(Job *job, Error **errp) | ||
488 | job_lock(); | ||
489 | } | ||
490 | |||
491 | -void job_complete(Job *job, Error **errp) | ||
492 | -{ | ||
493 | - JOB_LOCK_GUARD(); | ||
494 | - job_complete_locked(job, errp); | ||
495 | -} | ||
496 | - | ||
497 | int job_finish_sync_locked(Job *job, | ||
498 | void (*finish)(Job *, Error **errp), | ||
499 | Error **errp) | ||
500 | @@ -XXX,XX +XXX,XX @@ int job_finish_sync_locked(Job *job, | ||
501 | job_unref_locked(job); | ||
502 | return ret; | ||
503 | } | ||
504 | - | ||
505 | -int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp) | ||
506 | -{ | ||
507 | - JOB_LOCK_GUARD(); | ||
508 | - return job_finish_sync_locked(job, finish, errp); | ||
509 | -} | ||
510 | diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c | ||
511 | index XXXXXXX..XXXXXXX 100644 | ||
512 | --- a/tests/unit/test-blockjob.c | ||
513 | +++ b/tests/unit/test-blockjob.c | ||
514 | @@ -XXX,XX +XXX,XX @@ static const BlockJobDriver test_yielding_driver = { | ||
115 | }; | 515 | }; |
116 | 516 | ||
117 | bool BlockJobVerbTable[BLOCK_JOB_VERB__MAX][BLOCK_JOB_STATUS__MAX] = { | 517 | /* |
118 | - /* U, C, R, P, Y, S, X, E, N */ | 518 | - * Test that job_complete() works even on jobs that are in a paused |
119 | - [BLOCK_JOB_VERB_CANCEL] = {0, 1, 1, 1, 1, 1, 0, 0, 0}, | 519 | + * Test that job_complete_locked() works even on jobs that are in a paused |
120 | - [BLOCK_JOB_VERB_PAUSE] = {0, 1, 1, 1, 1, 1, 0, 0, 0}, | 520 | * state (i.e., STANDBY). |
121 | - [BLOCK_JOB_VERB_RESUME] = {0, 1, 1, 1, 1, 1, 0, 0, 0}, | 521 | * |
122 | - [BLOCK_JOB_VERB_SET_SPEED] = {0, 1, 1, 1, 1, 1, 0, 0, 0}, | 522 | * To do this, run YieldingJob in an IO thread, get it into the READY |
123 | - [BLOCK_JOB_VERB_COMPLETE] = {0, 0, 0, 0, 1, 0, 0, 0, 0}, | 523 | @@ -XXX,XX +XXX,XX @@ static const BlockJobDriver test_yielding_driver = { |
124 | - [BLOCK_JOB_VERB_DISMISS] = {0, 0, 0, 0, 0, 0, 0, 1, 0}, | 524 | * acquire the context so the job will not be entered and will thus |
125 | + /* U, C, R, P, Y, S, W, X, E, N */ | 525 | * remain on STANDBY. |
126 | + [BLOCK_JOB_VERB_CANCEL] = {0, 1, 1, 1, 1, 1, 1, 0, 0, 0}, | 526 | * |
127 | + [BLOCK_JOB_VERB_PAUSE] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0}, | 527 | - * job_complete() should still work without error. |
128 | + [BLOCK_JOB_VERB_RESUME] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0}, | 528 | + * job_complete_locked() should still work without error. |
129 | + [BLOCK_JOB_VERB_SET_SPEED] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0}, | 529 | * |
130 | + [BLOCK_JOB_VERB_COMPLETE] = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0}, | 530 | * Note that on the QMP interface, it is impossible to lock an IO |
131 | + [BLOCK_JOB_VERB_DISMISS] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 0}, | 531 | * thread before a drained section ends. In practice, the |
132 | }; | ||
133 | |||
134 | static void block_job_state_transition(BlockJob *job, BlockJobStatus s1) | ||
135 | @@ -XXX,XX +XXX,XX @@ static void block_job_completed_txn_success(BlockJob *job) | ||
136 | BlockJob *other_job; | ||
137 | int rc = 0; | ||
138 | |||
139 | + block_job_state_transition(job, BLOCK_JOB_STATUS_WAITING); | ||
140 | + | ||
141 | /* | ||
142 | * Successful completion, see if there are other running jobs in this | ||
143 | * txn. | ||
144 | -- | 532 | -- |
145 | 2.13.6 | 533 | 2.37.3 |
146 | |||
1 | The crypto driver used to create the image file in a callback from the | 1 | The field is unused (only ever set, but never read) since commit |
---|---|---|---|
2 | crypto subsystem. If we want to implement .bdrv_co_create, this needs to | 2 | ac9185603. Additionally, the commit message of commit 34fa110e |
3 | go away because that callback will get a reference to an already | 3 | already explained why it's unreliable. Remove it. |
4 | existing block node. | ||
5 | |||
6 | Move the image file creation to block_crypto_create_generic(). | ||
7 | 4 | ||
8 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 5 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
9 | Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> | 6 | Message-Id: <20220923142838.91043-1-kwolf@redhat.com> |
10 | Reviewed-by: Eric Blake <eblake@redhat.com> | 7 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
11 | --- | 8 | --- |
12 | block/crypto.c | 37 +++++++++++++++++-------------------- | 9 | block/file-posix.c | 9 --------- |
13 | 1 file changed, 17 insertions(+), 20 deletions(-) | 10 | 1 file changed, 9 deletions(-) |
14 | 11 | ||
15 | diff --git a/block/crypto.c b/block/crypto.c | 12 | diff --git a/block/file-posix.c b/block/file-posix.c |
16 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/block/crypto.c | 14 | --- a/block/file-posix.c |
18 | +++ b/block/crypto.c | 15 | +++ b/block/file-posix.c |
19 | @@ -XXX,XX +XXX,XX @@ static ssize_t block_crypto_read_func(QCryptoBlock *block, | 16 | @@ -XXX,XX +XXX,XX @@ typedef struct BDRVRawState { |
20 | 17 | ||
21 | 18 | bool has_discard:1; | |
22 | struct BlockCryptoCreateData { | 19 | bool has_write_zeroes:1; |
23 | - const char *filename; | 20 | - bool discard_zeroes:1; |
24 | - QemuOpts *opts; | 21 | bool use_linux_aio:1; |
25 | BlockBackend *blk; | 22 | bool use_linux_io_uring:1; |
26 | uint64_t size; | 23 | int page_cache_inconsistent; /* errno from fdatasync failure */ |
27 | }; | 24 | @@ -XXX,XX +XXX,XX @@ static int raw_open_common(BlockDriverState *bs, QDict *options, |
28 | @@ -XXX,XX +XXX,XX @@ static ssize_t block_crypto_init_func(QCryptoBlock *block, | 25 | ret = -EINVAL; |
29 | Error **errp) | 26 | goto fail; |
30 | { | 27 | } else { |
31 | struct BlockCryptoCreateData *data = opaque; | 28 | - s->discard_zeroes = true; |
32 | - int ret; | 29 | s->has_fallocate = true; |
33 | 30 | } | |
34 | /* User provided size should reflect amount of space made | 31 | } else { |
35 | * available to the guest, so we must take account of that | 32 | @@ -XXX,XX +XXX,XX @@ static int raw_open_common(BlockDriverState *bs, QDict *options, |
36 | * which will be used by the crypto header | ||
37 | */ | ||
38 | - data->size += headerlen; | ||
39 | - | ||
40 | - qemu_opt_set_number(data->opts, BLOCK_OPT_SIZE, data->size, &error_abort); | ||
41 | - ret = bdrv_create_file(data->filename, data->opts, errp); | ||
42 | - if (ret < 0) { | ||
43 | - return -1; | ||
44 | - } | ||
45 | - | ||
46 | - data->blk = blk_new_open(data->filename, NULL, NULL, | ||
47 | - BDRV_O_RDWR | BDRV_O_PROTOCOL, errp); | ||
48 | - if (!data->blk) { | ||
49 | - return -1; | ||
50 | - } | ||
51 | - | ||
52 | - return 0; | ||
53 | + return blk_truncate(data->blk, data->size + headerlen, PREALLOC_MODE_OFF, | ||
54 | + errp); | ||
55 | } | ||
56 | |||
57 | |||
58 | @@ -XXX,XX +XXX,XX @@ static int block_crypto_create_generic(QCryptoBlockFormat format, | ||
59 | struct BlockCryptoCreateData data = { | ||
60 | .size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), | ||
61 | BDRV_SECTOR_SIZE), | ||
62 | - .opts = opts, | ||
63 | - .filename = filename, | ||
64 | }; | ||
65 | QDict *cryptoopts; | ||
66 | |||
67 | + /* Parse options */ | ||
68 | cryptoopts = qemu_opts_to_qdict(opts, NULL); | ||
69 | |||
70 | create_opts = block_crypto_create_opts_init(format, cryptoopts, errp); | ||
71 | @@ -XXX,XX +XXX,XX @@ static int block_crypto_create_generic(QCryptoBlockFormat format, | ||
72 | return -1; | ||
73 | } | 33 | } |
74 | 34 | ||
75 | + /* Create protocol layer */ | 35 | if (S_ISBLK(st.st_mode)) { |
76 | + ret = bdrv_create_file(filename, opts, errp); | 36 | -#ifdef BLKDISCARDZEROES |
77 | + if (ret < 0) { | 37 | - unsigned int arg; |
78 | + return ret; | 38 | - if (ioctl(s->fd, BLKDISCARDZEROES, &arg) == 0 && arg) { |
79 | + } | 39 | - s->discard_zeroes = true; |
80 | + | 40 | - } |
81 | + data.blk = blk_new_open(filename, NULL, NULL, | 41 | -#endif |
82 | + BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, | 42 | #ifdef __linux__ |
83 | + errp); | 43 | /* On Linux 3.10, BLKDISCARD leaves stale data in the page cache. Do |
84 | + if (!data.blk) { | 44 | * not rely on the contents of discarded blocks unless using O_DIRECT. |
85 | + return -EINVAL; | 45 | * Same for BLKZEROOUT. |
86 | + } | 46 | */ |
87 | + | 47 | if (!(bs->open_flags & BDRV_O_NOCACHE)) { |
88 | + /* Create format layer */ | 48 | - s->discard_zeroes = false; |
89 | crypto = qcrypto_block_create(create_opts, NULL, | 49 | s->has_write_zeroes = false; |
90 | block_crypto_init_func, | 50 | } |
91 | block_crypto_write_func, | 51 | #endif |
92 | -- | 52 | -- |
93 | 2.13.6 | 53 | 2.37.3 |
94 | |||