The following changes since commit ec11dc41eec5142b4776db1296972c6323ba5847:

  Merge tag 'pull-misc-2022-05-11' of git://repo.or.cz/qemu/armbru into staging (2022-05-11 09:00:26 -0700)

are available in the Git repository at:

  git://repo.or.cz/qemu/kevin.git tags/for-upstream

for you to fetch changes up to f70625299ecc9ba577c87f3d1d75012c747c7d88:

  qemu-iotests: inline common.config into common.rc (2022-05-12 15:42:49 +0200)

----------------------------------------------------------------
Block layer patches

- coroutine: Fix crashes due to too large pool batch size
- fdc: Prevent end-of-track overrun
- nbd: MULTI_CONN for shared writable exports
- iotests test runner improvements

----------------------------------------------------------------
Daniel P. Berrangé (2):
      tests/qemu-iotests: print intent to run a test in TAP mode
      .gitlab-ci.d: export meson testlog.txt as an artifact

Eric Blake (2):
      qemu-nbd: Pass max connections to blockdev layer
      nbd/server: Allow MULTI_CONN for shared writable exports

Hanna Reitz (1):
      iotests/testrunner: Flush after run_test()

Kevin Wolf (2):
      coroutine: Rename qemu_coroutine_inc/dec_pool_size()
      coroutine: Revert to constant batch size

Paolo Bonzini (1):
      qemu-iotests: inline common.config into common.rc

Philippe Mathieu-Daudé (2):
      hw/block/fdc: Prevent end-of-track overrun (CVE-2021-3507)
      tests/qtest/fdc-test: Add a regression test for CVE-2021-3507

 qapi/block-export.json                           |   8 +-
 docs/interop/nbd.txt                             |   1 +
 docs/tools/qemu-nbd.rst                          |   3 +-
 include/block/nbd.h                              |   5 +-
 include/qemu/coroutine.h                         |   6 +-
 blockdev-nbd.c                                   |  13 +-
 hw/block/fdc.c                                   |   8 ++
 hw/block/virtio-blk.c                            |   6 +-
 nbd/server.c                                     |  10 +-
 qemu-nbd.c                                       |   2 +-
 tests/qtest/fdc-test.c                           |  21 ++++
 util/qemu-coroutine.c                            |  26 ++--
 tests/qemu-iotests/testrunner.py                 |   4 +
 .gitlab-ci.d/buildtest-template.yml              |  12 +-
 MAINTAINERS                                      |   1 +
 tests/qemu-iotests/common.config                 |  41 -------
 tests/qemu-iotests/common.rc                     |  31 +++--
 tests/qemu-iotests/tests/nbd-multiconn           | 145 +++++++++++++++++++++
 tests/qemu-iotests/tests/nbd-multiconn.out       |   5 +
 tests/qemu-iotests/tests/nbd-qemu-allocation.out |   2 +-
 20 files changed, 261 insertions(+), 89 deletions(-)
 delete mode 100644 tests/qemu-iotests/common.config
 create mode 100755 tests/qemu-iotests/tests/nbd-multiconn
 create mode 100644 tests/qemu-iotests/tests/nbd-multiconn.out
It's true that these functions currently affect the batch size in which
coroutines are reused (i.e. moved from the global release pool to the
allocation pool of a specific thread), but this is a bug and will be
fixed in a separate patch.

In fact, the comment in the header file already just promises that it
influences the pool size, so reflect this in the name of the functions.
As a nice side effect, the shorter function name makes some line
wrapping unnecessary.

Cc: qemu-stable@nongnu.org
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-Id: <20220510151020.105528-2-kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/qemu/coroutine.h | 6 +++---
 hw/block/virtio-blk.c    | 6 ++----
 util/qemu-coroutine.c    | 4 ++--
 3 files changed, 7 insertions(+), 9 deletions(-)

diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/coroutine.h
+++ b/include/qemu/coroutine.h
@@ -XXX,XX +XXX,XX @@ void coroutine_fn yield_until_fd_readable(int fd);
 /**
  * Increase coroutine pool size
  */
-void qemu_coroutine_increase_pool_batch_size(unsigned int additional_pool_size);
+void qemu_coroutine_inc_pool_size(unsigned int additional_pool_size);

 /**
- * Devcrease coroutine pool size
+ * Decrease coroutine pool size
  */
-void qemu_coroutine_decrease_pool_batch_size(unsigned int additional_pool_size);
+void qemu_coroutine_dec_pool_size(unsigned int additional_pool_size);

 #include "qemu/lockable.h"

diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
     for (i = 0; i < conf->num_queues; i++) {
         virtio_add_queue(vdev, conf->queue_size, virtio_blk_handle_output);
     }
-    qemu_coroutine_increase_pool_batch_size(conf->num_queues * conf->queue_size
-                                            / 2);
+    qemu_coroutine_inc_pool_size(conf->num_queues * conf->queue_size / 2);
     virtio_blk_data_plane_create(vdev, conf, &s->dataplane, &err);
     if (err != NULL) {
         error_propagate(errp, err);
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_device_unrealize(DeviceState *dev)
     for (i = 0; i < conf->num_queues; i++) {
         virtio_del_queue(vdev, i);
     }
-    qemu_coroutine_decrease_pool_batch_size(conf->num_queues * conf->queue_size
-                                            / 2);
+    qemu_coroutine_dec_pool_size(conf->num_queues * conf->queue_size / 2);
     qemu_del_vm_change_state_handler(s->change);
     blockdev_mark_auto_del(s->blk);
     virtio_cleanup(vdev);
diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c
index XXXXXXX..XXXXXXX 100644
--- a/util/qemu-coroutine.c
+++ b/util/qemu-coroutine.c
@@ -XXX,XX +XXX,XX @@ AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co)
     return co->ctx;
 }

-void qemu_coroutine_increase_pool_batch_size(unsigned int additional_pool_size)
+void qemu_coroutine_inc_pool_size(unsigned int additional_pool_size)
 {
     qatomic_add(&pool_batch_size, additional_pool_size);
 }

-void qemu_coroutine_decrease_pool_batch_size(unsigned int removing_pool_size)
+void qemu_coroutine_dec_pool_size(unsigned int removing_pool_size)
 {
     qatomic_sub(&pool_batch_size, removing_pool_size);
 }
--
2.35.3
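
For a sense of scale, the size adjustment a virtio-blk device requests with
these functions is half of num_queues * queue_size. With hypothetical values
of 8 queues of 256 entries each, the two calls in the hunk above work out to:

    /* Hypothetical numbers, for illustration only; not part of the patch. */
    qemu_coroutine_inc_pool_size(8 * 256 / 2);    /* realize:   grow pool by 1024 */
    qemu_coroutine_dec_pool_size(8 * 256 / 2);    /* unrealize: shrink it again   */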
Commit 4c41c69e changed the way the coroutine pool is sized because for
virtio-blk devices with a large queue size and heavy I/O, it was just
too small and caused coroutines to be deleted and reallocated soon
afterwards. The change made the size dynamic based on the number of
queues and the queue size of virtio-blk devices.

There are two important numbers here: Slightly simplified, when a
coroutine terminates, it is generally stored in the global release pool
up to a certain pool size, and if the pool is full, it is freed.
Conversely, when allocating a new coroutine, the coroutines in the
release pool are reused if the pool already has reached a certain
minimum size (the batch size), otherwise we allocate new coroutines.

The problem after commit 4c41c69e is that it not only increases the
maximum pool size (which is the intended effect), but also the batch
size for reusing coroutines (which is a bug). It means that in cases
with many devices and/or a large queue size (which defaults to the
number of vcpus for virtio-blk-pci), many thousand coroutines could be
sitting in the release pool without being reused.

This is not only a waste of memory and allocations, but it actually
makes the QEMU process likely to hit the vm.max_map_count limit on Linux
because each coroutine requires two mappings (its stack and the guard
page for the stack), causing it to abort() in qemu_alloc_stack() because
when the limit is hit, mprotect() starts to fail with ENOMEM.

In order to fix the problem, change the batch size back to 64 to avoid
uselessly accumulating coroutines in the release pool, but keep the
dynamic maximum pool size so that coroutines aren't freed too early
in heavy I/O scenarios.

Note that this fix doesn't strictly make it impossible to hit the limit,
but this would only happen if most of the coroutines are actually in use
at the same time, not just sitting in a pool. This is the same behaviour
as we already had before commit 4c41c69e. Fully preventing this would
require allowing qemu_coroutine_create() to return an error, but it
doesn't seem to be a scenario that people hit in practice.

Cc: qemu-stable@nongnu.org
Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=2079938
Fixes: 4c41c69e05fe28c0f95f8abd2ebf407e95a4f04b
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-Id: <20220510151020.105528-3-kwolf@redhat.com>
Tested-by: Hiroki Narukawa <hnarukaw@yahoo-corp.jp>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 util/qemu-coroutine.c | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c
index XXXXXXX..XXXXXXX 100644
--- a/util/qemu-coroutine.c
+++ b/util/qemu-coroutine.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/coroutine-tls.h"
 #include "block/aio.h"

-/** Initial batch size is 64, and is increased on demand */
+/**
+ * The minimal batch size is always 64, coroutines from the release_pool are
+ * reused as soon as there are 64 coroutines in it. The maximum pool size starts
+ * with 64 and is increased on demand so that coroutines are not deleted even if
+ * they are not immediately reused.
+ */
 enum {
-    POOL_INITIAL_BATCH_SIZE = 64,
+    POOL_MIN_BATCH_SIZE = 64,
+    POOL_INITIAL_MAX_SIZE = 64,
 };

 /** Free list to speed up creation */
 static QSLIST_HEAD(, Coroutine) release_pool = QSLIST_HEAD_INITIALIZER(pool);
-static unsigned int pool_batch_size = POOL_INITIAL_BATCH_SIZE;
+static unsigned int pool_max_size = POOL_INITIAL_MAX_SIZE;
 static unsigned int release_pool_size;

 typedef QSLIST_HEAD(, Coroutine) CoroutineQSList;
@@ -XXX,XX +XXX,XX @@ Coroutine *qemu_coroutine_create(CoroutineEntry *entry, void *opaque)

         co = QSLIST_FIRST(alloc_pool);
         if (!co) {
-            if (release_pool_size > qatomic_read(&pool_batch_size)) {
+            if (release_pool_size > POOL_MIN_BATCH_SIZE) {
                 /* Slow path; a good place to register the destructor, too.  */
                 Notifier *notifier = get_ptr_coroutine_pool_cleanup_notifier();
                 if (!notifier->notify) {
@@ -XXX,XX +XXX,XX @@ static void coroutine_delete(Coroutine *co)
     co->caller = NULL;

     if (CONFIG_COROUTINE_POOL) {
-        if (release_pool_size < qatomic_read(&pool_batch_size) * 2) {
+        if (release_pool_size < qatomic_read(&pool_max_size) * 2) {
             QSLIST_INSERT_HEAD_ATOMIC(&release_pool, co, pool_next);
             qatomic_inc(&release_pool_size);
             return;
         }
-        if (get_alloc_pool_size() < qatomic_read(&pool_batch_size)) {
+        if (get_alloc_pool_size() < qatomic_read(&pool_max_size)) {
             QSLIST_INSERT_HEAD(get_ptr_alloc_pool(), co, pool_next);
             set_alloc_pool_size(get_alloc_pool_size() + 1);
             return;
@@ -XXX,XX +XXX,XX @@ AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co)

 void qemu_coroutine_inc_pool_size(unsigned int additional_pool_size)
 {
-    qatomic_add(&pool_batch_size, additional_pool_size);
+    qatomic_add(&pool_max_size, additional_pool_size);
 }

 void qemu_coroutine_dec_pool_size(unsigned int removing_pool_size)
 {
-    qatomic_sub(&pool_batch_size, removing_pool_size);
+    qatomic_sub(&pool_max_size, removing_pool_size);
 }
--
2.35.3
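
The sizing policy described in the commit message above boils down to two
independent thresholds. A simplified, self-contained C sketch of the intended
behaviour (invented names, not the actual QEMU implementation):

    enum { POOL_MIN_BATCH_SIZE = 64 };       /* reuse threshold, constant again        */
    static unsigned pool_max_size = 64;      /* grown and shrunk on demand by devices  */
    static unsigned release_pool_size;       /* terminated coroutines parked for reuse */

    /* Allocation path: only reuse a parked coroutine once a full batch exists. */
    static bool reuse_parked_coroutine(void)
    {
        return release_pool_size > POOL_MIN_BATCH_SIZE;
    }

    /* Termination path: park the coroutine while the pool is below its dynamic
     * maximum; beyond that it is freed, which bounds the number of stack mappings. */
    static bool park_terminated_coroutine(void)
    {
        return release_pool_size < pool_max_size;
    }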
From: Hanna Reitz <hreitz@redhat.com>

When stdout is not a terminal, the buffer may not be flushed at each end
of line, so we should flush after each test is done. This is especially
apparent when run by check-block, in two ways:

First, when running make check-block -jX with X > 1, progress indication
was missing, even though testrunner.py does theoretically print each
test's status once it has been run, even in multi-processing mode.
Flushing after each test restores this progress indication.

Second, sometimes make check-block failed altogether, with an error
message that "too few tests [were] run". I presume that's because one
worker process in the job pool did not get to flush its stdout before
the main process exited, and so meson did not get to see that worker's
test results. In any case, by flushing at the end of run_test(), the
problem has disappeared for me.

Signed-off-by: Hanna Reitz <hreitz@redhat.com>
Message-Id: <20220506134215.10086-1-hreitz@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 tests/qemu-iotests/testrunner.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/qemu-iotests/testrunner.py b/tests/qemu-iotests/testrunner.py
index XXXXXXX..XXXXXXX 100644
--- a/tests/qemu-iotests/testrunner.py
+++ b/tests/qemu-iotests/testrunner.py
@@ -XXX,XX +XXX,XX @@ def run_test(self, test: str,
         else:
             print(res.casenotrun)

+        sys.stdout.flush()
         return res

     def run_tests(self, tests: List[str], jobs: int = 1) -> bool:
--
2.35.3
From: Daniel P. Berrangé <berrange@redhat.com>

When running I/O tests using TAP output mode, we get a single TAP test
with a sub-test reported for each I/O test that is run. The output looks
something like this:

  1..123
  ok qcow2 011
  ok qcow2 012
  ok qcow2 013
  ok qcow2 217
  ...

If everything runs or fails normally this is fine, but periodically we
have been seeing the test harness abort early before all 123 tests have
been run, just leaving a fairly useless message like

  TAP parsing error: Too few tests run (expected 123, got 107)

we have no idea which tests were running at the time the test harness
abruptly exited. This change causes us to print a message about our
intent to run each test, so we have a record of what is active at the
time the harness exits abnormally.

  1..123
  # running qcow2 011
  ok qcow2 011
  # running qcow2 012
  ok qcow2 012
  # running qcow2 013
  ok qcow2 013
  # running qcow2 217
  ok qcow2 217
  ...

Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20220509124134.867431-2-berrange@redhat.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 tests/qemu-iotests/testrunner.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tests/qemu-iotests/testrunner.py b/tests/qemu-iotests/testrunner.py
index XXXXXXX..XXXXXXX 100644
--- a/tests/qemu-iotests/testrunner.py
+++ b/tests/qemu-iotests/testrunner.py
@@ -XXX,XX +XXX,XX @@ def run_test(self, test: str,
                               starttime=start,
                               lasttime=last_el,
                               end = '\n' if mp else '\r')
+        else:
+            testname = os.path.basename(test)
+            print(f'# running {self.env.imgfmt} {testname}')

         res = self.do_run_test(test, mp)
--
2.35.3
From: Daniel P. Berrangé <berrange@redhat.com>

When running 'make check' we only get a summary of progress on the
console. Fortunately meson/ninja have saved the raw test output to a
logfile. Exposing this log will make it easier to debug failures that
happen in CI.

Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20220509124134.867431-3-berrange@redhat.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 .gitlab-ci.d/buildtest-template.yml | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/.gitlab-ci.d/buildtest-template.yml b/.gitlab-ci.d/buildtest-template.yml
index XXXXXXX..XXXXXXX 100644
--- a/.gitlab-ci.d/buildtest-template.yml
+++ b/.gitlab-ci.d/buildtest-template.yml
@@ -XXX,XX +XXX,XX @@
       make -j"$JOBS" $MAKE_CHECK_ARGS ;
     fi

-.native_test_job_template:
+.common_test_job_template:
   stage: test
   image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
   script:
@@ -XXX,XX +XXX,XX @@
     # Avoid recompiling by hiding ninja with NINJA=":"
     - make NINJA=":" $MAKE_CHECK_ARGS

+.native_test_job_template:
+  extends: .common_test_job_template
+  artifacts:
+    name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
+    expire_in: 7 days
+    paths:
+      - build/meson-logs/testlog.txt
+
 .avocado_test_job_template:
-  extends: .native_test_job_template
+  extends: .common_test_job_template
   cache:
     key: "${CI_JOB_NAME}-cache"
     paths:
--
2.35.3
1
From: Fam Zheng <famz@redhat.com>
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
2
3
Reopen flags are not synchronized according to the
3
Per the 82078 datasheet, if the end-of-track (EOT byte in
4
bdrv_reopen_queue_child precedence until bdrv_reopen_prepare. It is a
4
the FIFO) is more than the number of sectors per side, the
5
bit too late: we already check the consistency in bdrv_check_perm before
5
command is terminated unsuccessfully:
6
that.
7
6
8
This fixes the bug that when bdrv_reopen a RO node as RW, the flags for
7
* 5.2.5 DATA TRANSFER TERMINATION
9
backing child are wrong. Before, we could recurse with flags.rw=1; now,
10
role->inherit_options + update_flags_from_options will make sure to
11
clear the bit when necessary. Note that this will not clear an
12
explicitly set bit, as in the case of parallel block jobs (e.g.
13
test_stream_parallel in 030), because the explicit options include
14
'read-only=false' (for an intermediate node used by a different job).
15
8
16
Signed-off-by: Fam Zheng <famz@redhat.com>
9
The 82078 supports terminal count explicitly through
17
Reviewed-by: Max Reitz <mreitz@redhat.com>
10
the TC pin and implicitly through the underrun/over-
11
run and end-of-track (EOT) functions. For full sector
12
transfers, the EOT parameter can define the last
13
sector to be transferred in a single or multisector
14
transfer. If the last sector to be transferred is a par-
15
tial sector, the host can stop transferring the data in
16
mid-sector, and the 82078 will continue to complete
17
the sector as if a hardware TC was received. The
18
only difference between these implicit functions and
19
TC is that they return "abnormal termination" result
20
status. Such status indications can be ignored if they
21
were expected.
22
23
* 6.1.3 READ TRACK
24
25
This command terminates when the EOT specified
26
number of sectors have been read. If the 82078
27
does not find an ID Address Mark on the diskette
28
after the second occurrence of a pulse on the
29
INDX# pin, then it sets the IC code in Status Regis-
30
ter 0 to "01" (Abnormal termination), sets the MA bit
31
in Status Register 1 to "1", and terminates the com-
32
mand.
33
34
* 6.1.6 VERIFY
35
36
Refer to Table 6-6 and Table 6-7 for information
37
concerning the values of MT and EC versus SC and
38
EOT value.
39
40
* Table 6-6. Result Phase Table
41
42
* Table 6-7. Verify Command Result Phase Table
43
44
Fix by aborting the transfer when EOT > # Sectors Per Side.
45
46
Cc: qemu-stable@nongnu.org
47
Cc: Hervé Poussineau <hpoussin@reactos.org>
48
Fixes: baca51faff0 ("floppy driver: disk geometry auto detect")
49
Reported-by: Alexander Bulekov <alxndr@bu.edu>
50
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/339
51
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
52
Message-Id: <20211118115733.4038610-2-philmd@redhat.com>
53
Reviewed-by: Hanna Reitz <hreitz@redhat.com>
18
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
54
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
19
---
55
---
20
block.c | 8 ++++++++
56
hw/block/fdc.c | 8 ++++++++
21
1 file changed, 8 insertions(+)
57
1 file changed, 8 insertions(+)
22
58
23
diff --git a/block.c b/block.c
59
diff --git a/hw/block/fdc.c b/hw/block/fdc.c
24
index XXXXXXX..XXXXXXX 100644
60
index XXXXXXX..XXXXXXX 100644
25
--- a/block.c
61
--- a/hw/block/fdc.c
26
+++ b/block.c
62
+++ b/hw/block/fdc.c
27
@@ -XXX,XX +XXX,XX @@ static BlockReopenQueue *bdrv_reopen_queue_child(BlockReopenQueue *bs_queue,
63
@@ -XXX,XX +XXX,XX @@ static void fdctrl_start_transfer(FDCtrl *fdctrl, int direction)
28
64
int tmp;
29
/* Inherit from parent node */
65
fdctrl->data_len = 128 << (fdctrl->fifo[5] > 7 ? 7 : fdctrl->fifo[5]);
30
if (parent_options) {
66
tmp = (fdctrl->fifo[6] - ks + 1);
31
+ QemuOpts *opts;
67
+ if (tmp < 0) {
32
+ QDict *options_copy;
68
+ FLOPPY_DPRINTF("invalid EOT: %d\n", tmp);
33
assert(!flags);
69
+ fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, FD_SR1_MA, 0x00);
34
role->inherit_options(&flags, options, parent_flags, parent_options);
70
+ fdctrl->fifo[3] = kt;
35
+ options_copy = qdict_clone_shallow(options);
71
+ fdctrl->fifo[4] = kh;
36
+ opts = qemu_opts_create(&bdrv_runtime_opts, NULL, 0, &error_abort);
72
+ fdctrl->fifo[5] = ks;
37
+ qemu_opts_absorb_qdict(opts, options_copy, NULL);
73
+ return;
38
+ update_flags_from_options(&flags, opts);
74
+ }
39
+ qemu_opts_del(opts);
75
if (fdctrl->fifo[0] & 0x80)
40
+ QDECREF(options_copy);
76
tmp += fdctrl->fifo[6];
41
}
77
fdctrl->data_len *= tmp;
42
43
/* Old values are used for options that aren't set yet */
44
--
78
--
45
2.13.6
79
2.35.3
46
80
47
81
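The guard added to fdctrl_start_transfer() above is small enough to restate outside of C. A rough Python rendering of the same arithmetic, for illustration only (the names are not QEMU's):

    # Illustrative sketch of the bounds check above: eot is the EOT byte from
    # the FIFO (fifo[6]), ks the starting sector, multi_track the MT bit
    # (fifo[0] & 0x80). Returns None where the device reports abnormal
    # termination (IC code in ST0, MA bit in ST1).
    def transfer_sector_count(eot, ks, multi_track):
        remaining = eot - ks + 1
        if remaining < 0:
            return None
        if multi_track:
            remaining += eot
        return remaining

The per-sector data length is then multiplied by this count, which is how an oversized EOT could previously drive the transfer past the controller's sector buffer (the heap-buffer-overflow reproduced in the next patch).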
1
From: John Snow <jsnow@redhat.com>
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
2
3
Whatever the state a blockjob is in, it should be able to be canceled
3
Add the reproducer from https://gitlab.com/qemu-project/qemu/-/issues/339
4
by the block layer.
5
4
6
Signed-off-by: John Snow <jsnow@redhat.com>
5
Without the previous commit, when running 'make check-qtest-i386'
6
with QEMU configured with '--enable-sanitizers' we get:
7
8
==4028352==ERROR: AddressSanitizer: heap-buffer-overflow on address 0x619000062a00 at pc 0x5626d03c491a bp 0x7ffdb4199410 sp 0x7ffdb4198bc0
9
READ of size 786432 at 0x619000062a00 thread T0
10
#0 0x5626d03c4919 in __asan_memcpy (qemu-system-i386+0x1e65919)
11
#1 0x5626d1c023cc in flatview_write_continue softmmu/physmem.c:2787:13
12
#2 0x5626d1bf0c0f in flatview_write softmmu/physmem.c:2822:14
13
#3 0x5626d1bf0798 in address_space_write softmmu/physmem.c:2914:18
14
#4 0x5626d1bf0f37 in address_space_rw softmmu/physmem.c:2924:16
15
#5 0x5626d1bf14c8 in cpu_physical_memory_rw softmmu/physmem.c:2933:5
16
#6 0x5626d0bd5649 in cpu_physical_memory_write include/exec/cpu-common.h:82:5
17
#7 0x5626d0bd0a07 in i8257_dma_write_memory hw/dma/i8257.c:452:9
18
#8 0x5626d09f825d in fdctrl_transfer_handler hw/block/fdc.c:1616:13
19
#9 0x5626d0a048b4 in fdctrl_start_transfer hw/block/fdc.c:1539:13
20
#10 0x5626d09f4c3e in fdctrl_write_data hw/block/fdc.c:2266:13
21
#11 0x5626d09f22f7 in fdctrl_write hw/block/fdc.c:829:9
22
#12 0x5626d1c20bc5 in portio_write softmmu/ioport.c:207:17
23
24
0x619000062a00 is located 0 bytes to the right of 512-byte region [0x619000062800,0x619000062a00)
25
allocated by thread T0 here:
26
#0 0x5626d03c66ec in posix_memalign (qemu-system-i386+0x1e676ec)
27
#1 0x5626d2b988d4 in qemu_try_memalign util/oslib-posix.c:210:11
28
#2 0x5626d2b98b0c in qemu_memalign util/oslib-posix.c:226:27
29
#3 0x5626d09fbaf0 in fdctrl_realize_common hw/block/fdc.c:2341:20
30
#4 0x5626d0a150ed in isabus_fdc_realize hw/block/fdc-isa.c:113:5
31
#5 0x5626d2367935 in device_set_realized hw/core/qdev.c:531:13
32
33
SUMMARY: AddressSanitizer: heap-buffer-overflow (qemu-system-i386+0x1e65919) in __asan_memcpy
34
Shadow bytes around the buggy address:
35
0x0c32800044f0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
36
0x0c3280004500: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
37
0x0c3280004510: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
38
0x0c3280004520: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
39
0x0c3280004530: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
40
=>0x0c3280004540:[fa]fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
41
0x0c3280004550: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
42
0x0c3280004560: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
43
0x0c3280004570: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
44
0x0c3280004580: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
45
0x0c3280004590: fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd fd
46
Shadow byte legend (one shadow byte represents 8 application bytes):
47
Addressable: 00
48
Heap left redzone: fa
49
Freed heap region: fd
50
==4028352==ABORTING
51
52
[ kwolf: Added snapshot=on to prevent write file lock failure ]
53
54
Reported-by: Alexander Bulekov <alxndr@bu.edu>
55
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
56
Reviewed-by: Alexander Bulekov <alxndr@bu.edu>
7
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
57
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
8
---
58
---
9
tests/test-blockjob.c | 233 +++++++++++++++++++++++++++++++++++++++++++++++++-
59
tests/qtest/fdc-test.c | 21 +++++++++++++++++++++
10
1 file changed, 229 insertions(+), 4 deletions(-)
60
1 file changed, 21 insertions(+)
11
61
12
diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c
62
diff --git a/tests/qtest/fdc-test.c b/tests/qtest/fdc-test.c
13
index XXXXXXX..XXXXXXX 100644
63
index XXXXXXX..XXXXXXX 100644
14
--- a/tests/test-blockjob.c
64
--- a/tests/qtest/fdc-test.c
15
+++ b/tests/test-blockjob.c
65
+++ b/tests/qtest/fdc-test.c
16
@@ -XXX,XX +XXX,XX @@ static void block_job_cb(void *opaque, int ret)
66
@@ -XXX,XX +XXX,XX @@ static void test_cve_2021_20196(void)
17
{
67
qtest_quit(s);
18
}
68
}
19
69
20
-static BlockJob *do_test_id(BlockBackend *blk, const char *id,
70
+static void test_cve_2021_3507(void)
21
- bool should_succeed)
22
+static BlockJob *mk_job(BlockBackend *blk, const char *id,
23
+ const BlockJobDriver *drv, bool should_succeed,
24
+ int flags)
25
{
26
BlockJob *job;
27
Error *errp = NULL;
28
29
- job = block_job_create(id, &test_block_job_driver, NULL, blk_bs(blk),
30
- 0, BLK_PERM_ALL, 0, BLOCK_JOB_DEFAULT, block_job_cb,
31
+ job = block_job_create(id, drv, NULL, blk_bs(blk),
32
+ 0, BLK_PERM_ALL, 0, flags, block_job_cb,
33
NULL, &errp);
34
if (should_succeed) {
35
g_assert_null(errp);
36
@@ -XXX,XX +XXX,XX @@ static BlockJob *do_test_id(BlockBackend *blk, const char *id,
37
return job;
38
}
39
40
+static BlockJob *do_test_id(BlockBackend *blk, const char *id,
41
+ bool should_succeed)
42
+{
71
+{
43
+ return mk_job(blk, id, &test_block_job_driver,
72
+ QTestState *s;
44
+ should_succeed, BLOCK_JOB_DEFAULT);
45
+}
46
+
73
+
47
/* This creates a BlockBackend (optionally with a name) with a
74
+ s = qtest_initf("-nographic -m 32M -nodefaults "
48
* BlockDriverState inserted. */
75
+ "-drive file=%s,format=raw,if=floppy,snapshot=on",
49
static BlockBackend *create_blk(const char *name)
76
+ test_image);
50
@@ -XXX,XX +XXX,XX @@ static void test_job_ids(void)
77
+ qtest_outl(s, 0x9, 0x0a0206);
51
destroy_blk(blk[2]);
78
+ qtest_outw(s, 0x3f4, 0x1600);
52
}
79
+ qtest_outw(s, 0x3f4, 0x0000);
53
80
+ qtest_outw(s, 0x3f4, 0x0000);
54
+typedef struct CancelJob {
81
+ qtest_outw(s, 0x3f4, 0x0000);
55
+ BlockJob common;
82
+ qtest_outw(s, 0x3f4, 0x0200);
56
+ BlockBackend *blk;
83
+ qtest_outw(s, 0x3f4, 0x0200);
57
+ bool should_converge;
84
+ qtest_outw(s, 0x3f4, 0x0000);
58
+ bool should_complete;
85
+ qtest_outw(s, 0x3f4, 0x0000);
59
+ bool completed;
86
+ qtest_outw(s, 0x3f4, 0x0000);
60
+} CancelJob;
87
+ qtest_quit(s);
61
+
62
+static void cancel_job_completed(BlockJob *job, void *opaque)
63
+{
64
+ CancelJob *s = opaque;
65
+ s->completed = true;
66
+ block_job_completed(job, 0);
67
+}
68
+
69
+static void cancel_job_complete(BlockJob *job, Error **errp)
70
+{
71
+ CancelJob *s = container_of(job, CancelJob, common);
72
+ s->should_complete = true;
73
+}
74
+
75
+static void coroutine_fn cancel_job_start(void *opaque)
76
+{
77
+ CancelJob *s = opaque;
78
+
79
+ while (!s->should_complete) {
80
+ if (block_job_is_cancelled(&s->common)) {
81
+ goto defer;
82
+ }
83
+
84
+ if (!s->common.ready && s->should_converge) {
85
+ block_job_event_ready(&s->common);
86
+ }
87
+
88
+ block_job_sleep_ns(&s->common, 100000);
89
+ }
90
+
91
+ defer:
92
+ block_job_defer_to_main_loop(&s->common, cancel_job_completed, s);
93
+}
94
+
95
+static const BlockJobDriver test_cancel_driver = {
96
+ .instance_size = sizeof(CancelJob),
97
+ .start = cancel_job_start,
98
+ .complete = cancel_job_complete,
99
+};
100
+
101
+static CancelJob *create_common(BlockJob **pjob)
102
+{
103
+ BlockBackend *blk;
104
+ BlockJob *job;
105
+ CancelJob *s;
106
+
107
+ blk = create_blk(NULL);
108
+ job = mk_job(blk, "Steve", &test_cancel_driver, true,
109
+ BLOCK_JOB_MANUAL_FINALIZE | BLOCK_JOB_MANUAL_DISMISS);
110
+ block_job_ref(job);
111
+ assert(job->status == BLOCK_JOB_STATUS_CREATED);
112
+ s = container_of(job, CancelJob, common);
113
+ s->blk = blk;
114
+
115
+ *pjob = job;
116
+ return s;
117
+}
118
+
119
+static void cancel_common(CancelJob *s)
120
+{
121
+ BlockJob *job = &s->common;
122
+ BlockBackend *blk = s->blk;
123
+ BlockJobStatus sts = job->status;
124
+
125
+ block_job_cancel_sync(job);
126
+ if ((sts != BLOCK_JOB_STATUS_CREATED) &&
127
+ (sts != BLOCK_JOB_STATUS_CONCLUDED)) {
128
+ BlockJob *dummy = job;
129
+ block_job_dismiss(&dummy, &error_abort);
130
+ }
131
+ assert(job->status == BLOCK_JOB_STATUS_NULL);
132
+ block_job_unref(job);
133
+ destroy_blk(blk);
134
+}
135
+
136
+static void test_cancel_created(void)
137
+{
138
+ BlockJob *job;
139
+ CancelJob *s;
140
+
141
+ s = create_common(&job);
142
+ cancel_common(s);
143
+}
144
+
145
+static void test_cancel_running(void)
146
+{
147
+ BlockJob *job;
148
+ CancelJob *s;
149
+
150
+ s = create_common(&job);
151
+
152
+ block_job_start(job);
153
+ assert(job->status == BLOCK_JOB_STATUS_RUNNING);
154
+
155
+ cancel_common(s);
156
+}
157
+
158
+static void test_cancel_paused(void)
159
+{
160
+ BlockJob *job;
161
+ CancelJob *s;
162
+
163
+ s = create_common(&job);
164
+
165
+ block_job_start(job);
166
+ assert(job->status == BLOCK_JOB_STATUS_RUNNING);
167
+
168
+ block_job_user_pause(job, &error_abort);
169
+ block_job_enter(job);
170
+ assert(job->status == BLOCK_JOB_STATUS_PAUSED);
171
+
172
+ cancel_common(s);
173
+}
174
+
175
+static void test_cancel_ready(void)
176
+{
177
+ BlockJob *job;
178
+ CancelJob *s;
179
+
180
+ s = create_common(&job);
181
+
182
+ block_job_start(job);
183
+ assert(job->status == BLOCK_JOB_STATUS_RUNNING);
184
+
185
+ s->should_converge = true;
186
+ block_job_enter(job);
187
+ assert(job->status == BLOCK_JOB_STATUS_READY);
188
+
189
+ cancel_common(s);
190
+}
191
+
192
+static void test_cancel_standby(void)
193
+{
194
+ BlockJob *job;
195
+ CancelJob *s;
196
+
197
+ s = create_common(&job);
198
+
199
+ block_job_start(job);
200
+ assert(job->status == BLOCK_JOB_STATUS_RUNNING);
201
+
202
+ s->should_converge = true;
203
+ block_job_enter(job);
204
+ assert(job->status == BLOCK_JOB_STATUS_READY);
205
+
206
+ block_job_user_pause(job, &error_abort);
207
+ block_job_enter(job);
208
+ assert(job->status == BLOCK_JOB_STATUS_STANDBY);
209
+
210
+ cancel_common(s);
211
+}
212
+
213
+static void test_cancel_pending(void)
214
+{
215
+ BlockJob *job;
216
+ CancelJob *s;
217
+
218
+ s = create_common(&job);
219
+
220
+ block_job_start(job);
221
+ assert(job->status == BLOCK_JOB_STATUS_RUNNING);
222
+
223
+ s->should_converge = true;
224
+ block_job_enter(job);
225
+ assert(job->status == BLOCK_JOB_STATUS_READY);
226
+
227
+ block_job_complete(job, &error_abort);
228
+ block_job_enter(job);
229
+ while (!s->completed) {
230
+ aio_poll(qemu_get_aio_context(), true);
231
+ }
232
+ assert(job->status == BLOCK_JOB_STATUS_PENDING);
233
+
234
+ cancel_common(s);
235
+}
236
+
237
+static void test_cancel_concluded(void)
238
+{
239
+ BlockJob *job;
240
+ CancelJob *s;
241
+
242
+ s = create_common(&job);
243
+
244
+ block_job_start(job);
245
+ assert(job->status == BLOCK_JOB_STATUS_RUNNING);
246
+
247
+ s->should_converge = true;
248
+ block_job_enter(job);
249
+ assert(job->status == BLOCK_JOB_STATUS_READY);
250
+
251
+ block_job_complete(job, &error_abort);
252
+ block_job_enter(job);
253
+ while (!s->completed) {
254
+ aio_poll(qemu_get_aio_context(), true);
255
+ }
256
+ assert(job->status == BLOCK_JOB_STATUS_PENDING);
257
+
258
+ block_job_finalize(job, &error_abort);
259
+ assert(job->status == BLOCK_JOB_STATUS_CONCLUDED);
260
+
261
+ cancel_common(s);
262
+}
88
+}
263
+
89
+
264
int main(int argc, char **argv)
90
int main(int argc, char **argv)
265
{
91
{
266
qemu_init_main_loop(&error_abort);
92
int fd;
267
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
93
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
268
94
qtest_add_func("/fdc/read_no_dma_19", test_read_no_dma_19);
269
g_test_init(&argc, &argv, NULL);
95
qtest_add_func("/fdc/fuzz-registers", fuzz_registers);
270
g_test_add_func("/blockjob/ids", test_job_ids);
96
qtest_add_func("/fdc/fuzz/cve_2021_20196", test_cve_2021_20196);
271
+ g_test_add_func("/blockjob/cancel/created", test_cancel_created);
97
+ qtest_add_func("/fdc/fuzz/cve_2021_3507", test_cve_2021_3507);
272
+ g_test_add_func("/blockjob/cancel/running", test_cancel_running);
98
273
+ g_test_add_func("/blockjob/cancel/paused", test_cancel_paused);
99
ret = g_test_run();
274
+ g_test_add_func("/blockjob/cancel/ready", test_cancel_ready);
100
275
+ g_test_add_func("/blockjob/cancel/standby", test_cancel_standby);
276
+ g_test_add_func("/blockjob/cancel/pending", test_cancel_pending);
277
+ g_test_add_func("/blockjob/cancel/concluded", test_cancel_concluded);
278
return g_test_run();
279
}
280
--
101
--
281
2.13.6
102
2.35.3
282
103
283
104
1
From: John Snow <jsnow@redhat.com>
1
From: Eric Blake <eblake@redhat.com>
2
2
3
add a new state "CONCLUDED" that identifies a job that has ceased all
3
The next patch wants to adjust whether the NBD server code advertises
4
operations. The wording was chosen to avoid any phrasing that might
4
MULTI_CONN based on whether it is known if the server limits to
5
imply success, error, or cancellation. The task has simply ceased all
5
exactly one client. For a server started by QMP, this information is
6
operation and can never again perform any work.
6
obtained through nbd_server_start (which can support more than one
7
export); but for qemu-nbd (which supports exactly one export), it is
8
controlled only by the command-line option -e/--shared. Since we
9
already have a hook function used by qemu-nbd, it's easiest to just
10
alter its signature to fit our needs.
7
11
8
("finished", "done", and "completed" might all imply success.)
12
Signed-off-by: Eric Blake <eblake@redhat.com>
9
13
Message-Id: <20220512004924.417153-2-eblake@redhat.com>
10
Transitions:
11
Running -> Concluded: normal completion
12
Ready -> Concluded: normal completion
13
Aborting -> Concluded: error and cancellations
14
15
Verbs:
16
None as of this commit. (a future commit adds 'dismiss')
17
18
+---------+
19
|UNDEFINED|
20
+--+------+
21
|
22
+--v----+
23
+---------+CREATED|
24
| +--+----+
25
| |
26
| +--v----+ +------+
27
+---------+RUNNING<----->PAUSED|
28
| +--+-+--+ +------+
29
| | |
30
| | +------------------+
31
| | |
32
| +--v--+ +-------+ |
33
+---------+READY<------->STANDBY| |
34
| +--+--+ +-------+ |
35
| | |
36
+--v-----+ +--v------+ |
37
|ABORTING+--->CONCLUDED<-------------+
38
+--------+ +---------+
39
40
Signed-off-by: John Snow <jsnow@redhat.com>
41
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
14
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
42
---
15
---
43
qapi/block-core.json | 7 +++++--
16
include/block/nbd.h | 2 +-
44
blockjob.c | 39 ++++++++++++++++++++++++---------------
17
blockdev-nbd.c | 8 ++++----
45
2 files changed, 29 insertions(+), 17 deletions(-)
18
qemu-nbd.c | 2 +-
19
3 files changed, 6 insertions(+), 6 deletions(-)
46
20
47
diff --git a/qapi/block-core.json b/qapi/block-core.json
21
diff --git a/include/block/nbd.h b/include/block/nbd.h
48
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
49
--- a/qapi/block-core.json
23
--- a/include/block/nbd.h
50
+++ b/qapi/block-core.json
24
+++ b/include/block/nbd.h
51
@@ -XXX,XX +XXX,XX @@
25
@@ -XXX,XX +XXX,XX @@ void nbd_client_new(QIOChannelSocket *sioc,
52
# The job may return to @ready or otherwise be canceled.
26
void nbd_client_get(NBDClient *client);
53
#
27
void nbd_client_put(NBDClient *client);
54
# @aborting: The job is in the process of being aborted, and will finish with
28
55
-# an error.
29
-void nbd_server_is_qemu_nbd(bool value);
56
+# an error. The job will afterwards report that it is @concluded.
30
+void nbd_server_is_qemu_nbd(int max_connections);
57
# This status may not be visible to the management process.
31
bool nbd_server_is_running(void);
58
#
32
void nbd_server_start(SocketAddress *addr, const char *tls_creds,
59
+# @concluded: The job has finished all work. If manual was set to true, the job
33
const char *tls_authz, uint32_t max_connections,
60
+# will remain in the query list until it is dismissed.
34
diff --git a/blockdev-nbd.c b/blockdev-nbd.c
61
+#
62
# Since: 2.12
63
##
64
{ 'enum': 'BlockJobStatus',
65
'data': ['undefined', 'created', 'running', 'paused', 'ready', 'standby',
66
- 'aborting' ] }
67
+ 'aborting', 'concluded' ] }
68
69
##
70
# @BlockJobInfo:
71
diff --git a/blockjob.c b/blockjob.c
72
index XXXXXXX..XXXXXXX 100644
35
index XXXXXXX..XXXXXXX 100644
73
--- a/blockjob.c
36
--- a/blockdev-nbd.c
74
+++ b/blockjob.c
37
+++ b/blockdev-nbd.c
75
@@ -XXX,XX +XXX,XX @@ static QemuMutex block_job_mutex;
38
@@ -XXX,XX +XXX,XX @@ typedef struct NBDServerData {
76
39
} NBDServerData;
77
/* BlockJob State Transition Table */
40
78
bool BlockJobSTT[BLOCK_JOB_STATUS__MAX][BLOCK_JOB_STATUS__MAX] = {
41
static NBDServerData *nbd_server;
79
- /* U, C, R, P, Y, S, X */
42
-static bool is_qemu_nbd;
80
- /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0},
43
+static int qemu_nbd_connections = -1; /* Non-negative if this is qemu-nbd */
81
- /* C: */ [BLOCK_JOB_STATUS_CREATED] = {0, 0, 1, 0, 0, 0, 1},
44
82
- /* R: */ [BLOCK_JOB_STATUS_RUNNING] = {0, 0, 0, 1, 1, 0, 1},
45
static void nbd_update_server_watch(NBDServerData *s);
83
- /* P: */ [BLOCK_JOB_STATUS_PAUSED] = {0, 0, 1, 0, 0, 0, 0},
46
84
- /* Y: */ [BLOCK_JOB_STATUS_READY] = {0, 0, 0, 0, 0, 1, 1},
47
-void nbd_server_is_qemu_nbd(bool value)
85
- /* S: */ [BLOCK_JOB_STATUS_STANDBY] = {0, 0, 0, 0, 1, 0, 0},
48
+void nbd_server_is_qemu_nbd(int max_connections)
86
- /* X: */ [BLOCK_JOB_STATUS_ABORTING] = {0, 0, 0, 0, 0, 0, 0},
49
{
87
+ /* U, C, R, P, Y, S, X, E */
50
- is_qemu_nbd = value;
88
+ /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0, 0},
51
+ qemu_nbd_connections = max_connections;
89
+ /* C: */ [BLOCK_JOB_STATUS_CREATED] = {0, 0, 1, 0, 0, 0, 1, 0},
90
+ /* R: */ [BLOCK_JOB_STATUS_RUNNING] = {0, 0, 0, 1, 1, 0, 1, 1},
91
+ /* P: */ [BLOCK_JOB_STATUS_PAUSED] = {0, 0, 1, 0, 0, 0, 0, 0},
92
+ /* Y: */ [BLOCK_JOB_STATUS_READY] = {0, 0, 0, 0, 0, 1, 1, 1},
93
+ /* S: */ [BLOCK_JOB_STATUS_STANDBY] = {0, 0, 0, 0, 1, 0, 0, 0},
94
+ /* X: */ [BLOCK_JOB_STATUS_ABORTING] = {0, 0, 0, 0, 0, 0, 0, 1},
95
+ /* E: */ [BLOCK_JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0},
96
};
97
98
bool BlockJobVerbTable[BLOCK_JOB_VERB__MAX][BLOCK_JOB_STATUS__MAX] = {
99
- /* U, C, R, P, Y, S, X */
100
- [BLOCK_JOB_VERB_CANCEL] = {0, 1, 1, 1, 1, 1, 0},
101
- [BLOCK_JOB_VERB_PAUSE] = {0, 1, 1, 1, 1, 1, 0},
102
- [BLOCK_JOB_VERB_RESUME] = {0, 1, 1, 1, 1, 1, 0},
103
- [BLOCK_JOB_VERB_SET_SPEED] = {0, 1, 1, 1, 1, 1, 0},
104
- [BLOCK_JOB_VERB_COMPLETE] = {0, 0, 0, 0, 1, 0, 0},
105
+ /* U, C, R, P, Y, S, X, E */
106
+ [BLOCK_JOB_VERB_CANCEL] = {0, 1, 1, 1, 1, 1, 0, 0},
107
+ [BLOCK_JOB_VERB_PAUSE] = {0, 1, 1, 1, 1, 1, 0, 0},
108
+ [BLOCK_JOB_VERB_RESUME] = {0, 1, 1, 1, 1, 1, 0, 0},
109
+ [BLOCK_JOB_VERB_SET_SPEED] = {0, 1, 1, 1, 1, 1, 0, 0},
110
+ [BLOCK_JOB_VERB_COMPLETE] = {0, 0, 0, 0, 1, 0, 0, 0},
111
};
112
113
static void block_job_state_transition(BlockJob *job, BlockJobStatus s1)
114
@@ -XXX,XX +XXX,XX @@ void block_job_start(BlockJob *job)
115
bdrv_coroutine_enter(blk_bs(job->blk), job->co);
116
}
52
}
117
53
118
+static void block_job_conclude(BlockJob *job)
54
bool nbd_server_is_running(void)
119
+{
120
+ block_job_state_transition(job, BLOCK_JOB_STATUS_CONCLUDED);
121
+}
122
+
123
static void block_job_completed_single(BlockJob *job)
124
{
55
{
125
assert(job->completed);
56
- return nbd_server || is_qemu_nbd;
126
@@ -XXX,XX +XXX,XX @@ static void block_job_completed_single(BlockJob *job)
57
+ return nbd_server || qemu_nbd_connections >= 0;
127
128
QLIST_REMOVE(job, txn_list);
129
block_job_txn_unref(job->txn);
130
+ block_job_conclude(job);
131
block_job_unref(job);
132
}
58
}
133
59
134
@@ -XXX,XX +XXX,XX @@ void block_job_user_resume(BlockJob *job, Error **errp)
60
static void nbd_blockdev_client_closed(NBDClient *client, bool ignored)
135
61
diff --git a/qemu-nbd.c b/qemu-nbd.c
136
void block_job_cancel(BlockJob *job)
62
index XXXXXXX..XXXXXXX 100644
137
{
63
--- a/qemu-nbd.c
138
- if (block_job_started(job)) {
64
+++ b/qemu-nbd.c
139
+ if (job->status == BLOCK_JOB_STATUS_CONCLUDED) {
65
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
140
+ return;
66
141
+ } else if (block_job_started(job)) {
67
bs->detect_zeroes = detect_zeroes;
142
block_job_cancel_async(job);
68
143
block_job_enter(job);
69
- nbd_server_is_qemu_nbd(true);
144
} else {
70
+ nbd_server_is_qemu_nbd(shared);
71
72
export_opts = g_new(BlockExportOptions, 1);
73
*export_opts = (BlockExportOptions) {
145
--
74
--
146
2.13.6
75
2.35.3
147
148
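From a management client's point of view, the new 'concluded' status (and the dismiss step that a later patch in this series adds after it) is visible through query-block-jobs. A rough sketch of the polling loop such a client could run once manual finalization and dismissal are enabled, assuming a hypothetical iotests.VM() handle vm and a job created as 'job0':

    # Illustrative sketch; 'vm' and 'job0' are assumptions, and the verbs used
    # here (block-job-finalize, block-job-dismiss) are the QMP counterparts of
    # the helpers introduced by this series.
    import time

    def drive_job_to_null(vm, job_id='job0'):
        while True:
            jobs = vm.qmp('query-block-jobs')['return']
            job = next((j for j in jobs if j['device'] == job_id), None)
            if job is None:
                return                                    # dismissed: NULL
            if job['status'] == 'pending':
                vm.qmp('block-job-finalize', id=job_id)   # pending -> concluded
            elif job['status'] == 'concluded':
                vm.qmp('block-job-dismiss', id=job_id)    # concluded -> null
            else:
                time.sleep(0.1)                           # still running/ready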
1
From: Eric Blake <eblake@redhat.com>
2
3
According to the NBD spec, a server that advertises
4
NBD_FLAG_CAN_MULTI_CONN promises that multiple client connections will
5
not see any cache inconsistencies: when properly separated by a single
6
flush, actions performed by one client will be visible to another
7
client, regardless of which client did the flush.
8
9
We always satisfy these conditions in qemu - even when we support
10
multiple clients, ALL clients go through a single point of reference
11
into the block layer, with no local caching. The effect of one client
12
is instantly visible to the next client. Even if our backend were a
13
network device, we argue that any multi-path caching effects that
14
would cause inconsistencies in back-to-back actions not seeing the
15
effect of previous actions would be a bug in that backend, and not the
16
fault of caching in qemu. As such, it is safe to unconditionally
17
advertise CAN_MULTI_CONN for any qemu NBD server situation that
18
supports parallel clients.
19
20
Note, however, that we don't want to advertise CAN_MULTI_CONN when we
21
know that a second client cannot connect (for historical reasons,
22
qemu-nbd defaults to a single connection while nbd-server-add and QMP
23
commands default to unlimited connections; but we already have
24
existing means to let either style of NBD server creation alter those
25
defaults). This is visible by no longer advertising MULTI_CONN for
26
'qemu-nbd -r' without -e, as in the iotest nbd-qemu-allocation.
27
28
The harder part of this patch is setting up an iotest to demonstrate
29
behavior of multiple NBD clients to a single server. It might be
30
possible with parallel qemu-io processes, but I found it easier to do
31
in python with the help of libnbd, and help from Nir and Vladimir in
32
writing the test.
33
34
Signed-off-by: Eric Blake <eblake@redhat.com>
35
Suggested-by: Nir Soffer <nsoffer@redhat.com>
36
Suggested-by: Vladimir Sementsov-Ogievskiy <v.sementsov-og@mail.ru>
37
Message-Id: <20220512004924.417153-3-eblake@redhat.com>
1
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
38
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
3
---
39
---
4
tests/qemu-iotests/209 | 210 +++++++++++++++++++++++++++++++++++++++++++
40
qapi/block-export.json | 8 +-
5
tests/qemu-iotests/209.out | 136 ++++++++++++++++++++++++++++
41
docs/interop/nbd.txt | 1 +
6
tests/qemu-iotests/common.rc | 2 +-
42
docs/tools/qemu-nbd.rst | 3 +-
7
tests/qemu-iotests/group | 1 +
43
include/block/nbd.h | 3 +-
8
4 files changed, 348 insertions(+), 1 deletion(-)
44
blockdev-nbd.c | 5 +
9
create mode 100755 tests/qemu-iotests/209
45
nbd/server.c | 10 +-
10
create mode 100644 tests/qemu-iotests/209.out
46
MAINTAINERS | 1 +
11
47
tests/qemu-iotests/tests/nbd-multiconn | 145 ++++++++++++++++++
12
diff --git a/tests/qemu-iotests/209 b/tests/qemu-iotests/209
48
tests/qemu-iotests/tests/nbd-multiconn.out | 5 +
49
.../tests/nbd-qemu-allocation.out | 2 +-
50
10 files changed, 172 insertions(+), 11 deletions(-)
51
create mode 100755 tests/qemu-iotests/tests/nbd-multiconn
52
create mode 100644 tests/qemu-iotests/tests/nbd-multiconn.out
53
54
diff --git a/qapi/block-export.json b/qapi/block-export.json
55
index XXXXXXX..XXXXXXX 100644
56
--- a/qapi/block-export.json
57
+++ b/qapi/block-export.json
58
@@ -XXX,XX +XXX,XX @@
59
# recreated on the fly while the NBD server is active.
60
# If missing, it will default to denying access (since 4.0).
61
# @max-connections: The maximum number of connections to allow at the same
62
-# time, 0 for unlimited. (since 5.2; default: 0)
63
+# time, 0 for unlimited. Setting this to 1 also stops
64
+# the server from advertising multiple client support
65
+# (since 5.2; default: 0)
66
#
67
# Since: 4.2
68
##
69
@@ -XXX,XX +XXX,XX @@
70
# recreated on the fly while the NBD server is active.
71
# If missing, it will default to denying access (since 4.0).
72
# @max-connections: The maximum number of connections to allow at the same
73
-# time, 0 for unlimited. (since 5.2; default: 0)
74
+# time, 0 for unlimited. Setting this to 1 also stops
75
+# the server from advertising multiple client support
76
+# (since 5.2; default: 0).
77
#
78
# Returns: error if the server is already running.
79
#
80
diff --git a/docs/interop/nbd.txt b/docs/interop/nbd.txt
81
index XXXXXXX..XXXXXXX 100644
82
--- a/docs/interop/nbd.txt
83
+++ b/docs/interop/nbd.txt
84
@@ -XXX,XX +XXX,XX @@ NBD_CMD_BLOCK_STATUS for "qemu:dirty-bitmap:", NBD_CMD_CACHE
85
* 4.2: NBD_FLAG_CAN_MULTI_CONN for shareable read-only exports,
86
NBD_CMD_FLAG_FAST_ZERO
87
* 5.2: NBD_CMD_BLOCK_STATUS for "qemu:allocation-depth"
88
+* 7.1: NBD_FLAG_CAN_MULTI_CONN for shareable writable exports
89
diff --git a/docs/tools/qemu-nbd.rst b/docs/tools/qemu-nbd.rst
90
index XXXXXXX..XXXXXXX 100644
91
--- a/docs/tools/qemu-nbd.rst
92
+++ b/docs/tools/qemu-nbd.rst
93
@@ -XXX,XX +XXX,XX @@ driver options if :option:`--image-opts` is specified.
94
.. option:: -e, --shared=NUM
95
96
Allow up to *NUM* clients to share the device (default
97
- ``1``), 0 for unlimited. Safe for readers, but for now,
98
- consistency is not guaranteed between multiple writers.
99
+ ``1``), 0 for unlimited.
100
101
.. option:: -t, --persistent
102
103
diff --git a/include/block/nbd.h b/include/block/nbd.h
104
index XXXXXXX..XXXXXXX 100644
105
--- a/include/block/nbd.h
106
+++ b/include/block/nbd.h
107
@@ -XXX,XX +XXX,XX @@
108
/*
109
- * Copyright (C) 2016-2020 Red Hat, Inc.
110
+ * Copyright (C) 2016-2022 Red Hat, Inc.
111
* Copyright (C) 2005 Anthony Liguori <anthony@codemonkey.ws>
112
*
113
* Network Block Device
114
@@ -XXX,XX +XXX,XX @@ void nbd_client_put(NBDClient *client);
115
116
void nbd_server_is_qemu_nbd(int max_connections);
117
bool nbd_server_is_running(void);
118
+int nbd_server_max_connections(void);
119
void nbd_server_start(SocketAddress *addr, const char *tls_creds,
120
const char *tls_authz, uint32_t max_connections,
121
Error **errp);
122
diff --git a/blockdev-nbd.c b/blockdev-nbd.c
123
index XXXXXXX..XXXXXXX 100644
124
--- a/blockdev-nbd.c
125
+++ b/blockdev-nbd.c
126
@@ -XXX,XX +XXX,XX @@ bool nbd_server_is_running(void)
127
return nbd_server || qemu_nbd_connections >= 0;
128
}
129
130
+int nbd_server_max_connections(void)
131
+{
132
+ return nbd_server ? nbd_server->max_connections : qemu_nbd_connections;
133
+}
134
+
135
static void nbd_blockdev_client_closed(NBDClient *client, bool ignored)
136
{
137
nbd_client_put(client);
138
diff --git a/nbd/server.c b/nbd/server.c
139
index XXXXXXX..XXXXXXX 100644
140
--- a/nbd/server.c
141
+++ b/nbd/server.c
142
@@ -XXX,XX +XXX,XX @@
143
/*
144
- * Copyright (C) 2016-2021 Red Hat, Inc.
145
+ * Copyright (C) 2016-2022 Red Hat, Inc.
146
* Copyright (C) 2005 Anthony Liguori <anthony@codemonkey.ws>
147
*
148
* Network Block Device Server Side
149
@@ -XXX,XX +XXX,XX @@ static int nbd_export_create(BlockExport *blk_exp, BlockExportOptions *exp_args,
150
int64_t size;
151
uint64_t perm, shared_perm;
152
bool readonly = !exp_args->writable;
153
- bool shared = !exp_args->writable;
154
BlockDirtyBitmapOrStrList *bitmaps;
155
size_t i;
156
int ret;
157
@@ -XXX,XX +XXX,XX @@ static int nbd_export_create(BlockExport *blk_exp, BlockExportOptions *exp_args,
158
exp->description = g_strdup(arg->description);
159
exp->nbdflags = (NBD_FLAG_HAS_FLAGS | NBD_FLAG_SEND_FLUSH |
160
NBD_FLAG_SEND_FUA | NBD_FLAG_SEND_CACHE);
161
+
162
+ if (nbd_server_max_connections() != 1) {
163
+ exp->nbdflags |= NBD_FLAG_CAN_MULTI_CONN;
164
+ }
165
if (readonly) {
166
exp->nbdflags |= NBD_FLAG_READ_ONLY;
167
- if (shared) {
168
- exp->nbdflags |= NBD_FLAG_CAN_MULTI_CONN;
169
- }
170
} else {
171
exp->nbdflags |= (NBD_FLAG_SEND_TRIM | NBD_FLAG_SEND_WRITE_ZEROES |
172
NBD_FLAG_SEND_FAST_ZERO);
173
diff --git a/MAINTAINERS b/MAINTAINERS
174
index XXXXXXX..XXXXXXX 100644
175
--- a/MAINTAINERS
176
+++ b/MAINTAINERS
177
@@ -XXX,XX +XXX,XX @@ F: qemu-nbd.*
178
F: blockdev-nbd.c
179
F: docs/interop/nbd.txt
180
F: docs/tools/qemu-nbd.rst
181
+F: tests/qemu-iotests/tests/*nbd*
182
T: git https://repo.or.cz/qemu/ericb.git nbd
183
T: git https://src.openvz.org/scm/~vsementsov/qemu.git nbd
184
185
diff --git a/tests/qemu-iotests/tests/nbd-multiconn b/tests/qemu-iotests/tests/nbd-multiconn
13
new file mode 100755
186
new file mode 100755
14
index XXXXXXX..XXXXXXX
187
index XXXXXXX..XXXXXXX
15
--- /dev/null
188
--- /dev/null
16
+++ b/tests/qemu-iotests/209
189
+++ b/tests/qemu-iotests/tests/nbd-multiconn
17
@@ -XXX,XX +XXX,XX @@
190
@@ -XXX,XX +XXX,XX @@
18
+#!/bin/bash
191
+#!/usr/bin/env python3
19
+#
192
+# group: rw auto quick
20
+# Test luks and file image creation
193
+#
21
+#
194
+# Test cases for NBD multi-conn advertisement
22
+# Copyright (C) 2018 Red Hat, Inc.
195
+#
196
+# Copyright (C) 2022 Red Hat, Inc.
23
+#
197
+#
24
+# This program is free software; you can redistribute it and/or modify
198
+# This program is free software; you can redistribute it and/or modify
25
+# it under the terms of the GNU General Public License as published by
199
+# it under the terms of the GNU General Public License as published by
26
+# the Free Software Foundation; either version 2 of the License, or
200
+# the Free Software Foundation; either version 2 of the License, or
27
+# (at your option) any later version.
201
+# (at your option) any later version.
...
...
31
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
205
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
32
+# GNU General Public License for more details.
206
+# GNU General Public License for more details.
33
+#
207
+#
34
+# You should have received a copy of the GNU General Public License
208
+# You should have received a copy of the GNU General Public License
35
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
209
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
36
+#
210
+
37
+
211
+import os
38
+# creator
212
+from contextlib import contextmanager
39
+owner=kwolf@redhat.com
213
+import iotests
40
+
214
+from iotests import qemu_img_create, qemu_io
41
+seq=`basename $0`
215
+
42
+echo "QA output created by $seq"
216
+
43
+
217
+disk = os.path.join(iotests.test_dir, 'disk')
44
+here=`pwd`
218
+size = '4M'
45
+status=1    # failure is the default!
219
+nbd_sock = os.path.join(iotests.sock_dir, 'nbd_sock')
46
+
220
+nbd_uri = 'nbd+unix:///{}?socket=' + nbd_sock
47
+# get standard environment, filters and checks
221
+
48
+. ./common.rc
222
+
49
+. ./common.filter
223
+@contextmanager
50
+
224
+def open_nbd(export_name):
51
+_supported_fmt luks
225
+ h = nbd.NBD()
52
+_supported_proto file
226
+ try:
53
+_supported_os Linux
227
+ h.connect_uri(nbd_uri.format(export_name))
54
+
228
+ yield h
55
+function do_run_qemu()
229
+ finally:
56
+{
230
+ h.shutdown()
57
+ echo Testing: "$@"
231
+
58
+ $QEMU -nographic -qmp stdio -serial none "$@"
232
+class TestNbdMulticonn(iotests.QMPTestCase):
59
+ echo
233
+ def setUp(self):
60
+}
234
+ qemu_img_create('-f', iotests.imgfmt, disk, size)
61
+
235
+ qemu_io('-c', 'w -P 1 0 2M', '-c', 'w -P 2 2M 2M', disk)
62
+function run_qemu()
236
+
63
+{
237
+ self.vm = iotests.VM()
64
+ do_run_qemu "$@" 2>&1 | _filter_testdir | _filter_qmp \
238
+ self.vm.launch()
65
+ | _filter_qemu | _filter_imgfmt \
239
+ result = self.vm.qmp('blockdev-add', {
66
+ | _filter_actual_image_size
240
+ 'driver': 'qcow2',
67
+}
241
+ 'node-name': 'n',
68
+
242
+ 'file': {'driver': 'file', 'filename': disk}
69
+echo
243
+ })
70
+echo "=== Successful image creation (defaults) ==="
244
+ self.assert_qmp(result, 'return', {})
71
+echo
245
+
72
+
246
+ def tearDown(self):
73
+size=$((128 * 1024 * 1024))
247
+ self.vm.shutdown()
74
+
248
+ os.remove(disk)
75
+run_qemu -object secret,id=keysec0,data="foo" <<EOF
249
+ try:
76
+{ "execute": "qmp_capabilities" }
250
+ os.remove(nbd_sock)
77
+{ "execute": "x-blockdev-create",
251
+ except OSError:
78
+ "arguments": {
252
+ pass
79
+ "driver": "file",
253
+
80
+ "filename": "$TEST_IMG_FILE",
254
+ @contextmanager
81
+ "size": 0
255
+ def run_server(self, max_connections=None):
82
+ }
256
+ args = {
83
+}
257
+ 'addr': {
84
+{ "execute": "blockdev-add",
258
+ 'type': 'unix',
85
+ "arguments": {
259
+ 'data': {'path': nbd_sock}
86
+ "driver": "file",
260
+ }
87
+ "node-name": "imgfile",
261
+ }
88
+ "filename": "$TEST_IMG_FILE"
262
+ if max_connections is not None:
89
+ }
263
+ args['max-connections'] = max_connections
90
+}
264
+
91
+{ "execute": "x-blockdev-create",
265
+ result = self.vm.qmp('nbd-server-start', args)
92
+ "arguments": {
266
+ self.assert_qmp(result, 'return', {})
93
+ "driver": "$IMGFMT",
267
+ yield
94
+ "file": "imgfile",
268
+
95
+ "key-secret": "keysec0",
269
+ result = self.vm.qmp('nbd-server-stop')
96
+ "size": $size,
270
+ self.assert_qmp(result, 'return', {})
97
+ "iter-time": 10
271
+
98
+ }
272
+ def add_export(self, name, writable=None):
99
+}
273
+ args = {
100
+{ "execute": "quit" }
274
+ 'type': 'nbd',
101
+EOF
275
+ 'id': name,
102
+
276
+ 'node-name': 'n',
103
+_img_info --format-specific | _filter_img_info --format-specific
277
+ 'name': name,
104
+
278
+ }
105
+echo
279
+ if writable is not None:
106
+echo "=== Successful image creation (with non-default options) ==="
280
+ args['writable'] = writable
107
+echo
281
+
108
+
282
+ result = self.vm.qmp('block-export-add', args)
109
+# Choose a different size to show that we got a new image
283
+ self.assert_qmp(result, 'return', {})
110
+size=$((64 * 1024 * 1024))
284
+
111
+
285
+ def test_default_settings(self):
112
+run_qemu -object secret,id=keysec0,data="foo" <<EOF
286
+ with self.run_server():
113
+{ "execute": "qmp_capabilities" }
287
+ self.add_export('r')
114
+{ "execute": "x-blockdev-create",
288
+ self.add_export('w', writable=True)
115
+ "arguments": {
289
+ with open_nbd('r') as h:
116
+ "driver": "file",
290
+ self.assertTrue(h.can_multi_conn())
117
+ "filename": "$TEST_IMG_FILE",
291
+ with open_nbd('w') as h:
118
+ "size": 0
292
+ self.assertTrue(h.can_multi_conn())
119
+ }
293
+
120
+}
294
+ def test_limited_connections(self):
121
+{ "execute": "x-blockdev-create",
295
+ with self.run_server(max_connections=1):
122
+ "arguments": {
296
+ self.add_export('r')
123
+ "driver": "$IMGFMT",
297
+ self.add_export('w', writable=True)
124
+ "file": {
298
+ with open_nbd('r') as h:
125
+ "driver": "file",
299
+ self.assertFalse(h.can_multi_conn())
126
+ "filename": "$TEST_IMG_FILE"
300
+ with open_nbd('w') as h:
127
+ },
301
+ self.assertFalse(h.can_multi_conn())
128
+ "size": $size,
302
+
129
+ "key-secret": "keysec0",
303
+ def test_parallel_writes(self):
130
+ "cipher-alg": "twofish-128",
304
+ with self.run_server():
131
+ "cipher-mode": "ctr",
305
+ self.add_export('w', writable=True)
132
+ "ivgen-alg": "plain64",
306
+
133
+ "ivgen-hash-alg": "md5",
307
+ clients = [nbd.NBD() for _ in range(3)]
134
+ "hash-alg": "sha1",
308
+ for c in clients:
135
+ "iter-time": 10
309
+ c.connect_uri(nbd_uri.format('w'))
136
+ }
310
+ self.assertTrue(c.can_multi_conn())
137
+}
311
+
138
+{ "execute": "quit" }
312
+ initial_data = clients[0].pread(1024 * 1024, 0)
139
+EOF
313
+ self.assertEqual(initial_data, b'\x01' * 1024 * 1024)
140
+
314
+
141
+_img_info --format-specific | _filter_img_info --format-specific
315
+ updated_data = b'\x03' * 1024 * 1024
142
+
316
+ clients[1].pwrite(updated_data, 0)
143
+echo
317
+ clients[2].flush()
144
+echo "=== Invalid BlockdevRef ==="
318
+ current_data = clients[0].pread(1024 * 1024, 0)
145
+echo
319
+
146
+
320
+ self.assertEqual(updated_data, current_data)
147
+run_qemu <<EOF
321
+
148
+{ "execute": "qmp_capabilities" }
322
+ for i in range(3):
149
+{ "execute": "x-blockdev-create",
323
+ clients[i].shutdown()
150
+ "arguments": {
324
+
151
+ "driver": "$IMGFMT",
325
+
152
+ "file": "this doesn't exist",
326
+if __name__ == '__main__':
153
+ "size": $size
327
+ try:
154
+ }
328
+ # Easier to use libnbd than to try and set up parallel
155
+}
329
+ # 'qemu-nbd --list' or 'qemu-io' processes, but not all systems
156
+{ "execute": "quit" }
330
+ # have libnbd installed.
157
+EOF
331
+ import nbd # type: ignore
158
+
332
+
159
+echo
333
+ iotests.main(supported_fmts=['qcow2'])
160
+echo "=== Zero size ==="
334
+ except ImportError:
161
+echo
335
+ iotests.notrun('libnbd not installed')
162
+
336
diff --git a/tests/qemu-iotests/tests/nbd-multiconn.out b/tests/qemu-iotests/tests/nbd-multiconn.out
163
+run_qemu -blockdev driver=file,filename="$TEST_IMG_FILE",node-name=node0 \
164
+ -object secret,id=keysec0,data="foo" <<EOF
165
+{ "execute": "qmp_capabilities" }
166
+{ "execute": "x-blockdev-create",
167
+ "arguments": {
168
+ "driver": "$IMGFMT",
169
+ "file": "node0",
170
+ "key-secret": "keysec0",
171
+ "size": 0,
172
+ "iter-time": 10
173
+ }
174
+}
175
+{ "execute": "quit" }
176
+EOF
177
+
178
+_img_info | _filter_img_info
179
+
180
+
181
+echo
182
+echo "=== Invalid sizes ==="
183
+echo
184
+
185
+# TODO Negative image sizes aren't handled correctly, but this is a problem
186
+# with QAPI's implementation of the 'size' type and affects other commands as
187
+# well. Once this is fixed, we may want to add a test case here.
188
+
189
+# 1. 2^64 - 512
190
+# 2. 2^63 = 8 EB (qemu-img enforces image sizes less than this)
191
+# 3. 2^63 - 512 (generally valid, but with the crypto header the file will
192
+# exceed 63 bits)
193
+
194
+run_qemu -blockdev driver=file,filename="$TEST_IMG_FILE",node-name=node0 \
195
+ -object secret,id=keysec0,data="foo" <<EOF
196
+{ "execute": "qmp_capabilities" }
197
+{ "execute": "x-blockdev-create",
198
+ "arguments": {
199
+ "driver": "$IMGFMT",
200
+ "file": "node0",
201
+ "key-secret": "keysec0",
202
+ "size": 18446744073709551104
203
+ }
204
+}
205
+{ "execute": "x-blockdev-create",
206
+ "arguments": {
207
+ "driver": "$IMGFMT",
208
+ "file": "node0",
209
+ "key-secret": "keysec0",
210
+ "size": 9223372036854775808
211
+ }
212
+}
213
+{ "execute": "x-blockdev-create",
214
+ "arguments": {
215
+ "driver": "$IMGFMT",
216
+ "file": "node0",
217
+ "key-secret": "keysec0",
218
+ "size": 9223372036854775296
219
+ }
220
+}
221
+{ "execute": "quit" }
222
+EOF
223
+
224
+# success, all done
225
+echo "*** done"
226
+rm -f $seq.full
227
+status=0
228
diff --git a/tests/qemu-iotests/209.out b/tests/qemu-iotests/209.out
229
new file mode 100644
337
new file mode 100644
230
index XXXXXXX..XXXXXXX
338
index XXXXXXX..XXXXXXX
231
--- /dev/null
339
--- /dev/null
232
+++ b/tests/qemu-iotests/209.out
340
+++ b/tests/qemu-iotests/tests/nbd-multiconn.out
233
@@ -XXX,XX +XXX,XX @@
341
@@ -XXX,XX +XXX,XX @@
234
+QA output created by 209
342
+...
235
+
343
+----------------------------------------------------------------------
236
+=== Successful image creation (defaults) ===
344
+Ran 3 tests
237
+
345
+
238
+Testing: -object secret,id=keysec0,data=foo
346
+OK
239
+QMP_VERSION
347
diff --git a/tests/qemu-iotests/tests/nbd-qemu-allocation.out b/tests/qemu-iotests/tests/nbd-qemu-allocation.out
240
+{"return": {}}
348
index XXXXXXX..XXXXXXX 100644
241
+{"return": {}}
349
--- a/tests/qemu-iotests/tests/nbd-qemu-allocation.out
242
+{"return": {}}
350
+++ b/tests/qemu-iotests/tests/nbd-qemu-allocation.out
243
+{"return": {}}
351
@@ -XXX,XX +XXX,XX @@ wrote 2097152/2097152 bytes at offset 1048576
244
+{"return": {}}
352
exports available: 1
245
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
353
export: ''
246
+
354
size: 4194304
247
+image: json:{"driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/t.IMGFMT"}, "key-secret": "keysec0"}
355
- flags: 0x58f ( readonly flush fua df multi cache )
248
+file format: IMGFMT
356
+ flags: 0x48f ( readonly flush fua df cache )
249
+virtual size: 128M (134217728 bytes)
357
min block: 1
250
+Format specific information:
358
opt block: 4096
251
+ ivgen alg: plain64
359
max block: 33554432
252
+ hash alg: sha256
253
+ cipher alg: aes-256
254
+ uuid: 00000000-0000-0000-0000-000000000000
255
+ cipher mode: xts
256
+ slots:
257
+ [0]:
258
+ active: true
259
+ iters: 1024
260
+ key offset: 4096
261
+ stripes: 4000
262
+ [1]:
263
+ active: false
264
+ key offset: 262144
265
+ [2]:
266
+ active: false
267
+ key offset: 520192
268
+ [3]:
269
+ active: false
270
+ key offset: 778240
271
+ [4]:
272
+ active: false
273
+ key offset: 1036288
274
+ [5]:
275
+ active: false
276
+ key offset: 1294336
277
+ [6]:
278
+ active: false
279
+ key offset: 1552384
280
+ [7]:
281
+ active: false
282
+ key offset: 1810432
283
+ payload offset: 2068480
284
+ master key iters: 1024
285
+
286
+=== Successful image creation (with non-default options) ===
287
+
288
+Testing: -object secret,id=keysec0,data=foo
289
+QMP_VERSION
290
+{"return": {}}
291
+{"return": {}}
292
+{"return": {}}
293
+{"return": {}}
294
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
295
+
296
+image: json:{"driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/t.IMGFMT"}, "key-secret": "keysec0"}
297
+file format: IMGFMT
298
+virtual size: 64M (67108864 bytes)
299
+Format specific information:
300
+ ivgen alg: plain64
301
+ hash alg: sha1
302
+ cipher alg: twofish-128
303
+ uuid: 00000000-0000-0000-0000-000000000000
304
+ cipher mode: ctr
305
+ slots:
306
+ [0]:
307
+ active: true
308
+ iters: 1024
309
+ key offset: 4096
310
+ stripes: 4000
311
+ [1]:
312
+ active: false
313
+ key offset: 69632
314
+ [2]:
315
+ active: false
316
+ key offset: 135168
317
+ [3]:
318
+ active: false
319
+ key offset: 200704
320
+ [4]:
321
+ active: false
322
+ key offset: 266240
323
+ [5]:
324
+ active: false
325
+ key offset: 331776
326
+ [6]:
327
+ active: false
328
+ key offset: 397312
329
+ [7]:
330
+ active: false
331
+ key offset: 462848
332
+ payload offset: 528384
333
+ master key iters: 1024
334
+
335
+=== Invalid BlockdevRef ===
336
+
337
+Testing:
338
+QMP_VERSION
339
+{"return": {}}
340
+{"error": {"class": "GenericError", "desc": "Cannot find device=this doesn't exist nor node_name=this doesn't exist"}}
341
+{"return": {}}
342
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
343
+
344
+
345
+=== Zero size ===
346
+
347
+Testing: -blockdev driver=file,filename=TEST_DIR/t.IMGFMT,node-name=node0 -object secret,id=keysec0,data=foo
348
+QMP_VERSION
349
+{"return": {}}
350
+{"return": {}}
351
+{"return": {}}
352
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
353
+
354
+image: json:{"driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/t.IMGFMT"}, "key-secret": "keysec0"}
355
+file format: IMGFMT
356
+virtual size: 0 (0 bytes)
357
+
358
+=== Invalid sizes ===
359
+
360
+Testing: -blockdev driver=file,filename=TEST_DIR/t.IMGFMT,node-name=node0 -object secret,id=keysec0,data=foo
361
+QMP_VERSION
362
+{"return": {}}
363
+{"error": {"class": "GenericError", "desc": "The requested file size is too large"}}
364
+{"error": {"class": "GenericError", "desc": "The requested file size is too large"}}
365
+{"error": {"class": "GenericError", "desc": "The requested file size is too large"}}
366
+{"return": {}}
367
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
368
+
369
+*** done
370
diff --git a/tests/qemu-iotests/common.rc b/tests/qemu-iotests/common.rc
371
index XXXXXXX..XXXXXXX 100644
372
--- a/tests/qemu-iotests/common.rc
373
+++ b/tests/qemu-iotests/common.rc
374
@@ -XXX,XX +XXX,XX @@ _img_info()
375
376
discard=0
377
regex_json_spec_start='^ *"format-specific": \{'
378
- $QEMU_IMG info "$@" "$TEST_IMG" 2>&1 | \
379
+ $QEMU_IMG info $QEMU_IMG_EXTRA_ARGS "$@" "$TEST_IMG" 2>&1 | \
380
sed -e "s#$IMGPROTO:$TEST_DIR#TEST_DIR#g" \
381
-e "s#$TEST_DIR#TEST_DIR#g" \
382
-e "s#$IMGFMT#IMGFMT#g" \
383
diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group
384
index XXXXXXX..XXXXXXX 100644
385
--- a/tests/qemu-iotests/group
386
+++ b/tests/qemu-iotests/group
387
@@ -XXX,XX +XXX,XX @@
388
205 rw auto quick
389
206 rw auto
390
207 rw auto
391
+209 rw auto
392
--
360
--
393
2.13.6
361
2.35.3
394
395
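On the client side, whether an export now advertises multi-conn can be checked with the same libnbd calls the nbd-multiconn test above relies on. A minimal stand-alone probe, assuming libnbd's Python binding is installed and the URI points at a real export:

    # Illustrative sketch reusing the libnbd calls from the test above; the URI
    # is a placeholder for a qemu-nbd or nbd-server-start export.
    import nbd

    def advertises_multi_conn(uri):
        h = nbd.NBD()
        h.connect_uri(uri)
        try:
            return h.can_multi_conn()
        finally:
            h.shutdown()

    # e.g. advertises_multi_conn('nbd+unix:///w?socket=/tmp/nbd_sock')

With the patch applied this returns True for writable exports as well, unless the server is limited to a single client (qemu-nbd without -e, or max-connections=1).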
1
From: John Snow <jsnow@redhat.com>
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
model all independent jobs as single job transactions.
3
common.rc has some complicated logic to find the common.config that
4
dates back to xfstests and is completely unnecessary now. Just include
5
the contents of the file.
4
6
5
It's one less case we have to worry about when we add more states to the
7
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
6
transition machine. This way, we can just treat all job lifetimes exactly
8
Message-Id: <20220505094723.732116-1-pbonzini@redhat.com>
7
the same. This helps tighten assertions of the STM graph and removes some
8
conditionals that would have been needed in the coming commits adding a
9
more explicit job lifetime management API.
10
11
Signed-off-by: John Snow <jsnow@redhat.com>
12
Reviewed-by: Eric Blake <eblake@redhat.com>
13
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
14
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
9
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
15
---
10
---
16
include/block/blockjob.h | 1 -
11
tests/qemu-iotests/common.config | 41 --------------------------------
17
include/block/blockjob_int.h | 3 ++-
12
tests/qemu-iotests/common.rc | 31 ++++++++++++++----------
18
block/backup.c | 3 +--
13
2 files changed, 19 insertions(+), 53 deletions(-)
19
block/commit.c | 2 +-
14
delete mode 100644 tests/qemu-iotests/common.config
20
block/mirror.c | 2 +-
21
block/stream.c | 2 +-
22
blockjob.c | 25 ++++++++++++++++---------
23
tests/test-bdrv-drain.c | 4 ++--
24
tests/test-blockjob-txn.c | 19 +++++++------------
25
tests/test-blockjob.c | 2 +-
26
10 files changed, 32 insertions(+), 31 deletions(-)
27
15
28
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
16
diff --git a/tests/qemu-iotests/common.config b/tests/qemu-iotests/common.config
17
deleted file mode 100644
18
index XXXXXXX..XXXXXXX
19
--- a/tests/qemu-iotests/common.config
20
+++ /dev/null
21
@@ -XXX,XX +XXX,XX @@
22
-#!/usr/bin/env bash
23
-#
24
-# Copyright (C) 2009 Red Hat, Inc.
25
-# Copyright (c) 2000-2003,2006 Silicon Graphics, Inc. All Rights Reserved.
26
-#
27
-# This program is free software; you can redistribute it and/or
28
-# modify it under the terms of the GNU General Public License as
29
-# published by the Free Software Foundation.
30
-#
31
-# This program is distributed in the hope that it would be useful,
32
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
33
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
34
-# GNU General Public License for more details.
35
-#
36
-# You should have received a copy of the GNU General Public License
37
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
38
-#
39
-# all tests should use a common language setting to prevent golden
40
-# output mismatches.
41
-export LANG=C
42
-
43
-PATH=".:$PATH"
44
-
45
-HOSTOS=$(uname -s)
46
-arch=$(uname -m)
47
-[[ "$arch" =~ "ppc64" ]] && qemu_arch=ppc64 || qemu_arch="$arch"
48
-
49
-# make sure we have a standard umask
50
-umask 022
51
-
52
-_optstr_add()
53
-{
54
- if [ -n "$1" ]; then
55
- echo "$1,$2"
56
- else
57
- echo "$2"
58
- fi
59
-}
60
-
61
-# make sure this script returns success
62
-true
63
diff --git a/tests/qemu-iotests/common.rc b/tests/qemu-iotests/common.rc
29
index XXXXXXX..XXXXXXX 100644
64
index XXXXXXX..XXXXXXX 100644
30
--- a/include/block/blockjob.h
65
--- a/tests/qemu-iotests/common.rc
31
+++ b/include/block/blockjob.h
66
+++ b/tests/qemu-iotests/common.rc
32
@@ -XXX,XX +XXX,XX @@ typedef struct BlockJob {
67
@@ -XXX,XX +XXX,XX @@
33
*/
68
# along with this program. If not, see <http://www.gnu.org/licenses/>.
34
QEMUTimer sleep_timer;
69
#
35
70
36
- /** Non-NULL if this job is part of a transaction */
71
+export LANG=C
37
BlockJobTxn *txn;
72
+
38
QLIST_ENTRY(BlockJob) txn_list;
73
+PATH=".:$PATH"
39
} BlockJob;
74
+
40
diff --git a/include/block/blockjob_int.h b/include/block/blockjob_int.h
75
+HOSTOS=$(uname -s)
41
index XXXXXXX..XXXXXXX 100644
76
+arch=$(uname -m)
42
--- a/include/block/blockjob_int.h
77
+[[ "$arch" =~ "ppc64" ]] && qemu_arch=ppc64 || qemu_arch="$arch"
43
+++ b/include/block/blockjob_int.h
78
+
44
@@ -XXX,XX +XXX,XX @@ struct BlockJobDriver {
79
+# make sure we have a standard umask
45
* @job_id: The id of the newly-created job, or %NULL to have one
80
+umask 022
46
* generated automatically.
81
+
47
* @job_type: The class object for the newly-created job.
82
# bail out, setting up .notrun file
48
+ * @txn: The transaction this job belongs to, if any. %NULL otherwise.
83
_notrun()
49
* @bs: The block
84
{
50
 * @perm, @shared_perm: Permissions to request for @bs
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
@@ -XXX,XX +XXX,XX @@ struct BlockJobDriver {
 * called from a wrapper that is specific to the job type.
 */
 void *block_job_create(const char *job_id, const BlockJobDriver *driver,
-                       BlockDriverState *bs, uint64_t perm,
+                       BlockJobTxn *txn, BlockDriverState *bs, uint64_t perm,
                        uint64_t shared_perm, int64_t speed, int flags,
                        BlockCompletionFunc *cb, void *opaque, Error **errp);

diff --git a/block/backup.c b/block/backup.c
index XXXXXXX..XXXXXXX 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -XXX,XX +XXX,XX @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
     }

     /* job->common.len is fixed, so we can't allow resize */
-    job = block_job_create(job_id, &backup_job_driver, bs,
+    job = block_job_create(job_id, &backup_job_driver, txn, bs,
                            BLK_PERM_CONSISTENT_READ,
                            BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
                            BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD,
@@ -XXX,XX +XXX,XX @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
     block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
                        &error_abort);
     job->common.len = len;
-    block_job_txn_add_job(txn, &job->common);

     return &job->common;

diff --git a/block/commit.c b/block/commit.c
index XXXXXXX..XXXXXXX 100644
--- a/block/commit.c
+++ b/block/commit.c
@@ -XXX,XX +XXX,XX @@ void commit_start(const char *job_id, BlockDriverState *bs,
         return;
     }

-    s = block_job_create(job_id, &commit_job_driver, bs, 0, BLK_PERM_ALL,
+    s = block_job_create(job_id, &commit_job_driver, NULL, bs, 0, BLK_PERM_ALL,
                          speed, BLOCK_JOB_DEFAULT, NULL, NULL, errp);
     if (!s) {
         return;
diff --git a/block/mirror.c b/block/mirror.c
index XXXXXXX..XXXXXXX 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -XXX,XX +XXX,XX @@ static void mirror_start_job(const char *job_id, BlockDriverState *bs,
     }

     /* Make sure that the source is not resized while the job is running */
-    s = block_job_create(job_id, driver, mirror_top_bs,
+    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
                          BLK_PERM_CONSISTENT_READ,
                          BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                          BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
diff --git a/block/stream.c b/block/stream.c
index XXXXXXX..XXXXXXX 100644
--- a/block/stream.c
+++ b/block/stream.c
@@ -XXX,XX +XXX,XX @@ void stream_start(const char *job_id, BlockDriverState *bs,
     /* Prevent concurrent jobs trying to modify the graph structure here, we
      * already have our own plans. Also don't allow resize as the image size is
      * queried only at the job start and then cached. */
-    s = block_job_create(job_id, &stream_job_driver, bs,
+    s = block_job_create(job_id, &stream_job_driver, NULL, bs,
                          BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                          BLK_PERM_GRAPH_MOD,
                          BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
diff --git a/blockjob.c b/blockjob.c
index XXXXXXX..XXXXXXX 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -XXX,XX +XXX,XX @@ static void block_job_completed_single(BlockJob *job)
         }
     }

-    if (job->txn) {
-        QLIST_REMOVE(job, txn_list);
-        block_job_txn_unref(job->txn);
-    }
+    QLIST_REMOVE(job, txn_list);
+    block_job_txn_unref(job->txn);
     block_job_unref(job);
 }

@@ -XXX,XX +XXX,XX @@ static void block_job_event_completed(BlockJob *job, const char *msg)
  */

 void *block_job_create(const char *job_id, const BlockJobDriver *driver,
-                       BlockDriverState *bs, uint64_t perm,
+                       BlockJobTxn *txn, BlockDriverState *bs, uint64_t perm,
                        uint64_t shared_perm, int64_t speed, int flags,
                        BlockCompletionFunc *cb, void *opaque, Error **errp)
 {
@@ -XXX,XX +XXX,XX @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
         return NULL;
     }
 }
+
+    /* Single jobs are modeled as single-job transactions for sake of
+     * consolidating the job management logic */
+    if (!txn) {
+        txn = block_job_txn_new();
+        block_job_txn_add_job(txn, job);
+        block_job_txn_unref(txn);
+    } else {
+        block_job_txn_add_job(txn, job);
+    }
+
     return job;
 }

@@ -XXX,XX +XXX,XX @@ void block_job_early_fail(BlockJob *job)

 void block_job_completed(BlockJob *job, int ret)
 {
+    assert(job && job->txn && !job->completed);
     assert(blk_bs(job->blk)->job == job);
-    assert(!job->completed);
     job->completed = true;
     job->ret = ret;
-    if (!job->txn) {
-        block_job_completed_single(job);
-    } else if (ret < 0 || block_job_is_cancelled(job)) {
+    if (ret < 0 || block_job_is_cancelled(job)) {
         block_job_completed_txn_abort(job);
     } else {
         block_job_completed_txn_success(job);
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/test-bdrv-drain.c
+++ b/tests/test-bdrv-drain.c
@@ -XXX,XX +XXX,XX @@ static void test_blockjob_common(enum drain_type drain_type)
     blk_target = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
     blk_insert_bs(blk_target, target, &error_abort);

-    job = block_job_create("job0", &test_job_driver, src, 0, BLK_PERM_ALL, 0,
-                           0, NULL, NULL, &error_abort);
+    job = block_job_create("job0", &test_job_driver, NULL, src, 0, BLK_PERM_ALL,
+                           0, 0, NULL, NULL, &error_abort);
     block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
     block_job_start(job);

diff --git a/tests/test-blockjob-txn.c b/tests/test-blockjob-txn.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/test-blockjob-txn.c
+++ b/tests/test-blockjob-txn.c
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver test_block_job_driver = {
  */
 static BlockJob *test_block_job_start(unsigned int iterations,
                                       bool use_timer,
-                                      int rc, int *result)
+                                      int rc, int *result, BlockJobTxn *txn)
 {
     BlockDriverState *bs;
     TestBlockJob *s;
@@ -XXX,XX +XXX,XX @@ static BlockJob *test_block_job_start(unsigned int iterations,
     g_assert_nonnull(bs);

     snprintf(job_id, sizeof(job_id), "job%u", counter++);
-    s = block_job_create(job_id, &test_block_job_driver, bs,
+    s = block_job_create(job_id, &test_block_job_driver, txn, bs,
                          0, BLK_PERM_ALL, 0, BLOCK_JOB_DEFAULT,
                          test_block_job_cb, data, &error_abort);
     s->iterations = iterations;
@@ -XXX,XX +XXX,XX @@ static void test_single_job(int expected)
     int result = -EINPROGRESS;

     txn = block_job_txn_new();
-    job = test_block_job_start(1, true, expected, &result);
-    block_job_txn_add_job(txn, job);
+    job = test_block_job_start(1, true, expected, &result, txn);
     block_job_start(job);

     if (expected == -ECANCELED) {
@@ -XXX,XX +XXX,XX @@ static void test_pair_jobs(int expected1, int expected2)
     int result2 = -EINPROGRESS;

     txn = block_job_txn_new();
-    job1 = test_block_job_start(1, true, expected1, &result1);
-    block_job_txn_add_job(txn, job1);
-    job2 = test_block_job_start(2, true, expected2, &result2);
-    block_job_txn_add_job(txn, job2);
+    job1 = test_block_job_start(1, true, expected1, &result1, txn);
+    job2 = test_block_job_start(2, true, expected2, &result2, txn);
     block_job_start(job1);
     block_job_start(job2);

@@ -XXX,XX +XXX,XX @@ static void test_pair_jobs_fail_cancel_race(void)
     int result2 = -EINPROGRESS;

     txn = block_job_txn_new();
-    job1 = test_block_job_start(1, true, -ECANCELED, &result1);
-    block_job_txn_add_job(txn, job1);
-    job2 = test_block_job_start(2, false, 0, &result2);
-    block_job_txn_add_job(txn, job2);
+    job1 = test_block_job_start(1, true, -ECANCELED, &result1, txn);
+    job2 = test_block_job_start(2, false, 0, &result2, txn);
     block_job_start(job1);
     block_job_start(job2);

diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/test-blockjob.c
+++ b/tests/test-blockjob.c
@@ -XXX,XX +XXX,XX @@ static BlockJob *do_test_id(BlockBackend *blk, const char *id,
     BlockJob *job;
     Error *errp = NULL;

-    job = block_job_create(id, &test_block_job_driver, blk_bs(blk),
+    job = block_job_create(id, &test_block_job_driver, NULL, blk_bs(blk),
                            0, BLK_PERM_ALL, 0, BLOCK_JOB_DEFAULT, block_job_cb,
                            NULL, &errp);
     if (should_succeed) {
-- 
2.13.6

[Interleaved fragment of a second message, "qemu-iotests: inline common.config
into common.rc"; the recoverable part of its common.rc hunk is:]

@@ -XXX,XX +XXX,XX @@ peek_file_raw()
     dd if="$1" bs=1 skip="$2" count="$3" status=none
 }

-config=common.config
-test -f $config || config=../common.config
-if ! test -f $config
-then
-    echo "$0: failed to find common.config"
-    exit 1
-fi
-if ! . $config
-    then
-        echo "$0: failed to source common.config"
-        exit 1
-fi
+_optstr_add()
+{
+    if [ -n "$1" ]; then
+        echo "$1,$2"
+    else
+        echo "$2"
+    fi
+}

 # Set the variables to the empty string to turn Valgrind off
 # for specific processes, e.g.
-- 
2.35.3
From: John Snow <jsnow@redhat.com>

Trivial; Document what the job creation flags do,
and some general tidying.

Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/block/blockjob.h     | 8 ++++----
 include/block/blockjob_int.h | 4 +++-
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/include/block/blockjob.h b/include/block/blockjob.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/blockjob.h
+++ b/include/block/blockjob.h
@@ -XXX,XX +XXX,XX @@ typedef struct BlockJob {
     /** Reference count of the block job */
     int refcnt;

-    /* True if this job has reported completion by calling block_job_completed.
-     */
+    /** True when job has reported completion by calling block_job_completed. */
     bool completed;

-    /* ret code passed to block_job_completed.
-     */
+    /** ret code passed to block_job_completed. */
     int ret;

     /**
@@ -XXX,XX +XXX,XX @@ typedef struct BlockJob {
 } BlockJob;

 typedef enum BlockJobCreateFlags {
+    /* Default behavior */
     BLOCK_JOB_DEFAULT = 0x00,
+    /* BlockJob is not QMP-created and should not send QMP events */
     BLOCK_JOB_INTERNAL = 0x01,
 } BlockJobCreateFlags;

diff --git a/include/block/blockjob_int.h b/include/block/blockjob_int.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/blockjob_int.h
+++ b/include/block/blockjob_int.h
@@ -XXX,XX +XXX,XX @@ struct BlockJobDriver {
 * block_job_create:
 * @job_id: The id of the newly-created job, or %NULL to have one
 * generated automatically.
- * @job_type: The class object for the newly-created job.
+ * @driver: The class object for the newly-created job.
 * @txn: The transaction this job belongs to, if any. %NULL otherwise.
 * @bs: The block
 * @perm, @shared_perm: Permissions to request for @bs
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
+ * @flags: Creation flags for the Block Job.
+ *         See @BlockJobCreateFlags
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
-- 
2.13.6
1
From: John Snow <jsnow@redhat.com>
2
1
3
We're about to add several new states, and booleans are becoming
4
unwieldy and difficult to reason about. It would help to have a
5
more explicit bookkeeping of the state of blockjobs. To this end,
6
add a new "status" field and add our existing states in a redundant
7
manner alongside the bools they are replacing:
8
9
UNDEFINED: Placeholder, default state. Not currently visible to QMP
10
unless changes occur in the future to allow creating jobs
11
without starting them via QMP.
12
CREATED: replaces !!job->co && paused && !busy
13
RUNNING: replaces effectively (!paused && busy)
14
PAUSED: Nearly redundant with info->paused, which shows pause_count.
15
This reports the actual status of the job, which almost always
16
matches the paused request status. It differs in that it is
17
strictly only true when the job has actually gone dormant.
18
READY: replaces job->ready.
19
STANDBY: Paused, but job->ready is true.
20
21
New state additions in coming commits will not be quite so redundant:
22
23
WAITING: Waiting on transaction. This job has finished all the work
24
it can until the transaction converges, fails, or is canceled.
25
PENDING: Pending authorization from user. This job has finished all the
26
work it can until the job or transaction is finalized via
27
block_job_finalize. This implies the transaction has converged
28
and left the WAITING phase.
29
ABORTING: Job has encountered an error condition and is in the process
30
of aborting.
31
CONCLUDED: Job has ceased all operations and has a return code available
32
for query and may be dismissed via block_job_dismiss.
33
NULL: Job has been dismissed and (should) be destroyed. Should never
34
be visible to QMP.
35
36
Some of these states appear somewhat superfluous, but it helps define the
37
expected flow of a job; so some of the states wind up being synchronous
38
empty transitions. Importantly, jobs can be in only one of these states
39
at any given time, which helps code and external users alike reason about
40
the current condition of a job unambiguously.
41
42
Signed-off-by: John Snow <jsnow@redhat.com>
43
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
44
---
45
qapi/block-core.json | 31 ++++++++++++++++++++++++++++++-
46
include/block/blockjob.h | 3 +++
47
blockjob.c | 9 +++++++++
48
tests/qemu-iotests/109.out | 24 ++++++++++++------------
49
4 files changed, 54 insertions(+), 13 deletions(-)
50
51
diff --git a/qapi/block-core.json b/qapi/block-core.json
52
index XXXXXXX..XXXXXXX 100644
53
--- a/qapi/block-core.json
54
+++ b/qapi/block-core.json
55
@@ -XXX,XX +XXX,XX @@
56
'data': ['commit', 'stream', 'mirror', 'backup'] }
57
58
##
59
+# @BlockJobStatus:
60
+#
61
+# Indicates the present state of a given blockjob in its lifetime.
62
+#
63
+# @undefined: Erroneous, default state. Should not ever be visible.
64
+#
65
+# @created: The job has been created, but not yet started.
66
+#
67
+# @running: The job is currently running.
68
+#
69
+# @paused: The job is running, but paused. The pause may be requested by
70
+# either the QMP user or by internal processes.
71
+#
72
+# @ready: The job is running, but is ready for the user to signal completion.
73
+# This is used for long-running jobs like mirror that are designed to
74
+# run indefinitely.
75
+#
76
+# @standby: The job is ready, but paused. This is nearly identical to @paused.
77
+# The job may return to @ready or otherwise be canceled.
78
+#
79
+# Since: 2.12
80
+##
81
+{ 'enum': 'BlockJobStatus',
82
+ 'data': ['undefined', 'created', 'running', 'paused', 'ready', 'standby'] }
83
+
84
+##
85
# @BlockJobInfo:
86
#
87
# Information about a long-running block device operation.
88
@@ -XXX,XX +XXX,XX @@
89
#
90
# @ready: true if the job may be completed (since 2.2)
91
#
92
+# @status: Current job state/status (since 2.12)
93
+#
94
# Since: 1.1
95
##
96
{ 'struct': 'BlockJobInfo',
97
'data': {'type': 'str', 'device': 'str', 'len': 'int',
98
'offset': 'int', 'busy': 'bool', 'paused': 'bool', 'speed': 'int',
99
- 'io-status': 'BlockDeviceIoStatus', 'ready': 'bool'} }
100
+ 'io-status': 'BlockDeviceIoStatus', 'ready': 'bool',
101
+ 'status': 'BlockJobStatus' } }
102
103
##
104
# @query-block-jobs:
105
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
106
index XXXXXXX..XXXXXXX 100644
107
--- a/include/block/blockjob.h
108
+++ b/include/block/blockjob.h
109
@@ -XXX,XX +XXX,XX @@ typedef struct BlockJob {
110
*/
111
QEMUTimer sleep_timer;
112
113
+ /** Current state; See @BlockJobStatus for details. */
114
+ BlockJobStatus status;
115
+
116
BlockJobTxn *txn;
117
QLIST_ENTRY(BlockJob) txn_list;
118
} BlockJob;
119
diff --git a/blockjob.c b/blockjob.c
120
index XXXXXXX..XXXXXXX 100644
121
--- a/blockjob.c
122
+++ b/blockjob.c
123
@@ -XXX,XX +XXX,XX @@ void block_job_start(BlockJob *job)
124
job->pause_count--;
125
job->busy = true;
126
job->paused = false;
127
+ job->status = BLOCK_JOB_STATUS_RUNNING;
128
bdrv_coroutine_enter(blk_bs(job->blk), job->co);
129
}
130
131
@@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
132
info->speed = job->speed;
133
info->io_status = job->iostatus;
134
info->ready = job->ready;
135
+ info->status = job->status;
136
return info;
137
}
138
139
@@ -XXX,XX +XXX,XX @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
140
job->paused = true;
141
job->pause_count = 1;
142
job->refcnt = 1;
143
+ job->status = BLOCK_JOB_STATUS_CREATED;
144
aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,
145
QEMU_CLOCK_REALTIME, SCALE_NS,
146
block_job_sleep_timer_cb, job);
147
@@ -XXX,XX +XXX,XX @@ void coroutine_fn block_job_pause_point(BlockJob *job)
148
}
149
150
if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
151
+ BlockJobStatus status = job->status;
152
+ job->status = status == BLOCK_JOB_STATUS_READY ? \
153
+ BLOCK_JOB_STATUS_STANDBY : \
154
+ BLOCK_JOB_STATUS_PAUSED;
155
job->paused = true;
156
block_job_do_yield(job, -1);
157
job->paused = false;
158
+ job->status = status;
159
}
160
161
if (job->driver->resume) {
162
@@ -XXX,XX +XXX,XX @@ void block_job_iostatus_reset(BlockJob *job)
163
164
void block_job_event_ready(BlockJob *job)
165
{
166
+ job->status = BLOCK_JOB_STATUS_READY;
167
job->ready = true;
168
169
if (block_job_is_internal(job)) {
170
diff --git a/tests/qemu-iotests/109.out b/tests/qemu-iotests/109.out
171
index XXXXXXX..XXXXXXX 100644
172
--- a/tests/qemu-iotests/109.out
173
+++ b/tests/qemu-iotests/109.out
174
@@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0
175
{"return": {}}
176
{"return": {}}
177
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}}
178
-{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 1024, "offset": 1024, "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
179
+{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 1024, "offset": 1024, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
180
{"return": {}}
181
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
182
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}}
183
@@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0
184
{"return": {}}
185
{"return": {}}
186
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 197120, "offset": 197120, "speed": 0, "type": "mirror"}}
187
-{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 197120, "offset": 197120, "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
188
+{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 197120, "offset": 197120, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
189
{"return": {}}
190
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
191
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 197120, "offset": 197120, "speed": 0, "type": "mirror"}}
192
@@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0
193
{"return": {}}
194
{"return": {}}
195
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}}
196
-{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 327680, "offset": 327680, "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
197
+{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 327680, "offset": 327680, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
198
{"return": {}}
199
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
200
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}}
201
@@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0
202
{"return": {}}
203
{"return": {}}
204
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}}
205
-{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 1024, "offset": 1024, "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
206
+{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 1024, "offset": 1024, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
207
{"return": {}}
208
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
209
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}}
210
@@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0
211
{"return": {}}
212
{"return": {}}
213
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}}
214
-{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 65536, "offset": 65536, "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
215
+{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 65536, "offset": 65536, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
216
{"return": {}}
217
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
218
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}}
219
@@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0
220
{"return": {}}
221
{"return": {}}
222
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}}
223
-{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 2560, "offset": 2560, "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
224
+{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 2560, "offset": 2560, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
225
{"return": {}}
226
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
227
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}}
228
@@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0
229
{"return": {}}
230
{"return": {}}
231
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}}
232
-{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 2560, "offset": 2560, "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
233
+{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 2560, "offset": 2560, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
234
{"return": {}}
235
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
236
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}}
237
@@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0
238
{"return": {}}
239
{"return": {}}
240
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 31457280, "offset": 31457280, "speed": 0, "type": "mirror"}}
241
-{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 31457280, "offset": 31457280, "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
242
+{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 31457280, "offset": 31457280, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
243
{"return": {}}
244
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
245
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 31457280, "offset": 31457280, "speed": 0, "type": "mirror"}}
246
@@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0
247
{"return": {}}
248
{"return": {}}
249
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}}
250
-{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 327680, "offset": 327680, "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
251
+{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 327680, "offset": 327680, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
252
{"return": {}}
253
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
254
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}}
255
@@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0
256
{"return": {}}
257
{"return": {}}
258
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 2048, "offset": 2048, "speed": 0, "type": "mirror"}}
259
-{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 2048, "offset": 2048, "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
260
+{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 2048, "offset": 2048, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
261
{"return": {}}
262
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
263
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2048, "offset": 2048, "speed": 0, "type": "mirror"}}
264
@@ -XXX,XX +XXX,XX @@ Automatically detecting the format is dangerous for raw images, write operations
265
Specify the 'raw' format explicitly to remove the restrictions.
266
{"return": {}}
267
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}}
268
-{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 512, "offset": 512, "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
269
+{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 512, "offset": 512, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
270
{"return": {}}
271
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
272
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}}
273
@@ -XXX,XX +XXX,XX @@ Images are identical.
274
{"return": {}}
275
{"return": {}}
276
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}}
277
-{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 512, "offset": 512, "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
278
+{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 512, "offset": 512, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
279
{"return": {}}
280
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
281
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}}
282
--
283
2.13.6
284
285
1
From: John Snow <jsnow@redhat.com>
2
1
3
The state transition table has mostly been implied. We're about to make
4
it a bit more complex, so let's make the STM explicit instead.
5
6
Perform state transitions with a function that for now just asserts the
7
transition is appropriate.
8
9
Transitions:
10
Undefined -> Created: During job initialization.
11
Created -> Running: Once the job is started.
12
Jobs cannot transition from "Created" to "Paused"
13
directly, but will instead synchronously transition
14
to running and then immediately to paused.
15
Running -> Paused: Normal workflow for pauses.
16
Running -> Ready: Normal workflow for jobs reaching their sync point.
17
(e.g. mirror)
18
Ready -> Standby: Normal workflow for pausing ready jobs.
19
Paused -> Running: Normal resume.
20
Standby -> Ready: Resume of a Standby job.
21
22
+---------+
23
|UNDEFINED|
24
+--+------+
25
|
26
+--v----+
27
|CREATED|
28
+--+----+
29
|
30
+--v----+ +------+
31
|RUNNING<----->PAUSED|
32
+--+----+ +------+
33
|
34
+--v--+ +-------+
35
|READY<------->STANDBY|
36
+-----+ +-------+
37
38
Notably, there is no state presently defined as of this commit that
39
deals with a job after the "running" or "ready" states, so this table
40
will be adjusted alongside the commits that introduce those states.
41
42
Signed-off-by: John Snow <jsnow@redhat.com>
43
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
44
---
45
blockjob.c | 40 +++++++++++++++++++++++++++++++++-------
46
block/trace-events | 3 +++
47
2 files changed, 36 insertions(+), 7 deletions(-)
48
49
diff --git a/blockjob.c b/blockjob.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/blockjob.c
52
+++ b/blockjob.c
53
@@ -XXX,XX +XXX,XX @@
54
#include "block/block.h"
55
#include "block/blockjob_int.h"
56
#include "block/block_int.h"
57
+#include "block/trace.h"
58
#include "sysemu/block-backend.h"
59
#include "qapi/error.h"
60
#include "qapi/qapi-events-block-core.h"
61
@@ -XXX,XX +XXX,XX @@
62
* block_job_enter. */
63
static QemuMutex block_job_mutex;
64
65
+/* BlockJob State Transition Table */
66
+bool BlockJobSTT[BLOCK_JOB_STATUS__MAX][BLOCK_JOB_STATUS__MAX] = {
67
+ /* U, C, R, P, Y, S */
68
+ /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0},
69
+ /* C: */ [BLOCK_JOB_STATUS_CREATED] = {0, 0, 1, 0, 0, 0},
70
+ /* R: */ [BLOCK_JOB_STATUS_RUNNING] = {0, 0, 0, 1, 1, 0},
71
+ /* P: */ [BLOCK_JOB_STATUS_PAUSED] = {0, 0, 1, 0, 0, 0},
72
+ /* Y: */ [BLOCK_JOB_STATUS_READY] = {0, 0, 0, 0, 0, 1},
73
+ /* S: */ [BLOCK_JOB_STATUS_STANDBY] = {0, 0, 0, 0, 1, 0},
74
+};
75
+
76
+static void block_job_state_transition(BlockJob *job, BlockJobStatus s1)
77
+{
78
+ BlockJobStatus s0 = job->status;
79
+ assert(s1 >= 0 && s1 <= BLOCK_JOB_STATUS__MAX);
80
+ trace_block_job_state_transition(job, job->ret, BlockJobSTT[s0][s1] ?
81
+ "allowed" : "disallowed",
82
+ qapi_enum_lookup(&BlockJobStatus_lookup,
83
+ s0),
84
+ qapi_enum_lookup(&BlockJobStatus_lookup,
85
+ s1));
86
+ assert(BlockJobSTT[s0][s1]);
87
+ job->status = s1;
88
+}
89
+
90
static void block_job_lock(void)
91
{
92
qemu_mutex_lock(&block_job_mutex);
93
@@ -XXX,XX +XXX,XX @@ void block_job_start(BlockJob *job)
94
job->pause_count--;
95
job->busy = true;
96
job->paused = false;
97
- job->status = BLOCK_JOB_STATUS_RUNNING;
98
+ block_job_state_transition(job, BLOCK_JOB_STATUS_RUNNING);
99
bdrv_coroutine_enter(blk_bs(job->blk), job->co);
100
}
101
102
@@ -XXX,XX +XXX,XX @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
103
job->paused = true;
104
job->pause_count = 1;
105
job->refcnt = 1;
106
- job->status = BLOCK_JOB_STATUS_CREATED;
107
+ block_job_state_transition(job, BLOCK_JOB_STATUS_CREATED);
108
aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,
109
QEMU_CLOCK_REALTIME, SCALE_NS,
110
block_job_sleep_timer_cb, job);
111
@@ -XXX,XX +XXX,XX @@ void coroutine_fn block_job_pause_point(BlockJob *job)
112
113
if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
114
BlockJobStatus status = job->status;
115
- job->status = status == BLOCK_JOB_STATUS_READY ? \
116
- BLOCK_JOB_STATUS_STANDBY : \
117
- BLOCK_JOB_STATUS_PAUSED;
118
+ block_job_state_transition(job, status == BLOCK_JOB_STATUS_READY ? \
119
+ BLOCK_JOB_STATUS_STANDBY : \
120
+ BLOCK_JOB_STATUS_PAUSED);
121
job->paused = true;
122
block_job_do_yield(job, -1);
123
job->paused = false;
124
- job->status = status;
125
+ block_job_state_transition(job, status);
126
}
127
128
if (job->driver->resume) {
129
@@ -XXX,XX +XXX,XX @@ void block_job_iostatus_reset(BlockJob *job)
130
131
void block_job_event_ready(BlockJob *job)
132
{
133
- job->status = BLOCK_JOB_STATUS_READY;
134
+ block_job_state_transition(job, BLOCK_JOB_STATUS_READY);
135
job->ready = true;
136
137
if (block_job_is_internal(job)) {
138
diff --git a/block/trace-events b/block/trace-events
139
index XXXXXXX..XXXXXXX 100644
140
--- a/block/trace-events
141
+++ b/block/trace-events
142
@@ -XXX,XX +XXX,XX @@
143
bdrv_open_common(void *bs, const char *filename, int flags, const char *format_name) "bs %p filename \"%s\" flags 0x%x format_name \"%s\""
144
bdrv_lock_medium(void *bs, bool locked) "bs %p locked %d"
145
146
+# blockjob.c
147
+block_job_state_transition(void *job, int ret, const char *legal, const char *s0, const char *s1) "job %p (ret: %d) attempting %s transition (%s-->%s)"
148
+
149
# block/block-backend.c
150
blk_co_preadv(void *blk, void *bs, int64_t offset, unsigned int bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %u flags 0x%x"
151
blk_co_pwritev(void *blk, void *bs, int64_t offset, unsigned int bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %u flags 0x%x"
152
--
153
2.13.6
154
155
1
From: John Snow <jsnow@redhat.com>
2
1
3
Split out the pause command into the actual pause and the wait.
4
Not every usage presently needs to resubmit a pause request.
5
6
The intent with the next commit will be to explicitly disallow
7
redundant or meaningless pause/resume requests, so the tests
8
need to become more judicious to reflect that.
9
10
Signed-off-by: John Snow <jsnow@redhat.com>
11
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
12
Reviewed-by: Eric Blake <eblake@redhat.com>
13
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
14
---
15
tests/qemu-iotests/030 | 6 ++----
16
tests/qemu-iotests/055 | 17 ++++++-----------
17
tests/qemu-iotests/iotests.py | 12 ++++++++----
18
3 files changed, 16 insertions(+), 19 deletions(-)
19
20
diff --git a/tests/qemu-iotests/030 b/tests/qemu-iotests/030
21
index XXXXXXX..XXXXXXX 100755
22
--- a/tests/qemu-iotests/030
23
+++ b/tests/qemu-iotests/030
24
@@ -XXX,XX +XXX,XX @@ class TestSingleDrive(iotests.QMPTestCase):
25
result = self.vm.qmp('block-stream', device='drive0')
26
self.assert_qmp(result, 'return', {})
27
28
- result = self.vm.qmp('block-job-pause', device='drive0')
29
- self.assert_qmp(result, 'return', {})
30
-
31
+ self.pause_job('drive0', wait=False)
32
self.vm.resume_drive('drive0')
33
- self.pause_job('drive0')
34
+ self.pause_wait('drive0')
35
36
result = self.vm.qmp('query-block-jobs')
37
offset = self.dictpath(result, 'return[0]/offset')
38
diff --git a/tests/qemu-iotests/055 b/tests/qemu-iotests/055
39
index XXXXXXX..XXXXXXX 100755
40
--- a/tests/qemu-iotests/055
41
+++ b/tests/qemu-iotests/055
42
@@ -XXX,XX +XXX,XX @@ class TestSingleDrive(iotests.QMPTestCase):
43
target=target, sync='full')
44
self.assert_qmp(result, 'return', {})
45
46
- result = self.vm.qmp('block-job-pause', device='drive0')
47
- self.assert_qmp(result, 'return', {})
48
-
49
+ self.pause_job('drive0', wait=False)
50
self.vm.resume_drive('drive0')
51
- self.pause_job('drive0')
52
+ self.pause_wait('drive0')
53
54
result = self.vm.qmp('query-block-jobs')
55
offset = self.dictpath(result, 'return[0]/offset')
56
@@ -XXX,XX +XXX,XX @@ class TestSingleTransaction(iotests.QMPTestCase):
57
])
58
self.assert_qmp(result, 'return', {})
59
60
- result = self.vm.qmp('block-job-pause', device='drive0')
61
- self.assert_qmp(result, 'return', {})
62
+ self.pause_job('drive0', wait=False)
63
64
result = self.vm.qmp('block-job-set-speed', device='drive0', speed=0)
65
self.assert_qmp(result, 'return', {})
66
67
- self.pause_job('drive0')
68
+ self.pause_wait('drive0')
69
70
result = self.vm.qmp('query-block-jobs')
71
offset = self.dictpath(result, 'return[0]/offset')
72
@@ -XXX,XX +XXX,XX @@ class TestDriveCompression(iotests.QMPTestCase):
73
result = self.vm.qmp(cmd, device='drive0', sync='full', compress=True, **args)
74
self.assert_qmp(result, 'return', {})
75
76
- result = self.vm.qmp('block-job-pause', device='drive0')
77
- self.assert_qmp(result, 'return', {})
78
-
79
+ self.pause_job('drive0', wait=False)
80
self.vm.resume_drive('drive0')
81
- self.pause_job('drive0')
82
+ self.pause_wait('drive0')
83
84
result = self.vm.qmp('query-block-jobs')
85
offset = self.dictpath(result, 'return[0]/offset')
86
diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
87
index XXXXXXX..XXXXXXX 100644
88
--- a/tests/qemu-iotests/iotests.py
89
+++ b/tests/qemu-iotests/iotests.py
90
@@ -XXX,XX +XXX,XX @@ class QMPTestCase(unittest.TestCase):
91
event = self.wait_until_completed(drive=drive)
92
self.assert_qmp(event, 'data/type', 'mirror')
93
94
- def pause_job(self, job_id='job0'):
95
- result = self.vm.qmp('block-job-pause', device=job_id)
96
- self.assert_qmp(result, 'return', {})
97
-
98
+ def pause_wait(self, job_id='job0'):
99
with Timeout(1, "Timeout waiting for job to pause"):
100
while True:
101
result = self.vm.qmp('query-block-jobs')
102
@@ -XXX,XX +XXX,XX @@ class QMPTestCase(unittest.TestCase):
103
if job['device'] == job_id and job['paused'] == True and job['busy'] == False:
104
return job
105
106
+ def pause_job(self, job_id='job0', wait=True):
107
+ result = self.vm.qmp('block-job-pause', device=job_id)
108
+ self.assert_qmp(result, 'return', {})
109
+ if wait:
110
+ return self.pause_wait(job_id)
111
+ return result
112
+
113
114
def notrun(reason):
115
'''Skip this test suite'''
116
--
117
2.13.6
118
119
1
From: John Snow <jsnow@redhat.com>
2
1
3
Add a new state ABORTING.
4
5
This makes transitions from normative states to error states explicit
6
in the STM, and serves as a disambiguation for which states may complete
7
normally when normal end-states (CONCLUDED) are added in future commits.
8
9
Notably, Paused/Standby jobs do not transition directly to aborting,
10
as they must wake up first and cooperate in their cancellation.
11
12
Transitions:
13
Created -> Aborting: can be cancelled (by the system)
14
Running -> Aborting: can be cancelled or encounter an error
15
Ready -> Aborting: can be cancelled or encounter an error
16
17
Verbs:
18
None. The job must finish cleaning itself up and report its final status.
19
20
+---------+
21
|UNDEFINED|
22
+--+------+
23
|
24
+--v----+
25
+---------+CREATED|
26
| +--+----+
27
| |
28
| +--v----+ +------+
29
+---------+RUNNING<----->PAUSED|
30
| +--+----+ +------+
31
| |
32
| +--v--+ +-------+
33
+---------+READY<------->STANDBY|
34
| +-----+ +-------+
35
|
36
+--v-----+
37
|ABORTING|
38
+--------+
39
40
Signed-off-by: John Snow <jsnow@redhat.com>
41
Reviewed-by: Eric Blake <eblake@redhat.com>
42
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
43
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
44
---
45
qapi/block-core.json | 7 ++++++-
46
blockjob.c | 31 ++++++++++++++++++-------------
47
2 files changed, 24 insertions(+), 14 deletions(-)
48
49
diff --git a/qapi/block-core.json b/qapi/block-core.json
50
index XXXXXXX..XXXXXXX 100644
51
--- a/qapi/block-core.json
52
+++ b/qapi/block-core.json
53
@@ -XXX,XX +XXX,XX @@
54
# @standby: The job is ready, but paused. This is nearly identical to @paused.
55
# The job may return to @ready or otherwise be canceled.
56
#
57
+# @aborting: The job is in the process of being aborted, and will finish with
58
+# an error.
59
+# This status may not be visible to the management process.
60
+#
61
# Since: 2.12
62
##
63
{ 'enum': 'BlockJobStatus',
64
- 'data': ['undefined', 'created', 'running', 'paused', 'ready', 'standby'] }
65
+ 'data': ['undefined', 'created', 'running', 'paused', 'ready', 'standby',
66
+ 'aborting' ] }
67
68
##
69
# @BlockJobInfo:
70
diff --git a/blockjob.c b/blockjob.c
71
index XXXXXXX..XXXXXXX 100644
72
--- a/blockjob.c
73
+++ b/blockjob.c
74
@@ -XXX,XX +XXX,XX @@ static QemuMutex block_job_mutex;
75
76
/* BlockJob State Transition Table */
77
bool BlockJobSTT[BLOCK_JOB_STATUS__MAX][BLOCK_JOB_STATUS__MAX] = {
78
- /* U, C, R, P, Y, S */
79
- /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0},
80
- /* C: */ [BLOCK_JOB_STATUS_CREATED] = {0, 0, 1, 0, 0, 0},
81
- /* R: */ [BLOCK_JOB_STATUS_RUNNING] = {0, 0, 0, 1, 1, 0},
82
- /* P: */ [BLOCK_JOB_STATUS_PAUSED] = {0, 0, 1, 0, 0, 0},
83
- /* Y: */ [BLOCK_JOB_STATUS_READY] = {0, 0, 0, 0, 0, 1},
84
- /* S: */ [BLOCK_JOB_STATUS_STANDBY] = {0, 0, 0, 0, 1, 0},
85
+ /* U, C, R, P, Y, S, X */
86
+ /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0},
87
+ /* C: */ [BLOCK_JOB_STATUS_CREATED] = {0, 0, 1, 0, 0, 0, 1},
88
+ /* R: */ [BLOCK_JOB_STATUS_RUNNING] = {0, 0, 0, 1, 1, 0, 1},
89
+ /* P: */ [BLOCK_JOB_STATUS_PAUSED] = {0, 0, 1, 0, 0, 0, 0},
90
+ /* Y: */ [BLOCK_JOB_STATUS_READY] = {0, 0, 0, 0, 0, 1, 1},
91
+ /* S: */ [BLOCK_JOB_STATUS_STANDBY] = {0, 0, 0, 0, 1, 0, 0},
92
+ /* X: */ [BLOCK_JOB_STATUS_ABORTING] = {0, 0, 0, 0, 0, 0, 0},
93
};
94
95
bool BlockJobVerbTable[BLOCK_JOB_VERB__MAX][BLOCK_JOB_STATUS__MAX] = {
96
- /* U, C, R, P, Y, S */
97
- [BLOCK_JOB_VERB_CANCEL] = {0, 1, 1, 1, 1, 1},
98
- [BLOCK_JOB_VERB_PAUSE] = {0, 1, 1, 1, 1, 1},
99
- [BLOCK_JOB_VERB_RESUME] = {0, 1, 1, 1, 1, 1},
100
- [BLOCK_JOB_VERB_SET_SPEED] = {0, 1, 1, 1, 1, 1},
101
- [BLOCK_JOB_VERB_COMPLETE] = {0, 0, 0, 0, 1, 0},
102
+ /* U, C, R, P, Y, S, X */
103
+ [BLOCK_JOB_VERB_CANCEL] = {0, 1, 1, 1, 1, 1, 0},
104
+ [BLOCK_JOB_VERB_PAUSE] = {0, 1, 1, 1, 1, 1, 0},
105
+ [BLOCK_JOB_VERB_RESUME] = {0, 1, 1, 1, 1, 1, 0},
106
+ [BLOCK_JOB_VERB_SET_SPEED] = {0, 1, 1, 1, 1, 1, 0},
107
+ [BLOCK_JOB_VERB_COMPLETE] = {0, 0, 0, 0, 1, 0, 0},
108
};
109
110
static void block_job_state_transition(BlockJob *job, BlockJobStatus s1)
111
@@ -XXX,XX +XXX,XX @@ static void block_job_completed_single(BlockJob *job)
112
{
113
assert(job->completed);
114
115
+ if (job->ret || block_job_is_cancelled(job)) {
116
+ block_job_state_transition(job, BLOCK_JOB_STATUS_ABORTING);
117
+ }
118
+
119
if (!job->ret) {
120
if (job->driver->commit) {
121
job->driver->commit(job);
122
--
123
2.13.6
124
125
1
From: John Snow <jsnow@redhat.com>
2
1
3
For jobs that have reached their CONCLUDED state, prior to having their
4
last reference put down (meaning jobs that have completed successfully,
5
unsuccessfully, or have been canceled), allow the user to dismiss the
6
job's lingering status report via block-job-dismiss.
7
8
This gives management APIs the chance to conclusively determine if a job
9
failed or succeeded, even if the event broadcast was missed.
10
11
Note: block_job_do_dismiss and block_job_decommission happen to do
12
exactly the same thing, but they're called from different semantic
13
contexts, so both aliases are kept to improve readability.
14
15
Note 2: Don't worry about the 0x04 flag definition for AUTO_DISMISS, she
16
has a friend coming in a future patch to fill the hole where 0x02 is.
17
18
Verbs:
19
Dismiss: operates on CONCLUDED jobs only.
20
Signed-off-by: John Snow <jsnow@redhat.com>
21
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
22
---
23
qapi/block-core.json | 24 +++++++++++++++++++++++-
24
include/block/blockjob.h | 14 ++++++++++++++
25
blockdev.c | 14 ++++++++++++++
26
blockjob.c | 26 ++++++++++++++++++++++++--
27
block/trace-events | 1 +
28
5 files changed, 76 insertions(+), 3 deletions(-)
29
30
diff --git a/qapi/block-core.json b/qapi/block-core.json
31
index XXXXXXX..XXXXXXX 100644
32
--- a/qapi/block-core.json
33
+++ b/qapi/block-core.json
34
@@ -XXX,XX +XXX,XX @@
35
#
36
# @complete: see @block-job-complete
37
#
38
+# @dismiss: see @block-job-dismiss
39
+#
40
# Since: 2.12
41
##
42
{ 'enum': 'BlockJobVerb',
43
- 'data': ['cancel', 'pause', 'resume', 'set-speed', 'complete' ] }
44
+ 'data': ['cancel', 'pause', 'resume', 'set-speed', 'complete', 'dismiss' ] }
45
46
##
47
# @BlockJobStatus:
48
@@ -XXX,XX +XXX,XX @@
49
{ 'command': 'block-job-complete', 'data': { 'device': 'str' } }
50
51
##
52
+# @block-job-dismiss:
53
+#
54
+# For jobs that have already concluded, remove them from the block-job-query
55
+# list. This command only needs to be run for jobs which were started with
56
+# QEMU 2.12+ job lifetime management semantics.
57
+#
58
+# This command will refuse to operate on any job that has not yet reached
59
+# its terminal state, BLOCK_JOB_STATUS_CONCLUDED. For jobs that make use of
60
+# BLOCK_JOB_READY event, block-job-cancel or block-job-complete will still need
61
+# to be used as appropriate.
62
+#
63
+# @id: The job identifier.
64
+#
65
+# Returns: Nothing on success
66
+#
67
+# Since: 2.12
68
+##
69
+{ 'command': 'block-job-dismiss', 'data': { 'id': 'str' } }
70
+
71
+##
72
# @BlockdevDiscardOptions:
73
#
74
# Determines how to handle discard requests.
75
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
76
index XXXXXXX..XXXXXXX 100644
77
--- a/include/block/blockjob.h
78
+++ b/include/block/blockjob.h
79
@@ -XXX,XX +XXX,XX @@ typedef struct BlockJob {
80
/** Current state; See @BlockJobStatus for details. */
81
BlockJobStatus status;
82
83
+ /** True if this job should automatically dismiss itself */
84
+ bool auto_dismiss;
85
+
86
BlockJobTxn *txn;
87
QLIST_ENTRY(BlockJob) txn_list;
88
} BlockJob;
89
@@ -XXX,XX +XXX,XX @@ typedef enum BlockJobCreateFlags {
90
BLOCK_JOB_DEFAULT = 0x00,
91
/* BlockJob is not QMP-created and should not send QMP events */
92
BLOCK_JOB_INTERNAL = 0x01,
93
+ /* BlockJob requires manual dismiss step */
94
+ BLOCK_JOB_MANUAL_DISMISS = 0x04,
95
} BlockJobCreateFlags;
96
97
/**
98
@@ -XXX,XX +XXX,XX @@ void block_job_cancel(BlockJob *job);
99
void block_job_complete(BlockJob *job, Error **errp);
100
101
/**
102
+ * block_job_dismiss:
103
+ * @job: The job to be dismissed.
104
+ * @errp: Error object.
105
+ *
106
+ * Remove a concluded job from the query list.
107
+ */
108
+void block_job_dismiss(BlockJob **job, Error **errp);
109
+
110
+/**
111
* block_job_query:
112
* @job: The job to get information about.
113
*
114
diff --git a/blockdev.c b/blockdev.c
115
index XXXXXXX..XXXXXXX 100644
116
--- a/blockdev.c
117
+++ b/blockdev.c
118
@@ -XXX,XX +XXX,XX @@ void qmp_block_job_complete(const char *device, Error **errp)
119
aio_context_release(aio_context);
120
}
121
122
+void qmp_block_job_dismiss(const char *id, Error **errp)
123
+{
124
+ AioContext *aio_context;
125
+ BlockJob *job = find_block_job(id, &aio_context, errp);
126
+
127
+ if (!job) {
128
+ return;
129
+ }
130
+
131
+ trace_qmp_block_job_dismiss(job);
132
+ block_job_dismiss(&job, errp);
133
+ aio_context_release(aio_context);
134
+}
135
+
136
void qmp_change_backing_file(const char *device,
137
const char *image_node_name,
138
const char *backing_file,
139
diff --git a/blockjob.c b/blockjob.c
140
index XXXXXXX..XXXXXXX 100644
141
--- a/blockjob.c
142
+++ b/blockjob.c
143
@@ -XXX,XX +XXX,XX @@ bool BlockJobVerbTable[BLOCK_JOB_VERB__MAX][BLOCK_JOB_STATUS__MAX] = {
144
[BLOCK_JOB_VERB_RESUME] = {0, 1, 1, 1, 1, 1, 0, 0, 0},
145
[BLOCK_JOB_VERB_SET_SPEED] = {0, 1, 1, 1, 1, 1, 0, 0, 0},
146
[BLOCK_JOB_VERB_COMPLETE] = {0, 0, 0, 0, 1, 0, 0, 0, 0},
147
+ [BLOCK_JOB_VERB_DISMISS] = {0, 0, 0, 0, 0, 0, 0, 1, 0},
148
};
149
150
static void block_job_state_transition(BlockJob *job, BlockJobStatus s1)
151
@@ -XXX,XX +XXX,XX @@ static void block_job_decommission(BlockJob *job)
152
block_job_unref(job);
153
}
154
155
+static void block_job_do_dismiss(BlockJob *job)
156
+{
157
+ block_job_decommission(job);
158
+}
159
+
160
static void block_job_conclude(BlockJob *job)
161
{
162
block_job_state_transition(job, BLOCK_JOB_STATUS_CONCLUDED);
163
+ if (job->auto_dismiss || !block_job_started(job)) {
164
+ block_job_do_dismiss(job);
165
+ }
166
}
167
168
static void block_job_completed_single(BlockJob *job)
169
@@ -XXX,XX +XXX,XX @@ static void block_job_completed_single(BlockJob *job)
170
QLIST_REMOVE(job, txn_list);
171
block_job_txn_unref(job->txn);
172
block_job_conclude(job);
173
- block_job_decommission(job);
174
}
175
176
static void block_job_cancel_async(BlockJob *job)
177
@@ -XXX,XX +XXX,XX @@ void block_job_complete(BlockJob *job, Error **errp)
178
job->driver->complete(job, errp);
179
}
180
181
+void block_job_dismiss(BlockJob **jobptr, Error **errp)
182
+{
183
+ BlockJob *job = *jobptr;
184
+ /* similarly to _complete, this is QMP-interface only. */
185
+ assert(job->id);
186
+ if (block_job_apply_verb(job, BLOCK_JOB_VERB_DISMISS, errp)) {
187
+ return;
188
+ }
189
+
190
+ block_job_do_dismiss(job);
191
+ *jobptr = NULL;
192
+}
193
+
194
void block_job_user_pause(BlockJob *job, Error **errp)
195
{
196
if (block_job_apply_verb(job, BLOCK_JOB_VERB_PAUSE, errp)) {
197
@@ -XXX,XX +XXX,XX @@ void block_job_user_resume(BlockJob *job, Error **errp)
198
void block_job_cancel(BlockJob *job)
199
{
200
if (job->status == BLOCK_JOB_STATUS_CONCLUDED) {
201
- return;
202
+ block_job_do_dismiss(job);
203
} else if (block_job_started(job)) {
204
block_job_cancel_async(job);
205
block_job_enter(job);
206
@@ -XXX,XX +XXX,XX @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
207
job->paused = true;
208
job->pause_count = 1;
209
job->refcnt = 1;
210
+ job->auto_dismiss = !(flags & BLOCK_JOB_MANUAL_DISMISS);
211
block_job_state_transition(job, BLOCK_JOB_STATUS_CREATED);
212
aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,
213
QEMU_CLOCK_REALTIME, SCALE_NS,
214
diff --git a/block/trace-events b/block/trace-events
215
index XXXXXXX..XXXXXXX 100644
216
--- a/block/trace-events
217
+++ b/block/trace-events
218
@@ -XXX,XX +XXX,XX @@ qmp_block_job_cancel(void *job) "job %p"
219
qmp_block_job_pause(void *job) "job %p"
220
qmp_block_job_resume(void *job) "job %p"
221
qmp_block_job_complete(void *job) "job %p"
222
+qmp_block_job_dismiss(void *job) "job %p"
223
qmp_block_stream(void *bs, void *job) "bs %p job %p"
224
225
# block/file-win32.c
226
--
227
2.13.6
228
229
1
From: John Snow <jsnow@redhat.com>
2
1
3
Presently, even if a job is canceled post-completion as a result of
4
a failing peer in a transaction, it will still call .commit because
5
nothing has updated or changed its return code.
6
7
The reason why this does not cause problems currently is because
8
backup's implementation of .commit checks for cancellation itself.
9
10
I'd like to simplify this contract:
11
12
(1) Abort is called if the job/transaction fails
13
(2) Commit is called if the job/transaction succeeds
14
15
To this end: A job's return code, if 0, will be forcibly set as
16
-ECANCELED if that job has already concluded. Remove the now
17
redundant check in the backup job implementation.
18
19
We need to check for cancellation in both block_job_completed
20
AND block_job_completed_single, because jobs may be cancelled between
21
those two calls; for instance in transactions. This also necessitates
22
an ABORTING -> ABORTING transition to be allowed.
23
24
The check in block_job_completed could be removed, but there's no
25
point in starting to attempt to succeed a transaction that we know
26
in advance will fail.
27
28
This does NOT affect mirror jobs that are "canceled" during their
29
synchronous phase. The mirror job itself forcibly sets the canceled
30
property to false prior to ceding control, so such cases will invoke
31
the "commit" callback.
32
33
Signed-off-by: John Snow <jsnow@redhat.com>
34
Reviewed-by: Eric Blake <eblake@redhat.com>
35
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
36
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
37
---
38
block/backup.c | 2 +-
39
blockjob.c | 21 ++++++++++++++++-----
40
block/trace-events | 1 +
41
3 files changed, 18 insertions(+), 6 deletions(-)
42
43
diff --git a/block/backup.c b/block/backup.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/block/backup.c
46
+++ b/block/backup.c
47
@@ -XXX,XX +XXX,XX @@ static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
48
BdrvDirtyBitmap *bm;
49
BlockDriverState *bs = blk_bs(job->common.blk);
50
51
- if (ret < 0 || block_job_is_cancelled(&job->common)) {
52
+ if (ret < 0) {
53
/* Merge the successor back into the parent, delete nothing. */
54
bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
55
assert(bm);
56
diff --git a/blockjob.c b/blockjob.c
57
index XXXXXXX..XXXXXXX 100644
58
--- a/blockjob.c
59
+++ b/blockjob.c
60
@@ -XXX,XX +XXX,XX @@ bool BlockJobSTT[BLOCK_JOB_STATUS__MAX][BLOCK_JOB_STATUS__MAX] = {
61
/* P: */ [BLOCK_JOB_STATUS_PAUSED] = {0, 0, 1, 0, 0, 0, 0, 0, 0},
62
/* Y: */ [BLOCK_JOB_STATUS_READY] = {0, 0, 0, 0, 0, 1, 1, 1, 0},
63
/* S: */ [BLOCK_JOB_STATUS_STANDBY] = {0, 0, 0, 0, 1, 0, 0, 0, 0},
64
- /* X: */ [BLOCK_JOB_STATUS_ABORTING] = {0, 0, 0, 0, 0, 0, 0, 1, 0},
65
+ /* X: */ [BLOCK_JOB_STATUS_ABORTING] = {0, 0, 0, 0, 0, 0, 1, 1, 0},
66
/* E: */ [BLOCK_JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0, 1},
67
/* N: */ [BLOCK_JOB_STATUS_NULL] = {0, 0, 0, 0, 0, 0, 0, 0, 0},
68
};
69
@@ -XXX,XX +XXX,XX @@ static void block_job_conclude(BlockJob *job)
70
}
71
}
72
73
+static void block_job_update_rc(BlockJob *job)
74
+{
75
+ if (!job->ret && block_job_is_cancelled(job)) {
76
+ job->ret = -ECANCELED;
77
+ }
78
+ if (job->ret) {
79
+ block_job_state_transition(job, BLOCK_JOB_STATUS_ABORTING);
80
+ }
81
+}
82
+
83
static void block_job_completed_single(BlockJob *job)
84
{
85
assert(job->completed);
86
87
- if (job->ret || block_job_is_cancelled(job)) {
88
- block_job_state_transition(job, BLOCK_JOB_STATUS_ABORTING);
89
- }
90
+ /* Ensure abort is called for late-transactional failures */
91
+ block_job_update_rc(job);
92
93
if (!job->ret) {
94
if (job->driver->commit) {
95
@@ -XXX,XX +XXX,XX @@ void block_job_completed(BlockJob *job, int ret)
96
assert(blk_bs(job->blk)->job == job);
97
job->completed = true;
98
job->ret = ret;
99
- if (ret < 0 || block_job_is_cancelled(job)) {
100
+ block_job_update_rc(job);
101
+ trace_block_job_completed(job, ret, job->ret);
102
+ if (job->ret) {
103
block_job_completed_txn_abort(job);
104
} else {
105
block_job_completed_txn_success(job);
106
diff --git a/block/trace-events b/block/trace-events
107
index XXXXXXX..XXXXXXX 100644
108
--- a/block/trace-events
109
+++ b/block/trace-events
110
@@ -XXX,XX +XXX,XX @@ bdrv_open_common(void *bs, const char *filename, int flags, const char *format_n
111
bdrv_lock_medium(void *bs, bool locked) "bs %p locked %d"
112
113
# blockjob.c
114
+block_job_completed(void *job, int ret, int jret) "job %p ret %d corrected ret %d"
115
block_job_state_transition(void *job, int ret, const char *legal, const char *s0, const char *s1) "job %p (ret: %d) attempting %s transition (%s-->%s)"
116
block_job_apply_verb(void *job, const char *state, const char *verb, const char *legal) "job %p in state %s; applying verb %s (%s)"
117
118
--
119
2.13.6
120
121
1
From: John Snow <jsnow@redhat.com>
2
1
3
The completed_single function is getting a little mucked up with
4
checking to see which callbacks exist, so let's factor them out.
5
6
Signed-off-by: John Snow <jsnow@redhat.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
8
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
9
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
10
---
11
blockjob.c | 35 ++++++++++++++++++++++++++---------
12
1 file changed, 26 insertions(+), 9 deletions(-)
13
14
diff --git a/blockjob.c b/blockjob.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/blockjob.c
17
+++ b/blockjob.c
18
@@ -XXX,XX +XXX,XX @@ static void block_job_update_rc(BlockJob *job)
19
}
20
}
21
22
+static void block_job_commit(BlockJob *job)
23
+{
24
+ assert(!job->ret);
25
+ if (job->driver->commit) {
26
+ job->driver->commit(job);
27
+ }
28
+}
29
+
30
+static void block_job_abort(BlockJob *job)
31
+{
32
+ assert(job->ret);
33
+ if (job->driver->abort) {
34
+ job->driver->abort(job);
35
+ }
36
+}
37
+
38
+static void block_job_clean(BlockJob *job)
39
+{
40
+ if (job->driver->clean) {
41
+ job->driver->clean(job);
42
+ }
43
+}
44
+
45
static void block_job_completed_single(BlockJob *job)
46
{
47
assert(job->completed);
48
@@ -XXX,XX +XXX,XX @@ static void block_job_completed_single(BlockJob *job)
49
block_job_update_rc(job);
50
51
if (!job->ret) {
52
- if (job->driver->commit) {
53
- job->driver->commit(job);
54
- }
55
+ block_job_commit(job);
56
} else {
57
- if (job->driver->abort) {
58
- job->driver->abort(job);
59
- }
60
- }
61
- if (job->driver->clean) {
62
- job->driver->clean(job);
63
+ block_job_abort(job);
64
}
65
+ block_job_clean(job);
66
67
if (job->cb) {
68
job->cb(job->opaque, job->ret);
69
--
70
2.13.6
71
72
1
From: John Snow <jsnow@redhat.com>
2
1
3
Simply apply a function transaction-wide.
4
A few more uses of this in forthcoming patches.
5
6
Signed-off-by: John Snow <jsnow@redhat.com>
7
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
8
---
9
blockjob.c | 25 ++++++++++++++++---------
10
1 file changed, 16 insertions(+), 9 deletions(-)
11
12
diff --git a/blockjob.c b/blockjob.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/blockjob.c
15
+++ b/blockjob.c
16
@@ -XXX,XX +XXX,XX @@ static void block_job_cancel_async(BlockJob *job)
17
job->cancelled = true;
18
}
19
20
+static void block_job_txn_apply(BlockJobTxn *txn, void fn(BlockJob *))
21
+{
22
+ AioContext *ctx;
23
+ BlockJob *job, *next;
24
+
25
+ QLIST_FOREACH_SAFE(job, &txn->jobs, txn_list, next) {
26
+ ctx = blk_get_aio_context(job->blk);
27
+ aio_context_acquire(ctx);
28
+ fn(job);
29
+ aio_context_release(ctx);
30
+ }
31
+}
32
+
33
static int block_job_finish_sync(BlockJob *job,
34
void (*finish)(BlockJob *, Error **errp),
35
Error **errp)
36
@@ -XXX,XX +XXX,XX @@ static void block_job_completed_txn_abort(BlockJob *job)
37
38
static void block_job_completed_txn_success(BlockJob *job)
39
{
40
- AioContext *ctx;
41
BlockJobTxn *txn = job->txn;
42
- BlockJob *other_job, *next;
43
+ BlockJob *other_job;
44
/*
45
* Successful completion, see if there are other running jobs in this
46
* txn.
47
@@ -XXX,XX +XXX,XX @@ static void block_job_completed_txn_success(BlockJob *job)
48
if (!other_job->completed) {
49
return;
50
}
51
- }
52
- /* We are the last completed job, commit the transaction. */
53
- QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
54
- ctx = blk_get_aio_context(other_job->blk);
55
- aio_context_acquire(ctx);
56
assert(other_job->ret == 0);
57
- block_job_completed_single(other_job);
58
- aio_context_release(ctx);
59
}
60
+ /* We are the last completed job, commit the transaction. */
61
+ block_job_txn_apply(txn, block_job_completed_single);
62
}
63
64
/* Assumes the block_job_mutex is held */
65
--
2.13.6

1
From: John Snow <jsnow@redhat.com>

For jobs that are stuck waiting on others in a transaction, it would
be nice to know that they are no longer "running" in that sense, but
instead are waiting on other jobs in the transaction.

Jobs that are "waiting" in this sense cannot be meaningfully altered
any longer as they have left their running loop. The only meaningful
user verb for jobs in this state is "cancel," which will cancel the
whole transaction, too.
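
(Illustration only, not part of this patch: a management client could observe
the new status by starting two backups in a grouped transaction and polling
query-block-jobs. The device names, target paths and the qemu-iotests-style
'vm' handle below are invented for the sketch.)

    # Hypothetical sketch: with completion-mode 'grouped', the job that
    # finishes its work first reports status 'waiting' until the other
    # job in the transaction converges.
    vm.qmp('transaction',
           actions=[
               {'type': 'drive-backup',
                'data': {'device': 'drive0', 'target': '/tmp/t0.img',
                         'sync': 'full'}},
               {'type': 'drive-backup',
                'data': {'device': 'drive1', 'target': '/tmp/t1.img',
                         'sync': 'full'}}],
           properties={'completion-mode': 'grouped'})
    for info in vm.qmp('query-block-jobs')['return']:
        print('%s: %s' % (info['device'], info['status']))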

Transitions:
Running -> Waiting: Normal transition.
Ready -> Waiting: Normal transition.
Waiting -> Aborting: Transactional cancellation.
Waiting -> Concluded: Normal transition.

Removed Transitions:
Running -> Concluded: Jobs must go to WAITING first.
Ready -> Concluded: Jobs must go to WAITING first.

Verbs:
Cancel: Can be applied to WAITING jobs.

                 +---------+
                 |UNDEFINED|
                 +--+------+
                    |
                 +--v----+
       +---------+CREATED+-----------------+
       |         +--+----+                 |
       |            |                      |
       |         +--v----+     +------+    |
       +---------+RUNNING<----->PAUSED|    |
       |         +--+-+--+     +------+    |
       |            | |                    |
       |            | +------------------+ |
       |            |                    | |
       |         +--v--+       +-------+ | |
       +---------+READY<------->STANDBY| | |
       |         +--+--+       +-------+ | |
       |            |                    | |
       |         +--v----+               | |
       +---------+WAITING<---------------+ |
       |         +--+----+                 |
       |            |                      |
    +--v-----+   +--v------+               |
    |ABORTING+--->CONCLUDED|               |
    +--------+   +--+------+               |
                    |                      |
                  +--v-+                   |
                  |NULL<--------------------+
                  +----+

Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 qapi/block-core.json | 6 +++++-
 blockjob.c | 37 ++++++++++++++++++++++-----------------
 2 files changed, 25 insertions(+), 18 deletions(-)
61
62
diff --git a/qapi/block-core.json b/qapi/block-core.json
63
index XXXXXXX..XXXXXXX 100644
64
--- a/qapi/block-core.json
65
+++ b/qapi/block-core.json
66
@@ -XXX,XX +XXX,XX @@
67
# @standby: The job is ready, but paused. This is nearly identical to @paused.
68
# The job may return to @ready or otherwise be canceled.
69
#
70
+# @waiting: The job is waiting for other jobs in the transaction to converge
71
+# to the waiting state. This status will likely not be visible for
72
+# the last job in a transaction.
73
+#
74
# @aborting: The job is in the process of being aborted, and will finish with
75
# an error. The job will afterwards report that it is @concluded.
76
# This status may not be visible to the management process.
77
@@ -XXX,XX +XXX,XX @@
78
##
79
{ 'enum': 'BlockJobStatus',
80
'data': ['undefined', 'created', 'running', 'paused', 'ready', 'standby',
81
- 'aborting', 'concluded', 'null' ] }
82
+ 'waiting', 'aborting', 'concluded', 'null' ] }
83
84
##
85
# @BlockJobInfo:
86
diff --git a/blockjob.c b/blockjob.c
87
index XXXXXXX..XXXXXXX 100644
88
--- a/blockjob.c
89
+++ b/blockjob.c
90
@@ -XXX,XX +XXX,XX @@ static QemuMutex block_job_mutex;
91
92
/* BlockJob State Transition Table */
93
bool BlockJobSTT[BLOCK_JOB_STATUS__MAX][BLOCK_JOB_STATUS__MAX] = {
94
- /* U, C, R, P, Y, S, X, E, N */
95
- /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0, 0, 0},
96
- /* C: */ [BLOCK_JOB_STATUS_CREATED] = {0, 0, 1, 0, 0, 0, 1, 0, 1},
97
- /* R: */ [BLOCK_JOB_STATUS_RUNNING] = {0, 0, 0, 1, 1, 0, 1, 1, 0},
98
- /* P: */ [BLOCK_JOB_STATUS_PAUSED] = {0, 0, 1, 0, 0, 0, 0, 0, 0},
99
- /* Y: */ [BLOCK_JOB_STATUS_READY] = {0, 0, 0, 0, 0, 1, 1, 1, 0},
100
- /* S: */ [BLOCK_JOB_STATUS_STANDBY] = {0, 0, 0, 0, 1, 0, 0, 0, 0},
101
- /* X: */ [BLOCK_JOB_STATUS_ABORTING] = {0, 0, 0, 0, 0, 0, 1, 1, 0},
102
- /* E: */ [BLOCK_JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0, 1},
103
- /* N: */ [BLOCK_JOB_STATUS_NULL] = {0, 0, 0, 0, 0, 0, 0, 0, 0},
104
+ /* U, C, R, P, Y, S, W, X, E, N */
105
+ /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0, 0, 0, 0},
106
+ /* C: */ [BLOCK_JOB_STATUS_CREATED] = {0, 0, 1, 0, 0, 0, 0, 1, 0, 1},
107
+ /* R: */ [BLOCK_JOB_STATUS_RUNNING] = {0, 0, 0, 1, 1, 0, 1, 1, 0, 0},
108
+ /* P: */ [BLOCK_JOB_STATUS_PAUSED] = {0, 0, 1, 0, 0, 0, 0, 0, 0, 0},
109
+ /* Y: */ [BLOCK_JOB_STATUS_READY] = {0, 0, 0, 0, 0, 1, 1, 1, 0, 0},
110
+ /* S: */ [BLOCK_JOB_STATUS_STANDBY] = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0},
111
+ /* W: */ [BLOCK_JOB_STATUS_WAITING] = {0, 0, 0, 0, 0, 0, 0, 1, 1, 0},
112
+ /* X: */ [BLOCK_JOB_STATUS_ABORTING] = {0, 0, 0, 0, 0, 0, 0, 1, 1, 0},
113
+ /* E: */ [BLOCK_JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
114
+ /* N: */ [BLOCK_JOB_STATUS_NULL] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
115
};
116
117
bool BlockJobVerbTable[BLOCK_JOB_VERB__MAX][BLOCK_JOB_STATUS__MAX] = {
118
- /* U, C, R, P, Y, S, X, E, N */
119
- [BLOCK_JOB_VERB_CANCEL] = {0, 1, 1, 1, 1, 1, 0, 0, 0},
120
- [BLOCK_JOB_VERB_PAUSE] = {0, 1, 1, 1, 1, 1, 0, 0, 0},
121
- [BLOCK_JOB_VERB_RESUME] = {0, 1, 1, 1, 1, 1, 0, 0, 0},
122
- [BLOCK_JOB_VERB_SET_SPEED] = {0, 1, 1, 1, 1, 1, 0, 0, 0},
123
- [BLOCK_JOB_VERB_COMPLETE] = {0, 0, 0, 0, 1, 0, 0, 0, 0},
124
- [BLOCK_JOB_VERB_DISMISS] = {0, 0, 0, 0, 0, 0, 0, 1, 0},
125
+ /* U, C, R, P, Y, S, W, X, E, N */
126
+ [BLOCK_JOB_VERB_CANCEL] = {0, 1, 1, 1, 1, 1, 1, 0, 0, 0},
127
+ [BLOCK_JOB_VERB_PAUSE] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0},
128
+ [BLOCK_JOB_VERB_RESUME] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0},
129
+ [BLOCK_JOB_VERB_SET_SPEED] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0},
130
+ [BLOCK_JOB_VERB_COMPLETE] = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0},
131
+ [BLOCK_JOB_VERB_DISMISS] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 0},
132
};
133
134
static void block_job_state_transition(BlockJob *job, BlockJobStatus s1)
135
@@ -XXX,XX +XXX,XX @@ static void block_job_completed_txn_success(BlockJob *job)
136
BlockJob *other_job;
137
int rc = 0;
138
139
+ block_job_state_transition(job, BLOCK_JOB_STATUS_WAITING);
140
+
141
/*
142
* Successful completion, see if there are other running jobs in this
143
* txn.
144
--
2.13.6

1
From: John Snow <jsnow@redhat.com>

For jobs utilizing the new manual workflow, we intend to prohibit
them from modifying the block graph until the management layer provides
an explicit ACK via block-job-finalize to move the process forward.

To distinguish this runstate from "ready" or "waiting," we add a new
"pending" event and status.

For now, the transition from PENDING to CONCLUDED/ABORTING is automatic,
but a future commit will add the explicit block-job-finalize step.
12
13
Transitions:
14
Waiting -> Pending: Normal transition.
15
Pending -> Concluded: Normal transition.
16
Pending -> Aborting: Late transactional failures and cancellations.
17
18
Removed Transitions:
19
Waiting -> Concluded: Jobs must go to PENDING first.
20
21
Verbs:
22
Cancel: Can be applied to a pending job.
23
24
+---------+
25
|UNDEFINED|
26
+--+------+
27
|
28
+--v----+
29
+---------+CREATED+-----------------+
30
| +--+----+ |
31
| | |
32
| +--+----+ +------+ |
33
+---------+RUNNING<----->PAUSED| |
34
| +--+-+--+ +------+ |
35
| | | |
36
| | +------------------+ |
37
| | | |
38
| +--v--+ +-------+ | |
39
+---------+READY<------->STANDBY| | |
40
| +--+--+ +-------+ | |
41
| | | |
42
| +--v----+ | |
43
+---------+WAITING<---------------+ |
44
| +--+----+ |
45
| | |
46
| +--v----+ |
47
+---------+PENDING| |
48
| +--+----+ |
49
| | |
50
+--v-----+ +--v------+ |
51
|ABORTING+--->CONCLUDED| |
52
+--------+ +--+------+ |
53
| |
54
+--v-+ |
55
|NULL<--------------------+
56
+----+
57
58
Signed-off-by: John Snow <jsnow@redhat.com>
59
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
60
---
61
qapi/block-core.json | 31 +++++++++++++++++++++-
62
include/block/blockjob.h | 5 ++++
63
blockjob.c | 67 +++++++++++++++++++++++++++++++-----------------
64
3 files changed, 78 insertions(+), 25 deletions(-)
65
66
diff --git a/qapi/block-core.json b/qapi/block-core.json
67
index XXXXXXX..XXXXXXX 100644
68
--- a/qapi/block-core.json
69
+++ b/qapi/block-core.json
70
@@ -XXX,XX +XXX,XX @@
71
# to the waiting state. This status will likely not be visible for
72
# the last job in a transaction.
73
#
74
+# @pending: The job has finished its work, but has finalization steps that it
75
+# needs to make prior to completing. These changes may require
76
+# manual intervention by the management process if manual was set
77
+# to true. These changes may still fail.
78
+#
79
# @aborting: The job is in the process of being aborted, and will finish with
80
# an error. The job will afterwards report that it is @concluded.
81
# This status may not be visible to the management process.
82
@@ -XXX,XX +XXX,XX @@
83
##
84
{ 'enum': 'BlockJobStatus',
85
'data': ['undefined', 'created', 'running', 'paused', 'ready', 'standby',
86
- 'waiting', 'aborting', 'concluded', 'null' ] }
87
+ 'waiting', 'pending', 'aborting', 'concluded', 'null' ] }
88
89
##
90
# @BlockJobInfo:
91
@@ -XXX,XX +XXX,XX @@
92
'speed' : 'int' } }
93
94
##
95
+# @BLOCK_JOB_PENDING:
96
+#
97
+# Emitted when a block job is awaiting explicit authorization to finalize graph
98
+# changes via @block-job-finalize. If this job is part of a transaction, it will
99
+# not emit this event until the transaction has converged first.
100
+#
101
+# @type: job type
102
+#
103
+# @id: The job identifier.
104
+#
105
+# Since: 2.12
106
+#
107
+# Example:
108
+#
109
+# <- { "event": "BLOCK_JOB_WAITING",
110
+# "data": { "device": "drive0", "type": "mirror" },
111
+# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
112
+#
113
+##
114
+{ 'event': 'BLOCK_JOB_PENDING',
115
+ 'data': { 'type' : 'BlockJobType',
116
+ 'id' : 'str' } }
117
+
118
+##
119
# @PreallocMode:
120
#
121
# Preallocation mode of QEMU image file
122
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
123
index XXXXXXX..XXXXXXX 100644
124
--- a/include/block/blockjob.h
125
+++ b/include/block/blockjob.h
126
@@ -XXX,XX +XXX,XX @@ typedef struct BlockJob {
127
/** Current state; See @BlockJobStatus for details. */
128
BlockJobStatus status;
129
130
+ /** True if this job should automatically finalize itself */
131
+ bool auto_finalize;
132
+
133
/** True if this job should automatically dismiss itself */
134
bool auto_dismiss;
135
136
@@ -XXX,XX +XXX,XX @@ typedef enum BlockJobCreateFlags {
137
BLOCK_JOB_DEFAULT = 0x00,
138
/* BlockJob is not QMP-created and should not send QMP events */
139
BLOCK_JOB_INTERNAL = 0x01,
140
+ /* BlockJob requires manual finalize step */
141
+ BLOCK_JOB_MANUAL_FINALIZE = 0x02,
142
/* BlockJob requires manual dismiss step */
143
BLOCK_JOB_MANUAL_DISMISS = 0x04,
144
} BlockJobCreateFlags;
145
diff --git a/blockjob.c b/blockjob.c
146
index XXXXXXX..XXXXXXX 100644
147
--- a/blockjob.c
148
+++ b/blockjob.c
149
@@ -XXX,XX +XXX,XX @@ static QemuMutex block_job_mutex;
150
151
/* BlockJob State Transition Table */
152
bool BlockJobSTT[BLOCK_JOB_STATUS__MAX][BLOCK_JOB_STATUS__MAX] = {
153
- /* U, C, R, P, Y, S, W, X, E, N */
154
- /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0, 0, 0, 0},
155
- /* C: */ [BLOCK_JOB_STATUS_CREATED] = {0, 0, 1, 0, 0, 0, 0, 1, 0, 1},
156
- /* R: */ [BLOCK_JOB_STATUS_RUNNING] = {0, 0, 0, 1, 1, 0, 1, 1, 0, 0},
157
- /* P: */ [BLOCK_JOB_STATUS_PAUSED] = {0, 0, 1, 0, 0, 0, 0, 0, 0, 0},
158
- /* Y: */ [BLOCK_JOB_STATUS_READY] = {0, 0, 0, 0, 0, 1, 1, 1, 0, 0},
159
- /* S: */ [BLOCK_JOB_STATUS_STANDBY] = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0},
160
- /* W: */ [BLOCK_JOB_STATUS_WAITING] = {0, 0, 0, 0, 0, 0, 0, 1, 1, 0},
161
- /* X: */ [BLOCK_JOB_STATUS_ABORTING] = {0, 0, 0, 0, 0, 0, 0, 1, 1, 0},
162
- /* E: */ [BLOCK_JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
163
- /* N: */ [BLOCK_JOB_STATUS_NULL] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
164
+ /* U, C, R, P, Y, S, W, D, X, E, N */
165
+ /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0},
166
+ /* C: */ [BLOCK_JOB_STATUS_CREATED] = {0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1},
167
+ /* R: */ [BLOCK_JOB_STATUS_RUNNING] = {0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0},
168
+ /* P: */ [BLOCK_JOB_STATUS_PAUSED] = {0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0},
169
+ /* Y: */ [BLOCK_JOB_STATUS_READY] = {0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0},
170
+ /* S: */ [BLOCK_JOB_STATUS_STANDBY] = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},
171
+ /* W: */ [BLOCK_JOB_STATUS_WAITING] = {0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0},
172
+ /* D: */ [BLOCK_JOB_STATUS_PENDING] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0},
173
+ /* X: */ [BLOCK_JOB_STATUS_ABORTING] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0},
174
+ /* E: */ [BLOCK_JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
175
+ /* N: */ [BLOCK_JOB_STATUS_NULL] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
176
};
177
178
bool BlockJobVerbTable[BLOCK_JOB_VERB__MAX][BLOCK_JOB_STATUS__MAX] = {
179
- /* U, C, R, P, Y, S, W, X, E, N */
180
- [BLOCK_JOB_VERB_CANCEL] = {0, 1, 1, 1, 1, 1, 1, 0, 0, 0},
181
- [BLOCK_JOB_VERB_PAUSE] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0},
182
- [BLOCK_JOB_VERB_RESUME] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0},
183
- [BLOCK_JOB_VERB_SET_SPEED] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0},
184
- [BLOCK_JOB_VERB_COMPLETE] = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0},
185
- [BLOCK_JOB_VERB_DISMISS] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 0},
186
+ /* U, C, R, P, Y, S, W, D, X, E, N */
187
+ [BLOCK_JOB_VERB_CANCEL] = {0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0},
188
+ [BLOCK_JOB_VERB_PAUSE] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
189
+ [BLOCK_JOB_VERB_RESUME] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
190
+ [BLOCK_JOB_VERB_SET_SPEED] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
191
+ [BLOCK_JOB_VERB_COMPLETE] = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},
192
+ [BLOCK_JOB_VERB_DISMISS] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0},
193
};
194
195
static void block_job_state_transition(BlockJob *job, BlockJobStatus s1)
196
@@ -XXX,XX +XXX,XX @@ static void __attribute__((__constructor__)) block_job_init(void)
197
198
static void block_job_event_cancelled(BlockJob *job);
199
static void block_job_event_completed(BlockJob *job, const char *msg);
200
+static int block_job_event_pending(BlockJob *job);
201
static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job));
202
203
/* Transactional group of block jobs */
204
@@ -XXX,XX +XXX,XX @@ static void block_job_cancel_async(BlockJob *job)
205
job->cancelled = true;
206
}
207
208
-static int block_job_txn_apply(BlockJobTxn *txn, int fn(BlockJob *))
209
+static int block_job_txn_apply(BlockJobTxn *txn, int fn(BlockJob *), bool lock)
210
{
211
AioContext *ctx;
212
BlockJob *job, *next;
213
int rc;
214
215
QLIST_FOREACH_SAFE(job, &txn->jobs, txn_list, next) {
216
- ctx = blk_get_aio_context(job->blk);
217
- aio_context_acquire(ctx);
218
+ if (lock) {
219
+ ctx = blk_get_aio_context(job->blk);
220
+ aio_context_acquire(ctx);
221
+ }
222
rc = fn(job);
223
- aio_context_release(ctx);
224
+ if (lock) {
225
+ aio_context_release(ctx);
226
+ }
227
if (rc) {
228
break;
229
}
230
@@ -XXX,XX +XXX,XX @@ static void block_job_completed_txn_success(BlockJob *job)
231
}
232
233
/* Jobs may require some prep-work to complete without failure */
234
- rc = block_job_txn_apply(txn, block_job_prepare);
235
+ rc = block_job_txn_apply(txn, block_job_prepare, true);
236
if (rc) {
237
block_job_completed_txn_abort(job);
238
return;
239
}
240
241
/* We are the last completed job, commit the transaction. */
242
- block_job_txn_apply(txn, block_job_completed_single);
243
+ block_job_txn_apply(txn, block_job_event_pending, false);
244
+ block_job_txn_apply(txn, block_job_completed_single, true);
245
}
246
247
/* Assumes the block_job_mutex is held */
248
@@ -XXX,XX +XXX,XX @@ static void block_job_event_completed(BlockJob *job, const char *msg)
249
&error_abort);
250
}
251
252
+static int block_job_event_pending(BlockJob *job)
253
+{
254
+ block_job_state_transition(job, BLOCK_JOB_STATUS_PENDING);
255
+ if (!job->auto_finalize && !block_job_is_internal(job)) {
256
+ qapi_event_send_block_job_pending(job->driver->job_type,
257
+ job->id,
258
+ &error_abort);
259
+ }
260
+ return 0;
261
+}
262
+
263
/*
264
* API for block job drivers and the block layer. These functions are
265
* declared in blockjob_int.h.
266
@@ -XXX,XX +XXX,XX @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
267
job->paused = true;
268
job->pause_count = 1;
269
job->refcnt = 1;
270
+ job->auto_finalize = !(flags & BLOCK_JOB_MANUAL_FINALIZE);
271
job->auto_dismiss = !(flags & BLOCK_JOB_MANUAL_DISMISS);
272
block_job_state_transition(job, BLOCK_JOB_STATUS_CREATED);
273
aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,
274
--
2.13.6

1
From: John Snow <jsnow@redhat.com>

Instead of automatically transitioning from PENDING to CONCLUDED, gate
the .prepare() and .commit() phases behind an explicit acknowledgement
provided by the QMP monitor if auto_finalize = false has been requested.

This allows us to perform graph changes in prepare and/or commit so that
graph changes do not occur autonomously without knowledge of the
controlling management layer.

Transactions that have reached the "PENDING" state together can all be
moved to invoke their finalization methods by issuing block_job_finalize
to any one job in the transaction.

Jobs in a transaction with mixed job->auto_finalize settings will all
remain stuck in the "PENDING" state, as if the entire transaction was
specified with auto_finalize = false. Jobs that specified
auto_finalize = true, however, will still not emit the PENDING event.
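
(Illustration only, not part of this patch: the intended QMP workflow,
sketched in qemu-iotests style. The job id 'job0', device name and target
path below are invented for the example.)

    # Hypothetical sketch of a manually finalized backup job.
    vm.qmp('drive-backup', device='drive0', target='/tmp/backup.img',
           sync='full', job_id='job0', auto_finalize=False)
    # The job stops in the 'pending' state and emits BLOCK_JOB_PENDING.
    vm.event_wait(name='BLOCK_JOB_PENDING', match={'data': {'id': 'job0'}})
    # Graph changes are only made once the management layer acknowledges:
    vm.qmp('block-job-finalize', id='job0')
    vm.event_wait(name='BLOCK_JOB_COMPLETED')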
19
20
Signed-off-by: John Snow <jsnow@redhat.com>
21
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
22
---
23
qapi/block-core.json | 23 ++++++++++++++++++-
24
include/block/blockjob.h | 17 ++++++++++++++
25
blockdev.c | 14 +++++++++++
26
blockjob.c | 60 +++++++++++++++++++++++++++++++++++-------------
27
block/trace-events | 1 +
28
5 files changed, 98 insertions(+), 17 deletions(-)
29
30
diff --git a/qapi/block-core.json b/qapi/block-core.json
31
index XXXXXXX..XXXXXXX 100644
32
--- a/qapi/block-core.json
33
+++ b/qapi/block-core.json
34
@@ -XXX,XX +XXX,XX @@
35
#
36
# @dismiss: see @block-job-dismiss
37
#
38
+# @finalize: see @block-job-finalize
39
+#
40
# Since: 2.12
41
##
42
{ 'enum': 'BlockJobVerb',
43
- 'data': ['cancel', 'pause', 'resume', 'set-speed', 'complete', 'dismiss' ] }
44
+ 'data': ['cancel', 'pause', 'resume', 'set-speed', 'complete', 'dismiss',
45
+ 'finalize' ] }
46
47
##
48
# @BlockJobStatus:
49
@@ -XXX,XX +XXX,XX @@
50
{ 'command': 'block-job-dismiss', 'data': { 'id': 'str' } }
51
52
##
53
+# @block-job-finalize:
54
+#
55
+# Once a job that has manual=true reaches the pending state, it can be
56
+# instructed to finalize any graph changes and do any necessary cleanup
57
+# via this command.
58
+# For jobs in a transaction, instructing one job to finalize will force
59
+# ALL jobs in the transaction to finalize, so it is only necessary to instruct
60
+# a single member job to finalize.
61
+#
62
+# @id: The job identifier.
63
+#
64
+# Returns: Nothing on success
65
+#
66
+# Since: 2.12
67
+##
68
+{ 'command': 'block-job-finalize', 'data': { 'id': 'str' } }
69
+
70
+##
71
# @BlockdevDiscardOptions:
72
#
73
# Determines how to handle discard requests.
74
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
75
index XXXXXXX..XXXXXXX 100644
76
--- a/include/block/blockjob.h
77
+++ b/include/block/blockjob.h
78
@@ -XXX,XX +XXX,XX @@ void block_job_cancel(BlockJob *job);
79
*/
80
void block_job_complete(BlockJob *job, Error **errp);
81
82
+
83
+/**
84
+ * block_job_finalize:
85
+ * @job: The job to fully commit and finish.
86
+ * @errp: Error object.
87
+ *
88
+ * For jobs that have finished their work and are pending
89
+ * awaiting explicit acknowledgement to commit their work,
90
+ * this will commit that work.
91
+ *
92
+ * FIXME: Make the below statement universally true:
93
+ * For jobs that support the manual workflow mode, all graph
94
+ * changes that occur as a result will occur after this command
95
+ * and before a successful reply.
96
+ */
97
+void block_job_finalize(BlockJob *job, Error **errp);
98
+
99
/**
100
* block_job_dismiss:
101
* @job: The job to be dismissed.
102
diff --git a/blockdev.c b/blockdev.c
103
index XXXXXXX..XXXXXXX 100644
104
--- a/blockdev.c
105
+++ b/blockdev.c
106
@@ -XXX,XX +XXX,XX @@ void qmp_block_job_complete(const char *device, Error **errp)
107
aio_context_release(aio_context);
108
}
109
110
+void qmp_block_job_finalize(const char *id, Error **errp)
111
+{
112
+ AioContext *aio_context;
113
+ BlockJob *job = find_block_job(id, &aio_context, errp);
114
+
115
+ if (!job) {
116
+ return;
117
+ }
118
+
119
+ trace_qmp_block_job_finalize(job);
120
+ block_job_finalize(job, errp);
121
+ aio_context_release(aio_context);
122
+}
123
+
124
void qmp_block_job_dismiss(const char *id, Error **errp)
125
{
126
AioContext *aio_context;
127
diff --git a/blockjob.c b/blockjob.c
128
index XXXXXXX..XXXXXXX 100644
129
--- a/blockjob.c
130
+++ b/blockjob.c
131
@@ -XXX,XX +XXX,XX @@ bool BlockJobVerbTable[BLOCK_JOB_VERB__MAX][BLOCK_JOB_STATUS__MAX] = {
132
[BLOCK_JOB_VERB_RESUME] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
133
[BLOCK_JOB_VERB_SET_SPEED] = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
134
[BLOCK_JOB_VERB_COMPLETE] = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},
135
+ [BLOCK_JOB_VERB_FINALIZE] = {0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0},
136
[BLOCK_JOB_VERB_DISMISS] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0},
137
};
138
139
@@ -XXX,XX +XXX,XX @@ static void block_job_clean(BlockJob *job)
140
}
141
}
142
143
-static int block_job_completed_single(BlockJob *job)
144
+static int block_job_finalize_single(BlockJob *job)
145
{
146
assert(job->completed);
147
148
@@ -XXX,XX +XXX,XX @@ static void block_job_completed_txn_abort(BlockJob *job)
149
assert(other_job->cancelled);
150
block_job_finish_sync(other_job, NULL, NULL);
151
}
152
- block_job_completed_single(other_job);
153
+ block_job_finalize_single(other_job);
154
aio_context_release(ctx);
155
}
156
157
block_job_txn_unref(txn);
158
}
159
160
+static int block_job_needs_finalize(BlockJob *job)
161
+{
162
+ return !job->auto_finalize;
163
+}
164
+
165
+static void block_job_do_finalize(BlockJob *job)
166
+{
167
+ int rc;
168
+ assert(job && job->txn);
169
+
170
+ /* prepare the transaction to complete */
171
+ rc = block_job_txn_apply(job->txn, block_job_prepare, true);
172
+ if (rc) {
173
+ block_job_completed_txn_abort(job);
174
+ } else {
175
+ block_job_txn_apply(job->txn, block_job_finalize_single, true);
176
+ }
177
+}
178
+
179
static void block_job_completed_txn_success(BlockJob *job)
180
{
181
BlockJobTxn *txn = job->txn;
182
BlockJob *other_job;
183
- int rc = 0;
184
185
block_job_state_transition(job, BLOCK_JOB_STATUS_WAITING);
186
187
@@ -XXX,XX +XXX,XX @@ static void block_job_completed_txn_success(BlockJob *job)
188
assert(other_job->ret == 0);
189
}
190
191
- /* Jobs may require some prep-work to complete without failure */
192
- rc = block_job_txn_apply(txn, block_job_prepare, true);
193
- if (rc) {
194
- block_job_completed_txn_abort(job);
195
- return;
196
- }
197
-
198
- /* We are the last completed job, commit the transaction. */
199
block_job_txn_apply(txn, block_job_event_pending, false);
200
- block_job_txn_apply(txn, block_job_completed_single, true);
201
+
202
+ /* If no jobs need manual finalization, automatically do so */
203
+ if (block_job_txn_apply(txn, block_job_needs_finalize, false) == 0) {
204
+ block_job_do_finalize(job);
205
+ }
206
}
207
208
/* Assumes the block_job_mutex is held */
209
@@ -XXX,XX +XXX,XX @@ void block_job_complete(BlockJob *job, Error **errp)
210
job->driver->complete(job, errp);
211
}
212
213
+void block_job_finalize(BlockJob *job, Error **errp)
214
+{
215
+ assert(job && job->id && job->txn);
216
+ if (block_job_apply_verb(job, BLOCK_JOB_VERB_FINALIZE, errp)) {
217
+ return;
218
+ }
219
+ block_job_do_finalize(job);
220
+}
221
+
222
void block_job_dismiss(BlockJob **jobptr, Error **errp)
223
{
224
BlockJob *job = *jobptr;
225
@@ -XXX,XX +XXX,XX @@ void block_job_cancel(BlockJob *job)
226
{
227
if (job->status == BLOCK_JOB_STATUS_CONCLUDED) {
228
block_job_do_dismiss(job);
229
- } else if (block_job_started(job)) {
230
- block_job_cancel_async(job);
231
- block_job_enter(job);
232
- } else {
233
+ return;
234
+ }
235
+ block_job_cancel_async(job);
236
+ if (!block_job_started(job)) {
237
block_job_completed(job, -ECANCELED);
238
+ } else if (job->deferred_to_main_loop) {
239
+ block_job_completed_txn_abort(job);
240
+ } else {
241
+ block_job_enter(job);
242
}
243
}
244
245
diff --git a/block/trace-events b/block/trace-events
246
index XXXXXXX..XXXXXXX 100644
247
--- a/block/trace-events
248
+++ b/block/trace-events
249
@@ -XXX,XX +XXX,XX @@ qmp_block_job_cancel(void *job) "job %p"
250
qmp_block_job_pause(void *job) "job %p"
251
qmp_block_job_resume(void *job) "job %p"
252
qmp_block_job_complete(void *job) "job %p"
253
+qmp_block_job_finalize(void *job) "job %p"
254
qmp_block_job_dismiss(void *job) "job %p"
255
qmp_block_stream(void *bs, void *job) "bs %p job %p"
256
257
--
2.13.6

1
From: John Snow <jsnow@redhat.com>

Expose the "manual" property via QAPI for the backup-related jobs.
As of this commit, this allows the management API to request the
"concluded" and "dismiss" semantics for backup jobs.

Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 qapi/block-core.json | 48 ++++++++++++++++++++++++++++++++++++++++++++--------
 blockdev.c | 31 ++++++++++++++++++++++++++++---
 blockjob.c | 2 ++
 tests/qemu-iotests/109.out | 24 ++++++++++++------------
 4 files changed, 82 insertions(+), 23 deletions(-)
15
16
diff --git a/qapi/block-core.json b/qapi/block-core.json
17
index XXXXXXX..XXXXXXX 100644
18
--- a/qapi/block-core.json
19
+++ b/qapi/block-core.json
20
@@ -XXX,XX +XXX,XX @@
21
#
22
# @status: Current job state/status (since 2.12)
23
#
24
+# @auto-finalize: Job will finalize itself when PENDING, moving to
25
+# the CONCLUDED state. (since 2.12)
26
+#
27
+# @auto-dismiss: Job will dismiss itself when CONCLUDED, moving to the NULL
28
+# state and disappearing from the query list. (since 2.12)
29
+#
30
# Since: 1.1
31
##
32
{ 'struct': 'BlockJobInfo',
33
'data': {'type': 'str', 'device': 'str', 'len': 'int',
34
'offset': 'int', 'busy': 'bool', 'paused': 'bool', 'speed': 'int',
35
'io-status': 'BlockDeviceIoStatus', 'ready': 'bool',
36
- 'status': 'BlockJobStatus' } }
37
+ 'status': 'BlockJobStatus',
38
+ 'auto-finalize': 'bool', 'auto-dismiss': 'bool' } }
39
40
##
41
# @query-block-jobs:
42
@@ -XXX,XX +XXX,XX @@
43
# default 'report' (no limitations, since this applies to
44
# a different block device than @device).
45
#
46
+# @auto-finalize: When false, this job will wait in a PENDING state after it has
47
+# finished its work, waiting for @block-job-finalize.
48
+# When true, this job will automatically perform its abort or
49
+# commit actions.
50
+# Defaults to true. (Since 2.12)
51
+#
52
+# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
53
+# has completely ceased all work, and wait for @block-job-dismiss.
54
+# When true, this job will automatically disappear from the query
55
+# list without user intervention.
56
+# Defaults to true. (Since 2.12)
57
+#
58
# Note: @on-source-error and @on-target-error only affect background
59
# I/O. If an error occurs during a guest write request, the device's
60
# rerror/werror actions will be used.
61
@@ -XXX,XX +XXX,XX @@
62
##
63
{ 'struct': 'DriveBackup',
64
'data': { '*job-id': 'str', 'device': 'str', 'target': 'str',
65
- '*format': 'str', 'sync': 'MirrorSyncMode', '*mode': 'NewImageMode',
66
- '*speed': 'int', '*bitmap': 'str', '*compress': 'bool',
67
+ '*format': 'str', 'sync': 'MirrorSyncMode',
68
+ '*mode': 'NewImageMode', '*speed': 'int',
69
+ '*bitmap': 'str', '*compress': 'bool',
70
'*on-source-error': 'BlockdevOnError',
71
- '*on-target-error': 'BlockdevOnError' } }
72
+ '*on-target-error': 'BlockdevOnError',
73
+ '*auto-finalize': 'bool', '*auto-dismiss': 'bool' } }
74
75
##
76
# @BlockdevBackup:
77
@@ -XXX,XX +XXX,XX @@
78
# default 'report' (no limitations, since this applies to
79
# a different block device than @device).
80
#
81
+# @auto-finalize: When false, this job will wait in a PENDING state after it has
82
+# finished its work, waiting for @block-job-finalize.
83
+# When true, this job will automatically perform its abort or
84
+# commit actions.
85
+# Defaults to true. (Since 2.12)
86
+#
87
+# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
88
+# has completed ceased all work, and wait for @block-job-dismiss.
89
+# When true, this job will automatically disappear from the query
90
+# list without user intervention.
91
+# Defaults to true. (Since 2.12)
92
+#
93
# Note: @on-source-error and @on-target-error only affect background
94
# I/O. If an error occurs during a guest write request, the device's
95
# rerror/werror actions will be used.
96
@@ -XXX,XX +XXX,XX @@
97
##
98
{ 'struct': 'BlockdevBackup',
99
'data': { '*job-id': 'str', 'device': 'str', 'target': 'str',
100
- 'sync': 'MirrorSyncMode',
101
- '*speed': 'int',
102
- '*compress': 'bool',
103
+ 'sync': 'MirrorSyncMode', '*speed': 'int', '*compress': 'bool',
104
'*on-source-error': 'BlockdevOnError',
105
- '*on-target-error': 'BlockdevOnError' } }
106
+ '*on-target-error': 'BlockdevOnError',
107
+ '*auto-finalize': 'bool', '*auto-dismiss': 'bool' } }
108
109
##
110
# @blockdev-snapshot-sync:
111
diff --git a/blockdev.c b/blockdev.c
112
index XXXXXXX..XXXXXXX 100644
113
--- a/blockdev.c
114
+++ b/blockdev.c
115
@@ -XXX,XX +XXX,XX @@ static BlockJob *do_drive_backup(DriveBackup *backup, BlockJobTxn *txn,
116
AioContext *aio_context;
117
QDict *options = NULL;
118
Error *local_err = NULL;
119
- int flags;
120
+ int flags, job_flags = BLOCK_JOB_DEFAULT;
121
int64_t size;
122
bool set_backing_hd = false;
123
124
@@ -XXX,XX +XXX,XX @@ static BlockJob *do_drive_backup(DriveBackup *backup, BlockJobTxn *txn,
125
if (!backup->has_job_id) {
126
backup->job_id = NULL;
127
}
128
+ if (!backup->has_auto_finalize) {
129
+ backup->auto_finalize = true;
130
+ }
131
+ if (!backup->has_auto_dismiss) {
132
+ backup->auto_dismiss = true;
133
+ }
134
if (!backup->has_compress) {
135
backup->compress = false;
136
}
137
@@ -XXX,XX +XXX,XX @@ static BlockJob *do_drive_backup(DriveBackup *backup, BlockJobTxn *txn,
138
goto out;
139
}
140
}
141
+ if (!backup->auto_finalize) {
142
+ job_flags |= BLOCK_JOB_MANUAL_FINALIZE;
143
+ }
144
+ if (!backup->auto_dismiss) {
145
+ job_flags |= BLOCK_JOB_MANUAL_DISMISS;
146
+ }
147
148
job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,
149
backup->sync, bmap, backup->compress,
150
backup->on_source_error, backup->on_target_error,
151
- BLOCK_JOB_DEFAULT, NULL, NULL, txn, &local_err);
152
+ job_flags, NULL, NULL, txn, &local_err);
153
bdrv_unref(target_bs);
154
if (local_err != NULL) {
155
error_propagate(errp, local_err);
156
@@ -XXX,XX +XXX,XX @@ BlockJob *do_blockdev_backup(BlockdevBackup *backup, BlockJobTxn *txn,
157
Error *local_err = NULL;
158
AioContext *aio_context;
159
BlockJob *job = NULL;
160
+ int job_flags = BLOCK_JOB_DEFAULT;
161
162
if (!backup->has_speed) {
163
backup->speed = 0;
164
@@ -XXX,XX +XXX,XX @@ BlockJob *do_blockdev_backup(BlockdevBackup *backup, BlockJobTxn *txn,
165
if (!backup->has_job_id) {
166
backup->job_id = NULL;
167
}
168
+ if (!backup->has_auto_finalize) {
169
+ backup->auto_finalize = true;
170
+ }
171
+ if (!backup->has_auto_dismiss) {
172
+ backup->auto_dismiss = true;
173
+ }
174
if (!backup->has_compress) {
175
backup->compress = false;
176
}
177
@@ -XXX,XX +XXX,XX @@ BlockJob *do_blockdev_backup(BlockdevBackup *backup, BlockJobTxn *txn,
178
goto out;
179
}
180
}
181
+ if (!backup->auto_finalize) {
182
+ job_flags |= BLOCK_JOB_MANUAL_FINALIZE;
183
+ }
184
+ if (!backup->auto_dismiss) {
185
+ job_flags |= BLOCK_JOB_MANUAL_DISMISS;
186
+ }
187
job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,
188
backup->sync, NULL, backup->compress,
189
backup->on_source_error, backup->on_target_error,
190
- BLOCK_JOB_DEFAULT, NULL, NULL, txn, &local_err);
191
+ job_flags, NULL, NULL, txn, &local_err);
192
if (local_err != NULL) {
193
error_propagate(errp, local_err);
194
}
195
diff --git a/blockjob.c b/blockjob.c
196
index XXXXXXX..XXXXXXX 100644
197
--- a/blockjob.c
198
+++ b/blockjob.c
199
@@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
200
info->io_status = job->iostatus;
201
info->ready = job->ready;
202
info->status = job->status;
203
+ info->auto_finalize = job->auto_finalize;
204
+ info->auto_dismiss = job->auto_dismiss;
205
return info;
206
}
207
208
diff --git a/tests/qemu-iotests/109.out b/tests/qemu-iotests/109.out
209
index XXXXXXX..XXXXXXX 100644
210
--- a/tests/qemu-iotests/109.out
211
+++ b/tests/qemu-iotests/109.out
212
@@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0
213
{"return": {}}
214
{"return": {}}
215
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}}
216
-{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 1024, "offset": 1024, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
217
+{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 1024, "offset": 1024, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
218
{"return": {}}
219
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
220
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}}
221
@@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0
222
{"return": {}}
223
{"return": {}}
224
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 197120, "offset": 197120, "speed": 0, "type": "mirror"}}
225
-{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 197120, "offset": 197120, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
226
+{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 197120, "offset": 197120, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
227
{"return": {}}
228
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
229
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 197120, "offset": 197120, "speed": 0, "type": "mirror"}}
230
@@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0
231
{"return": {}}
232
{"return": {}}
233
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}}
234
-{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 327680, "offset": 327680, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
235
+{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 327680, "offset": 327680, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
236
{"return": {}}
237
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
238
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}}
239
@@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0
240
{"return": {}}
241
{"return": {}}
242
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}}
243
-{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 1024, "offset": 1024, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
244
+{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 1024, "offset": 1024, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
245
{"return": {}}
246
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
247
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}}
248
@@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0
249
{"return": {}}
250
{"return": {}}
251
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}}
252
-{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 65536, "offset": 65536, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
253
+{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 65536, "offset": 65536, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
254
{"return": {}}
255
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
256
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}}
257
@@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0
258
{"return": {}}
259
{"return": {}}
260
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}}
261
-{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 2560, "offset": 2560, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
262
+{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 2560, "offset": 2560, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
263
{"return": {}}
264
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
265
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}}
266
@@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0
267
{"return": {}}
268
{"return": {}}
269
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}}
270
-{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 2560, "offset": 2560, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
271
+{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 2560, "offset": 2560, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
272
{"return": {}}
273
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
274
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}}
275
@@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0
276
{"return": {}}
277
{"return": {}}
278
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 31457280, "offset": 31457280, "speed": 0, "type": "mirror"}}
279
-{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 31457280, "offset": 31457280, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
280
+{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 31457280, "offset": 31457280, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
281
{"return": {}}
282
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
283
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 31457280, "offset": 31457280, "speed": 0, "type": "mirror"}}
284
@@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0
285
{"return": {}}
286
{"return": {}}
287
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}}
288
-{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 327680, "offset": 327680, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
289
+{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 327680, "offset": 327680, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
290
{"return": {}}
291
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
292
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}}
293
@@ -XXX,XX +XXX,XX @@ read 65536/65536 bytes at offset 0
294
{"return": {}}
295
{"return": {}}
296
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 2048, "offset": 2048, "speed": 0, "type": "mirror"}}
297
-{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 2048, "offset": 2048, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
298
+{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 2048, "offset": 2048, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
299
{"return": {}}
300
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
301
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2048, "offset": 2048, "speed": 0, "type": "mirror"}}
302
@@ -XXX,XX +XXX,XX @@ Automatically detecting the format is dangerous for raw images, write operations
303
Specify the 'raw' format explicitly to remove the restrictions.
304
{"return": {}}
305
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}}
306
-{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 512, "offset": 512, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
307
+{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 512, "offset": 512, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
308
{"return": {}}
309
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
310
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}}
311
@@ -XXX,XX +XXX,XX @@ Images are identical.
312
{"return": {}}
313
{"return": {}}
314
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}}
315
-{"return": [{"io-status": "ok", "device": "src", "busy": false, "len": 512, "offset": 512, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
316
+{"return": [{"auto-finalize": true, "io-status": "ok", "device": "src", "auto-dismiss": true, "busy": false, "len": 512, "offset": 512, "status": "ready", "paused": false, "speed": 0, "ready": true, "type": "mirror"}]}
317
{"return": {}}
318
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false}}
319
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}}
320
--
2.13.6

1
From: John Snow <jsnow@redhat.com>

Signed-off-by: John Snow <jsnow@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 tests/qemu-iotests/056 | 187 ++++++++++++++++++++++++++++++++++++++++++++
 tests/qemu-iotests/056.out | 4 +-
 2 files changed, 189 insertions(+), 2 deletions(-)
9
10
diff --git a/tests/qemu-iotests/056 b/tests/qemu-iotests/056
11
index XXXXXXX..XXXXXXX 100755
12
--- a/tests/qemu-iotests/056
13
+++ b/tests/qemu-iotests/056
14
@@ -XXX,XX +XXX,XX @@ backing_img = os.path.join(iotests.test_dir, 'backing.img')
15
test_img = os.path.join(iotests.test_dir, 'test.img')
16
target_img = os.path.join(iotests.test_dir, 'target.img')
17
18
+def img_create(img, fmt=iotests.imgfmt, size='64M', **kwargs):
19
+ fullname = os.path.join(iotests.test_dir, '%s.%s' % (img, fmt))
20
+ optargs = []
21
+ for k,v in kwargs.iteritems():
22
+ optargs = optargs + ['-o', '%s=%s' % (k,v)]
23
+ args = ['create', '-f', fmt] + optargs + [fullname, size]
24
+ iotests.qemu_img(*args)
25
+ return fullname
26
+
27
+def try_remove(img):
28
+ try:
29
+ os.remove(img)
30
+ except OSError:
31
+ pass
32
+
33
+def io_write_patterns(img, patterns):
34
+ for pattern in patterns:
35
+ iotests.qemu_io('-c', 'write -P%s %s %s' % pattern, img)
36
+
37
+
38
class TestSyncModesNoneAndTop(iotests.QMPTestCase):
39
image_len = 64 * 1024 * 1024 # MB
40
41
@@ -XXX,XX +XXX,XX @@ class TestBeforeWriteNotifier(iotests.QMPTestCase):
42
event = self.cancel_and_wait()
43
self.assert_qmp(event, 'data/type', 'backup')
44
45
+class BackupTest(iotests.QMPTestCase):
46
+ def setUp(self):
47
+ self.vm = iotests.VM()
48
+ self.test_img = img_create('test')
49
+ self.dest_img = img_create('dest')
50
+ self.vm.add_drive(self.test_img)
51
+ self.vm.launch()
52
+
53
+ def tearDown(self):
54
+ self.vm.shutdown()
55
+ try_remove(self.test_img)
56
+ try_remove(self.dest_img)
57
+
58
+ def hmp_io_writes(self, drive, patterns):
59
+ for pattern in patterns:
60
+ self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern)
61
+ self.vm.hmp_qemu_io(drive, 'flush')
62
+
63
+ def qmp_backup_and_wait(self, cmd='drive-backup', serror=None,
64
+ aerror=None, **kwargs):
65
+ if not self.qmp_backup(cmd, serror, **kwargs):
66
+ return False
67
+ return self.qmp_backup_wait(kwargs['device'], aerror)
68
+
69
+ def qmp_backup(self, cmd='drive-backup',
70
+ error=None, **kwargs):
71
+ self.assertTrue('device' in kwargs)
72
+ res = self.vm.qmp(cmd, **kwargs)
73
+ if error:
74
+ self.assert_qmp(res, 'error/desc', error)
75
+ return False
76
+ self.assert_qmp(res, 'return', {})
77
+ return True
78
+
79
+ def qmp_backup_wait(self, device, error=None):
80
+ event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED",
81
+ match={'data': {'device': device}})
82
+ self.assertNotEqual(event, None)
83
+ try:
84
+ failure = self.dictpath(event, 'data/error')
85
+ except AssertionError:
86
+ # Backup succeeded.
87
+ self.assert_qmp(event, 'data/offset', event['data']['len'])
88
+ return True
89
+ else:
90
+ # Failure.
91
+ self.assert_qmp(event, 'data/error', error)
92
+ return False
93
+
94
+ def test_dismiss_false(self):
95
+ res = self.vm.qmp('query-block-jobs')
96
+ self.assert_qmp(res, 'return', [])
97
+ self.qmp_backup_and_wait(device='drive0', format=iotests.imgfmt,
98
+ sync='full', target=self.dest_img,
99
+ auto_dismiss=True)
100
+ res = self.vm.qmp('query-block-jobs')
101
+ self.assert_qmp(res, 'return', [])
102
+
103
+ def test_dismiss_true(self):
104
+ res = self.vm.qmp('query-block-jobs')
105
+ self.assert_qmp(res, 'return', [])
106
+ self.qmp_backup_and_wait(device='drive0', format=iotests.imgfmt,
107
+ sync='full', target=self.dest_img,
108
+ auto_dismiss=False)
109
+ res = self.vm.qmp('query-block-jobs')
110
+ self.assert_qmp(res, 'return[0]/status', 'concluded')
111
+ res = self.vm.qmp('block-job-dismiss', id='drive0')
112
+ self.assert_qmp(res, 'return', {})
113
+ res = self.vm.qmp('query-block-jobs')
114
+ self.assert_qmp(res, 'return', [])
115
+
116
+ def test_dismiss_bad_id(self):
117
+ res = self.vm.qmp('query-block-jobs')
118
+ self.assert_qmp(res, 'return', [])
119
+ res = self.vm.qmp('block-job-dismiss', id='foobar')
120
+ self.assert_qmp(res, 'error/class', 'DeviceNotActive')
121
+
122
+ def test_dismiss_collision(self):
123
+ res = self.vm.qmp('query-block-jobs')
124
+ self.assert_qmp(res, 'return', [])
125
+ self.qmp_backup_and_wait(device='drive0', format=iotests.imgfmt,
126
+ sync='full', target=self.dest_img,
127
+ auto_dismiss=False)
128
+ res = self.vm.qmp('query-block-jobs')
129
+ self.assert_qmp(res, 'return[0]/status', 'concluded')
130
+ # Leave zombie job un-dismissed, observe a failure:
131
+ res = self.qmp_backup_and_wait(serror='Need a root block node',
132
+ device='drive0', format=iotests.imgfmt,
133
+ sync='full', target=self.dest_img,
134
+ auto_dismiss=False)
135
+ self.assertEqual(res, False)
136
+ # OK, dismiss the zombie.
137
+ res = self.vm.qmp('block-job-dismiss', id='drive0')
138
+ self.assert_qmp(res, 'return', {})
139
+ res = self.vm.qmp('query-block-jobs')
140
+ self.assert_qmp(res, 'return', [])
141
+ # Ensure it's really gone.
142
+ self.qmp_backup_and_wait(device='drive0', format=iotests.imgfmt,
143
+ sync='full', target=self.dest_img,
144
+ auto_dismiss=False)
145
+
146
+ def dismissal_failure(self, dismissal_opt):
147
+ res = self.vm.qmp('query-block-jobs')
148
+ self.assert_qmp(res, 'return', [])
149
+ # Give blkdebug something to chew on
150
+ self.hmp_io_writes('drive0',
151
+ (('0x9a', 0, 512),
152
+ ('0x55', '8M', '352k'),
153
+ ('0x78', '15872k', '1M')))
154
+ # Add destination node via blkdebug
155
+ res = self.vm.qmp('blockdev-add',
156
+ node_name='target0',
157
+ driver=iotests.imgfmt,
158
+ file={
159
+ 'driver': 'blkdebug',
160
+ 'image': {
161
+ 'driver': 'file',
162
+ 'filename': self.dest_img
163
+ },
164
+ 'inject-error': [{
165
+ 'event': 'write_aio',
166
+ 'errno': 5,
167
+ 'immediately': False,
168
+ 'once': True
169
+ }],
170
+ })
171
+ self.assert_qmp(res, 'return', {})
172
+
173
+ res = self.qmp_backup(cmd='blockdev-backup',
174
+ device='drive0', target='target0',
175
+ on_target_error='stop',
176
+ sync='full',
177
+ auto_dismiss=dismissal_opt)
178
+ self.assertTrue(res)
179
+ event = self.vm.event_wait(name="BLOCK_JOB_ERROR",
180
+ match={'data': {'device': 'drive0'}})
181
+ self.assertNotEqual(event, None)
182
+ # OK, job should be wedged
183
+ res = self.vm.qmp('query-block-jobs')
184
+ self.assert_qmp(res, 'return[0]/status', 'paused')
185
+ res = self.vm.qmp('block-job-dismiss', id='drive0')
186
+ self.assert_qmp(res, 'error/desc',
187
+ "Job 'drive0' in state 'paused' cannot accept"
188
+ " command verb 'dismiss'")
189
+ res = self.vm.qmp('query-block-jobs')
190
+ self.assert_qmp(res, 'return[0]/status', 'paused')
191
+ # OK, unstick job and move forward.
192
+ res = self.vm.qmp('block-job-resume', device='drive0')
193
+ self.assert_qmp(res, 'return', {})
194
+ # And now we need to wait for it to conclude;
195
+ res = self.qmp_backup_wait(device='drive0')
196
+ self.assertTrue(res)
197
+ if not dismissal_opt:
198
+ # Job should now be languishing:
199
+ res = self.vm.qmp('query-block-jobs')
200
+ self.assert_qmp(res, 'return[0]/status', 'concluded')
201
+ res = self.vm.qmp('block-job-dismiss', id='drive0')
202
+ self.assert_qmp(res, 'return', {})
203
+ res = self.vm.qmp('query-block-jobs')
204
+ self.assert_qmp(res, 'return', [])
205
+
206
+ def test_dismiss_premature(self):
207
+ self.dismissal_failure(False)
208
+
209
+ def test_dismiss_erroneous(self):
210
+ self.dismissal_failure(True)
211
+
212
if __name__ == '__main__':
213
iotests.main(supported_fmts=['qcow2', 'qed'])
214
diff --git a/tests/qemu-iotests/056.out b/tests/qemu-iotests/056.out
215
index XXXXXXX..XXXXXXX 100644
216
--- a/tests/qemu-iotests/056.out
217
+++ b/tests/qemu-iotests/056.out
218
@@ -XXX,XX +XXX,XX @@
219
-...
220
+.........
221
----------------------------------------------------------------------
222
-Ran 3 tests
223
+Ran 9 tests
224
225
OK
226
--
2.13.6

1
The crypto driver used to create the image file in a callback from the
crypto subsystem. If we want to implement .bdrv_co_create, this needs to
go away because that callback will get a reference to an already
existing block node.

Move the image file creation to block_crypto_create_generic().

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
---
 block/crypto.c | 37 +++++++++++++++++--------------------
 1 file changed, 17 insertions(+), 20 deletions(-)
14
15
diff --git a/block/crypto.c b/block/crypto.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/block/crypto.c
18
+++ b/block/crypto.c
19
@@ -XXX,XX +XXX,XX @@ static ssize_t block_crypto_read_func(QCryptoBlock *block,
20
21
22
struct BlockCryptoCreateData {
23
- const char *filename;
24
- QemuOpts *opts;
25
BlockBackend *blk;
26
uint64_t size;
27
};
28
@@ -XXX,XX +XXX,XX @@ static ssize_t block_crypto_init_func(QCryptoBlock *block,
29
Error **errp)
30
{
31
struct BlockCryptoCreateData *data = opaque;
32
- int ret;
33
34
/* User provided size should reflect amount of space made
35
* available to the guest, so we must take account of that
36
* which will be used by the crypto header
37
*/
38
- data->size += headerlen;
39
-
40
- qemu_opt_set_number(data->opts, BLOCK_OPT_SIZE, data->size, &error_abort);
41
- ret = bdrv_create_file(data->filename, data->opts, errp);
42
- if (ret < 0) {
43
- return -1;
44
- }
45
-
46
- data->blk = blk_new_open(data->filename, NULL, NULL,
47
- BDRV_O_RDWR | BDRV_O_PROTOCOL, errp);
48
- if (!data->blk) {
49
- return -1;
50
- }
51
-
52
- return 0;
53
+ return blk_truncate(data->blk, data->size + headerlen, PREALLOC_MODE_OFF,
54
+ errp);
55
}
56
57
58
@@ -XXX,XX +XXX,XX @@ static int block_crypto_create_generic(QCryptoBlockFormat format,
59
struct BlockCryptoCreateData data = {
60
.size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
61
BDRV_SECTOR_SIZE),
62
- .opts = opts,
63
- .filename = filename,
64
};
65
QDict *cryptoopts;
66
67
+ /* Parse options */
68
cryptoopts = qemu_opts_to_qdict(opts, NULL);
69
70
create_opts = block_crypto_create_opts_init(format, cryptoopts, errp);
71
@@ -XXX,XX +XXX,XX @@ static int block_crypto_create_generic(QCryptoBlockFormat format,
72
return -1;
73
}
74
75
+ /* Create protocol layer */
76
+ ret = bdrv_create_file(filename, opts, errp);
77
+ if (ret < 0) {
78
+ return ret;
79
+ }
80
+
81
+ data.blk = blk_new_open(filename, NULL, NULL,
82
+ BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
83
+ errp);
84
+ if (!data.blk) {
85
+ return -EINVAL;
86
+ }
87
+
88
+ /* Create format layer */
89
crypto = qcrypto_block_create(create_opts, NULL,
90
block_crypto_init_func,
91
block_crypto_write_func,
92
--
2.13.6

1
Everything that refers to the protocol layer or QemuOpts is moved out of
block_crypto_create_generic(), so that the remaining function is
suitable to be called by a .bdrv_co_create implementation.

LUKS is the only driver that actually implements the old interface, and
we don't intend to use it in any new drivers, so put the moved out code
directly into a LUKS function rather than creating a generic
intermediate one.
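
(Illustration only, not part of this patch: the QemuOpts-based entry point
that now lives in block_crypto_co_create_opts_luks() is still reached through
the usual image creation path, e.g. from a qemu-iotests-style script. The
secret id, key and image path are invented for the example.)

    # Hypothetical sketch: create a LUKS image through the opts-based path.
    iotests.qemu_img('create', '-f', 'luks',
                     '--object', 'secret,id=sec0,data=123456',
                     '-o', 'key-secret=sec0',
                     '/tmp/test.luks', '16M')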

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
---
 block/crypto.c | 95 ++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 61 insertions(+), 34 deletions(-)
16
17
diff --git a/block/crypto.c b/block/crypto.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/block/crypto.c
20
+++ b/block/crypto.c
21
@@ -XXX,XX +XXX,XX @@ static int block_crypto_open_generic(QCryptoBlockFormat format,
22
}
23
24
25
-static int block_crypto_create_generic(QCryptoBlockFormat format,
26
- const char *filename,
27
- QemuOpts *opts,
28
- Error **errp)
29
+static int block_crypto_co_create_generic(BlockDriverState *bs,
30
+ int64_t size,
31
+ QCryptoBlockCreateOptions *opts,
32
+ Error **errp)
33
{
34
- int ret = -EINVAL;
35
- QCryptoBlockCreateOptions *create_opts = NULL;
36
+ int ret;
37
+ BlockBackend *blk;
38
QCryptoBlock *crypto = NULL;
39
- struct BlockCryptoCreateData data = {
40
- .size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
41
- BDRV_SECTOR_SIZE),
42
- };
43
- QDict *cryptoopts;
44
-
45
- /* Parse options */
46
- cryptoopts = qemu_opts_to_qdict(opts, NULL);
47
+ struct BlockCryptoCreateData data;
48
49
- create_opts = block_crypto_create_opts_init(format, cryptoopts, errp);
50
- if (!create_opts) {
51
- return -1;
52
- }
53
+ blk = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);
54
55
- /* Create protocol layer */
56
- ret = bdrv_create_file(filename, opts, errp);
57
+ ret = blk_insert_bs(blk, bs, errp);
58
if (ret < 0) {
59
- return ret;
60
+ goto cleanup;
61
}
62
63
- data.blk = blk_new_open(filename, NULL, NULL,
64
- BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
65
- errp);
66
- if (!data.blk) {
67
- return -EINVAL;
68
- }
69
+ data = (struct BlockCryptoCreateData) {
70
+ .blk = blk,
71
+ .size = size,
72
+ };
73
74
- /* Create format layer */
75
- crypto = qcrypto_block_create(create_opts, NULL,
76
+ crypto = qcrypto_block_create(opts, NULL,
77
block_crypto_init_func,
78
block_crypto_write_func,
79
&data,
80
@@ -XXX,XX +XXX,XX @@ static int block_crypto_create_generic(QCryptoBlockFormat format,
81
82
ret = 0;
83
cleanup:
84
- QDECREF(cryptoopts);
85
qcrypto_block_free(crypto);
86
- blk_unref(data.blk);
87
- qapi_free_QCryptoBlockCreateOptions(create_opts);
88
+ blk_unref(blk);
89
return ret;
90
}
91
92
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn block_crypto_co_create_opts_luks(const char *filename,
93
QemuOpts *opts,
94
Error **errp)
95
{
96
- return block_crypto_create_generic(Q_CRYPTO_BLOCK_FORMAT_LUKS,
97
- filename, opts, errp);
98
+ QCryptoBlockCreateOptions *create_opts = NULL;
99
+ BlockDriverState *bs = NULL;
100
+ QDict *cryptoopts;
101
+ int64_t size;
102
+ int ret;
103
+
104
+ /* Parse options */
105
+ size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0);
106
+
107
+ cryptoopts = qemu_opts_to_qdict_filtered(opts, NULL,
108
+ &block_crypto_create_opts_luks,
109
+ true);
110
+
111
+ create_opts = block_crypto_create_opts_init(Q_CRYPTO_BLOCK_FORMAT_LUKS,
112
+ cryptoopts, errp);
113
+ if (!create_opts) {
114
+ ret = -EINVAL;
115
+ goto fail;
116
+ }
117
+
118
+ /* Create protocol layer */
119
+ ret = bdrv_create_file(filename, opts, errp);
120
+ if (ret < 0) {
121
+ return ret;
122
+ }
123
+
124
+ bs = bdrv_open(filename, NULL, NULL,
125
+ BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp);
126
+ if (!bs) {
127
+ ret = -EINVAL;
128
+ goto fail;
129
+ }
130
+
131
+ /* Create format layer */
132
+ ret = block_crypto_co_create_generic(bs, size, create_opts, errp);
133
+ if (ret < 0) {
134
+ goto fail;
135
+ }
136
+
137
+ ret = 0;
138
+fail:
139
+ bdrv_unref(bs);
140
+ qapi_free_QCryptoBlockCreateOptions(create_opts);
141
+ QDECREF(cryptoopts);
142
+ return ret;
143
}
144
145
static int block_crypto_get_info_luks(BlockDriverState *bs,
146
--
2.13.6

This adds the .bdrv_co_create driver callback to luks, which enables
image creation over QMP.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
---
 qapi/block-core.json | 17 ++++++++++++++++-
 block/crypto.c | 34 ++++++++++++++++++++++++++++++++++
 2 files changed, 50 insertions(+), 1 deletion(-)

diff --git a/qapi/block-core.json b/qapi/block-core.json
12
index XXXXXXX..XXXXXXX 100644
13
--- a/qapi/block-core.json
14
+++ b/qapi/block-core.json
15
@@ -XXX,XX +XXX,XX @@
16
'*preallocation': 'PreallocMode' } }
17
18
##
19
+# @BlockdevCreateOptionsLUKS:
20
+#
21
+# Driver specific image creation options for LUKS.
22
+#
23
+# @file Node to create the image format on
24
+# @size Size of the virtual disk in bytes
25
+#
26
+# Since: 2.12
27
+##
28
+{ 'struct': 'BlockdevCreateOptionsLUKS',
29
+ 'base': 'QCryptoBlockCreateOptionsLUKS',
30
+ 'data': { 'file': 'BlockdevRef',
31
+ 'size': 'size' } }
32
+
33
+##
34
# @BlockdevCreateOptionsNfs:
35
#
36
# Driver specific image creation options for NFS.
37
@@ -XXX,XX +XXX,XX @@
38
'http': 'BlockdevCreateNotSupported',
39
'https': 'BlockdevCreateNotSupported',
40
'iscsi': 'BlockdevCreateNotSupported',
41
- 'luks': 'BlockdevCreateNotSupported',
42
+ 'luks': 'BlockdevCreateOptionsLUKS',
43
'nbd': 'BlockdevCreateNotSupported',
44
'nfs': 'BlockdevCreateOptionsNfs',
45
'null-aio': 'BlockdevCreateNotSupported',
46
diff --git a/block/crypto.c b/block/crypto.c
47
index XXXXXXX..XXXXXXX 100644
48
--- a/block/crypto.c
49
+++ b/block/crypto.c
50
@@ -XXX,XX +XXX,XX @@ static int block_crypto_open_luks(BlockDriverState *bs,
51
bs, options, flags, errp);
52
}
53
54
+static int coroutine_fn
55
+block_crypto_co_create_luks(BlockdevCreateOptions *create_options, Error **errp)
56
+{
57
+ BlockdevCreateOptionsLUKS *luks_opts;
58
+ BlockDriverState *bs = NULL;
59
+ QCryptoBlockCreateOptions create_opts;
60
+ int ret;
61
+
62
+ assert(create_options->driver == BLOCKDEV_DRIVER_LUKS);
63
+ luks_opts = &create_options->u.luks;
64
+
65
+ bs = bdrv_open_blockdev_ref(luks_opts->file, errp);
66
+ if (bs == NULL) {
67
+ return -EIO;
68
+ }
69
+
70
+ create_opts = (QCryptoBlockCreateOptions) {
71
+ .format = Q_CRYPTO_BLOCK_FORMAT_LUKS,
72
+ .u.luks = *qapi_BlockdevCreateOptionsLUKS_base(luks_opts),
73
+ };
74
+
75
+ ret = block_crypto_co_create_generic(bs, luks_opts->size, &create_opts,
76
+ errp);
77
+ if (ret < 0) {
78
+ goto fail;
79
+ }
80
+
81
+ ret = 0;
82
+fail:
83
+ bdrv_unref(bs);
84
+ return ret;
85
+}
86
+
87
static int coroutine_fn block_crypto_co_create_opts_luks(const char *filename,
88
QemuOpts *opts,
89
Error **errp)
90
@@ -XXX,XX +XXX,XX @@ BlockDriver bdrv_crypto_luks = {
91
.bdrv_open = block_crypto_open_luks,
92
.bdrv_close = block_crypto_close,
93
.bdrv_child_perm = bdrv_format_default_perms,
94
+ .bdrv_co_create = block_crypto_co_create_luks,
95
.bdrv_co_create_opts = block_crypto_co_create_opts_luks,
96
.bdrv_truncate = block_crypto_truncate,
97
.create_opts = &block_crypto_create_opts_luks,
98
--
2.13.6

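As a usage sketch: with this callback in place, a LUKS image can be
formatted over QMP on top of a protocol node that was added beforehand.
The commands below assume the experimental x-blockdev-create command
available in this development cycle; the file name, node name, secret id
and size are invented for the example, and the protocol-layer file is
assumed to already exist:

  { "execute": "object-add",
    "arguments": { "qom-type": "secret", "id": "keysec0",
                   "props": { "data": "foo" } } }
  { "execute": "blockdev-add",
    "arguments": { "driver": "file", "node-name": "proto0",
                   "filename": "test.luks" } }
  { "execute": "x-blockdev-create",
    "arguments": { "driver": "luks", "file": "proto0",
                   "key-secret": "keysec0", "size": 67108864 } }

The format layer only takes write and resize permissions on the node;
block_crypto_co_create_generic() then truncates it to the requested size
plus the crypto header.
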
The .bdrv_getlength implementation of the crypto block driver asserted
that the payload offset isn't after EOF. This is an invalid assertion to
make as the image file could be corrupted. Instead, check it and return
-EIO if the file is too small for the payload offset.

Zero length images are fine, so trigger -EIO only on offset > len, not
on offset >= len as the assertion did before.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
---
 block/crypto.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/block/crypto.c b/block/crypto.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/block/crypto.c
18
+++ b/block/crypto.c
19
@@ -XXX,XX +XXX,XX @@ static int64_t block_crypto_getlength(BlockDriverState *bs)
20
21
uint64_t offset = qcrypto_block_get_payload_offset(crypto->block);
22
assert(offset < INT64_MAX);
23
- assert(offset < len);
24
+
25
+ if (offset > len) {
26
+ return -EIO;
27
+ }
28
29
len -= offset;
30
31
--
2.13.6

When you request an image size close to UINT64_MAX, the addition of the
crypto header may cause an integer overflow. Catch it instead of
silently truncating the image size.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
---
 block/crypto.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/block/crypto.c b/block/crypto.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/block/crypto.c
14
+++ b/block/crypto.c
15
@@ -XXX,XX +XXX,XX @@ static ssize_t block_crypto_init_func(QCryptoBlock *block,
16
{
17
struct BlockCryptoCreateData *data = opaque;
18
19
+ if (data->size > INT64_MAX || headerlen > INT64_MAX - data->size) {
20
+ error_setg(errp, "The requested file size is too large");
21
+ return -EFBIG;
22
+ }
23
+
24
/* User provided size should reflect amount of space made
25
* available to the guest, so we must take account of that
26
* which will be used by the crypto header
27
--
2.13.6

From: Max Reitz <mreitz@redhat.com>

In preparation for QAPI-fying VDI image creation, we have to create a
BlockdevCreateOptionsVdi type which is received by a (future)
vdi_co_create().

vdi_co_create_opts() now converts the QemuOpts object into such a
BlockdevCreateOptionsVdi object. The protocol-layer file is still
created in vdi_co_do_create() (and BlockdevCreateOptionsVdi.file is set
to an empty string), but that will be addressed by a follow-up patch.

Note that cluster-size is not part of the QAPI schema because it is not
supported by default.

Signed-off-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 qapi/block-core.json | 18 +++++++++++
 block/vdi.c | 91 ++++++++++++++++++++++++++++++++++++++--------------
 2 files changed, 95 insertions(+), 14 deletions(-)

diff --git a/qapi/block-core.json b/qapi/block-core.json
23
index XXXXXXX..XXXXXXX 100644
24
--- a/qapi/block-core.json
25
+++ b/qapi/block-core.json
26
@@ -XXX,XX +XXX,XX @@
27
'size': 'size' } }
28
29
##
30
+# @BlockdevCreateOptionsVdi:
31
+#
32
+# Driver specific image creation options for VDI.
33
+#
34
+# @file Node to create the image format on
35
+# @size Size of the virtual disk in bytes
36
+# @static Whether to create a statically (true) or
37
+# dynamically (false) allocated image
38
+# (default: false, i.e. dynamic)
39
+#
40
+# Since: 2.12
41
+##
42
+{ 'struct': 'BlockdevCreateOptionsVdi',
43
+ 'data': { 'file': 'BlockdevRef',
44
+ 'size': 'size',
45
+ '*static': 'bool' } }
46
+
47
+##
48
# @BlockdevCreateNotSupported:
49
#
50
# This is used for all drivers that don't support creating images.
51
diff --git a/block/vdi.c b/block/vdi.c
52
index XXXXXXX..XXXXXXX 100644
53
--- a/block/vdi.c
54
+++ b/block/vdi.c
55
@@ -XXX,XX +XXX,XX @@
56
57
#include "qemu/osdep.h"
58
#include "qapi/error.h"
59
+#include "qapi/qmp/qdict.h"
60
+#include "qapi/qobject-input-visitor.h"
61
+#include "qapi/qapi-visit-block-core.h"
62
#include "block/block_int.h"
63
#include "sysemu/block-backend.h"
64
#include "qemu/module.h"
65
@@ -XXX,XX +XXX,XX @@
66
#define VDI_DISK_SIZE_MAX ((uint64_t)VDI_BLOCKS_IN_IMAGE_MAX * \
67
(uint64_t)DEFAULT_CLUSTER_SIZE)
68
69
+static QemuOptsList vdi_create_opts;
70
+
71
typedef struct {
72
char text[0x40];
73
uint32_t signature;
74
@@ -XXX,XX +XXX,XX @@ nonallocating_write:
75
return ret;
76
}
77
78
-static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts,
79
- Error **errp)
80
+static int coroutine_fn vdi_co_do_create(const char *filename,
81
+ QemuOpts *file_opts,
82
+ BlockdevCreateOptionsVdi *vdi_opts,
83
+ size_t block_size, Error **errp)
84
{
85
int ret = 0;
86
uint64_t bytes = 0;
87
uint32_t blocks;
88
- size_t block_size = DEFAULT_CLUSTER_SIZE;
89
uint32_t image_type = VDI_TYPE_DYNAMIC;
90
VdiHeader header;
91
size_t i;
92
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts,
93
logout("\n");
94
95
/* Read out options. */
96
- bytes = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
97
- BDRV_SECTOR_SIZE);
98
-#if defined(CONFIG_VDI_BLOCK_SIZE)
99
- /* TODO: Additional checks (SECTOR_SIZE * 2^n, ...). */
100
- block_size = qemu_opt_get_size_del(opts,
101
- BLOCK_OPT_CLUSTER_SIZE,
102
- DEFAULT_CLUSTER_SIZE);
103
-#endif
104
-#if defined(CONFIG_VDI_STATIC_IMAGE)
105
- if (qemu_opt_get_bool_del(opts, BLOCK_OPT_STATIC, false)) {
106
+ bytes = vdi_opts->size;
107
+ if (vdi_opts->q_static) {
108
image_type = VDI_TYPE_STATIC;
109
}
110
+#ifndef CONFIG_VDI_STATIC_IMAGE
111
+ if (image_type == VDI_TYPE_STATIC) {
112
+ ret = -ENOTSUP;
113
+ error_setg(errp, "Statically allocated images cannot be created in "
114
+ "this build");
115
+ goto exit;
116
+ }
117
+#endif
118
+#ifndef CONFIG_VDI_BLOCK_SIZE
119
+ if (block_size != DEFAULT_CLUSTER_SIZE) {
120
+ ret = -ENOTSUP;
121
+ error_setg(errp,
122
+ "A non-default cluster size is not supported in this build");
123
+ goto exit;
124
+ }
125
#endif
126
127
if (bytes > VDI_DISK_SIZE_MAX) {
128
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts,
129
goto exit;
130
}
131
132
- ret = bdrv_create_file(filename, opts, &local_err);
133
+ ret = bdrv_create_file(filename, file_opts, &local_err);
134
if (ret < 0) {
135
error_propagate(errp, local_err);
136
goto exit;
137
@@ -XXX,XX +XXX,XX @@ exit:
138
return ret;
139
}
140
141
+static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts,
142
+ Error **errp)
143
+{
144
+ QDict *qdict = NULL;
145
+ BlockdevCreateOptionsVdi *create_options = NULL;
146
+ uint64_t block_size = DEFAULT_CLUSTER_SIZE;
147
+ Visitor *v;
148
+ Error *local_err = NULL;
149
+ int ret;
150
+
151
+ /* Since CONFIG_VDI_BLOCK_SIZE is disabled by default,
152
+ * cluster-size is not part of the QAPI schema; therefore we have
153
+ * to parse it before creating the QAPI object. */
154
+#if defined(CONFIG_VDI_BLOCK_SIZE)
155
+ block_size = qemu_opt_get_size_del(opts,
156
+ BLOCK_OPT_CLUSTER_SIZE,
157
+ DEFAULT_CLUSTER_SIZE);
158
+ if (block_size < BDRV_SECTOR_SIZE || block_size > UINT32_MAX ||
159
+ !is_power_of_2(block_size))
160
+ {
161
+ error_setg(errp, "Invalid cluster size");
162
+ ret = -EINVAL;
163
+ goto done;
164
+ }
165
+#endif
166
+
167
+ qdict = qemu_opts_to_qdict_filtered(opts, NULL, &vdi_create_opts, true);
168
+
169
+ qdict_put_str(qdict, "file", ""); /* FIXME */
170
+
171
+ /* Get the QAPI object */
172
+ v = qobject_input_visitor_new_keyval(QOBJECT(qdict));
173
+ visit_type_BlockdevCreateOptionsVdi(v, NULL, &create_options, &local_err);
174
+ visit_free(v);
175
+
176
+ if (local_err) {
177
+ error_propagate(errp, local_err);
178
+ ret = -EINVAL;
179
+ goto done;
180
+ }
181
+
182
+ create_options->size = ROUND_UP(create_options->size, BDRV_SECTOR_SIZE);
183
+
184
+ ret = vdi_co_do_create(filename, opts, create_options, block_size, errp);
185
+done:
186
+ QDECREF(qdict);
187
+ qapi_free_BlockdevCreateOptionsVdi(create_options);
188
+ return ret;
189
+}
190
+
191
static void vdi_close(BlockDriverState *bs)
192
{
193
BDRVVdiState *s = bs->opaque;
194
--
2.13.6

From: Max Reitz <mreitz@redhat.com>

Signed-off-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/vdi.c | 46 ++++++++++++++++++++++++++++------------------
 1 file changed, 28 insertions(+), 18 deletions(-)

diff --git a/block/vdi.c b/block/vdi.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/block/vdi.c
12
+++ b/block/vdi.c
13
@@ -XXX,XX +XXX,XX @@ nonallocating_write:
14
return ret;
15
}
16
17
-static int coroutine_fn vdi_co_do_create(const char *filename,
18
- QemuOpts *file_opts,
19
- BlockdevCreateOptionsVdi *vdi_opts,
20
+static int coroutine_fn vdi_co_do_create(BlockdevCreateOptionsVdi *vdi_opts,
21
size_t block_size, Error **errp)
22
{
23
int ret = 0;
24
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_do_create(const char *filename,
25
size_t i;
26
size_t bmap_size;
27
int64_t offset = 0;
28
- Error *local_err = NULL;
29
+ BlockDriverState *bs_file = NULL;
30
BlockBackend *blk = NULL;
31
uint32_t *bmap = NULL;
32
33
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_do_create(const char *filename,
34
goto exit;
35
}
36
37
- ret = bdrv_create_file(filename, file_opts, &local_err);
38
- if (ret < 0) {
39
- error_propagate(errp, local_err);
40
+ bs_file = bdrv_open_blockdev_ref(vdi_opts->file, errp);
41
+ if (!bs_file) {
42
+ ret = -EIO;
43
goto exit;
44
}
45
46
- blk = blk_new_open(filename, NULL, NULL,
47
- BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
48
- &local_err);
49
- if (blk == NULL) {
50
- error_propagate(errp, local_err);
51
- ret = -EIO;
52
+ blk = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);
53
+ ret = blk_insert_bs(blk, bs_file, errp);
54
+ if (ret < 0) {
55
goto exit;
56
}
57
58
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_do_create(const char *filename,
59
vdi_header_to_le(&header);
60
ret = blk_pwrite(blk, offset, &header, sizeof(header), 0);
61
if (ret < 0) {
62
- error_setg(errp, "Error writing header to %s", filename);
63
+ error_setg(errp, "Error writing header");
64
goto exit;
65
}
66
offset += sizeof(header);
67
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_do_create(const char *filename,
68
}
69
ret = blk_pwrite(blk, offset, bmap, bmap_size, 0);
70
if (ret < 0) {
71
- error_setg(errp, "Error writing bmap to %s", filename);
72
+ error_setg(errp, "Error writing bmap");
73
goto exit;
74
}
75
offset += bmap_size;
76
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_do_create(const char *filename,
77
ret = blk_truncate(blk, offset + blocks * block_size,
78
PREALLOC_MODE_OFF, errp);
79
if (ret < 0) {
80
- error_prepend(errp, "Failed to statically allocate %s", filename);
81
+ error_prepend(errp, "Failed to statically allocate file");
82
goto exit;
83
}
84
}
85
86
exit:
87
blk_unref(blk);
88
+ bdrv_unref(bs_file);
89
g_free(bmap);
90
return ret;
91
}
92
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts,
93
{
94
QDict *qdict = NULL;
95
BlockdevCreateOptionsVdi *create_options = NULL;
96
+ BlockDriverState *bs_file = NULL;
97
uint64_t block_size = DEFAULT_CLUSTER_SIZE;
98
Visitor *v;
99
Error *local_err = NULL;
100
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts,
101
102
qdict = qemu_opts_to_qdict_filtered(opts, NULL, &vdi_create_opts, true);
103
104
- qdict_put_str(qdict, "file", ""); /* FIXME */
105
+ ret = bdrv_create_file(filename, opts, errp);
106
+ if (ret < 0) {
107
+ goto done;
108
+ }
109
+
110
+ bs_file = bdrv_open(filename, NULL, NULL,
111
+ BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp);
112
+ if (!bs_file) {
113
+ ret = -EIO;
114
+ goto done;
115
+ }
116
+
117
+ qdict_put_str(qdict, "file", bs_file->node_name);
118
119
/* Get the QAPI object */
120
v = qobject_input_visitor_new_keyval(QOBJECT(qdict));
121
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts,
122
123
create_options->size = ROUND_UP(create_options->size, BDRV_SECTOR_SIZE);
124
125
- ret = vdi_co_do_create(filename, opts, create_options, block_size, errp);
126
+ ret = vdi_co_do_create(create_options, block_size, errp);
127
done:
128
QDECREF(qdict);
129
qapi_free_BlockdevCreateOptionsVdi(create_options);
130
+ bdrv_unref(bs_file);
131
return ret;
132
}
133
134
--
2.13.6

From: Max Reitz <mreitz@redhat.com>

Signed-off-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 qapi/block-core.json | 2 +-
 block/vdi.c | 24 +++++++++++++++++++-----
 2 files changed, 20 insertions(+), 6 deletions(-)

diff --git a/qapi/block-core.json b/qapi/block-core.json
11
index XXXXXXX..XXXXXXX 100644
12
--- a/qapi/block-core.json
13
+++ b/qapi/block-core.json
14
@@ -XXX,XX +XXX,XX @@
15
'sheepdog': 'BlockdevCreateOptionsSheepdog',
16
'ssh': 'BlockdevCreateOptionsSsh',
17
'throttle': 'BlockdevCreateNotSupported',
18
- 'vdi': 'BlockdevCreateNotSupported',
19
+ 'vdi': 'BlockdevCreateOptionsVdi',
20
'vhdx': 'BlockdevCreateNotSupported',
21
'vmdk': 'BlockdevCreateNotSupported',
22
'vpc': 'BlockdevCreateNotSupported',
23
diff --git a/block/vdi.c b/block/vdi.c
24
index XXXXXXX..XXXXXXX 100644
25
--- a/block/vdi.c
26
+++ b/block/vdi.c
27
@@ -XXX,XX +XXX,XX @@ nonallocating_write:
28
return ret;
29
}
30
31
-static int coroutine_fn vdi_co_do_create(BlockdevCreateOptionsVdi *vdi_opts,
32
+static int coroutine_fn vdi_co_do_create(BlockdevCreateOptions *create_options,
33
size_t block_size, Error **errp)
34
{
35
+ BlockdevCreateOptionsVdi *vdi_opts;
36
int ret = 0;
37
uint64_t bytes = 0;
38
uint32_t blocks;
39
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_do_create(BlockdevCreateOptionsVdi *vdi_opts,
40
BlockBackend *blk = NULL;
41
uint32_t *bmap = NULL;
42
43
+ assert(create_options->driver == BLOCKDEV_DRIVER_VDI);
44
+ vdi_opts = &create_options->u.vdi;
45
+
46
logout("\n");
47
48
/* Read out options. */
49
@@ -XXX,XX +XXX,XX @@ exit:
50
return ret;
51
}
52
53
+static int coroutine_fn vdi_co_create(BlockdevCreateOptions *create_options,
54
+ Error **errp)
55
+{
56
+ return vdi_co_do_create(create_options, DEFAULT_CLUSTER_SIZE, errp);
57
+}
58
+
59
static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts,
60
Error **errp)
61
{
62
QDict *qdict = NULL;
63
- BlockdevCreateOptionsVdi *create_options = NULL;
64
+ BlockdevCreateOptions *create_options = NULL;
65
BlockDriverState *bs_file = NULL;
66
uint64_t block_size = DEFAULT_CLUSTER_SIZE;
67
Visitor *v;
68
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts,
69
goto done;
70
}
71
72
+ qdict_put_str(qdict, "driver", "vdi");
73
qdict_put_str(qdict, "file", bs_file->node_name);
74
75
/* Get the QAPI object */
76
v = qobject_input_visitor_new_keyval(QOBJECT(qdict));
77
- visit_type_BlockdevCreateOptionsVdi(v, NULL, &create_options, &local_err);
78
+ visit_type_BlockdevCreateOptions(v, NULL, &create_options, &local_err);
79
visit_free(v);
80
81
if (local_err) {
82
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts,
83
goto done;
84
}
85
86
- create_options->size = ROUND_UP(create_options->size, BDRV_SECTOR_SIZE);
87
+ assert(create_options->driver == BLOCKDEV_DRIVER_VDI);
88
+ create_options->u.vdi.size = ROUND_UP(create_options->u.vdi.size,
89
+ BDRV_SECTOR_SIZE);
90
91
ret = vdi_co_do_create(create_options, block_size, errp);
92
done:
93
QDECREF(qdict);
94
- qapi_free_BlockdevCreateOptionsVdi(create_options);
95
+ qapi_free_BlockdevCreateOptions(create_options);
96
bdrv_unref(bs_file);
97
return ret;
98
}
99
@@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_vdi = {
100
.bdrv_reopen_prepare = vdi_reopen_prepare,
101
.bdrv_child_perm = bdrv_format_default_perms,
102
.bdrv_co_create_opts = vdi_co_create_opts,
103
+ .bdrv_co_create = vdi_co_create,
104
.bdrv_has_zero_init = bdrv_has_zero_init_1,
105
.bdrv_co_block_status = vdi_co_block_status,
106
.bdrv_make_empty = vdi_make_empty,
107
--
2.13.6

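With the VDI creation path now driven by BlockdevCreateOptions, the same
kind of QMP request works for vdi. A hypothetical invocation, reusing a
protocol node 'proto0' added as in the LUKS sketch above (the size is
invented):

  { "execute": "x-blockdev-create",
    "arguments": { "driver": "vdi", "file": "proto0",
                   "size": 134217728, "static": false } }

Leaving out 'static' (or passing false) creates a dynamically allocated
image; 'static': true is only accepted in builds with
CONFIG_VDI_STATIC_IMAGE.
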
This adds the .bdrv_co_create driver callback to parallels, which
enables image creation over QMP.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
---
 qapi/block-core.json | 18 ++++-
 block/parallels.c | 199 ++++++++++++++++++++++++++++++++++++++-------------
 2 files changed, 168 insertions(+), 49 deletions(-)

diff --git a/qapi/block-core.json b/qapi/block-core.json
13
index XXXXXXX..XXXXXXX 100644
14
--- a/qapi/block-core.json
15
+++ b/qapi/block-core.json
16
@@ -XXX,XX +XXX,XX @@
17
'size': 'size' } }
18
19
##
20
+# @BlockdevCreateOptionsParallels:
21
+#
22
+# Driver specific image creation options for parallels.
23
+#
24
+# @file Node to create the image format on
25
+# @size Size of the virtual disk in bytes
26
+# @cluster-size Cluster size in bytes (default: 1 MB)
27
+#
28
+# Since: 2.12
29
+##
30
+{ 'struct': 'BlockdevCreateOptionsParallels',
31
+ 'data': { 'file': 'BlockdevRef',
32
+ 'size': 'size',
33
+ '*cluster-size': 'size' } }
34
+
35
+##
36
# @BlockdevQcow2Version:
37
#
38
# @v2: The original QCOW2 format as introduced in qemu 0.10 (version 2)
39
@@ -XXX,XX +XXX,XX @@
40
'null-aio': 'BlockdevCreateNotSupported',
41
'null-co': 'BlockdevCreateNotSupported',
42
'nvme': 'BlockdevCreateNotSupported',
43
- 'parallels': 'BlockdevCreateNotSupported',
44
+ 'parallels': 'BlockdevCreateOptionsParallels',
45
'qcow2': 'BlockdevCreateOptionsQcow2',
46
'qcow': 'BlockdevCreateNotSupported',
47
'qed': 'BlockdevCreateNotSupported',
48
diff --git a/block/parallels.c b/block/parallels.c
49
index XXXXXXX..XXXXXXX 100644
50
--- a/block/parallels.c
51
+++ b/block/parallels.c
52
@@ -XXX,XX +XXX,XX @@
53
#include "sysemu/block-backend.h"
54
#include "qemu/module.h"
55
#include "qemu/option.h"
56
+#include "qapi/qmp/qdict.h"
57
+#include "qapi/qobject-input-visitor.h"
58
+#include "qapi/qapi-visit-block-core.h"
59
#include "qemu/bswap.h"
60
#include "qemu/bitmap.h"
61
#include "migration/blocker.h"
62
@@ -XXX,XX +XXX,XX @@ static QemuOptsList parallels_runtime_opts = {
63
},
64
};
65
66
+static QemuOptsList parallels_create_opts = {
67
+ .name = "parallels-create-opts",
68
+ .head = QTAILQ_HEAD_INITIALIZER(parallels_create_opts.head),
69
+ .desc = {
70
+ {
71
+ .name = BLOCK_OPT_SIZE,
72
+ .type = QEMU_OPT_SIZE,
73
+ .help = "Virtual disk size",
74
+ },
75
+ {
76
+ .name = BLOCK_OPT_CLUSTER_SIZE,
77
+ .type = QEMU_OPT_SIZE,
78
+ .help = "Parallels image cluster size",
79
+ .def_value_str = stringify(DEFAULT_CLUSTER_SIZE),
80
+ },
81
+ { /* end of list */ }
82
+ }
83
+};
84
+
85
86
static int64_t bat2sect(BDRVParallelsState *s, uint32_t idx)
87
{
88
@@ -XXX,XX +XXX,XX @@ out:
89
}
90
91
92
-static int coroutine_fn parallels_co_create_opts(const char *filename,
93
- QemuOpts *opts,
94
- Error **errp)
95
+static int coroutine_fn parallels_co_create(BlockdevCreateOptions* opts,
96
+ Error **errp)
97
{
98
+ BlockdevCreateOptionsParallels *parallels_opts;
99
+ BlockDriverState *bs;
100
+ BlockBackend *blk;
101
int64_t total_size, cl_size;
102
- uint8_t tmp[BDRV_SECTOR_SIZE];
103
- Error *local_err = NULL;
104
- BlockBackend *file;
105
uint32_t bat_entries, bat_sectors;
106
ParallelsHeader header;
107
+ uint8_t tmp[BDRV_SECTOR_SIZE];
108
int ret;
109
110
- total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
111
- BDRV_SECTOR_SIZE);
112
- cl_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_CLUSTER_SIZE,
113
- DEFAULT_CLUSTER_SIZE), BDRV_SECTOR_SIZE);
114
+ assert(opts->driver == BLOCKDEV_DRIVER_PARALLELS);
115
+ parallels_opts = &opts->u.parallels;
116
+
117
+ /* Sanity checks */
118
+ total_size = parallels_opts->size;
119
+
120
+ if (parallels_opts->has_cluster_size) {
121
+ cl_size = parallels_opts->cluster_size;
122
+ } else {
123
+ cl_size = DEFAULT_CLUSTER_SIZE;
124
+ }
125
+
126
if (total_size >= MAX_PARALLELS_IMAGE_FACTOR * cl_size) {
127
- error_propagate(errp, local_err);
128
+ error_setg(errp, "Image size is too large for this cluster size");
129
return -E2BIG;
130
}
131
132
- ret = bdrv_create_file(filename, opts, &local_err);
133
- if (ret < 0) {
134
- error_propagate(errp, local_err);
135
- return ret;
136
+ if (!QEMU_IS_ALIGNED(total_size, BDRV_SECTOR_SIZE)) {
137
+ error_setg(errp, "Image size must be a multiple of 512 bytes");
138
+ return -EINVAL;
139
}
140
141
- file = blk_new_open(filename, NULL, NULL,
142
- BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
143
- &local_err);
144
- if (file == NULL) {
145
- error_propagate(errp, local_err);
146
+ if (!QEMU_IS_ALIGNED(cl_size, BDRV_SECTOR_SIZE)) {
147
+ error_setg(errp, "Cluster size must be a multiple of 512 bytes");
148
+ return -EINVAL;
149
+ }
150
+
151
+ /* Create BlockBackend to write to the image */
152
+ bs = bdrv_open_blockdev_ref(parallels_opts->file, errp);
153
+ if (bs == NULL) {
154
return -EIO;
155
}
156
157
- blk_set_allow_write_beyond_eof(file, true);
158
+ blk = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);
159
+ ret = blk_insert_bs(blk, bs, errp);
160
+ if (ret < 0) {
161
+ goto out;
162
+ }
163
+ blk_set_allow_write_beyond_eof(blk, true);
164
165
- ret = blk_truncate(file, 0, PREALLOC_MODE_OFF, errp);
166
+ /* Create image format */
167
+ ret = blk_truncate(blk, 0, PREALLOC_MODE_OFF, errp);
168
if (ret < 0) {
169
- goto exit;
170
+ goto out;
171
}
172
173
bat_entries = DIV_ROUND_UP(total_size, cl_size);
174
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn parallels_co_create_opts(const char *filename,
175
memset(tmp, 0, sizeof(tmp));
176
memcpy(tmp, &header, sizeof(header));
177
178
- ret = blk_pwrite(file, 0, tmp, BDRV_SECTOR_SIZE, 0);
179
+ ret = blk_pwrite(blk, 0, tmp, BDRV_SECTOR_SIZE, 0);
180
if (ret < 0) {
181
goto exit;
182
}
183
- ret = blk_pwrite_zeroes(file, BDRV_SECTOR_SIZE,
184
+ ret = blk_pwrite_zeroes(blk, BDRV_SECTOR_SIZE,
185
(bat_sectors - 1) << BDRV_SECTOR_BITS, 0);
186
if (ret < 0) {
187
goto exit;
188
}
189
- ret = 0;
190
191
-done:
192
- blk_unref(file);
193
+ ret = 0;
194
+out:
195
+ blk_unref(blk);
196
+ bdrv_unref(bs);
197
return ret;
198
199
exit:
200
error_setg_errno(errp, -ret, "Failed to create Parallels image");
201
- goto done;
202
+ goto out;
203
+}
204
+
205
+static int coroutine_fn parallels_co_create_opts(const char *filename,
206
+ QemuOpts *opts,
207
+ Error **errp)
208
+{
209
+ BlockdevCreateOptions *create_options = NULL;
210
+ Error *local_err = NULL;
211
+ BlockDriverState *bs = NULL;
212
+ QDict *qdict = NULL;
213
+ QObject *qobj;
214
+ Visitor *v;
215
+ int ret;
216
+
217
+ static const QDictRenames opt_renames[] = {
218
+ { BLOCK_OPT_CLUSTER_SIZE, "cluster-size" },
219
+ { NULL, NULL },
220
+ };
221
+
222
+ /* Parse options and convert legacy syntax */
223
+ qdict = qemu_opts_to_qdict_filtered(opts, NULL, &parallels_create_opts,
224
+ true);
225
+
226
+ if (!qdict_rename_keys(qdict, opt_renames, errp)) {
227
+ ret = -EINVAL;
228
+ goto done;
229
+ }
230
+
231
+ /* Create and open the file (protocol layer) */
232
+ ret = bdrv_create_file(filename, opts, &local_err);
233
+ if (ret < 0) {
234
+ error_propagate(errp, local_err);
235
+ goto done;
236
+ }
237
+
238
+ bs = bdrv_open(filename, NULL, NULL,
239
+ BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp);
240
+ if (bs == NULL) {
241
+ ret = -EIO;
242
+ goto done;
243
+ }
244
+
245
+ /* Now get the QAPI type BlockdevCreateOptions */
246
+ qdict_put_str(qdict, "driver", "parallels");
247
+ qdict_put_str(qdict, "file", bs->node_name);
248
+
249
+ qobj = qdict_crumple(qdict, errp);
250
+ QDECREF(qdict);
251
+ qdict = qobject_to_qdict(qobj);
252
+ if (qdict == NULL) {
253
+ ret = -EINVAL;
254
+ goto done;
255
+ }
256
+
257
+ v = qobject_input_visitor_new_keyval(QOBJECT(qdict));
258
+ visit_type_BlockdevCreateOptions(v, NULL, &create_options, &local_err);
259
+ visit_free(v);
260
+
261
+ if (local_err) {
262
+ error_propagate(errp, local_err);
263
+ ret = -EINVAL;
264
+ goto done;
265
+ }
266
+
267
+ /* Silently round up sizes */
268
+ create_options->u.parallels.size =
269
+ ROUND_UP(create_options->u.parallels.size, BDRV_SECTOR_SIZE);
270
+ create_options->u.parallels.cluster_size =
271
+ ROUND_UP(create_options->u.parallels.cluster_size, BDRV_SECTOR_SIZE);
272
+
273
+ /* Create the Parallels image (format layer) */
274
+ ret = parallels_co_create(create_options, errp);
275
+ if (ret < 0) {
276
+ goto done;
277
+ }
278
+ ret = 0;
279
+
280
+done:
281
+ QDECREF(qdict);
282
+ bdrv_unref(bs);
283
+ qapi_free_BlockdevCreateOptions(create_options);
284
+ return ret;
285
}
286
287
288
@@ -XXX,XX +XXX,XX @@ static void parallels_close(BlockDriverState *bs)
289
error_free(s->migration_blocker);
290
}
291
292
-static QemuOptsList parallels_create_opts = {
293
- .name = "parallels-create-opts",
294
- .head = QTAILQ_HEAD_INITIALIZER(parallels_create_opts.head),
295
- .desc = {
296
- {
297
- .name = BLOCK_OPT_SIZE,
298
- .type = QEMU_OPT_SIZE,
299
- .help = "Virtual disk size",
300
- },
301
- {
302
- .name = BLOCK_OPT_CLUSTER_SIZE,
303
- .type = QEMU_OPT_SIZE,
304
- .help = "Parallels image cluster size",
305
- .def_value_str = stringify(DEFAULT_CLUSTER_SIZE),
306
- },
307
- { /* end of list */ }
308
- }
309
-};
310
-
311
static BlockDriver bdrv_parallels = {
312
.format_name    = "parallels",
313
.instance_size    = sizeof(BDRVParallelsState),
314
@@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_parallels = {
315
.bdrv_co_readv = parallels_co_readv,
316
.bdrv_co_writev = parallels_co_writev,
317
.supports_backing = true,
318
+ .bdrv_co_create = parallels_co_create,
319
.bdrv_co_create_opts = parallels_co_create_opts,
320
.bdrv_co_check = parallels_co_check,
321
.create_opts = &parallels_create_opts,
322
--
2.13.6

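For parallels, the new QAPI type exposes 'size' and 'cluster-size'. A
hypothetical request (node name and numbers invented; unlike the legacy
path, the QMP path does not round, so both values must already be
multiples of 512 bytes):

  { "execute": "x-blockdev-create",
    "arguments": { "driver": "parallels", "file": "proto0",
                   "size": 268435456, "cluster-size": 1048576 } }
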
Originally we added parallels as a read-only format to qemu-iotests
where we did just some tests with a binary image. Since then, write and
image creation support has been added to the driver, so we can now
enable it in _supported_fmt generic.

The driver doesn't support migration yet, though, so we need to add it
to the list of exceptions in 181.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
---
 tests/qemu-iotests/181 | 2 +-
 tests/qemu-iotests/check | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/tests/qemu-iotests/181 b/tests/qemu-iotests/181
18
index XXXXXXX..XXXXXXX 100755
19
--- a/tests/qemu-iotests/181
20
+++ b/tests/qemu-iotests/181
21
@@ -XXX,XX +XXX,XX @@ trap "_cleanup; exit \$status" 0 1 2 3 15
22
23
_supported_fmt generic
24
# Formats that do not support live migration
25
-_unsupported_fmt qcow vdi vhdx vmdk vpc vvfat
26
+_unsupported_fmt qcow vdi vhdx vmdk vpc vvfat parallels
27
_supported_proto generic
28
_supported_os Linux
29
30
diff --git a/tests/qemu-iotests/check b/tests/qemu-iotests/check
31
index XXXXXXX..XXXXXXX 100755
32
--- a/tests/qemu-iotests/check
33
+++ b/tests/qemu-iotests/check
34
@@ -XXX,XX +XXX,XX @@ testlist options
35
36
-parallels)
37
IMGFMT=parallels
38
- IMGFMT_GENERIC=false
39
xpand=false
40
;;
41
42
--
2.13.6

This adds the .bdrv_co_create driver callback to qcow, which
enables image creation over QMP.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
---
 qapi/block-core.json | 21 +++++-
 block/qcow.c | 196 ++++++++++++++++++++++++++++++++++-----------------
 2 files changed, 150 insertions(+), 67 deletions(-)

diff --git a/qapi/block-core.json b/qapi/block-core.json
13
index XXXXXXX..XXXXXXX 100644
14
--- a/qapi/block-core.json
15
+++ b/qapi/block-core.json
16
@@ -XXX,XX +XXX,XX @@
17
'*cluster-size': 'size' } }
18
19
##
20
+# @BlockdevCreateOptionsQcow:
21
+#
22
+# Driver specific image creation options for qcow.
23
+#
24
+# @file Node to create the image format on
25
+# @size Size of the virtual disk in bytes
26
+# @backing-file File name of the backing file if a backing file
27
+# should be used
28
+# @encrypt Encryption options if the image should be encrypted
29
+#
30
+# Since: 2.12
31
+##
32
+{ 'struct': 'BlockdevCreateOptionsQcow',
33
+ 'data': { 'file': 'BlockdevRef',
34
+ 'size': 'size',
35
+ '*backing-file': 'str',
36
+ '*encrypt': 'QCryptoBlockCreateOptions' } }
37
+
38
+##
39
# @BlockdevQcow2Version:
40
#
41
# @v2: The original QCOW2 format as introduced in qemu 0.10 (version 2)
42
@@ -XXX,XX +XXX,XX @@
43
'null-co': 'BlockdevCreateNotSupported',
44
'nvme': 'BlockdevCreateNotSupported',
45
'parallels': 'BlockdevCreateOptionsParallels',
46
+ 'qcow': 'BlockdevCreateOptionsQcow',
47
'qcow2': 'BlockdevCreateOptionsQcow2',
48
- 'qcow': 'BlockdevCreateNotSupported',
49
'qed': 'BlockdevCreateNotSupported',
50
'quorum': 'BlockdevCreateNotSupported',
51
'raw': 'BlockdevCreateNotSupported',
52
diff --git a/block/qcow.c b/block/qcow.c
53
index XXXXXXX..XXXXXXX 100644
54
--- a/block/qcow.c
55
+++ b/block/qcow.c
56
@@ -XXX,XX +XXX,XX @@
57
#include <zlib.h>
58
#include "qapi/qmp/qdict.h"
59
#include "qapi/qmp/qstring.h"
60
+#include "qapi/qobject-input-visitor.h"
61
+#include "qapi/qapi-visit-block-core.h"
62
#include "crypto/block.h"
63
#include "migration/blocker.h"
64
#include "block/crypto.h"
65
@@ -XXX,XX +XXX,XX @@ typedef struct BDRVQcowState {
66
Error *migration_blocker;
67
} BDRVQcowState;
68
69
+static QemuOptsList qcow_create_opts;
70
+
71
static int decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset);
72
73
static int qcow_probe(const uint8_t *buf, int buf_size, const char *filename)
74
@@ -XXX,XX +XXX,XX @@ static void qcow_close(BlockDriverState *bs)
75
error_free(s->migration_blocker);
76
}
77
78
-static int coroutine_fn qcow_co_create_opts(const char *filename, QemuOpts *opts,
79
- Error **errp)
80
+static int coroutine_fn qcow_co_create(BlockdevCreateOptions *opts,
81
+ Error **errp)
82
{
83
+ BlockdevCreateOptionsQcow *qcow_opts;
84
int header_size, backing_filename_len, l1_size, shift, i;
85
QCowHeader header;
86
uint8_t *tmp;
87
int64_t total_size = 0;
88
- char *backing_file = NULL;
89
- Error *local_err = NULL;
90
int ret;
91
+ BlockDriverState *bs;
92
BlockBackend *qcow_blk;
93
- char *encryptfmt = NULL;
94
- QDict *options;
95
- QDict *encryptopts = NULL;
96
- QCryptoBlockCreateOptions *crypto_opts = NULL;
97
QCryptoBlock *crypto = NULL;
98
99
- /* Read out options */
100
- total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
101
- BDRV_SECTOR_SIZE);
102
+ assert(opts->driver == BLOCKDEV_DRIVER_QCOW);
103
+ qcow_opts = &opts->u.qcow;
104
+
105
+ /* Sanity checks */
106
+ total_size = qcow_opts->size;
107
if (total_size == 0) {
108
error_setg(errp, "Image size is too small, cannot be zero length");
109
- ret = -EINVAL;
110
- goto cleanup;
111
+ return -EINVAL;
112
}
113
114
- backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
115
- encryptfmt = qemu_opt_get_del(opts, BLOCK_OPT_ENCRYPT_FORMAT);
116
- if (encryptfmt) {
117
- if (qemu_opt_get(opts, BLOCK_OPT_ENCRYPT)) {
118
- error_setg(errp, "Options " BLOCK_OPT_ENCRYPT " and "
119
- BLOCK_OPT_ENCRYPT_FORMAT " are mutually exclusive");
120
- ret = -EINVAL;
121
- goto cleanup;
122
- }
123
- } else if (qemu_opt_get_bool_del(opts, BLOCK_OPT_ENCRYPT, false)) {
124
- encryptfmt = g_strdup("aes");
125
+ if (qcow_opts->has_encrypt &&
126
+ qcow_opts->encrypt->format != Q_CRYPTO_BLOCK_FORMAT_QCOW)
127
+ {
128
+ error_setg(errp, "Unsupported encryption format");
129
+ return -EINVAL;
130
}
131
132
- ret = bdrv_create_file(filename, opts, &local_err);
133
- if (ret < 0) {
134
- error_propagate(errp, local_err);
135
- goto cleanup;
136
+ /* Create BlockBackend to write to the image */
137
+ bs = bdrv_open_blockdev_ref(qcow_opts->file, errp);
138
+ if (bs == NULL) {
139
+ return -EIO;
140
}
141
142
- qcow_blk = blk_new_open(filename, NULL, NULL,
143
- BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
144
- &local_err);
145
- if (qcow_blk == NULL) {
146
- error_propagate(errp, local_err);
147
- ret = -EIO;
148
- goto cleanup;
149
+ qcow_blk = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);
150
+ ret = blk_insert_bs(qcow_blk, bs, errp);
151
+ if (ret < 0) {
152
+ goto exit;
153
}
154
-
155
blk_set_allow_write_beyond_eof(qcow_blk, true);
156
157
+ /* Create image format */
158
ret = blk_truncate(qcow_blk, 0, PREALLOC_MODE_OFF, errp);
159
if (ret < 0) {
160
goto exit;
161
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn qcow_co_create_opts(const char *filename, QemuOpts *opts
162
header.size = cpu_to_be64(total_size);
163
header_size = sizeof(header);
164
backing_filename_len = 0;
165
- if (backing_file) {
166
- if (strcmp(backing_file, "fat:")) {
167
+ if (qcow_opts->has_backing_file) {
168
+ if (strcmp(qcow_opts->backing_file, "fat:")) {
169
header.backing_file_offset = cpu_to_be64(header_size);
170
- backing_filename_len = strlen(backing_file);
171
+ backing_filename_len = strlen(qcow_opts->backing_file);
172
header.backing_file_size = cpu_to_be32(backing_filename_len);
173
header_size += backing_filename_len;
174
} else {
175
/* special backing file for vvfat */
176
- g_free(backing_file);
177
- backing_file = NULL;
178
+ qcow_opts->has_backing_file = false;
179
}
180
header.cluster_bits = 9; /* 512 byte cluster to avoid copying
181
unmodified sectors */
182
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn qcow_co_create_opts(const char *filename, QemuOpts *opts
183
184
header.l1_table_offset = cpu_to_be64(header_size);
185
186
- options = qemu_opts_to_qdict(opts, NULL);
187
- qdict_extract_subqdict(options, &encryptopts, "encrypt.");
188
- QDECREF(options);
189
- if (encryptfmt) {
190
- if (!g_str_equal(encryptfmt, "aes")) {
191
- error_setg(errp, "Unknown encryption format '%s', expected 'aes'",
192
- encryptfmt);
193
- ret = -EINVAL;
194
- goto exit;
195
- }
196
+ if (qcow_opts->has_encrypt) {
197
header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES);
198
199
- crypto_opts = block_crypto_create_opts_init(
200
- Q_CRYPTO_BLOCK_FORMAT_QCOW, encryptopts, errp);
201
- if (!crypto_opts) {
202
- ret = -EINVAL;
203
- goto exit;
204
- }
205
-
206
- crypto = qcrypto_block_create(crypto_opts, "encrypt.",
207
+ crypto = qcrypto_block_create(qcow_opts->encrypt, "encrypt.",
208
NULL, NULL, NULL, errp);
209
if (!crypto) {
210
ret = -EINVAL;
211
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn qcow_co_create_opts(const char *filename, QemuOpts *opts
212
goto exit;
213
}
214
215
- if (backing_file) {
216
+ if (qcow_opts->has_backing_file) {
217
ret = blk_pwrite(qcow_blk, sizeof(header),
218
- backing_file, backing_filename_len, 0);
219
+ qcow_opts->backing_file, backing_filename_len, 0);
220
if (ret != backing_filename_len) {
221
goto exit;
222
}
223
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn qcow_co_create_opts(const char *filename, QemuOpts *opts
224
ret = 0;
225
exit:
226
blk_unref(qcow_blk);
227
-cleanup:
228
- QDECREF(encryptopts);
229
- g_free(encryptfmt);
230
qcrypto_block_free(crypto);
231
- qapi_free_QCryptoBlockCreateOptions(crypto_opts);
232
- g_free(backing_file);
233
+ return ret;
234
+}
235
+
236
+static int coroutine_fn qcow_co_create_opts(const char *filename,
237
+ QemuOpts *opts, Error **errp)
238
+{
239
+ BlockdevCreateOptions *create_options = NULL;
240
+ BlockDriverState *bs = NULL;
241
+ QDict *qdict = NULL;
242
+ QObject *qobj;
243
+ Visitor *v;
244
+ const char *val;
245
+ Error *local_err = NULL;
246
+ int ret;
247
+
248
+ static const QDictRenames opt_renames[] = {
249
+ { BLOCK_OPT_BACKING_FILE, "backing-file" },
250
+ { BLOCK_OPT_ENCRYPT, BLOCK_OPT_ENCRYPT_FORMAT },
251
+ { NULL, NULL },
252
+ };
253
+
254
+ /* Parse options and convert legacy syntax */
255
+ qdict = qemu_opts_to_qdict_filtered(opts, NULL, &qcow_create_opts, true);
256
+
257
+ val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT);
258
+ if (val && !strcmp(val, "on")) {
259
+ qdict_put_str(qdict, BLOCK_OPT_ENCRYPT, "qcow");
260
+ } else if (val && !strcmp(val, "off")) {
261
+ qdict_del(qdict, BLOCK_OPT_ENCRYPT);
262
+ }
263
+
264
+ val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT);
265
+ if (val && !strcmp(val, "aes")) {
266
+ qdict_put_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT, "qcow");
267
+ }
268
+
269
+ if (!qdict_rename_keys(qdict, opt_renames, errp)) {
270
+ ret = -EINVAL;
271
+ goto fail;
272
+ }
273
+
274
+ /* Create and open the file (protocol layer) */
275
+ ret = bdrv_create_file(filename, opts, &local_err);
276
+ if (ret < 0) {
277
+ error_propagate(errp, local_err);
278
+ goto fail;
279
+ }
280
+
281
+ bs = bdrv_open(filename, NULL, NULL,
282
+ BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp);
283
+ if (bs == NULL) {
284
+ ret = -EIO;
285
+ goto fail;
286
+ }
287
+
288
+ /* Now get the QAPI type BlockdevCreateOptions */
289
+ qdict_put_str(qdict, "driver", "qcow");
290
+ qdict_put_str(qdict, "file", bs->node_name);
291
+
292
+ qobj = qdict_crumple(qdict, errp);
293
+ QDECREF(qdict);
294
+ qdict = qobject_to_qdict(qobj);
295
+ if (qdict == NULL) {
296
+ ret = -EINVAL;
297
+ goto fail;
298
+ }
299
+
300
+ v = qobject_input_visitor_new_keyval(QOBJECT(qdict));
301
+ visit_type_BlockdevCreateOptions(v, NULL, &create_options, &local_err);
302
+ visit_free(v);
303
+
304
+ if (local_err) {
305
+ error_propagate(errp, local_err);
306
+ ret = -EINVAL;
307
+ goto fail;
308
+ }
309
+
310
+ /* Silently round up size */
311
+ assert(create_options->driver == BLOCKDEV_DRIVER_QCOW);
312
+ create_options->u.qcow.size =
313
+ ROUND_UP(create_options->u.qcow.size, BDRV_SECTOR_SIZE);
314
+
315
+ /* Create the qcow image (format layer) */
316
+ ret = qcow_co_create(create_options, errp);
317
+ if (ret < 0) {
318
+ goto fail;
319
+ }
320
+
321
+ ret = 0;
322
+fail:
323
+ QDECREF(qdict);
324
+ bdrv_unref(bs);
325
+ qapi_free_BlockdevCreateOptions(create_options);
326
return ret;
327
}
328
329
@@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_qcow = {
330
.bdrv_close        = qcow_close,
331
.bdrv_child_perm = bdrv_format_default_perms,
332
.bdrv_reopen_prepare = qcow_reopen_prepare,
333
+ .bdrv_co_create = qcow_co_create,
334
.bdrv_co_create_opts = qcow_co_create_opts,
335
.bdrv_has_zero_init = bdrv_has_zero_init_1,
336
.supports_backing = true,
337
--
2.13.6

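A corresponding qcow request might look like this sketch ('proto0' and
'base.qcow' are invented; 'backing-file' and 'encrypt' are optional):

  { "execute": "x-blockdev-create",
    "arguments": { "driver": "qcow", "file": "proto0",
                   "size": 67108864,
                   "backing-file": "base.qcow" } }

If 'encrypt' is given, it must use the 'qcow' encryption format; anything
else is rejected with "Unsupported encryption format".
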
This adds the .bdrv_co_create driver callback to qed, which
enables image creation over QMP.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
---
 qapi/block-core.json | 25 ++++++-
 block/qed.c | 204 ++++++++++++++++++++++++++++++++++-----------------
 2 files changed, 162 insertions(+), 67 deletions(-)

diff --git a/qapi/block-core.json b/qapi/block-core.json
12
index XXXXXXX..XXXXXXX 100644
13
--- a/qapi/block-core.json
14
+++ b/qapi/block-core.json
15
@@ -XXX,XX +XXX,XX @@
16
'*refcount-bits': 'int' } }
17
18
##
19
+# @BlockdevCreateOptionsQed:
20
+#
21
+# Driver specific image creation options for qed.
22
+#
23
+# @file Node to create the image format on
24
+# @size Size of the virtual disk in bytes
25
+# @backing-file File name of the backing file if a backing file
26
+# should be used
27
+# @backing-fmt Name of the block driver to use for the backing file
28
+# @cluster-size Cluster size in bytes (default: 65536)
29
+# @table-size L1/L2 table size (in clusters)
30
+#
31
+# Since: 2.12
32
+##
33
+{ 'struct': 'BlockdevCreateOptionsQed',
34
+ 'data': { 'file': 'BlockdevRef',
35
+ 'size': 'size',
36
+ '*backing-file': 'str',
37
+ '*backing-fmt': 'BlockdevDriver',
38
+ '*cluster-size': 'size',
39
+ '*table-size': 'int' } }
40
+
41
+##
42
# @BlockdevCreateOptionsRbd:
43
#
44
# Driver specific image creation options for rbd/Ceph.
45
@@ -XXX,XX +XXX,XX @@
46
'parallels': 'BlockdevCreateOptionsParallels',
47
'qcow': 'BlockdevCreateOptionsQcow',
48
'qcow2': 'BlockdevCreateOptionsQcow2',
49
- 'qed': 'BlockdevCreateNotSupported',
50
+ 'qed': 'BlockdevCreateOptionsQed',
51
'quorum': 'BlockdevCreateNotSupported',
52
'raw': 'BlockdevCreateNotSupported',
53
'rbd': 'BlockdevCreateOptionsRbd',
54
diff --git a/block/qed.c b/block/qed.c
55
index XXXXXXX..XXXXXXX 100644
56
--- a/block/qed.c
57
+++ b/block/qed.c
58
@@ -XXX,XX +XXX,XX @@
59
#include "trace.h"
60
#include "qed.h"
61
#include "sysemu/block-backend.h"
62
+#include "qapi/qmp/qdict.h"
63
+#include "qapi/qobject-input-visitor.h"
64
+#include "qapi/qapi-visit-block-core.h"
65
+
66
+static QemuOptsList qed_create_opts;
67
68
static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
69
const char *filename)
70
@@ -XXX,XX +XXX,XX @@ static void bdrv_qed_close(BlockDriverState *bs)
71
qemu_vfree(s->l1_table);
72
}
73
74
-static int qed_create(const char *filename, uint32_t cluster_size,
75
- uint64_t image_size, uint32_t table_size,
76
- const char *backing_file, const char *backing_fmt,
77
- QemuOpts *opts, Error **errp)
78
+static int coroutine_fn bdrv_qed_co_create(BlockdevCreateOptions *opts,
79
+ Error **errp)
80
{
81
- QEDHeader header = {
82
- .magic = QED_MAGIC,
83
- .cluster_size = cluster_size,
84
- .table_size = table_size,
85
- .header_size = 1,
86
- .features = 0,
87
- .compat_features = 0,
88
- .l1_table_offset = cluster_size,
89
- .image_size = image_size,
90
- };
91
+ BlockdevCreateOptionsQed *qed_opts;
92
+ BlockBackend *blk = NULL;
93
+ BlockDriverState *bs = NULL;
94
+
95
+ QEDHeader header;
96
QEDHeader le_header;
97
uint8_t *l1_table = NULL;
98
- size_t l1_size = header.cluster_size * header.table_size;
99
- Error *local_err = NULL;
100
+ size_t l1_size;
101
int ret = 0;
102
- BlockBackend *blk;
103
104
- ret = bdrv_create_file(filename, opts, &local_err);
105
- if (ret < 0) {
106
- error_propagate(errp, local_err);
107
- return ret;
108
+ assert(opts->driver == BLOCKDEV_DRIVER_QED);
109
+ qed_opts = &opts->u.qed;
110
+
111
+ /* Validate options and set default values */
112
+ if (!qed_opts->has_cluster_size) {
113
+ qed_opts->cluster_size = QED_DEFAULT_CLUSTER_SIZE;
114
+ }
115
+ if (!qed_opts->has_table_size) {
116
+ qed_opts->table_size = QED_DEFAULT_TABLE_SIZE;
117
}
118
119
- blk = blk_new_open(filename, NULL, NULL,
120
- BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
121
- &local_err);
122
- if (blk == NULL) {
123
- error_propagate(errp, local_err);
124
+ if (!qed_is_cluster_size_valid(qed_opts->cluster_size)) {
125
+ error_setg(errp, "QED cluster size must be within range [%u, %u] "
126
+ "and power of 2",
127
+ QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
128
+ return -EINVAL;
129
+ }
130
+ if (!qed_is_table_size_valid(qed_opts->table_size)) {
131
+ error_setg(errp, "QED table size must be within range [%u, %u] "
132
+ "and power of 2",
133
+ QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
134
+ return -EINVAL;
135
+ }
136
+ if (!qed_is_image_size_valid(qed_opts->size, qed_opts->cluster_size,
137
+ qed_opts->table_size))
138
+ {
139
+ error_setg(errp, "QED image size must be a non-zero multiple of "
140
+ "cluster size and less than %" PRIu64 " bytes",
141
+ qed_max_image_size(qed_opts->cluster_size,
142
+ qed_opts->table_size));
143
+ return -EINVAL;
144
+ }
145
+
146
+ /* Create BlockBackend to write to the image */
147
+ bs = bdrv_open_blockdev_ref(qed_opts->file, errp);
148
+ if (bs == NULL) {
149
return -EIO;
150
}
151
152
+ blk = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);
153
+ ret = blk_insert_bs(blk, bs, errp);
154
+ if (ret < 0) {
155
+ goto out;
156
+ }
157
blk_set_allow_write_beyond_eof(blk, true);
158
159
+ /* Prepare image format */
160
+ header = (QEDHeader) {
161
+ .magic = QED_MAGIC,
162
+ .cluster_size = qed_opts->cluster_size,
163
+ .table_size = qed_opts->table_size,
164
+ .header_size = 1,
165
+ .features = 0,
166
+ .compat_features = 0,
167
+ .l1_table_offset = qed_opts->cluster_size,
168
+ .image_size = qed_opts->size,
169
+ };
170
+
171
+ l1_size = header.cluster_size * header.table_size;
172
+
173
/* File must start empty and grow, check truncate is supported */
174
ret = blk_truncate(blk, 0, PREALLOC_MODE_OFF, errp);
175
if (ret < 0) {
176
goto out;
177
}
178
179
- if (backing_file) {
180
+ if (qed_opts->has_backing_file) {
181
header.features |= QED_F_BACKING_FILE;
182
header.backing_filename_offset = sizeof(le_header);
183
- header.backing_filename_size = strlen(backing_file);
184
+ header.backing_filename_size = strlen(qed_opts->backing_file);
185
186
- if (qed_fmt_is_raw(backing_fmt)) {
187
- header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
188
+ if (qed_opts->has_backing_fmt) {
189
+ const char *backing_fmt = BlockdevDriver_str(qed_opts->backing_fmt);
190
+ if (qed_fmt_is_raw(backing_fmt)) {
191
+ header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
192
+ }
193
}
194
}
195
196
@@ -XXX,XX +XXX,XX @@ static int qed_create(const char *filename, uint32_t cluster_size,
197
if (ret < 0) {
198
goto out;
199
}
200
- ret = blk_pwrite(blk, sizeof(le_header), backing_file,
201
+ ret = blk_pwrite(blk, sizeof(le_header), qed_opts->backing_file,
202
header.backing_filename_size, 0);
203
if (ret < 0) {
204
goto out;
205
@@ -XXX,XX +XXX,XX @@ static int qed_create(const char *filename, uint32_t cluster_size,
206
out:
207
g_free(l1_table);
208
blk_unref(blk);
209
+ bdrv_unref(bs);
210
return ret;
211
}
212
213
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_qed_co_create_opts(const char *filename,
214
QemuOpts *opts,
215
Error **errp)
216
{
217
- uint64_t image_size = 0;
218
- uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
219
- uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
220
- char *backing_file = NULL;
221
- char *backing_fmt = NULL;
222
+ BlockdevCreateOptions *create_options = NULL;
223
+ QDict *qdict = NULL;
224
+ QObject *qobj;
225
+ Visitor *v;
226
+ BlockDriverState *bs = NULL;
227
+ Error *local_err = NULL;
228
int ret;
229
230
- image_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
231
- BDRV_SECTOR_SIZE);
232
- backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
233
- backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT);
234
- cluster_size = qemu_opt_get_size_del(opts,
235
- BLOCK_OPT_CLUSTER_SIZE,
236
- QED_DEFAULT_CLUSTER_SIZE);
237
- table_size = qemu_opt_get_size_del(opts, BLOCK_OPT_TABLE_SIZE,
238
- QED_DEFAULT_TABLE_SIZE);
239
-
240
- if (!qed_is_cluster_size_valid(cluster_size)) {
241
- error_setg(errp, "QED cluster size must be within range [%u, %u] "
242
- "and power of 2",
243
- QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
244
+ static const QDictRenames opt_renames[] = {
245
+ { BLOCK_OPT_BACKING_FILE, "backing-file" },
246
+ { BLOCK_OPT_BACKING_FMT, "backing-fmt" },
247
+ { BLOCK_OPT_CLUSTER_SIZE, "cluster-size" },
248
+ { BLOCK_OPT_TABLE_SIZE, "table-size" },
249
+ { NULL, NULL },
250
+ };
251
+
252
+ /* Parse options and convert legacy syntax */
253
+ qdict = qemu_opts_to_qdict_filtered(opts, NULL, &qed_create_opts, true);
254
+
255
+ if (!qdict_rename_keys(qdict, opt_renames, errp)) {
256
ret = -EINVAL;
257
- goto finish;
258
+ goto fail;
259
}
260
- if (!qed_is_table_size_valid(table_size)) {
261
- error_setg(errp, "QED table size must be within range [%u, %u] "
262
- "and power of 2",
263
- QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
264
+
265
+ /* Create and open the file (protocol layer) */
266
+ ret = bdrv_create_file(filename, opts, &local_err);
267
+ if (ret < 0) {
268
+ error_propagate(errp, local_err);
269
+ goto fail;
270
+ }
271
+
272
+ bs = bdrv_open(filename, NULL, NULL,
273
+ BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp);
274
+ if (bs == NULL) {
275
+ ret = -EIO;
276
+ goto fail;
277
+ }
278
+
279
+ /* Now get the QAPI type BlockdevCreateOptions */
280
+ qdict_put_str(qdict, "driver", "qed");
281
+ qdict_put_str(qdict, "file", bs->node_name);
282
+
283
+ qobj = qdict_crumple(qdict, errp);
284
+ QDECREF(qdict);
285
+ qdict = qobject_to_qdict(qobj);
286
+ if (qdict == NULL) {
287
ret = -EINVAL;
288
- goto finish;
289
+ goto fail;
290
}
291
- if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
292
- error_setg(errp, "QED image size must be a non-zero multiple of "
293
- "cluster size and less than %" PRIu64 " bytes",
294
- qed_max_image_size(cluster_size, table_size));
295
+
296
+ v = qobject_input_visitor_new_keyval(QOBJECT(qdict));
297
+ visit_type_BlockdevCreateOptions(v, NULL, &create_options, &local_err);
298
+ visit_free(v);
299
+
300
+ if (local_err) {
301
+ error_propagate(errp, local_err);
302
ret = -EINVAL;
303
- goto finish;
304
+ goto fail;
305
}
306
307
- ret = qed_create(filename, cluster_size, image_size, table_size,
308
- backing_file, backing_fmt, opts, errp);
309
+ /* Silently round up size */
310
+ assert(create_options->driver == BLOCKDEV_DRIVER_QED);
311
+ create_options->u.qed.size =
312
+ ROUND_UP(create_options->u.qed.size, BDRV_SECTOR_SIZE);
313
+
314
+ /* Create the qed image (format layer) */
315
+ ret = bdrv_qed_co_create(create_options, errp);
316
317
-finish:
318
- g_free(backing_file);
319
- g_free(backing_fmt);
320
+fail:
321
+ QDECREF(qdict);
322
+ bdrv_unref(bs);
323
+ qapi_free_BlockdevCreateOptions(create_options);
324
return ret;
325
}
326
327
@@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_qed = {
328
.bdrv_close = bdrv_qed_close,
329
.bdrv_reopen_prepare = bdrv_qed_reopen_prepare,
330
.bdrv_child_perm = bdrv_format_default_perms,
331
+ .bdrv_co_create = bdrv_qed_co_create,
332
.bdrv_co_create_opts = bdrv_qed_co_create_opts,
333
.bdrv_has_zero_init = bdrv_has_zero_init_1,
334
.bdrv_co_block_status = bdrv_qed_co_block_status,
335
--
2.13.6

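For qed, the options mirror the legacy create_opts. A hypothetical
request (node name and sizes invented):

  { "execute": "x-blockdev-create",
    "arguments": { "driver": "qed", "file": "proto0",
                   "size": 67108864, "cluster-size": 65536 } }

'cluster-size' and 'table-size' are optional (the cluster size defaults
to 65536 bytes); invalid values are rejected with the same range checks
that the QemuOpts path performed.
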
This makes the .bdrv_co_create(_opts) implementation of vdi look more
like the other recently converted block drivers.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
---
 block/vdi.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/block/vdi.c b/block/vdi.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/block/vdi.c
13
+++ b/block/vdi.c
14
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_do_create(BlockdevCreateOptions *create_options,
15
16
logout("\n");
17
18
- /* Read out options. */
19
+ /* Validate options and set default values */
20
bytes = vdi_opts->size;
21
if (vdi_opts->q_static) {
22
image_type = VDI_TYPE_STATIC;
23
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_do_create(BlockdevCreateOptions *create_options,
24
goto exit;
25
}
26
27
+ /* Create BlockBackend to write to the image */
28
bs_file = bdrv_open_blockdev_ref(vdi_opts->file, errp);
29
if (!bs_file) {
30
ret = -EIO;
31
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts,
32
Error *local_err = NULL;
33
int ret;
34
35
- /* Since CONFIG_VDI_BLOCK_SIZE is disabled by default,
36
+ /* Parse options and convert legacy syntax.
37
+ *
38
+ * Since CONFIG_VDI_BLOCK_SIZE is disabled by default,
39
* cluster-size is not part of the QAPI schema; therefore we have
40
* to parse it before creating the QAPI object. */
41
#if defined(CONFIG_VDI_BLOCK_SIZE)
42
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts,
43
44
qdict = qemu_opts_to_qdict_filtered(opts, NULL, &vdi_create_opts, true);
45
46
+ /* Create and open the file (protocol layer) */
47
ret = bdrv_create_file(filename, opts, errp);
48
if (ret < 0) {
49
goto done;
50
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vdi_co_create_opts(const char *filename, QemuOpts *opts,
51
goto done;
52
}
53
54
+ /* Silently round up size */
55
assert(create_options->driver == BLOCKDEV_DRIVER_VDI);
56
create_options->u.vdi.size = ROUND_UP(create_options->u.vdi.size,
57
BDRV_SECTOR_SIZE);
58
59
+ /* Create the vdi image (format layer) */
60
ret = vdi_co_do_create(create_options, block_size, errp);
61
done:
62
QDECREF(qdict);
63
@@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_vdi = {
64
.bdrv_close = vdi_close,
65
.bdrv_reopen_prepare = vdi_reopen_prepare,
66
.bdrv_child_perm = bdrv_format_default_perms,
67
- .bdrv_co_create_opts = vdi_co_create_opts,
68
.bdrv_co_create = vdi_co_create,
69
+ .bdrv_co_create_opts = vdi_co_create_opts,
70
.bdrv_has_zero_init = bdrv_has_zero_init_1,
71
.bdrv_co_block_status = vdi_co_block_status,
72
.bdrv_make_empty = vdi_make_empty,
73
--
74
2.13.6
75
76
Deleted patch
1
This adds the .bdrv_co_create driver callback to vhdx, which
2
enables image creation over QMP.
3
1
4
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
5
Reviewed-by: Max Reitz <mreitz@redhat.com>
6
---
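As a rough sketch of what this enables (the node name 'proto0' and all sizes are
illustrative, and the QMP command that would consume these options is assumed to
be the experimental x-blockdev-create from this development cycle, not something
added by this patch), the new vhdx creation options look like:

    { "driver": "vhdx",
      "file": "proto0",
      "size": 536870912,
      "log-size": 1048576,
      "block-size": 1048576,
      "subformat": "dynamic",
      "block-state-zero": true }

Only driver, file and size are mandatory; block-size defaults to an automatic
choice based on the image size, log-size to 1 MB, and subformat to dynamic.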
7
qapi/block-core.json | 40 +++++++++-
8
block/vhdx.c | 216 ++++++++++++++++++++++++++++++++++++++-------------
9
2 files changed, 203 insertions(+), 53 deletions(-)
10
11
diff --git a/qapi/block-core.json b/qapi/block-core.json
12
index XXXXXXX..XXXXXXX 100644
13
--- a/qapi/block-core.json
14
+++ b/qapi/block-core.json
15
@@ -XXX,XX +XXX,XX @@
16
'*static': 'bool' } }
17
18
##
19
+# @BlockdevVhdxSubformat:
20
+#
21
+# @dynamic: Growing image file
22
+# @fixed: Preallocated fixed-size image file
23
+#
24
+# Since: 2.12
25
+##
26
+{ 'enum': 'BlockdevVhdxSubformat',
27
+ 'data': [ 'dynamic', 'fixed' ] }
28
+
29
+##
30
+# @BlockdevCreateOptionsVhdx:
31
+#
32
+# Driver specific image creation options for vhdx.
33
+#
34
+# @file Node to create the image format on
35
+# @size Size of the virtual disk in bytes
36
+# @log-size Log size in bytes, must be a multiple of 1 MB
37
+# (default: 1 MB)
38
+# @block-size Block size in bytes, must be a multiple of 1 MB and not
39
+# larger than 256 MB (default: automatically choose a block
40
+# size depending on the image size)
41
+# @subformat vhdx subformat (default: dynamic)
42
+# @block-state-zero Force use of payload blocks of type 'ZERO'. Non-standard,
43
+# but default. Do not set to 'off' when using 'qemu-img
44
+# convert' with subformat=dynamic.
45
+#
46
+# Since: 2.12
47
+##
48
+{ 'struct': 'BlockdevCreateOptionsVhdx',
49
+ 'data': { 'file': 'BlockdevRef',
50
+ 'size': 'size',
51
+ '*log-size': 'size',
52
+ '*block-size': 'size',
53
+ '*subformat': 'BlockdevVhdxSubformat',
54
+ '*block-state-zero': 'bool' } }
55
+
56
+##
57
# @BlockdevCreateNotSupported:
58
#
59
# This is used for all drivers that don't support creating images.
60
@@ -XXX,XX +XXX,XX @@
61
'ssh': 'BlockdevCreateOptionsSsh',
62
'throttle': 'BlockdevCreateNotSupported',
63
'vdi': 'BlockdevCreateOptionsVdi',
64
- 'vhdx': 'BlockdevCreateNotSupported',
65
+ 'vhdx': 'BlockdevCreateOptionsVhdx',
66
'vmdk': 'BlockdevCreateNotSupported',
67
'vpc': 'BlockdevCreateNotSupported',
68
'vvfat': 'BlockdevCreateNotSupported',
69
diff --git a/block/vhdx.c b/block/vhdx.c
70
index XXXXXXX..XXXXXXX 100644
71
--- a/block/vhdx.c
72
+++ b/block/vhdx.c
73
@@ -XXX,XX +XXX,XX @@
74
#include "block/vhdx.h"
75
#include "migration/blocker.h"
76
#include "qemu/uuid.h"
77
+#include "qapi/qmp/qdict.h"
78
+#include "qapi/qobject-input-visitor.h"
79
+#include "qapi/qapi-visit-block-core.h"
80
81
/* Options for VHDX creation */
82
83
@@ -XXX,XX +XXX,XX @@ typedef enum VHDXImageType {
84
VHDX_TYPE_DIFFERENCING, /* Currently unsupported */
85
} VHDXImageType;
86
87
+static QemuOptsList vhdx_create_opts;
88
+
89
/* Several metadata and region table data entries are identified by
90
* guids in a MS-specific GUID format. */
91
92
@@ -XXX,XX +XXX,XX @@ exit:
93
* .---- ~ ----------- ~ ------------ ~ ---------------- ~ -----------.
94
* 1MB
95
*/
96
-static int coroutine_fn vhdx_co_create_opts(const char *filename, QemuOpts *opts,
97
- Error **errp)
98
+static int coroutine_fn vhdx_co_create(BlockdevCreateOptions *opts,
99
+ Error **errp)
100
{
101
+ BlockdevCreateOptionsVhdx *vhdx_opts;
102
+ BlockBackend *blk = NULL;
103
+ BlockDriverState *bs = NULL;
104
+
105
int ret = 0;
106
- uint64_t image_size = (uint64_t) 2 * GiB;
107
- uint32_t log_size = 1 * MiB;
108
- uint32_t block_size = 0;
109
+ uint64_t image_size;
110
+ uint32_t log_size;
111
+ uint32_t block_size;
112
uint64_t signature;
113
uint64_t metadata_offset;
114
bool use_zero_blocks = false;
115
116
gunichar2 *creator = NULL;
117
glong creator_items;
118
- BlockBackend *blk;
119
- char *type = NULL;
120
VHDXImageType image_type;
121
- Error *local_err = NULL;
122
123
- image_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
124
- BDRV_SECTOR_SIZE);
125
- log_size = qemu_opt_get_size_del(opts, VHDX_BLOCK_OPT_LOG_SIZE, 0);
126
- block_size = qemu_opt_get_size_del(opts, VHDX_BLOCK_OPT_BLOCK_SIZE, 0);
127
- type = qemu_opt_get_del(opts, BLOCK_OPT_SUBFMT);
128
- use_zero_blocks = qemu_opt_get_bool_del(opts, VHDX_BLOCK_OPT_ZERO, true);
129
+ assert(opts->driver == BLOCKDEV_DRIVER_VHDX);
130
+ vhdx_opts = &opts->u.vhdx;
131
132
+ /* Validate options and set default values */
133
+ image_size = vhdx_opts->size;
134
if (image_size > VHDX_MAX_IMAGE_SIZE) {
135
error_setg_errno(errp, EINVAL, "Image size too large; max of 64TB");
136
- ret = -EINVAL;
137
- goto exit;
138
+ return -EINVAL;
139
}
140
141
- if (type == NULL) {
142
- type = g_strdup("dynamic");
143
+ if (!vhdx_opts->has_log_size) {
144
+ log_size = DEFAULT_LOG_SIZE;
145
+ } else {
146
+ log_size = vhdx_opts->log_size;
147
+ }
148
+ if (log_size < MiB || (log_size % MiB) != 0) {
149
+ error_setg_errno(errp, EINVAL, "Log size must be a multiple of 1 MB");
150
+ return -EINVAL;
151
}
152
153
- if (!strcmp(type, "dynamic")) {
154
+ if (!vhdx_opts->has_block_state_zero) {
155
+ use_zero_blocks = true;
156
+ } else {
157
+ use_zero_blocks = vhdx_opts->block_state_zero;
158
+ }
159
+
160
+ if (!vhdx_opts->has_subformat) {
161
+ vhdx_opts->subformat = BLOCKDEV_VHDX_SUBFORMAT_DYNAMIC;
162
+ }
163
+
164
+ switch (vhdx_opts->subformat) {
165
+ case BLOCKDEV_VHDX_SUBFORMAT_DYNAMIC:
166
image_type = VHDX_TYPE_DYNAMIC;
167
- } else if (!strcmp(type, "fixed")) {
168
+ break;
169
+ case BLOCKDEV_VHDX_SUBFORMAT_FIXED:
170
image_type = VHDX_TYPE_FIXED;
171
- } else if (!strcmp(type, "differencing")) {
172
- error_setg_errno(errp, ENOTSUP,
173
- "Differencing files not yet supported");
174
- ret = -ENOTSUP;
175
- goto exit;
176
- } else {
177
- error_setg(errp, "Invalid subformat '%s'", type);
178
- ret = -EINVAL;
179
- goto exit;
180
+ break;
181
+ default:
182
+ g_assert_not_reached();
183
}
184
185
/* These are pretty arbitrary, and mainly designed to keep the BAT
186
* size reasonable to load into RAM */
187
- if (block_size == 0) {
188
+ if (vhdx_opts->has_block_size) {
189
+ block_size = vhdx_opts->block_size;
190
+ } else {
191
if (image_size > 32 * TiB) {
192
block_size = 64 * MiB;
193
} else if (image_size > (uint64_t) 100 * GiB) {
194
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vhdx_co_create_opts(const char *filename, QemuOpts *opts
195
}
196
}
197
198
-
199
- /* make the log size close to what was specified, but must be
200
- * min 1MB, and multiple of 1MB */
201
- log_size = ROUND_UP(log_size, MiB);
202
-
203
- block_size = ROUND_UP(block_size, MiB);
204
- block_size = block_size > VHDX_BLOCK_SIZE_MAX ? VHDX_BLOCK_SIZE_MAX :
205
- block_size;
206
-
207
- ret = bdrv_create_file(filename, opts, &local_err);
208
- if (ret < 0) {
209
- error_propagate(errp, local_err);
210
- goto exit;
211
+ if (block_size < MiB || (block_size % MiB) != 0) {
212
+ error_setg_errno(errp, EINVAL, "Block size must be a multiple of 1 MB");
213
+ return -EINVAL;
214
+ }
215
+ if (block_size > VHDX_BLOCK_SIZE_MAX) {
216
+ error_setg_errno(errp, EINVAL, "Block size must not exceed %d",
217
+ VHDX_BLOCK_SIZE_MAX);
218
+ return -EINVAL;
219
}
220
221
- blk = blk_new_open(filename, NULL, NULL,
222
- BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
223
- &local_err);
224
- if (blk == NULL) {
225
- error_propagate(errp, local_err);
226
- ret = -EIO;
227
- goto exit;
228
+ /* Create BlockBackend to write to the image */
229
+ bs = bdrv_open_blockdev_ref(vhdx_opts->file, errp);
230
+ if (bs == NULL) {
231
+ return -EIO;
232
}
233
234
+ blk = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);
235
+ ret = blk_insert_bs(blk, bs, errp);
236
+ if (ret < 0) {
237
+ goto delete_and_exit;
238
+ }
239
blk_set_allow_write_beyond_eof(blk, true);
240
241
/* Create (A) */
242
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vhdx_co_create_opts(const char *filename, QemuOpts *opts
243
244
delete_and_exit:
245
blk_unref(blk);
246
-exit:
247
- g_free(type);
248
+ bdrv_unref(bs);
249
g_free(creator);
250
return ret;
251
}
252
253
+static int coroutine_fn vhdx_co_create_opts(const char *filename,
254
+ QemuOpts *opts,
255
+ Error **errp)
256
+{
257
+ BlockdevCreateOptions *create_options = NULL;
258
+ QDict *qdict = NULL;
259
+ QObject *qobj;
260
+ Visitor *v;
261
+ BlockDriverState *bs = NULL;
262
+ Error *local_err = NULL;
263
+ int ret;
264
+
265
+ static const QDictRenames opt_renames[] = {
266
+ { VHDX_BLOCK_OPT_LOG_SIZE, "log-size" },
267
+ { VHDX_BLOCK_OPT_BLOCK_SIZE, "block-size" },
268
+ { VHDX_BLOCK_OPT_ZERO, "block-state-zero" },
269
+ { NULL, NULL },
270
+ };
271
+
272
+ /* Parse options and convert legacy syntax */
273
+ qdict = qemu_opts_to_qdict_filtered(opts, NULL, &vhdx_create_opts, true);
274
+
275
+ if (!qdict_rename_keys(qdict, opt_renames, errp)) {
276
+ ret = -EINVAL;
277
+ goto fail;
278
+ }
279
+
280
+ /* Create and open the file (protocol layer) */
281
+ ret = bdrv_create_file(filename, opts, &local_err);
282
+ if (ret < 0) {
283
+ error_propagate(errp, local_err);
284
+ goto fail;
285
+ }
286
+
287
+ bs = bdrv_open(filename, NULL, NULL,
288
+ BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp);
289
+ if (bs == NULL) {
290
+ ret = -EIO;
291
+ goto fail;
292
+ }
293
+
294
+ /* Now get the QAPI type BlockdevCreateOptions */
295
+ qdict_put_str(qdict, "driver", "vhdx");
296
+ qdict_put_str(qdict, "file", bs->node_name);
297
+
298
+ qobj = qdict_crumple(qdict, errp);
299
+ QDECREF(qdict);
300
+ qdict = qobject_to_qdict(qobj);
301
+ if (qdict == NULL) {
302
+ ret = -EINVAL;
303
+ goto fail;
304
+ }
305
+
306
+ v = qobject_input_visitor_new_keyval(QOBJECT(qdict));
307
+ visit_type_BlockdevCreateOptions(v, NULL, &create_options, &local_err);
308
+ visit_free(v);
309
+
310
+ if (local_err) {
311
+ error_propagate(errp, local_err);
312
+ ret = -EINVAL;
313
+ goto fail;
314
+ }
315
+
316
+ /* Silently round up sizes:
317
+ * The image size is rounded up to a multiple of 512 bytes. Make the block and
318
+ * log size close to what was specified, but they must be at least 1 MB and a multiple of
319
+ * 1 MB. Also respect VHDX_BLOCK_SIZE_MAX for block sizes. block_size = 0
320
+ * means auto, which is represented by a missing key in QAPI. */
321
+ assert(create_options->driver == BLOCKDEV_DRIVER_VHDX);
322
+ create_options->u.vhdx.size =
323
+ ROUND_UP(create_options->u.vhdx.size, BDRV_SECTOR_SIZE);
324
+
325
+ if (create_options->u.vhdx.has_log_size) {
326
+ create_options->u.vhdx.log_size =
327
+ ROUND_UP(create_options->u.vhdx.log_size, MiB);
328
+ }
329
+ if (create_options->u.vhdx.has_block_size) {
330
+ create_options->u.vhdx.block_size =
331
+ ROUND_UP(create_options->u.vhdx.block_size, MiB);
332
+
333
+ if (create_options->u.vhdx.block_size == 0) {
334
+ create_options->u.vhdx.has_block_size = false;
335
+ }
336
+ if (create_options->u.vhdx.block_size > VHDX_BLOCK_SIZE_MAX) {
337
+ create_options->u.vhdx.block_size = VHDX_BLOCK_SIZE_MAX;
338
+ }
339
+ }
340
+
341
+ /* Create the vhdx image (format layer) */
342
+ ret = vhdx_co_create(create_options, errp);
343
+
344
+fail:
345
+ QDECREF(qdict);
346
+ bdrv_unref(bs);
347
+ qapi_free_BlockdevCreateOptions(create_options);
348
+ return ret;
349
+}
350
+
351
/* If opened r/w, the VHDX driver will automatically replay the log,
352
* if one is present, inside the vhdx_open() call.
353
*
354
@@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_vhdx = {
355
.bdrv_child_perm = bdrv_format_default_perms,
356
.bdrv_co_readv = vhdx_co_readv,
357
.bdrv_co_writev = vhdx_co_writev,
358
+ .bdrv_co_create = vhdx_co_create,
359
.bdrv_co_create_opts = vhdx_co_create_opts,
360
.bdrv_get_info = vhdx_get_info,
361
.bdrv_co_check = vhdx_co_check,
362
--
363
2.13.6
364
365
Deleted patch
1
This adds the .bdrv_co_create driver callback to vpc, which
2
enables image creation over QMP.
3
1
4
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
5
Reviewed-by: Max Reitz <mreitz@redhat.com>
6
---
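As with vhdx, a minimal sketch of the new vpc creation options (node name and
size are illustrative; the QMP command consuming BlockdevCreateOptions is
assumed, not part of this patch):

    { "driver": "vpc",
      "file": "proto0",
      "size": 268435456,
      "subformat": "dynamic",
      "force-size": false }

subformat and force-size are optional and default to dynamic and false
respectively.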
7
qapi/block-core.json | 33 ++++++++++-
8
block/vpc.c | 152 ++++++++++++++++++++++++++++++++++++++-------------
9
2 files changed, 147 insertions(+), 38 deletions(-)
10
11
diff --git a/qapi/block-core.json b/qapi/block-core.json
12
index XXXXXXX..XXXXXXX 100644
13
--- a/qapi/block-core.json
14
+++ b/qapi/block-core.json
15
@@ -XXX,XX +XXX,XX @@
16
'*block-state-zero': 'bool' } }
17
18
##
19
+# @BlockdevVpcSubformat:
20
+#
21
+# @dynamic: Growing image file
22
+# @fixed: Preallocated fixed-size image file
23
+#
24
+# Since: 2.12
25
+##
26
+{ 'enum': 'BlockdevVpcSubformat',
27
+ 'data': [ 'dynamic', 'fixed' ] }
28
+
29
+##
30
+# @BlockdevCreateOptionsVpc:
31
+#
32
+# Driver specific image creation options for vpc (VHD).
33
+#
34
+# @file Node to create the image format on
35
+# @size Size of the virtual disk in bytes
36
+# @subformat vpc subformat (default: dynamic)
37
+# @force-size Force use of the exact byte size instead of rounding to the
38
+# next size that can be represented in CHS geometry
39
+# (default: false)
40
+#
41
+# Since: 2.12
42
+##
43
+{ 'struct': 'BlockdevCreateOptionsVpc',
44
+ 'data': { 'file': 'BlockdevRef',
45
+ 'size': 'size',
46
+ '*subformat': 'BlockdevVpcSubformat',
47
+ '*force-size': 'bool' } }
48
+
49
+##
50
# @BlockdevCreateNotSupported:
51
#
52
# This is used for all drivers that don't support creating images.
53
@@ -XXX,XX +XXX,XX @@
54
'vdi': 'BlockdevCreateOptionsVdi',
55
'vhdx': 'BlockdevCreateOptionsVhdx',
56
'vmdk': 'BlockdevCreateNotSupported',
57
- 'vpc': 'BlockdevCreateNotSupported',
58
+ 'vpc': 'BlockdevCreateOptionsVpc',
59
'vvfat': 'BlockdevCreateNotSupported',
60
'vxhs': 'BlockdevCreateNotSupported'
61
} }
62
diff --git a/block/vpc.c b/block/vpc.c
63
index XXXXXXX..XXXXXXX 100644
64
--- a/block/vpc.c
65
+++ b/block/vpc.c
66
@@ -XXX,XX +XXX,XX @@
67
#include "migration/blocker.h"
68
#include "qemu/bswap.h"
69
#include "qemu/uuid.h"
70
+#include "qapi/qmp/qdict.h"
71
+#include "qapi/qobject-input-visitor.h"
72
+#include "qapi/qapi-visit-block-core.h"
73
74
/**************************************************************/
75
76
@@ -XXX,XX +XXX,XX @@ static QemuOptsList vpc_runtime_opts = {
77
}
78
};
79
80
+static QemuOptsList vpc_create_opts;
81
+
82
static uint32_t vpc_checksum(uint8_t* buf, size_t size)
83
{
84
uint32_t res = 0;
85
@@ -XXX,XX +XXX,XX @@ static int create_fixed_disk(BlockBackend *blk, uint8_t *buf,
86
return ret;
87
}
88
89
-static int coroutine_fn vpc_co_create_opts(const char *filename, QemuOpts *opts,
90
- Error **errp)
91
+static int coroutine_fn vpc_co_create(BlockdevCreateOptions *opts,
92
+ Error **errp)
93
{
94
+ BlockdevCreateOptionsVpc *vpc_opts;
95
+ BlockBackend *blk = NULL;
96
+ BlockDriverState *bs = NULL;
97
+
98
uint8_t buf[1024];
99
VHDFooter *footer = (VHDFooter *) buf;
100
- char *disk_type_param;
101
int i;
102
uint16_t cyls = 0;
103
uint8_t heads = 0;
104
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vpc_co_create_opts(const char *filename, QemuOpts *opts,
105
int64_t total_size;
106
int disk_type;
107
int ret = -EIO;
108
- bool force_size;
109
- Error *local_err = NULL;
110
- BlockBackend *blk = NULL;
111
112
- /* Read out options */
113
- total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
114
- BDRV_SECTOR_SIZE);
115
- disk_type_param = qemu_opt_get_del(opts, BLOCK_OPT_SUBFMT);
116
- if (disk_type_param) {
117
- if (!strcmp(disk_type_param, "dynamic")) {
118
- disk_type = VHD_DYNAMIC;
119
- } else if (!strcmp(disk_type_param, "fixed")) {
120
- disk_type = VHD_FIXED;
121
- } else {
122
- error_setg(errp, "Invalid disk type, %s", disk_type_param);
123
- ret = -EINVAL;
124
- goto out;
125
- }
126
- } else {
127
+ assert(opts->driver == BLOCKDEV_DRIVER_VPC);
128
+ vpc_opts = &opts->u.vpc;
129
+
130
+ /* Validate options and set default values */
131
+ total_size = vpc_opts->size;
132
+
133
+ if (!vpc_opts->has_subformat) {
134
+ vpc_opts->subformat = BLOCKDEV_VPC_SUBFORMAT_DYNAMIC;
135
+ }
136
+ switch (vpc_opts->subformat) {
137
+ case BLOCKDEV_VPC_SUBFORMAT_DYNAMIC:
138
disk_type = VHD_DYNAMIC;
139
+ break;
140
+ case BLOCKDEV_VPC_SUBFORMAT_FIXED:
141
+ disk_type = VHD_FIXED;
142
+ break;
143
+ default:
144
+ g_assert_not_reached();
145
}
146
147
- force_size = qemu_opt_get_bool_del(opts, VPC_OPT_FORCE_SIZE, false);
148
-
149
- ret = bdrv_create_file(filename, opts, &local_err);
150
- if (ret < 0) {
151
- error_propagate(errp, local_err);
152
- goto out;
153
+ /* Create BlockBackend to write to the image */
154
+ bs = bdrv_open_blockdev_ref(vpc_opts->file, errp);
155
+ if (bs == NULL) {
156
+ return -EIO;
157
}
158
159
- blk = blk_new_open(filename, NULL, NULL,
160
- BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
161
- &local_err);
162
- if (blk == NULL) {
163
- error_propagate(errp, local_err);
164
- ret = -EIO;
165
+ blk = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);
166
+ ret = blk_insert_bs(blk, bs, errp);
167
+ if (ret < 0) {
168
goto out;
169
}
170
-
171
blk_set_allow_write_beyond_eof(blk, true);
172
173
/*
174
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vpc_co_create_opts(const char *filename, QemuOpts *opts,
175
* we set the geometry to 65535 x 16 x 255 (CxHxS) sectors and use
176
* the image size from the VHD footer to calculate total_sectors.
177
*/
178
- if (force_size) {
179
+ if (vpc_opts->force_size) {
180
/* This will force the use of total_size for sector count, below */
181
cyls = VHD_CHS_MAX_C;
182
heads = VHD_CHS_MAX_H;
183
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vpc_co_create_opts(const char *filename, QemuOpts *opts,
184
memset(buf, 0, 1024);
185
186
memcpy(footer->creator, "conectix", 8);
187
- if (force_size) {
188
+ if (vpc_opts->force_size) {
189
memcpy(footer->creator_app, "qem2", 4);
190
} else {
191
memcpy(footer->creator_app, "qemu", 4);
192
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vpc_co_create_opts(const char *filename, QemuOpts *opts,
193
194
out:
195
blk_unref(blk);
196
- g_free(disk_type_param);
197
+ bdrv_unref(bs);
198
+ return ret;
199
+}
200
+
201
+static int coroutine_fn vpc_co_create_opts(const char *filename,
202
+ QemuOpts *opts, Error **errp)
203
+{
204
+ BlockdevCreateOptions *create_options = NULL;
205
+ QDict *qdict = NULL;
206
+ QObject *qobj;
207
+ Visitor *v;
208
+ BlockDriverState *bs = NULL;
209
+ Error *local_err = NULL;
210
+ int ret;
211
+
212
+ static const QDictRenames opt_renames[] = {
213
+ { VPC_OPT_FORCE_SIZE, "force-size" },
214
+ { NULL, NULL },
215
+ };
216
+
217
+ /* Parse options and convert legacy syntax */
218
+ qdict = qemu_opts_to_qdict_filtered(opts, NULL, &vpc_create_opts, true);
219
+
220
+ if (!qdict_rename_keys(qdict, opt_renames, errp)) {
221
+ ret = -EINVAL;
222
+ goto fail;
223
+ }
224
+
225
+ /* Create and open the file (protocol layer) */
226
+ ret = bdrv_create_file(filename, opts, &local_err);
227
+ if (ret < 0) {
228
+ error_propagate(errp, local_err);
229
+ goto fail;
230
+ }
231
+
232
+ bs = bdrv_open(filename, NULL, NULL,
233
+ BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp);
234
+ if (bs == NULL) {
235
+ ret = -EIO;
236
+ goto fail;
237
+ }
238
+
239
+ /* Now get the QAPI type BlockdevCreateOptions */
240
+ qdict_put_str(qdict, "driver", "vpc");
241
+ qdict_put_str(qdict, "file", bs->node_name);
242
+
243
+ qobj = qdict_crumple(qdict, errp);
244
+ QDECREF(qdict);
245
+ qdict = qobject_to_qdict(qobj);
246
+ if (qdict == NULL) {
247
+ ret = -EINVAL;
248
+ goto fail;
249
+ }
250
+
251
+ v = qobject_input_visitor_new_keyval(QOBJECT(qdict));
252
+ visit_type_BlockdevCreateOptions(v, NULL, &create_options, &local_err);
253
+ visit_free(v);
254
+
255
+ if (local_err) {
256
+ error_propagate(errp, local_err);
257
+ ret = -EINVAL;
258
+ goto fail;
259
+ }
260
+
261
+ /* Silently round up size */
262
+ assert(create_options->driver == BLOCKDEV_DRIVER_VPC);
263
+ create_options->u.vpc.size =
264
+ ROUND_UP(create_options->u.vpc.size, BDRV_SECTOR_SIZE);
265
+
266
+ /* Create the vpc image (format layer) */
267
+ ret = vpc_co_create(create_options, errp);
268
+
269
+fail:
270
+ QDECREF(qdict);
271
+ bdrv_unref(bs);
272
+ qapi_free_BlockdevCreateOptions(create_options);
273
return ret;
274
}
275
276
+
277
static int vpc_has_zero_init(BlockDriverState *bs)
278
{
279
BDRVVPCState *s = bs->opaque;
280
@@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_vpc = {
281
.bdrv_close = vpc_close,
282
.bdrv_reopen_prepare = vpc_reopen_prepare,
283
.bdrv_child_perm = bdrv_format_default_perms,
284
+ .bdrv_co_create = vpc_co_create,
285
.bdrv_co_create_opts = vpc_co_create_opts,
286
287
.bdrv_co_preadv = vpc_co_preadv,
288
--
289
2.13.6
290
291
Deleted patch
1
Perform the rounding to match a CHS geometry only in the legacy code
2
path in .bdrv_co_create_opts. QMP now requires that the user already
3
passes a CHS aligned image size, unless force-size=true is given.
4
1
5
CHS alignment is required to make the image compatible with Virtual PC,
6
but not for use with newer Microsoft hypervisors.
7
8
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
9
Reviewed-by: Max Reitz <mreitz@redhat.com>
10
---
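For illustration (node name and size made up): after this change, a QMP create
request with a size like 256 MiB, which does not correspond to an exact
cylinders * heads * sectors product, e.g.

    { "driver": "vpc",
      "file": "proto0",
      "size": 268435456 }

fails with "The requested image size cannot be represented in CHS geometry" and
a hint naming the next representable size; passing "force-size": true instead
keeps the exact byte size at the cost of Virtual PC compatibility. The legacy
qemu-img/QemuOpts path still rounds the size up silently.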
11
block/vpc.c | 113 +++++++++++++++++++++++++++++++++++++++++++-----------------
12
1 file changed, 82 insertions(+), 31 deletions(-)
13
14
diff --git a/block/vpc.c b/block/vpc.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/block/vpc.c
17
+++ b/block/vpc.c
18
@@ -XXX,XX +XXX,XX @@ static int create_fixed_disk(BlockBackend *blk, uint8_t *buf,
19
return ret;
20
}
21
22
+static int calculate_rounded_image_size(BlockdevCreateOptionsVpc *vpc_opts,
23
+ uint16_t *out_cyls,
24
+ uint8_t *out_heads,
25
+ uint8_t *out_secs_per_cyl,
26
+ int64_t *out_total_sectors,
27
+ Error **errp)
28
+{
29
+ int64_t total_size = vpc_opts->size;
30
+ uint16_t cyls = 0;
31
+ uint8_t heads = 0;
32
+ uint8_t secs_per_cyl = 0;
33
+ int64_t total_sectors;
34
+ int i;
35
+
36
+ /*
37
+ * Calculate matching total_size and geometry. Increase the number of
38
+ * sectors requested until we get enough (or fail). This ensures that
39
+ * qemu-img convert doesn't truncate images, but rather rounds up.
40
+ *
41
+ * If the image size can't be represented by a spec conformant CHS geometry,
42
+ * we set the geometry to 65535 x 16 x 255 (CxHxS) sectors and use
43
+ * the image size from the VHD footer to calculate total_sectors.
44
+ */
45
+ if (vpc_opts->force_size) {
46
+ /* This will force the use of total_size for sector count, below */
47
+ cyls = VHD_CHS_MAX_C;
48
+ heads = VHD_CHS_MAX_H;
49
+ secs_per_cyl = VHD_CHS_MAX_S;
50
+ } else {
51
+ total_sectors = MIN(VHD_MAX_GEOMETRY, total_size / BDRV_SECTOR_SIZE);
52
+ for (i = 0; total_sectors > (int64_t)cyls * heads * secs_per_cyl; i++) {
53
+ calculate_geometry(total_sectors + i, &cyls, &heads, &secs_per_cyl);
54
+ }
55
+ }
56
+
57
+ if ((int64_t)cyls * heads * secs_per_cyl == VHD_MAX_GEOMETRY) {
58
+ total_sectors = total_size / BDRV_SECTOR_SIZE;
59
+ /* Allow a maximum disk size of 2040 GiB */
60
+ if (total_sectors > VHD_MAX_SECTORS) {
61
+ error_setg(errp, "Disk size is too large, max size is 2040 GiB");
62
+ return -EFBIG;
63
+ }
64
+ } else {
65
+ total_sectors = (int64_t) cyls * heads * secs_per_cyl;
66
+ }
67
+
68
+ *out_total_sectors = total_sectors;
69
+ if (out_cyls) {
70
+ *out_cyls = cyls;
71
+ *out_heads = heads;
72
+ *out_secs_per_cyl = secs_per_cyl;
73
+ }
74
+
75
+ return 0;
76
+}
77
+
78
static int coroutine_fn vpc_co_create(BlockdevCreateOptions *opts,
79
Error **errp)
80
{
81
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vpc_co_create(BlockdevCreateOptions *opts,
82
83
uint8_t buf[1024];
84
VHDFooter *footer = (VHDFooter *) buf;
85
- int i;
86
uint16_t cyls = 0;
87
uint8_t heads = 0;
88
uint8_t secs_per_cyl = 0;
89
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vpc_co_create(BlockdevCreateOptions *opts,
90
}
91
blk_set_allow_write_beyond_eof(blk, true);
92
93
- /*
94
- * Calculate matching total_size and geometry. Increase the number of
95
- * sectors requested until we get enough (or fail). This ensures that
96
- * qemu-img convert doesn't truncate images, but rather rounds up.
97
- *
98
- * If the image size can't be represented by a spec conformant CHS geometry,
99
- * we set the geometry to 65535 x 16 x 255 (CxHxS) sectors and use
100
- * the image size from the VHD footer to calculate total_sectors.
101
- */
102
- if (vpc_opts->force_size) {
103
- /* This will force the use of total_size for sector count, below */
104
- cyls = VHD_CHS_MAX_C;
105
- heads = VHD_CHS_MAX_H;
106
- secs_per_cyl = VHD_CHS_MAX_S;
107
- } else {
108
- total_sectors = MIN(VHD_MAX_GEOMETRY, total_size / BDRV_SECTOR_SIZE);
109
- for (i = 0; total_sectors > (int64_t)cyls * heads * secs_per_cyl; i++) {
110
- calculate_geometry(total_sectors + i, &cyls, &heads, &secs_per_cyl);
111
- }
112
+ /* Get geometry and check that it matches the image size */
113
+ ret = calculate_rounded_image_size(vpc_opts, &cyls, &heads, &secs_per_cyl,
114
+ &total_sectors, errp);
115
+ if (ret < 0) {
116
+ goto out;
117
}
118
119
- if ((int64_t)cyls * heads * secs_per_cyl == VHD_MAX_GEOMETRY) {
120
- total_sectors = total_size / BDRV_SECTOR_SIZE;
121
- /* Allow a maximum disk size of 2040 GiB */
122
- if (total_sectors > VHD_MAX_SECTORS) {
123
- error_setg(errp, "Disk size is too large, max size is 2040 GiB");
124
- ret = -EFBIG;
125
- goto out;
126
- }
127
- } else {
128
- total_sectors = (int64_t)cyls * heads * secs_per_cyl;
129
- total_size = total_sectors * BDRV_SECTOR_SIZE;
130
+ if (total_size != total_sectors * BDRV_SECTOR_SIZE) {
131
+ error_setg(errp, "The requested image size cannot be represented in "
132
+ "CHS geometry");
133
+ error_append_hint(errp, "Try size=%llu or force-size=on (the "
134
+ "latter makes the image incompatible with "
135
+ "Virtual PC)",
136
+ total_sectors * BDRV_SECTOR_SIZE);
137
+ ret = -EINVAL;
138
+ goto out;
139
}
140
141
/* Prepare the Hard Disk Footer */
142
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vpc_co_create_opts(const char *filename,
143
create_options->u.vpc.size =
144
ROUND_UP(create_options->u.vpc.size, BDRV_SECTOR_SIZE);
145
146
+ if (!create_options->u.vpc.force_size) {
147
+ int64_t total_sectors;
148
+ ret = calculate_rounded_image_size(&create_options->u.vpc, NULL, NULL,
149
+ NULL, &total_sectors, errp);
150
+ if (ret < 0) {
151
+ goto fail;
152
+ }
153
+
154
+ create_options->u.vpc.size = total_sectors * BDRV_SECTOR_SIZE;
155
+ }
156
+
157
+
158
/* Create the vpc image (format layer) */
159
ret = vpc_co_create(create_options, errp);
160
161
--
162
2.13.6
163
164