The following changes since commit 281f327487c9c9b1599f93c589a408bbf4a651b8:

  Merge remote-tracking branch 'remotes/vivier/tags/m68k-for-2.12-pull-request' into staging (2017-12-22 00:11:36 +0000)

are available in the git repository at:

  git://repo.or.cz/qemu/kevin.git tags/for-upstream

for you to fetch changes up to 1a63a907507fbbcfaee3f622907ec244b7eabda8:

  block: Keep nodes drained between reopen_queue/multiple (2017-12-22 15:05:32 +0100)

----------------------------------------------------------------
Block layer patches

----------------------------------------------------------------
Doug Gale (1):
      nvme: Add tracing

Edgar Kaziakhmedov (1):
      qcow2: get rid of qcow2_backing_read1 routine

Fam Zheng (2):
      block: Open backing image in force share mode for size probe
      block: Remove unused bdrv_requests_pending

John Snow (1):
      iotests: fix 197 for vpc

Kevin Wolf (27):
      block: Formats don't need CONSISTENT_READ with NO_IO
      block: Make bdrv_drain_invoke() recursive
      block: Call .drain_begin only once in bdrv_drain_all_begin()
      test-bdrv-drain: Test BlockDriver callbacks for drain
      block: bdrv_drain_recurse(): Remove unused begin parameter
      block: Don't wait for requests in bdrv_drain*_end()
      block: Unify order in drain functions
      block: Don't acquire AioContext in hmp_qemu_io()
      block: Document that x-blockdev-change breaks quorum children list
      block: Assert drain_all is only called from main AioContext
      block: Make bdrv_drain() driver callbacks non-recursive
      test-bdrv-drain: Test callback for bdrv_drain
      test-bdrv-drain: Test bs->quiesce_counter
      blockjob: Pause job on draining any job BDS
      test-bdrv-drain: Test drain vs. block jobs
      block: Don't block_job_pause_all() in bdrv_drain_all()
      block: Nested drain_end must still call callbacks
      test-bdrv-drain: Test nested drain sections
      block: Don't notify parents in drain call chain
      block: Add bdrv_subtree_drained_begin/end()
      test-bdrv-drain: Tests for bdrv_subtree_drain
      test-bdrv-drain: Test behaviour in coroutine context
      test-bdrv-drain: Recursive draining with multiple parents
      block: Allow graph changes in subtree drained section
      test-bdrv-drain: Test graph changes in drained section
      commit: Simplify reopen of base
      block: Keep nodes drained between reopen_queue/multiple

Thomas Huth (3):
      block: Remove the obsolete -drive boot=on|off parameter
      block: Remove the deprecated -hdachs option
      block: Mention -drive cyls/heads/secs/trans/serial/addr in deprecation chapter

 qapi/block-core.json             |   4 +
 block/qcow2.h                    |   3 -
 include/block/block.h            |  15 +-
 include/block/block_int.h        |   6 +-
 block.c                          |  75 ++++-
 block/commit.c                   |   8 +-
 block/io.c                       | 164 +++++++---
 block/qcow2.c                    |  51 +--
 block/replication.c              |   6 +
 blockdev.c                       |  11 -
 blockjob.c                       |  22 +-
 hmp.c                            |   6 -
 hw/block/nvme.c                  | 349 +++++++++++++++++----
 qemu-io-cmds.c                   |   3 +
 tests/test-bdrv-drain.c          | 651 +++++++++++++++++++++++++++++++++++++++
 vl.c                             |  86 +-----
 hw/block/trace-events            |  93 ++++++
 qemu-doc.texi                    |  29 +-
 qemu-options.hx                  |  19 +-
 tests/Makefile.include           |   2 +
 tests/qemu-iotests/197           |   4 +
 tests/qemu-iotests/common.filter |   3 +-
 22 files changed, 1294 insertions(+), 316 deletions(-)
 create mode 100644 tests/test-bdrv-drain.c

Commit 1f4ad7d fixed 'qemu-img info' for raw images that are currently
in use as a mirror target. It is not enough for image formats, though,
as these still unconditionally request BLK_PERM_CONSISTENT_READ.

As this permission is geared towards whether the guest-visible data is
consistent, and has no impact on whether the metadata is sane, and
'qemu-img info' does not read guest-visible data (except for the raw
format), it makes sense to not require BLK_PERM_CONSISTENT_READ if there
is not going to be any guest I/O performed, regardless of image format.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/block.c b/block.c
index XXXXXXX..XXXXXXX 100644
--- a/block.c
+++ b/block.c
@@ -XXX,XX +XXX,XX @@ void bdrv_format_default_perms(BlockDriverState *bs, BdrvChild *c,
     assert(role == &child_backing || role == &child_file);
 
     if (!backing) {
+        int flags = bdrv_reopen_get_flags(reopen_queue, bs);
+
         /* Apart from the modifications below, the same permissions are
          * forwarded and left alone as for filters */
         bdrv_filter_default_perms(bs, c, role, reopen_queue, perm, shared,
@@ -XXX,XX +XXX,XX @@ void bdrv_format_default_perms(BlockDriverState *bs, BdrvChild *c,
 
         /* bs->file always needs to be consistent because of the metadata. We
          * can never allow other users to resize or write to it. */
-        perm |= BLK_PERM_CONSISTENT_READ;
+        if (!(flags & BDRV_O_NO_IO)) {
+            perm |= BLK_PERM_CONSISTENT_READ;
+        }
         shared &= ~(BLK_PERM_WRITE | BLK_PERM_RESIZE);
     } else {
         /* We want consistent read from backing files if the parent needs it.
-- 
2.13.6

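For readers who don't have the call site in mind: BDRV_O_NO_IO is the flag
that metadata-only users such as 'qemu-img info' pass when opening an image.
A minimal, hypothetical sketch of such an open (error handling elided; not
code from this series):

    /* Hypothetical sketch: open a node for metadata-only inspection.
     * With BDRV_O_NO_IO set, format drivers no longer have to claim
     * BLK_PERM_CONSISTENT_READ on bs->file after this patch. */
    BlockDriverState *bs = bdrv_open(filename, NULL, NULL,
                                     BDRV_O_NO_IO, &error_abort);
    /* ... query bdrv_getlength(bs), format details, etc. ... */
    bdrv_unref(bs);
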
From: John Snow <jsnow@redhat.com>

VPC has some difficulty creating geometries of particular size.
However, we can indeed force it to use a literal one, so let's
do that for the sake of test 197, which is testing some specific
offsets.

Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Lukáš Doktor <ldoktor@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 tests/qemu-iotests/197           | 4 ++++
 tests/qemu-iotests/common.filter | 3 ++-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/tests/qemu-iotests/197 b/tests/qemu-iotests/197
index XXXXXXX..XXXXXXX 100755
--- a/tests/qemu-iotests/197
+++ b/tests/qemu-iotests/197
@@ -XXX,XX +XXX,XX @@ echo '=== Copy-on-read ==='
 echo
 
 # Prep the images
+# VPC rounds image sizes to a specific geometry, force a specific size.
+if [ "$IMGFMT" = "vpc" ]; then
+    IMGOPTS=$(_optstr_add "$IMGOPTS" "force_size")
+fi
 _make_test_img 4G
 $QEMU_IO -c "write -P 55 3G 1k" "$TEST_IMG" | _filter_qemu_io
 IMGPROTO=file IMGFMT=qcow2 IMGOPTS= TEST_IMG_FILE="$TEST_WRAP" \
 
diff --git a/tests/qemu-iotests/common.filter b/tests/qemu-iotests/common.filter
index XXXXXXX..XXXXXXX 100644
--- a/tests/qemu-iotests/common.filter
+++ b/tests/qemu-iotests/common.filter
@@ -XXX,XX +XXX,XX @@ _filter_img_create()
         -e "s# log_size=[0-9]\\+##g" \
         -e "s# refcount_bits=[0-9]\\+##g" \
         -e "s# key-secret=[a-zA-Z0-9]\\+##g" \
-        -e "s# iter-time=[0-9]\\+##g"
+        -e "s# iter-time=[0-9]\\+##g" \
+        -e "s# force_size=\\(on\\|off\\)##g"
 }
 
 _filter_img_info()
-- 
2.13.6

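As a point of reference outside the test harness, the same creation option
can be set by hand; something like the following forces the literal size
instead of the rounded CHS geometry (file name is arbitrary):

    qemu-img create -f vpc -o force_size=on test.vpc 4G

Without force_size, the VPC driver rounds the virtual size to the nearest
CHS geometry, which is why the fixed offsets used by test 197 need this
option.
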
This change separates bdrv_drain_invoke(), which calls the BlockDriver
drain callbacks, from bdrv_drain_recurse(). Instead, the function
performs its own recursion now.

One reason for this is that bdrv_drain_recurse() can be called multiple
times by bdrv_drain_all_begin(), but the callbacks may only be called
once. The separation is necessary to fix this bug.

The other reason is that we intend to go to a model where we call all
driver callbacks first, and only then start polling. This is not fully
achieved yet with this patch, as bdrv_drain_invoke() contains a
BDRV_POLL_WHILE() loop for the block driver callbacks, which can still
call callbacks for any unrelated event. It's a step in this direction
anyway.

Cc: qemu-stable@nongnu.org
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/io.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/block/io.c b/block/io.c
index XXXXXXX..XXXXXXX 100644
--- a/block/io.c
+++ b/block/io.c
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
     bdrv_wakeup(bs);
 }
 
+/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
 static void bdrv_drain_invoke(BlockDriverState *bs, bool begin)
 {
+    BdrvChild *child, *tmp;
     BdrvCoDrainData data = { .bs = bs, .done = false, .begin = begin};
 
     if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
@@ -XXX,XX +XXX,XX @@ static void bdrv_drain_invoke(BlockDriverState *bs, bool begin)
     data.co = qemu_coroutine_create(bdrv_drain_invoke_entry, &data);
     bdrv_coroutine_enter(bs, data.co);
     BDRV_POLL_WHILE(bs, !data.done);
+
+    QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) {
+        bdrv_drain_invoke(child->bs, begin);
+    }
 }
 
 static bool bdrv_drain_recurse(BlockDriverState *bs, bool begin)
@@ -XXX,XX +XXX,XX @@ static bool bdrv_drain_recurse(BlockDriverState *bs, bool begin)
     BdrvChild *child, *tmp;
     bool waited;
 
-    /* Ensure any pending metadata writes are submitted to bs->file. */
-    bdrv_drain_invoke(bs, begin);
-
     /* Wait for drained requests to finish */
     waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0);
 
@@ -XXX,XX +XXX,XX @@ void bdrv_drained_begin(BlockDriverState *bs)
         bdrv_parent_drained_begin(bs);
     }
 
+    bdrv_drain_invoke(bs, true);
     bdrv_drain_recurse(bs, true);
 }
 
@@ -XXX,XX +XXX,XX @@ void bdrv_drained_end(BlockDriverState *bs)
     }
 
     bdrv_parent_drained_end(bs);
+    bdrv_drain_invoke(bs, false);
     bdrv_drain_recurse(bs, false);
     aio_enable_external(bdrv_get_aio_context(bs));
 }
@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_begin(void)
             aio_context_acquire(aio_context);
             for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
                 if (aio_context == bdrv_get_aio_context(bs)) {
+                    /* FIXME Calling this multiple times is wrong */
+                    bdrv_drain_invoke(bs, true);
                     waited |= bdrv_drain_recurse(bs, true);
                 }
             }
@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_end(void)
         aio_context_acquire(aio_context);
         aio_enable_external(aio_context);
         bdrv_parent_drained_end(bs);
+        bdrv_drain_invoke(bs, false);
         bdrv_drain_recurse(bs, false);
         aio_context_release(aio_context);
     }
-- 
2.13.6

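For context, .bdrv_co_drain_begin/.bdrv_co_drain_end are the optional
BlockDriver callbacks that bdrv_drain_invoke() now walks recursively. A
minimal sketch of a driver supplying the pair, modelled on the test driver
added later in this series (names and the counter are illustrative, usual
QEMU block headers assumed):

    typedef struct BDRVExampleState {
        int drain_count;
    } BDRVExampleState;

    static void coroutine_fn example_co_drain_begin(BlockDriverState *bs)
    {
        BDRVExampleState *s = bs->opaque;
        s->drain_count++;    /* stop issuing new internal requests */
    }

    static void coroutine_fn example_co_drain_end(BlockDriverState *bs)
    {
        BDRVExampleState *s = bs->opaque;
        s->drain_count--;    /* resume normal operation */
    }

    static BlockDriver bdrv_example = {
        .format_name         = "example",
        .instance_size       = sizeof(BDRVExampleState),
        .bdrv_co_drain_begin = example_co_drain_begin,
        .bdrv_co_drain_end   = example_co_drain_end,
    };
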
bdrv_drain_all_begin() used to call the .bdrv_co_drain_begin() driver
callback inside its polling loop. This means that how many times it got
called for each node depended on how long it had to poll the event loop.

This is obviously not right and results in nodes that stay drained even
after bdrv_drain_all_end(), which calls .bdrv_co_drain_end() only once
per node.

Fix bdrv_drain_all_begin() to call the callback only once, too.

Cc: qemu-stable@nongnu.org
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/io.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/block/io.c b/block/io.c
index XXXXXXX..XXXXXXX 100644
--- a/block/io.c
+++ b/block/io.c
@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_begin(void)
         aio_context_acquire(aio_context);
         bdrv_parent_drained_begin(bs);
         aio_disable_external(aio_context);
+        bdrv_drain_invoke(bs, true);
         aio_context_release(aio_context);
 
         if (!g_slist_find(aio_ctxs, aio_context)) {
@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_begin(void)
             aio_context_acquire(aio_context);
             for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
                 if (aio_context == bdrv_get_aio_context(bs)) {
-                    /* FIXME Calling this multiple times is wrong */
-                    bdrv_drain_invoke(bs, true);
                     waited |= bdrv_drain_recurse(bs, true);
                 }
             }
-- 
2.13.6

This adds a test case that the BlockDriver callbacks for drain are
called in bdrv_drain_all_begin/end(), and that both of them are called
exactly once.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
---
 tests/test-bdrv-drain.c | 137 ++++++++++++++++++++++++++++++++++++++++++++++++
 tests/Makefile.include  |   2 +
 2 files changed, 139 insertions(+)
 create mode 100644 tests/test-bdrv-drain.c

diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
15
new file mode 100644
16
index XXXXXXX..XXXXXXX
17
--- /dev/null
18
+++ b/tests/test-bdrv-drain.c
19
@@ -XXX,XX +XXX,XX @@
20
+/*
21
+ * Block node draining tests
22
+ *
23
+ * Copyright (c) 2017 Kevin Wolf <kwolf@redhat.com>
24
+ *
25
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
26
+ * of this software and associated documentation files (the "Software"), to deal
27
+ * in the Software without restriction, including without limitation the rights
28
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
29
+ * copies of the Software, and to permit persons to whom the Software is
30
+ * furnished to do so, subject to the following conditions:
31
+ *
32
+ * The above copyright notice and this permission notice shall be included in
33
+ * all copies or substantial portions of the Software.
34
+ *
35
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
36
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
37
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
38
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
39
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
40
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
41
+ * THE SOFTWARE.
42
+ */
43
+
44
+#include "qemu/osdep.h"
45
+#include "block/block.h"
46
+#include "sysemu/block-backend.h"
47
+#include "qapi/error.h"
48
+
49
+typedef struct BDRVTestState {
50
+ int drain_count;
51
+} BDRVTestState;
52
+
53
+static void coroutine_fn bdrv_test_co_drain_begin(BlockDriverState *bs)
54
+{
55
+ BDRVTestState *s = bs->opaque;
56
+ s->drain_count++;
57
+}
58
+
59
+static void coroutine_fn bdrv_test_co_drain_end(BlockDriverState *bs)
60
+{
61
+ BDRVTestState *s = bs->opaque;
62
+ s->drain_count--;
63
+}
64
+
65
+static void bdrv_test_close(BlockDriverState *bs)
66
+{
67
+ BDRVTestState *s = bs->opaque;
68
+ g_assert_cmpint(s->drain_count, >, 0);
69
+}
70
+
71
+static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs,
72
+ uint64_t offset, uint64_t bytes,
73
+ QEMUIOVector *qiov, int flags)
74
+{
75
+ /* We want this request to stay until the polling loop in drain waits for
76
+ * it to complete. We need to sleep a while as bdrv_drain_invoke() comes
77
+ * first and polls its result, too, but it shouldn't accidentally complete
78
+ * this request yet. */
79
+ qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);
80
+
81
+ return 0;
82
+}
83
+
84
+static BlockDriver bdrv_test = {
85
+ .format_name = "test",
86
+ .instance_size = sizeof(BDRVTestState),
87
+
88
+ .bdrv_close = bdrv_test_close,
89
+ .bdrv_co_preadv = bdrv_test_co_preadv,
90
+
91
+ .bdrv_co_drain_begin = bdrv_test_co_drain_begin,
92
+ .bdrv_co_drain_end = bdrv_test_co_drain_end,
93
+};
94
+
95
+static void aio_ret_cb(void *opaque, int ret)
96
+{
97
+ int *aio_ret = opaque;
98
+ *aio_ret = ret;
99
+}
100
+
101
+static void test_drv_cb_drain_all(void)
102
+{
103
+ BlockBackend *blk;
104
+ BlockDriverState *bs;
105
+ BDRVTestState *s;
106
+ BlockAIOCB *acb;
107
+ int aio_ret;
108
+
109
+ QEMUIOVector qiov;
110
+ struct iovec iov = {
111
+ .iov_base = NULL,
112
+ .iov_len = 0,
113
+ };
114
+ qemu_iovec_init_external(&qiov, &iov, 1);
115
+
116
+ blk = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
117
+ bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
118
+ &error_abort);
119
+ s = bs->opaque;
120
+ blk_insert_bs(blk, bs, &error_abort);
121
+
122
+ /* Simple bdrv_drain_all_begin/end pair, check that CBs are called */
123
+ g_assert_cmpint(s->drain_count, ==, 0);
124
+ bdrv_drain_all_begin();
125
+ g_assert_cmpint(s->drain_count, ==, 1);
126
+ bdrv_drain_all_end();
127
+ g_assert_cmpint(s->drain_count, ==, 0);
128
+
129
+ /* Now do the same while a request is pending */
130
+ aio_ret = -EINPROGRESS;
131
+ acb = blk_aio_preadv(blk, 0, &qiov, 0, aio_ret_cb, &aio_ret);
132
+ g_assert(acb != NULL);
133
+ g_assert_cmpint(aio_ret, ==, -EINPROGRESS);
134
+
135
+ g_assert_cmpint(s->drain_count, ==, 0);
136
+ bdrv_drain_all_begin();
137
+ g_assert_cmpint(aio_ret, ==, 0);
138
+ g_assert_cmpint(s->drain_count, ==, 1);
139
+ bdrv_drain_all_end();
140
+ g_assert_cmpint(s->drain_count, ==, 0);
141
+
142
+ bdrv_unref(bs);
143
+ blk_unref(blk);
144
+}
145
+
146
+int main(int argc, char **argv)
147
+{
148
+ bdrv_init();
149
+ qemu_init_main_loop(&error_abort);
150
+
151
+ g_test_init(&argc, &argv, NULL);
152
+
153
+ g_test_add_func("/bdrv-drain/driver-cb/drain_all", test_drv_cb_drain_all);
154
+
155
+ return g_test_run();
156
+}
157
diff --git a/tests/Makefile.include b/tests/Makefile.include
158
index XXXXXXX..XXXXXXX 100644
159
--- a/tests/Makefile.include
160
+++ b/tests/Makefile.include
161
@@ -XXX,XX +XXX,XX @@ gcov-files-test-thread-pool-y = thread-pool.c
162
gcov-files-test-hbitmap-y = util/hbitmap.c
163
check-unit-y += tests/test-hbitmap$(EXESUF)
164
gcov-files-test-hbitmap-y = blockjob.c
165
+check-unit-y += tests/test-bdrv-drain$(EXESUF)
166
check-unit-y += tests/test-blockjob$(EXESUF)
167
check-unit-y += tests/test-blockjob-txn$(EXESUF)
168
check-unit-y += tests/test-x86-cpuid$(EXESUF)
169
@@ -XXX,XX +XXX,XX @@ tests/test-coroutine$(EXESUF): tests/test-coroutine.o $(test-block-obj-y)
170
tests/test-aio$(EXESUF): tests/test-aio.o $(test-block-obj-y)
171
tests/test-aio-multithread$(EXESUF): tests/test-aio-multithread.o $(test-block-obj-y)
172
tests/test-throttle$(EXESUF): tests/test-throttle.o $(test-block-obj-y)
173
+tests/test-bdrv-drain$(EXESUF): tests/test-bdrv-drain.o $(test-block-obj-y) $(test-util-obj-y)
174
tests/test-blockjob$(EXESUF): tests/test-blockjob.o $(test-block-obj-y) $(test-util-obj-y)
175
tests/test-blockjob-txn$(EXESUF): tests/test-blockjob-txn.o $(test-block-obj-y) $(test-util-obj-y)
176
tests/test-thread-pool$(EXESUF): tests/test-thread-pool.o $(test-block-obj-y)
177
-- 
2.13.6

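If you want to try the new test directly, on a Linux build the binary can be
built and run on its own; -p is the usual GLib option to select a single
test path:

    make tests/test-bdrv-drain
    ./tests/test-bdrv-drain -p /bdrv-drain/driver-cb/drain_all

It is also picked up by 'make check-unit' through the check-unit-y line
added above.
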
Now that the bdrv_drain_invoke() calls are pulled up to the callers of
bdrv_drain_recurse(), the 'begin' parameter isn't needed any more.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/io.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/block/io.c b/block/io.c
index XXXXXXX..XXXXXXX 100644
--- a/block/io.c
+++ b/block/io.c
@@ -XXX,XX +XXX,XX @@ static void bdrv_drain_invoke(BlockDriverState *bs, bool begin)
     }
 }
 
-static bool bdrv_drain_recurse(BlockDriverState *bs, bool begin)
+static bool bdrv_drain_recurse(BlockDriverState *bs)
 {
     BdrvChild *child, *tmp;
     bool waited;
@@ -XXX,XX +XXX,XX @@ static bool bdrv_drain_recurse(BlockDriverState *bs, bool begin)
              */
            bdrv_ref(bs);
        }
-        waited |= bdrv_drain_recurse(bs, begin);
+        waited |= bdrv_drain_recurse(bs);
        if (in_main_loop) {
            bdrv_unref(bs);
        }
@@ -XXX,XX +XXX,XX @@ void bdrv_drained_begin(BlockDriverState *bs)
     }
 
     bdrv_drain_invoke(bs, true);
-    bdrv_drain_recurse(bs, true);
+    bdrv_drain_recurse(bs);
 }
 
 void bdrv_drained_end(BlockDriverState *bs)
@@ -XXX,XX +XXX,XX @@ void bdrv_drained_end(BlockDriverState *bs)
 
     bdrv_parent_drained_end(bs);
     bdrv_drain_invoke(bs, false);
-    bdrv_drain_recurse(bs, false);
+    bdrv_drain_recurse(bs);
     aio_enable_external(bdrv_get_aio_context(bs));
 }
 
@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_begin(void)
             aio_context_acquire(aio_context);
             for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
                 if (aio_context == bdrv_get_aio_context(bs)) {
-                    waited |= bdrv_drain_recurse(bs, true);
+                    waited |= bdrv_drain_recurse(bs);
                 }
             }
             aio_context_release(aio_context);
@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_end(void)
         aio_enable_external(aio_context);
         bdrv_parent_drained_end(bs);
         bdrv_drain_invoke(bs, false);
-        bdrv_drain_recurse(bs, false);
+        bdrv_drain_recurse(bs);
         aio_context_release(aio_context);
     }
 
-- 
2.13.6

The device is drained, so there is no point in waiting for requests at
the end of the drained section. Remove the bdrv_drain_recurse() calls
there.

The bdrv_drain_recurse() calls were introduced in commit 481cad48e5e
in order to call the .bdrv_co_drain_end() driver callback. This is now
done by a separate bdrv_drain_invoke() call.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/io.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/block/io.c b/block/io.c
index XXXXXXX..XXXXXXX 100644
--- a/block/io.c
+++ b/block/io.c
@@ -XXX,XX +XXX,XX @@ void bdrv_drained_end(BlockDriverState *bs)
 
     bdrv_parent_drained_end(bs);
     bdrv_drain_invoke(bs, false);
-    bdrv_drain_recurse(bs);
     aio_enable_external(bdrv_get_aio_context(bs));
 }
 
@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_end(void)
         aio_enable_external(aio_context);
         bdrv_parent_drained_end(bs);
         bdrv_drain_invoke(bs, false);
-        bdrv_drain_recurse(bs);
         aio_context_release(aio_context);
     }
 
-- 
2.13.6

Drain requests are propagated to child nodes, parent nodes and directly
to the AioContext. The order in which this happened was different
between all combinations of drain/drain_all and begin/end.

The correct order is to keep children only drained when their parents
are also drained. This means that at the start of a drained section, the
AioContext needs to be drained first, the parents second and only then
the children. The correct order for the end of a drained section is the
opposite.

This patch changes the three other functions to follow the example of
bdrv_drained_begin(), which is the only one that got it right.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/io.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/block/io.c b/block/io.c
index XXXXXXX..XXXXXXX 100644
--- a/block/io.c
+++ b/block/io.c
@@ -XXX,XX +XXX,XX @@ void bdrv_drained_begin(BlockDriverState *bs)
         return;
     }
 
+    /* Stop things in parent-to-child order */
     if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
         aio_disable_external(bdrv_get_aio_context(bs));
         bdrv_parent_drained_begin(bs);
@@ -XXX,XX +XXX,XX @@ void bdrv_drained_end(BlockDriverState *bs)
         return;
     }
 
-    bdrv_parent_drained_end(bs);
+    /* Re-enable things in child-to-parent order */
     bdrv_drain_invoke(bs, false);
+    bdrv_parent_drained_end(bs);
     aio_enable_external(bdrv_get_aio_context(bs));
 }
 
@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_begin(void)
     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
         AioContext *aio_context = bdrv_get_aio_context(bs);
 
+        /* Stop things in parent-to-child order */
         aio_context_acquire(aio_context);
-        bdrv_parent_drained_begin(bs);
         aio_disable_external(aio_context);
+        bdrv_parent_drained_begin(bs);
         bdrv_drain_invoke(bs, true);
         aio_context_release(aio_context);
 
@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_end(void)
     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
         AioContext *aio_context = bdrv_get_aio_context(bs);
 
+        /* Re-enable things in child-to-parent order */
         aio_context_acquire(aio_context);
-        aio_enable_external(aio_context);
-        bdrv_parent_drained_end(bs);
         bdrv_drain_invoke(bs, false);
+        bdrv_parent_drained_end(bs);
+        aio_enable_external(aio_context);
         aio_context_release(aio_context);
     }
 
-- 
2.13.6

Commit 15afd94a047 added code to acquire and release the AioContext in
qemuio_command(). This means that the lock is taken twice now in the
call path from hmp_qemu_io(). This causes BDRV_POLL_WHILE() to hang for
any requests issued to nodes in a non-mainloop AioContext.

Dropping the first locking from hmp_qemu_io() fixes the problem.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 hmp.c | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/hmp.c b/hmp.c
index XXXXXXX..XXXXXXX 100644
--- a/hmp.c
+++ b/hmp.c
@@ -XXX,XX +XXX,XX @@ void hmp_qemu_io(Monitor *mon, const QDict *qdict)
 {
     BlockBackend *blk;
     BlockBackend *local_blk = NULL;
-    AioContext *aio_context;
     const char* device = qdict_get_str(qdict, "device");
     const char* command = qdict_get_str(qdict, "command");
     Error *err = NULL;
@@ -XXX,XX +XXX,XX @@ void hmp_qemu_io(Monitor *mon, const QDict *qdict)
         }
     }
 
-    aio_context = blk_get_aio_context(blk);
-    aio_context_acquire(aio_context);
-
     /*
      * Notably absent: Proper permission management. This is sad, but it seems
      * almost impossible to achieve without changing the semantics and thereby
@@ -XXX,XX +XXX,XX @@ void hmp_qemu_io(Monitor *mon, const QDict *qdict)
      */
     qemuio_command(blk, command);
 
-    aio_context_release(aio_context);
-
 fail:
     blk_unref(local_blk);
     hmp_handle_error(mon, &err);
-- 
2.13.6

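For reference, the path in question is the HMP 'qemu-io' command, i.e.
something like the following from the monitor, with a drive whose node lives
in an IOThread AioContext (the device name is illustrative):

    (qemu) qemu-io drive0 "write 0 4k"

Before this fix, the aio_context_acquire() taken here on top of the one in
qemuio_command() made BDRV_POLL_WHILE() hang for such devices.
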
From: Edgar Kaziakhmedov <edgar.kaziakhmedov@virtuozzo.com>

Since bdrv_co_preadv does all necessary checks, including
reading after the end of the backing file, avoid duplicating
the verification before the bdrv_co_preadv call.

Signed-off-by: Edgar Kaziakhmedov <edgar.kaziakhmedov@virtuozzo.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/qcow2.h |  3 -
 block/qcow2.c | 51 ++++++++-------------------------------------------
 2 files changed, 8 insertions(+), 46 deletions(-)

diff --git a/block/qcow2.h b/block/qcow2.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/block/qcow2.h
19
+++ b/block/qcow2.h
20
@@ -XXX,XX +XXX,XX @@ uint32_t offset_to_reftable_index(BDRVQcow2State *s, uint64_t offset)
21
}
22
23
/* qcow2.c functions */
24
-int qcow2_backing_read1(BlockDriverState *bs, QEMUIOVector *qiov,
25
- int64_t sector_num, int nb_sectors);
26
-
27
int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size,
28
int refcount_order, bool generous_increase,
29
uint64_t *refblock_count);
30
diff --git a/block/qcow2.c b/block/qcow2.c
31
index XXXXXXX..XXXXXXX 100644
32
--- a/block/qcow2.c
33
+++ b/block/qcow2.c
34
@@ -XXX,XX +XXX,XX @@ static int64_t coroutine_fn qcow2_co_get_block_status(BlockDriverState *bs,
35
return status;
36
}
37
38
-/* handle reading after the end of the backing file */
39
-int qcow2_backing_read1(BlockDriverState *bs, QEMUIOVector *qiov,
40
- int64_t offset, int bytes)
41
-{
42
- uint64_t bs_size = bs->total_sectors * BDRV_SECTOR_SIZE;
43
- int n1;
44
-
45
- if ((offset + bytes) <= bs_size) {
46
- return bytes;
47
- }
48
-
49
- if (offset >= bs_size) {
50
- n1 = 0;
51
- } else {
52
- n1 = bs_size - offset;
53
- }
54
-
55
- qemu_iovec_memset(qiov, n1, 0, bytes - n1);
56
-
57
- return n1;
58
-}
59
-
60
static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset,
61
uint64_t bytes, QEMUIOVector *qiov,
62
int flags)
63
{
64
BDRVQcow2State *s = bs->opaque;
65
- int offset_in_cluster, n1;
66
+ int offset_in_cluster;
67
int ret;
68
unsigned int cur_bytes; /* number of bytes in current iteration */
69
uint64_t cluster_offset = 0;
70
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset,
71
case QCOW2_CLUSTER_UNALLOCATED:
72
73
if (bs->backing) {
74
- /* read from the base image */
75
- n1 = qcow2_backing_read1(bs->backing->bs, &hd_qiov,
76
- offset, cur_bytes);
77
- if (n1 > 0) {
78
- QEMUIOVector local_qiov;
79
-
80
- qemu_iovec_init(&local_qiov, hd_qiov.niov);
81
- qemu_iovec_concat(&local_qiov, &hd_qiov, 0, n1);
82
-
83
- BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
84
- qemu_co_mutex_unlock(&s->lock);
85
- ret = bdrv_co_preadv(bs->backing, offset, n1,
86
- &local_qiov, 0);
87
- qemu_co_mutex_lock(&s->lock);
88
-
89
- qemu_iovec_destroy(&local_qiov);
90
-
91
- if (ret < 0) {
92
- goto fail;
93
- }
94
+ BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
95
+ qemu_co_mutex_unlock(&s->lock);
96
+ ret = bdrv_co_preadv(bs->backing, offset, cur_bytes,
97
+ &hd_qiov, 0);
98
+ qemu_co_mutex_lock(&s->lock);
99
+ if (ret < 0) {
100
+ goto fail;
101
}
102
} else {
103
/* Note: in this case, no need to wait */
104
-- 
2.13.6

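The behaviour the patch relies on (reads beyond the end of a shorter backing
file simply return zeroes) can also be checked from the command line; a small
illustration with arbitrary file names:

    qemu-img create -f qcow2 base.qcow2 1M
    qemu-img create -f qcow2 -o backing_file=base.qcow2,backing_fmt=qcow2 overlay.qcow2 2M
    qemu-io -c 'read -P 0 1536k 64k' overlay.qcow2

The read lands in a region of the overlay that has no counterpart in the
backing file, and bdrv_co_preadv() zero-fills it without any help from the
removed qcow2_backing_read1().
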
Removing a quorum child node with x-blockdev-change results in a quorum
driver state that cannot be recreated with create options because it
would require a list with gaps. This causes trouble in at least
.bdrv_refresh_filename().

Document this problem so that we won't accidentally mark the command
stable without having addressed it.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Alberto Garcia <berto@igalia.com>
---
 qapi/block-core.json | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/qapi/block-core.json b/qapi/block-core.json
index XXXXXXX..XXXXXXX 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -XXX,XX +XXX,XX @@
 # does not support all kinds of operations, all kinds of children, nor
 # all block drivers.
 #
+# FIXME Removing children from a quorum node means introducing gaps in the
+# child indices. This cannot be represented in the 'children' list of
+# BlockdevOptionsQuorum, as returned by .bdrv_refresh_filename().
+#
 # Warning: The data in a new quorum child MUST be consistent with that of
 # the rest of the array.
 #
-- 
2.13.6

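To make the documented pitfall concrete: x-blockdev-change removes a quorum
child with a QMP command along these lines (node and child names are
illustrative), and removing e.g. children.1 from a three-child quorum leaves
indices 0 and 2 behind, which the 'children' list in BlockdevOptionsQuorum
cannot express:

    { "execute": "x-blockdev-change",
      "arguments": { "parent": "quorum0", "child": "children.1" } }
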
From: Doug Gale <doug16k@gmail.com>

Add trace output for commands, errors, and undefined behavior.
Add guest error log output for undefined behavior.
Report invalid undefined accesses to MMIO.
Annotate unlikely error checks with unlikely.

Signed-off-by: Doug Gale <doug16k@gmail.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 hw/block/nvme.c       | 349 ++++++++++++++++++++++++++++++++++++++++++--------
 hw/block/trace-events |  93 ++++++++++++++
 2 files changed, 390 insertions(+), 52 deletions(-)

diff --git a/hw/block/nvme.c b/hw/block/nvme.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/hw/block/nvme.c
20
+++ b/hw/block/nvme.c
21
@@ -XXX,XX +XXX,XX @@
22
#include "qapi/visitor.h"
23
#include "sysemu/block-backend.h"
24
25
+#include "qemu/log.h"
26
+#include "trace.h"
27
#include "nvme.h"
28
29
+#define NVME_GUEST_ERR(trace, fmt, ...) \
30
+ do { \
31
+ (trace_##trace)(__VA_ARGS__); \
32
+ qemu_log_mask(LOG_GUEST_ERROR, #trace \
33
+ " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
34
+ } while (0)
35
+
36
static void nvme_process_sq(void *opaque);
37
38
static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
39
@@ -XXX,XX +XXX,XX @@ static void nvme_isr_notify(NvmeCtrl *n, NvmeCQueue *cq)
40
{
41
if (cq->irq_enabled) {
42
if (msix_enabled(&(n->parent_obj))) {
43
+ trace_nvme_irq_msix(cq->vector);
44
msix_notify(&(n->parent_obj), cq->vector);
45
} else {
46
+ trace_nvme_irq_pin();
47
pci_irq_pulse(&n->parent_obj);
48
}
49
+ } else {
50
+ trace_nvme_irq_masked();
51
}
52
}
53
54
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
55
trans_len = MIN(len, trans_len);
56
int num_prps = (len >> n->page_bits) + 1;
57
58
- if (!prp1) {
59
+ if (unlikely(!prp1)) {
60
+ trace_nvme_err_invalid_prp();
61
return NVME_INVALID_FIELD | NVME_DNR;
62
} else if (n->cmbsz && prp1 >= n->ctrl_mem.addr &&
63
prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) {
64
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
65
}
66
len -= trans_len;
67
if (len) {
68
- if (!prp2) {
69
+ if (unlikely(!prp2)) {
70
+ trace_nvme_err_invalid_prp2_missing();
71
goto unmap;
72
}
73
if (len > n->page_size) {
74
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
75
uint64_t prp_ent = le64_to_cpu(prp_list[i]);
76
77
if (i == n->max_prp_ents - 1 && len > n->page_size) {
78
- if (!prp_ent || prp_ent & (n->page_size - 1)) {
79
+ if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
80
+ trace_nvme_err_invalid_prplist_ent(prp_ent);
81
goto unmap;
82
}
83
84
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
85
prp_ent = le64_to_cpu(prp_list[i]);
86
}
87
88
- if (!prp_ent || prp_ent & (n->page_size - 1)) {
89
+ if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
90
+ trace_nvme_err_invalid_prplist_ent(prp_ent);
91
goto unmap;
92
}
93
94
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
95
i++;
96
}
97
} else {
98
- if (prp2 & (n->page_size - 1)) {
99
+ if (unlikely(prp2 & (n->page_size - 1))) {
100
+ trace_nvme_err_invalid_prp2_align(prp2);
101
goto unmap;
102
}
103
if (qsg->nsg) {
104
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
105
QEMUIOVector iov;
106
uint16_t status = NVME_SUCCESS;
107
108
+ trace_nvme_dma_read(prp1, prp2);
109
+
110
if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
111
return NVME_INVALID_FIELD | NVME_DNR;
112
}
113
if (qsg.nsg > 0) {
114
- if (dma_buf_read(ptr, len, &qsg)) {
115
+ if (unlikely(dma_buf_read(ptr, len, &qsg))) {
116
+ trace_nvme_err_invalid_dma();
117
status = NVME_INVALID_FIELD | NVME_DNR;
118
}
119
qemu_sglist_destroy(&qsg);
120
} else {
121
- if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) {
122
+ if (unlikely(qemu_iovec_to_buf(&iov, 0, ptr, len) != len)) {
123
+ trace_nvme_err_invalid_dma();
124
status = NVME_INVALID_FIELD | NVME_DNR;
125
}
126
qemu_iovec_destroy(&iov);
127
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
128
uint64_t aio_slba = slba << (data_shift - BDRV_SECTOR_BITS);
129
uint32_t aio_nlb = nlb << (data_shift - BDRV_SECTOR_BITS);
130
131
- if (slba + nlb > ns->id_ns.nsze) {
132
+ if (unlikely(slba + nlb > ns->id_ns.nsze)) {
133
+ trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
134
return NVME_LBA_RANGE | NVME_DNR;
135
}
136
137
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
138
int is_write = rw->opcode == NVME_CMD_WRITE ? 1 : 0;
139
enum BlockAcctType acct = is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;
140
141
- if ((slba + nlb) > ns->id_ns.nsze) {
142
+ trace_nvme_rw(is_write ? "write" : "read", nlb, data_size, slba);
143
+
144
+ if (unlikely((slba + nlb) > ns->id_ns.nsze)) {
145
block_acct_invalid(blk_get_stats(n->conf.blk), acct);
146
+ trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
147
return NVME_LBA_RANGE | NVME_DNR;
148
}
149
150
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
151
NvmeNamespace *ns;
152
uint32_t nsid = le32_to_cpu(cmd->nsid);
153
154
- if (nsid == 0 || nsid > n->num_namespaces) {
155
+ if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
156
+ trace_nvme_err_invalid_ns(nsid, n->num_namespaces);
157
return NVME_INVALID_NSID | NVME_DNR;
158
}
159
160
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
161
case NVME_CMD_READ:
162
return nvme_rw(n, ns, cmd, req);
163
default:
164
+ trace_nvme_err_invalid_opc(cmd->opcode);
165
return NVME_INVALID_OPCODE | NVME_DNR;
166
}
167
}
168
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
169
NvmeCQueue *cq;
170
uint16_t qid = le16_to_cpu(c->qid);
171
172
- if (!qid || nvme_check_sqid(n, qid)) {
173
+ if (unlikely(!qid || nvme_check_sqid(n, qid))) {
174
+ trace_nvme_err_invalid_del_sq(qid);
175
return NVME_INVALID_QID | NVME_DNR;
176
}
177
178
+ trace_nvme_del_sq(qid);
179
+
180
sq = n->sq[qid];
181
while (!QTAILQ_EMPTY(&sq->out_req_list)) {
182
req = QTAILQ_FIRST(&sq->out_req_list);
183
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
184
uint16_t qflags = le16_to_cpu(c->sq_flags);
185
uint64_t prp1 = le64_to_cpu(c->prp1);
186
187
- if (!cqid || nvme_check_cqid(n, cqid)) {
188
+ trace_nvme_create_sq(prp1, sqid, cqid, qsize, qflags);
189
+
190
+ if (unlikely(!cqid || nvme_check_cqid(n, cqid))) {
191
+ trace_nvme_err_invalid_create_sq_cqid(cqid);
192
return NVME_INVALID_CQID | NVME_DNR;
193
}
194
- if (!sqid || !nvme_check_sqid(n, sqid)) {
195
+ if (unlikely(!sqid || !nvme_check_sqid(n, sqid))) {
196
+ trace_nvme_err_invalid_create_sq_sqid(sqid);
197
return NVME_INVALID_QID | NVME_DNR;
198
}
199
- if (!qsize || qsize > NVME_CAP_MQES(n->bar.cap)) {
200
+ if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
201
+ trace_nvme_err_invalid_create_sq_size(qsize);
202
return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
203
}
204
- if (!prp1 || prp1 & (n->page_size - 1)) {
205
+ if (unlikely(!prp1 || prp1 & (n->page_size - 1))) {
206
+ trace_nvme_err_invalid_create_sq_addr(prp1);
207
return NVME_INVALID_FIELD | NVME_DNR;
208
}
209
- if (!(NVME_SQ_FLAGS_PC(qflags))) {
210
+ if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) {
211
+ trace_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags));
212
return NVME_INVALID_FIELD | NVME_DNR;
213
}
214
sq = g_malloc0(sizeof(*sq));
215
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeCmd *cmd)
216
NvmeCQueue *cq;
217
uint16_t qid = le16_to_cpu(c->qid);
218
219
- if (!qid || nvme_check_cqid(n, qid)) {
220
+ if (unlikely(!qid || nvme_check_cqid(n, qid))) {
221
+ trace_nvme_err_invalid_del_cq_cqid(qid);
222
return NVME_INVALID_CQID | NVME_DNR;
223
}
224
225
cq = n->cq[qid];
226
- if (!QTAILQ_EMPTY(&cq->sq_list)) {
227
+ if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) {
228
+ trace_nvme_err_invalid_del_cq_notempty(qid);
229
return NVME_INVALID_QUEUE_DEL;
230
}
231
+ trace_nvme_del_cq(qid);
232
nvme_free_cq(cq, n);
233
return NVME_SUCCESS;
234
}
235
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
236
uint16_t qflags = le16_to_cpu(c->cq_flags);
237
uint64_t prp1 = le64_to_cpu(c->prp1);
238
239
- if (!cqid || !nvme_check_cqid(n, cqid)) {
240
+ trace_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
241
+ NVME_CQ_FLAGS_IEN(qflags) != 0);
242
+
243
+ if (unlikely(!cqid || !nvme_check_cqid(n, cqid))) {
244
+ trace_nvme_err_invalid_create_cq_cqid(cqid);
245
return NVME_INVALID_CQID | NVME_DNR;
246
}
247
- if (!qsize || qsize > NVME_CAP_MQES(n->bar.cap)) {
248
+ if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
249
+ trace_nvme_err_invalid_create_cq_size(qsize);
250
return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
251
}
252
- if (!prp1) {
253
+ if (unlikely(!prp1)) {
254
+ trace_nvme_err_invalid_create_cq_addr(prp1);
255
return NVME_INVALID_FIELD | NVME_DNR;
256
}
257
- if (vector > n->num_queues) {
258
+ if (unlikely(vector > n->num_queues)) {
259
+ trace_nvme_err_invalid_create_cq_vector(vector);
260
return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
261
}
262
- if (!(NVME_CQ_FLAGS_PC(qflags))) {
263
+ if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) {
264
+ trace_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
265
return NVME_INVALID_FIELD | NVME_DNR;
266
}
267
268
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeIdentify *c)
269
uint64_t prp1 = le64_to_cpu(c->prp1);
270
uint64_t prp2 = le64_to_cpu(c->prp2);
271
272
+ trace_nvme_identify_ctrl();
273
+
274
return nvme_dma_read_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
275
prp1, prp2);
276
}
277
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
278
uint64_t prp1 = le64_to_cpu(c->prp1);
279
uint64_t prp2 = le64_to_cpu(c->prp2);
280
281
- if (nsid == 0 || nsid > n->num_namespaces) {
282
+ trace_nvme_identify_ns(nsid);
283
+
284
+ if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
285
+ trace_nvme_err_invalid_ns(nsid, n->num_namespaces);
286
return NVME_INVALID_NSID | NVME_DNR;
287
}
288
289
ns = &n->namespaces[nsid - 1];
290
+
291
return nvme_dma_read_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns),
292
prp1, prp2);
293
}
294
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
295
uint16_t ret;
296
int i, j = 0;
297
298
+ trace_nvme_identify_nslist(min_nsid);
299
+
300
list = g_malloc0(data_len);
301
for (i = 0; i < n->num_namespaces; i++) {
302
if (i < min_nsid) {
303
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
304
case 0x02:
305
return nvme_identify_nslist(n, c);
306
default:
307
+ trace_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
308
return NVME_INVALID_FIELD | NVME_DNR;
309
}
310
}
311
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
312
switch (dw10) {
313
case NVME_VOLATILE_WRITE_CACHE:
314
result = blk_enable_write_cache(n->conf.blk);
315
+ trace_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
316
break;
317
case NVME_NUMBER_OF_QUEUES:
318
result = cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
319
+ trace_nvme_getfeat_numq(result);
320
break;
321
default:
322
+ trace_nvme_err_invalid_getfeat(dw10);
323
return NVME_INVALID_FIELD | NVME_DNR;
324
}
325
326
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
327
blk_set_enable_write_cache(n->conf.blk, dw11 & 1);
328
break;
329
case NVME_NUMBER_OF_QUEUES:
330
+ trace_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
331
+ ((dw11 >> 16) & 0xFFFF) + 1,
332
+ n->num_queues - 1, n->num_queues - 1);
333
req->cqe.result =
334
cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
335
break;
336
default:
337
+ trace_nvme_err_invalid_setfeat(dw10);
338
return NVME_INVALID_FIELD | NVME_DNR;
339
}
340
return NVME_SUCCESS;
341
@@ -XXX,XX +XXX,XX @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
342
case NVME_ADM_CMD_GET_FEATURES:
343
return nvme_get_feature(n, cmd, req);
344
default:
345
+ trace_nvme_err_invalid_admin_opc(cmd->opcode);
346
return NVME_INVALID_OPCODE | NVME_DNR;
347
}
348
}
349
@@ -XXX,XX +XXX,XX @@ static int nvme_start_ctrl(NvmeCtrl *n)
350
uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
351
uint32_t page_size = 1 << page_bits;
352
353
- if (n->cq[0] || n->sq[0] || !n->bar.asq || !n->bar.acq ||
354
- n->bar.asq & (page_size - 1) || n->bar.acq & (page_size - 1) ||
355
- NVME_CC_MPS(n->bar.cc) < NVME_CAP_MPSMIN(n->bar.cap) ||
356
- NVME_CC_MPS(n->bar.cc) > NVME_CAP_MPSMAX(n->bar.cap) ||
357
- NVME_CC_IOCQES(n->bar.cc) < NVME_CTRL_CQES_MIN(n->id_ctrl.cqes) ||
358
- NVME_CC_IOCQES(n->bar.cc) > NVME_CTRL_CQES_MAX(n->id_ctrl.cqes) ||
359
- NVME_CC_IOSQES(n->bar.cc) < NVME_CTRL_SQES_MIN(n->id_ctrl.sqes) ||
360
- NVME_CC_IOSQES(n->bar.cc) > NVME_CTRL_SQES_MAX(n->id_ctrl.sqes) ||
361
- !NVME_AQA_ASQS(n->bar.aqa) || !NVME_AQA_ACQS(n->bar.aqa)) {
362
+ if (unlikely(n->cq[0])) {
363
+ trace_nvme_err_startfail_cq();
364
+ return -1;
365
+ }
366
+ if (unlikely(n->sq[0])) {
367
+ trace_nvme_err_startfail_sq();
368
+ return -1;
369
+ }
370
+ if (unlikely(!n->bar.asq)) {
371
+ trace_nvme_err_startfail_nbarasq();
372
+ return -1;
373
+ }
374
+ if (unlikely(!n->bar.acq)) {
375
+ trace_nvme_err_startfail_nbaracq();
376
+ return -1;
377
+ }
378
+ if (unlikely(n->bar.asq & (page_size - 1))) {
379
+ trace_nvme_err_startfail_asq_misaligned(n->bar.asq);
380
+ return -1;
381
+ }
382
+ if (unlikely(n->bar.acq & (page_size - 1))) {
383
+ trace_nvme_err_startfail_acq_misaligned(n->bar.acq);
384
+ return -1;
385
+ }
386
+ if (unlikely(NVME_CC_MPS(n->bar.cc) <
387
+ NVME_CAP_MPSMIN(n->bar.cap))) {
388
+ trace_nvme_err_startfail_page_too_small(
389
+ NVME_CC_MPS(n->bar.cc),
390
+ NVME_CAP_MPSMIN(n->bar.cap));
391
+ return -1;
392
+ }
393
+ if (unlikely(NVME_CC_MPS(n->bar.cc) >
394
+ NVME_CAP_MPSMAX(n->bar.cap))) {
395
+ trace_nvme_err_startfail_page_too_large(
396
+ NVME_CC_MPS(n->bar.cc),
397
+ NVME_CAP_MPSMAX(n->bar.cap));
398
+ return -1;
399
+ }
400
+ if (unlikely(NVME_CC_IOCQES(n->bar.cc) <
401
+ NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) {
402
+ trace_nvme_err_startfail_cqent_too_small(
403
+ NVME_CC_IOCQES(n->bar.cc),
404
+ NVME_CTRL_CQES_MIN(n->bar.cap));
405
+ return -1;
406
+ }
407
+ if (unlikely(NVME_CC_IOCQES(n->bar.cc) >
408
+ NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) {
409
+ trace_nvme_err_startfail_cqent_too_large(
410
+ NVME_CC_IOCQES(n->bar.cc),
411
+ NVME_CTRL_CQES_MAX(n->bar.cap));
412
+ return -1;
413
+ }
414
+ if (unlikely(NVME_CC_IOSQES(n->bar.cc) <
415
+ NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) {
416
+ trace_nvme_err_startfail_sqent_too_small(
417
+ NVME_CC_IOSQES(n->bar.cc),
418
+ NVME_CTRL_SQES_MIN(n->bar.cap));
419
+ return -1;
420
+ }
421
+ if (unlikely(NVME_CC_IOSQES(n->bar.cc) >
422
+ NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) {
423
+ trace_nvme_err_startfail_sqent_too_large(
424
+ NVME_CC_IOSQES(n->bar.cc),
425
+ NVME_CTRL_SQES_MAX(n->bar.cap));
426
+ return -1;
427
+ }
428
+ if (unlikely(!NVME_AQA_ASQS(n->bar.aqa))) {
429
+ trace_nvme_err_startfail_asqent_sz_zero();
430
+ return -1;
431
+ }
432
+ if (unlikely(!NVME_AQA_ACQS(n->bar.aqa))) {
433
+ trace_nvme_err_startfail_acqent_sz_zero();
434
return -1;
435
}
436
437
@@ -XXX,XX +XXX,XX @@ static int nvme_start_ctrl(NvmeCtrl *n)
438
static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
439
unsigned size)
440
{
441
+ if (unlikely(offset & (sizeof(uint32_t) - 1))) {
442
+ NVME_GUEST_ERR(nvme_ub_mmiowr_misaligned32,
443
+ "MMIO write not 32-bit aligned,"
444
+ " offset=0x%"PRIx64"", offset);
445
+ /* should be ignored, fall through for now */
446
+ }
447
+
448
+ if (unlikely(size < sizeof(uint32_t))) {
449
+ NVME_GUEST_ERR(nvme_ub_mmiowr_toosmall,
450
+ "MMIO write smaller than 32-bits,"
451
+ " offset=0x%"PRIx64", size=%u",
452
+ offset, size);
453
+ /* should be ignored, fall through for now */
454
+ }
455
+
456
switch (offset) {
457
- case 0xc:
458
+ case 0xc: /* INTMS */
459
+ if (unlikely(msix_enabled(&(n->parent_obj)))) {
460
+ NVME_GUEST_ERR(nvme_ub_mmiowr_intmask_with_msix,
461
+ "undefined access to interrupt mask set"
462
+ " when MSI-X is enabled");
463
+ /* should be ignored, fall through for now */
464
+ }
465
n->bar.intms |= data & 0xffffffff;
466
n->bar.intmc = n->bar.intms;
467
+ trace_nvme_mmio_intm_set(data & 0xffffffff,
468
+ n->bar.intmc);
469
break;
470
- case 0x10:
471
+ case 0x10: /* INTMC */
472
+ if (unlikely(msix_enabled(&(n->parent_obj)))) {
473
+ NVME_GUEST_ERR(nvme_ub_mmiowr_intmask_with_msix,
474
+ "undefined access to interrupt mask clr"
475
+ " when MSI-X is enabled");
476
+ /* should be ignored, fall through for now */
477
+ }
478
n->bar.intms &= ~(data & 0xffffffff);
479
n->bar.intmc = n->bar.intms;
480
+ trace_nvme_mmio_intm_clr(data & 0xffffffff,
481
+ n->bar.intmc);
482
break;
483
- case 0x14:
484
+ case 0x14: /* CC */
485
+ trace_nvme_mmio_cfg(data & 0xffffffff);
486
/* Windows first sends data, then sends enable bit */
487
if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
488
!NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc))
489
@@ -XXX,XX +XXX,XX @@ static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
490
491
if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
492
n->bar.cc = data;
493
- if (nvme_start_ctrl(n)) {
494
+ if (unlikely(nvme_start_ctrl(n))) {
495
+ trace_nvme_err_startfail();
496
n->bar.csts = NVME_CSTS_FAILED;
497
} else {
498
+ trace_nvme_mmio_start_success();
499
n->bar.csts = NVME_CSTS_READY;
500
}
501
} else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
502
+ trace_nvme_mmio_stopped();
503
nvme_clear_ctrl(n);
504
n->bar.csts &= ~NVME_CSTS_READY;
505
}
506
if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
507
- nvme_clear_ctrl(n);
508
- n->bar.cc = data;
509
- n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
510
+ trace_nvme_mmio_shutdown_set();
511
+ nvme_clear_ctrl(n);
512
+ n->bar.cc = data;
513
+ n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
514
} else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
515
- n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
516
- n->bar.cc = data;
517
+ trace_nvme_mmio_shutdown_cleared();
518
+ n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
519
+ n->bar.cc = data;
520
+ }
521
+ break;
522
+ case 0x1C: /* CSTS */
523
+ if (data & (1 << 4)) {
524
+ NVME_GUEST_ERR(nvme_ub_mmiowr_ssreset_w1c_unsupported,
525
+ "attempted to W1C CSTS.NSSRO"
526
+ " but CAP.NSSRS is zero (not supported)");
527
+ } else if (data != 0) {
528
+ NVME_GUEST_ERR(nvme_ub_mmiowr_ro_csts,
529
+ "attempted to set a read only bit"
530
+ " of controller status");
531
+ }
532
+ break;
533
+ case 0x20: /* NSSR */
534
+ if (data == 0x4E564D65) {
535
+ trace_nvme_ub_mmiowr_ssreset_unsupported();
536
+ } else {
537
+ /* The spec says that writes of other values have no effect */
538
+ return;
539
}
540
break;
541
- case 0x24:
542
+ case 0x24: /* AQA */
543
n->bar.aqa = data & 0xffffffff;
544
+ trace_nvme_mmio_aqattr(data & 0xffffffff);
545
break;
546
- case 0x28:
547
+ case 0x28: /* ASQ */
548
n->bar.asq = data;
549
+ trace_nvme_mmio_asqaddr(data);
550
break;
551
- case 0x2c:
552
+ case 0x2c: /* ASQ hi */
553
n->bar.asq |= data << 32;
554
+ trace_nvme_mmio_asqaddr_hi(data, n->bar.asq);
555
break;
556
- case 0x30:
557
+ case 0x30: /* ACQ */
558
+ trace_nvme_mmio_acqaddr(data);
559
n->bar.acq = data;
560
break;
561
- case 0x34:
562
+ case 0x34: /* ACQ hi */
563
n->bar.acq |= data << 32;
564
+ trace_nvme_mmio_acqaddr_hi(data, n->bar.acq);
565
break;
566
+ case 0x38: /* CMBLOC */
567
+ NVME_GUEST_ERR(nvme_ub_mmiowr_cmbloc_reserved,
568
+ "invalid write to reserved CMBLOC"
569
+ " when CMBSZ is zero, ignored");
570
+ return;
571
+ case 0x3C: /* CMBSZ */
572
+ NVME_GUEST_ERR(nvme_ub_mmiowr_cmbsz_readonly,
573
+ "invalid write to read only CMBSZ, ignored");
574
+ return;
575
default:
576
+ NVME_GUEST_ERR(nvme_ub_mmiowr_invalid,
577
+ "invalid MMIO write,"
578
+ " offset=0x%"PRIx64", data=%"PRIx64"",
579
+ offset, data);
580
break;
581
}
582
}
583
@@ -XXX,XX +XXX,XX @@ static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
584
uint8_t *ptr = (uint8_t *)&n->bar;
585
uint64_t val = 0;
586
587
+ if (unlikely(addr & (sizeof(uint32_t) - 1))) {
588
+ NVME_GUEST_ERR(nvme_ub_mmiord_misaligned32,
589
+ "MMIO read not 32-bit aligned,"
590
+ " offset=0x%"PRIx64"", addr);
591
+ /* should RAZ, fall through for now */
592
+ } else if (unlikely(size < sizeof(uint32_t))) {
593
+ NVME_GUEST_ERR(nvme_ub_mmiord_toosmall,
594
+ "MMIO read smaller than 32-bits,"
595
+ " offset=0x%"PRIx64"", addr);
596
+ /* should RAZ, fall through for now */
597
+ }
598
+
599
if (addr < sizeof(n->bar)) {
600
memcpy(&val, ptr + addr, size);
601
+ } else {
602
+ NVME_GUEST_ERR(nvme_ub_mmiord_invalid_ofs,
603
+ "MMIO read beyond last register,"
604
+ " offset=0x%"PRIx64", returning 0", addr);
605
}
606
+
607
return val;
608
}
609
610
@@ -XXX,XX +XXX,XX @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
611
{
612
uint32_t qid;
613
614
- if (addr & ((1 << 2) - 1)) {
615
+ if (unlikely(addr & ((1 << 2) - 1))) {
616
+ NVME_GUEST_ERR(nvme_ub_db_wr_misaligned,
617
+ "doorbell write not 32-bit aligned,"
618
+ " offset=0x%"PRIx64", ignoring", addr);
619
return;
620
}
621
622
if (((addr - 0x1000) >> 2) & 1) {
623
+ /* Completion queue doorbell write */
624
+
625
uint16_t new_head = val & 0xffff;
626
int start_sqs;
627
NvmeCQueue *cq;
628
629
qid = (addr - (0x1000 + (1 << 2))) >> 3;
630
- if (nvme_check_cqid(n, qid)) {
631
+ if (unlikely(nvme_check_cqid(n, qid))) {
632
+ NVME_GUEST_ERR(nvme_ub_db_wr_invalid_cq,
633
+ "completion queue doorbell write"
634
+ " for nonexistent queue,"
635
+ " sqid=%"PRIu32", ignoring", qid);
636
return;
637
}
638
639
cq = n->cq[qid];
640
- if (new_head >= cq->size) {
641
+ if (unlikely(new_head >= cq->size)) {
642
+ NVME_GUEST_ERR(nvme_ub_db_wr_invalid_cqhead,
643
+ "completion queue doorbell write value"
644
+ " beyond queue size, sqid=%"PRIu32","
645
+ " new_head=%"PRIu16", ignoring",
646
+ qid, new_head);
647
return;
648
}
649
650
@@ -XXX,XX +XXX,XX @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
651
nvme_isr_notify(n, cq);
652
}
653
} else {
654
+ /* Submission queue doorbell write */
655
+
656
uint16_t new_tail = val & 0xffff;
657
NvmeSQueue *sq;
658
659
qid = (addr - 0x1000) >> 3;
660
- if (nvme_check_sqid(n, qid)) {
661
+ if (unlikely(nvme_check_sqid(n, qid))) {
662
+ NVME_GUEST_ERR(nvme_ub_db_wr_invalid_sq,
663
+ "submission queue doorbell write"
664
+ " for nonexistent queue,"
665
+ " sqid=%"PRIu32", ignoring", qid);
666
return;
667
}
668
669
sq = n->sq[qid];
670
- if (new_tail >= sq->size) {
671
+ if (unlikely(new_tail >= sq->size)) {
672
+ NVME_GUEST_ERR(nvme_ub_db_wr_invalid_sqtail,
673
+ "submission queue doorbell write value"
674
+ " beyond queue size, sqid=%"PRIu32","
675
+ " new_tail=%"PRIu16", ignoring",
676
+ qid, new_tail);
677
return;
678
}
679
680
diff --git a/hw/block/trace-events b/hw/block/trace-events
681
index XXXXXXX..XXXXXXX 100644
682
--- a/hw/block/trace-events
683
+++ b/hw/block/trace-events
684
@@ -XXX,XX +XXX,XX @@ virtio_blk_submit_multireq(void *vdev, void *mrb, int start, int num_reqs, uint6
685
hd_geometry_lchs_guess(void *blk, int cyls, int heads, int secs) "blk %p LCHS %d %d %d"
686
hd_geometry_guess(void *blk, uint32_t cyls, uint32_t heads, uint32_t secs, int trans) "blk %p CHS %u %u %u trans %d"
687
688
+# hw/block/nvme.c
689
+# nvme traces for successful events
690
+nvme_irq_msix(uint32_t vector) "raising MSI-X IRQ vector %u"
691
+nvme_irq_pin(void) "pulsing IRQ pin"
692
+nvme_irq_masked(void) "IRQ is masked"
693
+nvme_dma_read(uint64_t prp1, uint64_t prp2) "DMA read, prp1=0x%"PRIx64" prp2=0x%"PRIx64""
694
+nvme_rw(char const *verb, uint32_t blk_count, uint64_t byte_count, uint64_t lba) "%s %"PRIu32" blocks (%"PRIu64" bytes) from LBA %"PRIu64""
695
+nvme_create_sq(uint64_t addr, uint16_t sqid, uint16_t cqid, uint16_t qsize, uint16_t qflags) "create submission queue, addr=0x%"PRIx64", sqid=%"PRIu16", cqid=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16""
696
+nvme_create_cq(uint64_t addr, uint16_t cqid, uint16_t vector, uint16_t size, uint16_t qflags, int ien) "create completion queue, addr=0x%"PRIx64", cqid=%"PRIu16", vector=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16", ien=%d"
697
+nvme_del_sq(uint16_t qid) "deleting submission queue sqid=%"PRIu16""
698
+nvme_del_cq(uint16_t cqid) "deleted completion queue, cqid=%"PRIu16""
699
+nvme_identify_ctrl(void) "identify controller"
700
+nvme_identify_ns(uint16_t ns) "identify namespace, nsid=%"PRIu16""
701
+nvme_identify_nslist(uint16_t ns) "identify namespace list, nsid=%"PRIu16""
702
+nvme_getfeat_vwcache(char const* result) "get feature volatile write cache, result=%s"
703
+nvme_getfeat_numq(int result) "get feature number of queues, result=%d"
704
+nvme_setfeat_numq(int reqcq, int reqsq, int gotcq, int gotsq) "requested cq_count=%d sq_count=%d, responding with cq_count=%d sq_count=%d"
705
+nvme_mmio_intm_set(uint64_t data, uint64_t new_mask) "wrote MMIO, interrupt mask set, data=0x%"PRIx64", new_mask=0x%"PRIx64""
706
+nvme_mmio_intm_clr(uint64_t data, uint64_t new_mask) "wrote MMIO, interrupt mask clr, data=0x%"PRIx64", new_mask=0x%"PRIx64""
707
+nvme_mmio_cfg(uint64_t data) "wrote MMIO, config controller config=0x%"PRIx64""
708
+nvme_mmio_aqattr(uint64_t data) "wrote MMIO, admin queue attributes=0x%"PRIx64""
709
+nvme_mmio_asqaddr(uint64_t data) "wrote MMIO, admin submission queue address=0x%"PRIx64""
710
+nvme_mmio_acqaddr(uint64_t data) "wrote MMIO, admin completion queue address=0x%"PRIx64""
711
+nvme_mmio_asqaddr_hi(uint64_t data, uint64_t new_addr) "wrote MMIO, admin submission queue high half=0x%"PRIx64", new_address=0x%"PRIx64""
712
+nvme_mmio_acqaddr_hi(uint64_t data, uint64_t new_addr) "wrote MMIO, admin completion queue high half=0x%"PRIx64", new_address=0x%"PRIx64""
713
+nvme_mmio_start_success(void) "setting controller enable bit succeeded"
714
+nvme_mmio_stopped(void) "cleared controller enable bit"
715
+nvme_mmio_shutdown_set(void) "shutdown bit set"
716
+nvme_mmio_shutdown_cleared(void) "shutdown bit cleared"
717
+
718
+# nvme traces for error conditions
719
+nvme_err_invalid_dma(void) "PRP/SGL is too small for transfer size"
720
+nvme_err_invalid_prplist_ent(uint64_t prplist) "PRP list entry is null or not page aligned: 0x%"PRIx64""
721
+nvme_err_invalid_prp2_align(uint64_t prp2) "PRP2 is not page aligned: 0x%"PRIx64""
722
+nvme_err_invalid_prp2_missing(void) "PRP2 is null and more data to be transferred"
723
+nvme_err_invalid_field(void) "invalid field"
724
+nvme_err_invalid_prp(void) "invalid PRP"
725
+nvme_err_invalid_sgl(void) "invalid SGL"
726
+nvme_err_invalid_ns(uint32_t ns, uint32_t limit) "invalid namespace %u not within 1-%u"
727
+nvme_err_invalid_opc(uint8_t opc) "invalid opcode 0x%"PRIx8""
728
+nvme_err_invalid_admin_opc(uint8_t opc) "invalid admin opcode 0x%"PRIx8""
729
+nvme_err_invalid_lba_range(uint64_t start, uint64_t len, uint64_t limit) "Invalid LBA start=%"PRIu64" len=%"PRIu64" limit=%"PRIu64""
730
+nvme_err_invalid_del_sq(uint16_t qid) "invalid submission queue deletion, sid=%"PRIu16""
731
+nvme_err_invalid_create_sq_cqid(uint16_t cqid) "failed creating submission queue, invalid cqid=%"PRIu16""
732
+nvme_err_invalid_create_sq_sqid(uint16_t sqid) "failed creating submission queue, invalid sqid=%"PRIu16""
733
+nvme_err_invalid_create_sq_size(uint16_t qsize) "failed creating submission queue, invalid qsize=%"PRIu16""
734
+nvme_err_invalid_create_sq_addr(uint64_t addr) "failed creating submission queue, addr=0x%"PRIx64""
735
+nvme_err_invalid_create_sq_qflags(uint16_t qflags) "failed creating submission queue, qflags=%"PRIu16""
736
+nvme_err_invalid_del_cq_cqid(uint16_t cqid) "failed deleting completion queue, cqid=%"PRIu16""
737
+nvme_err_invalid_del_cq_notempty(uint16_t cqid) "failed deleting completion queue, it is not empty, cqid=%"PRIu16""
738
+nvme_err_invalid_create_cq_cqid(uint16_t cqid) "failed creating completion queue, cqid=%"PRIu16""
739
+nvme_err_invalid_create_cq_size(uint16_t size) "failed creating completion queue, size=%"PRIu16""
740
+nvme_err_invalid_create_cq_addr(uint64_t addr) "failed creating completion queue, addr=0x%"PRIx64""
741
+nvme_err_invalid_create_cq_vector(uint16_t vector) "failed creating completion queue, vector=%"PRIu16""
742
+nvme_err_invalid_create_cq_qflags(uint16_t qflags) "failed creating completion queue, qflags=%"PRIu16""
743
+nvme_err_invalid_identify_cns(uint16_t cns) "identify, invalid cns=0x%"PRIx16""
744
+nvme_err_invalid_getfeat(int dw10) "invalid get features, dw10=0x%"PRIx32""
745
+nvme_err_invalid_setfeat(uint32_t dw10) "invalid set features, dw10=0x%"PRIx32""
746
+nvme_err_startfail_cq(void) "nvme_start_ctrl failed because there are non-admin completion queues"
747
+nvme_err_startfail_sq(void) "nvme_start_ctrl failed because there are non-admin submission queues"
748
+nvme_err_startfail_nbarasq(void) "nvme_start_ctrl failed because the admin submission queue address is null"
749
+nvme_err_startfail_nbaracq(void) "nvme_start_ctrl failed because the admin completion queue address is null"
750
+nvme_err_startfail_asq_misaligned(uint64_t addr) "nvme_start_ctrl failed because the admin submission queue address is misaligned: 0x%"PRIx64""
751
+nvme_err_startfail_acq_misaligned(uint64_t addr) "nvme_start_ctrl failed because the admin completion queue address is misaligned: 0x%"PRIx64""
752
+nvme_err_startfail_page_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the page size is too small: log2size=%u, min=%u"
753
+nvme_err_startfail_page_too_large(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the page size is too large: log2size=%u, max=%u"
754
+nvme_err_startfail_cqent_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the completion queue entry size is too small: log2size=%u, min=%u"
755
+nvme_err_startfail_cqent_too_large(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the completion queue entry size is too large: log2size=%u, max=%u"
756
+nvme_err_startfail_sqent_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the submission queue entry size is too small: log2size=%u, min=%u"
757
+nvme_err_startfail_sqent_too_large(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the submission queue entry size is too large: log2size=%u, max=%u"
758
+nvme_err_startfail_asqent_sz_zero(void) "nvme_start_ctrl failed because the admin submission queue size is zero"
759
+nvme_err_startfail_acqent_sz_zero(void) "nvme_start_ctrl failed because the admin completion queue size is zero"
760
+nvme_err_startfail(void) "setting controller enable bit failed"
761
+
762
+# Traces for undefined behavior
763
+nvme_ub_mmiowr_misaligned32(uint64_t offset) "MMIO write not 32-bit aligned, offset=0x%"PRIx64""
764
+nvme_ub_mmiowr_toosmall(uint64_t offset, unsigned size) "MMIO write smaller than 32 bits, offset=0x%"PRIx64", size=%u"
765
+nvme_ub_mmiowr_intmask_with_msix(void) "undefined access to interrupt mask set when MSI-X is enabled"
766
+nvme_ub_mmiowr_ro_csts(void) "attempted to set a read only bit of controller status"
767
+nvme_ub_mmiowr_ssreset_w1c_unsupported(void) "attempted to W1C CSTS.NSSRO but CAP.NSSRS is zero (not supported)"
768
+nvme_ub_mmiowr_ssreset_unsupported(void) "attempted NVM subsystem reset but CAP.NSSRS is zero (not supported)"
769
+nvme_ub_mmiowr_cmbloc_reserved(void) "invalid write to reserved CMBLOC when CMBSZ is zero, ignored"
770
+nvme_ub_mmiowr_cmbsz_readonly(void) "invalid write to read only CMBSZ, ignored"
771
+nvme_ub_mmiowr_invalid(uint64_t offset, uint64_t data) "invalid MMIO write, offset=0x%"PRIx64", data=0x%"PRIx64""
772
+nvme_ub_mmiord_misaligned32(uint64_t offset) "MMIO read not 32-bit aligned, offset=0x%"PRIx64""
773
+nvme_ub_mmiord_toosmall(uint64_t offset) "MMIO read smaller than 32-bits, offset=0x%"PRIx64""
774
+nvme_ub_mmiord_invalid_ofs(uint64_t offset) "MMIO read beyond last register, offset=0x%"PRIx64", returning 0"
775
+nvme_ub_db_wr_misaligned(uint64_t offset) "doorbell write not 32-bit aligned, offset=0x%"PRIx64", ignoring"
776
+nvme_ub_db_wr_invalid_cq(uint32_t qid) "completion queue doorbell write for nonexistent queue, cqid=%"PRIu32", ignoring"
777
+nvme_ub_db_wr_invalid_cqhead(uint32_t qid, uint16_t new_head) "completion queue doorbell write value beyond queue size, cqid=%"PRIu32", new_head=%"PRIu16", ignoring"
778
+nvme_ub_db_wr_invalid_sq(uint32_t qid) "submission queue doorbell write for nonexistent queue, sqid=%"PRIu32", ignoring"
779
+nvme_ub_db_wr_invalid_sqtail(uint32_t qid, uint16_t new_tail) "submission queue doorbell write value beyond queue size, sqid=%"PRIu32", new_tail=%"PRIu16", ignoring"
780
+
781
# hw/block/xen_disk.c
782
xen_disk_alloc(char *name) "%s"
783
xen_disk_init(char *name) "%s"
784
--
785
2.13.6
786
787
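
The checks added in this patch report guest misbehaviour through NVME_GUEST_ERR, whose definition is outside the hunks shown above. As a rough sketch of how it is assumed to work, each entry added to hw/block/trace-events becomes a trace_<name>() helper, and the macro fires that helper together with a LOG_GUEST_ERROR message:

    /* Sketch only; the actual definition is not quoted here. */
    #define NVME_GUEST_ERR(trace, fmt, ...) \
        do { \
            (trace_##trace)(__VA_ARGS__); \
            qemu_log_mask(LOG_GUEST_ERROR, #trace \
                          " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
        } while (0)
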
diff view generated by jsdifflib
New patch
1
From: Fam Zheng <famz@redhat.com>
1
2
3
Management tools create overlays of running guests with qemu-img:
4
5
$ qemu-img create -b /image/in/use.qcow2 -f qcow2 /overlay/image.qcow2
6
7
but this doesn't work anymore due to image locking:
8
9
qemu-img: /overlay/image.qcow2: Failed to get shared "write" lock
10
Is another process using the image?
11
Could not open backing image to determine size.
12
Use the force share option to allow this use case again.
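
For clarity, the backing-image open in bdrv_img_create() is expected to look roughly like this after the change (a sketch assembled from the hunk below, not the verbatim patched file):

    back_flags = flags;
    back_flags &= ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

    backing_options = qdict_new();
    if (backing_fmt) {
        qdict_put_str(backing_options, "driver", backing_fmt);
    }
    /* Size probing must not conflict with the running guest's image lock. */
    qdict_put_bool(backing_options, BDRV_OPT_FORCE_SHARE, true);

    bs = bdrv_open(full_backing, NULL, backing_options, back_flags,
                   &local_err);
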
13
14
Cc: qemu-stable@nongnu.org
15
Signed-off-by: Fam Zheng <famz@redhat.com>
16
Reviewed-by: Eric Blake <eblake@redhat.com>
17
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
18
---
19
block.c | 3 ++-
20
1 file changed, 2 insertions(+), 1 deletion(-)
21
22
diff --git a/block.c b/block.c
23
index XXXXXXX..XXXXXXX 100644
24
--- a/block.c
25
+++ b/block.c
26
@@ -XXX,XX +XXX,XX @@ void bdrv_img_create(const char *filename, const char *fmt,
27
back_flags = flags;
28
back_flags &= ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
29
30
+ backing_options = qdict_new();
31
if (backing_fmt) {
32
- backing_options = qdict_new();
33
qdict_put_str(backing_options, "driver", backing_fmt);
34
}
35
+ qdict_put_bool(backing_options, BDRV_OPT_FORCE_SHARE, true);
36
37
bs = bdrv_open(full_backing, NULL, backing_options, back_flags,
38
&local_err);
39
--
40
2.13.6
41
42
diff view generated by jsdifflib
New patch
1
From: Thomas Huth <thuth@redhat.com>
1
2
3
It's not working anymore since QEMU v1.3.0 - time to remove it now.
4
5
Signed-off-by: Thomas Huth <thuth@redhat.com>
6
Reviewed-by: John Snow <jsnow@redhat.com>
7
Reviewed-by: Markus Armbruster <armbru@redhat.com>
8
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
9
---
10
blockdev.c | 11 -----------
11
qemu-doc.texi | 6 ------
12
2 files changed, 17 deletions(-)
13
14
diff --git a/blockdev.c b/blockdev.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/blockdev.c
17
+++ b/blockdev.c
18
@@ -XXX,XX +XXX,XX @@ QemuOptsList qemu_legacy_drive_opts = {
19
.type = QEMU_OPT_STRING,
20
.help = "chs translation (auto, lba, none)",
21
},{
22
- .name = "boot",
23
- .type = QEMU_OPT_BOOL,
24
- .help = "(deprecated, ignored)",
25
- },{
26
.name = "addr",
27
.type = QEMU_OPT_STRING,
28
.help = "pci address (virtio only)",
29
@@ -XXX,XX +XXX,XX @@ DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type)
30
goto fail;
31
}
32
33
- /* Deprecated option boot=[on|off] */
34
- if (qemu_opt_get(legacy_opts, "boot") != NULL) {
35
- fprintf(stderr, "qemu-kvm: boot=on|off is deprecated and will be "
36
- "ignored. Future versions will reject this parameter. Please "
37
- "update your scripts.\n");
38
- }
39
-
40
/* Other deprecated options */
41
if (!qtest_enabled()) {
42
for (i = 0; i < ARRAY_SIZE(deprecated); i++) {
43
diff --git a/qemu-doc.texi b/qemu-doc.texi
44
index XXXXXXX..XXXXXXX 100644
45
--- a/qemu-doc.texi
46
+++ b/qemu-doc.texi
47
@@ -XXX,XX +XXX,XX @@ deprecated.
48
49
@section System emulator command line arguments
50
51
-@subsection -drive boot=on|off (since 1.3.0)
52
-
53
-The ``boot=on|off'' option to the ``-drive'' argument is
54
-ignored. Applications should use the ``bootindex=N'' parameter
55
-to set an absolute ordering between devices instead.
56
-
57
@subsection -tdf (since 1.3.0)
58
59
The ``-tdf'' argument is ignored. The behaviour implemented
60
--
61
2.13.6
62
63
diff view generated by jsdifflib
New patch
1
1
From: Thomas Huth <thuth@redhat.com>
2
3
It's been marked as deprecated since QEMU v2.10.0, and so far nobody
4
complained that we should keep it, so let's remove this legacy option
5
now to simplify the code quite a bit.
6
7
Signed-off-by: Thomas Huth <thuth@redhat.com>
8
Reviewed-by: John Snow <jsnow@redhat.com>
9
Reviewed-by: Markus Armbruster <armbru@redhat.com>
10
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
11
---
12
vl.c | 86 ++-------------------------------------------------------
13
qemu-doc.texi | 8 ------
14
qemu-options.hx | 19 ++-----------
15
3 files changed, 4 insertions(+), 109 deletions(-)
16
17
diff --git a/vl.c b/vl.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/vl.c
20
+++ b/vl.c
21
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv, char **envp)
22
const char *boot_order = NULL;
23
const char *boot_once = NULL;
24
DisplayState *ds;
25
- int cyls, heads, secs, translation;
26
QemuOpts *opts, *machine_opts;
27
- QemuOpts *hda_opts = NULL, *icount_opts = NULL, *accel_opts = NULL;
28
+ QemuOpts *icount_opts = NULL, *accel_opts = NULL;
29
QemuOptsList *olist;
30
int optind;
31
const char *optarg;
32
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv, char **envp)
33
34
cpu_model = NULL;
35
snapshot = 0;
36
- cyls = heads = secs = 0;
37
- translation = BIOS_ATA_TRANSLATION_AUTO;
38
39
nb_nics = 0;
40
41
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv, char **envp)
42
if (optind >= argc)
43
break;
44
if (argv[optind][0] != '-') {
45
- hda_opts = drive_add(IF_DEFAULT, 0, argv[optind++], HD_OPTS);
46
+ drive_add(IF_DEFAULT, 0, argv[optind++], HD_OPTS);
47
} else {
48
const QEMUOption *popt;
49
50
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv, char **envp)
51
cpu_model = optarg;
52
break;
53
case QEMU_OPTION_hda:
54
- {
55
- char buf[256];
56
- if (cyls == 0)
57
- snprintf(buf, sizeof(buf), "%s", HD_OPTS);
58
- else
59
- snprintf(buf, sizeof(buf),
60
- "%s,cyls=%d,heads=%d,secs=%d%s",
61
- HD_OPTS , cyls, heads, secs,
62
- translation == BIOS_ATA_TRANSLATION_LBA ?
63
- ",trans=lba" :
64
- translation == BIOS_ATA_TRANSLATION_NONE ?
65
- ",trans=none" : "");
66
- drive_add(IF_DEFAULT, 0, optarg, buf);
67
- break;
68
- }
69
case QEMU_OPTION_hdb:
70
case QEMU_OPTION_hdc:
71
case QEMU_OPTION_hdd:
72
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv, char **envp)
73
case QEMU_OPTION_snapshot:
74
snapshot = 1;
75
break;
76
- case QEMU_OPTION_hdachs:
77
- {
78
- const char *p;
79
- p = optarg;
80
- cyls = strtol(p, (char **)&p, 0);
81
- if (cyls < 1 || cyls > 16383)
82
- goto chs_fail;
83
- if (*p != ',')
84
- goto chs_fail;
85
- p++;
86
- heads = strtol(p, (char **)&p, 0);
87
- if (heads < 1 || heads > 16)
88
- goto chs_fail;
89
- if (*p != ',')
90
- goto chs_fail;
91
- p++;
92
- secs = strtol(p, (char **)&p, 0);
93
- if (secs < 1 || secs > 63)
94
- goto chs_fail;
95
- if (*p == ',') {
96
- p++;
97
- if (!strcmp(p, "large")) {
98
- translation = BIOS_ATA_TRANSLATION_LARGE;
99
- } else if (!strcmp(p, "rechs")) {
100
- translation = BIOS_ATA_TRANSLATION_RECHS;
101
- } else if (!strcmp(p, "none")) {
102
- translation = BIOS_ATA_TRANSLATION_NONE;
103
- } else if (!strcmp(p, "lba")) {
104
- translation = BIOS_ATA_TRANSLATION_LBA;
105
- } else if (!strcmp(p, "auto")) {
106
- translation = BIOS_ATA_TRANSLATION_AUTO;
107
- } else {
108
- goto chs_fail;
109
- }
110
- } else if (*p != '\0') {
111
- chs_fail:
112
- error_report("invalid physical CHS format");
113
- exit(1);
114
- }
115
- if (hda_opts != NULL) {
116
- qemu_opt_set_number(hda_opts, "cyls", cyls,
117
- &error_abort);
118
- qemu_opt_set_number(hda_opts, "heads", heads,
119
- &error_abort);
120
- qemu_opt_set_number(hda_opts, "secs", secs,
121
- &error_abort);
122
- if (translation == BIOS_ATA_TRANSLATION_LARGE) {
123
- qemu_opt_set(hda_opts, "trans", "large",
124
- &error_abort);
125
- } else if (translation == BIOS_ATA_TRANSLATION_RECHS) {
126
- qemu_opt_set(hda_opts, "trans", "rechs",
127
- &error_abort);
128
- } else if (translation == BIOS_ATA_TRANSLATION_LBA) {
129
- qemu_opt_set(hda_opts, "trans", "lba",
130
- &error_abort);
131
- } else if (translation == BIOS_ATA_TRANSLATION_NONE) {
132
- qemu_opt_set(hda_opts, "trans", "none",
133
- &error_abort);
134
- }
135
- }
136
- }
137
- error_report("'-hdachs' is deprecated, please use '-device"
138
- " ide-hd,cyls=c,heads=h,secs=s,...' instead");
139
- break;
140
case QEMU_OPTION_numa:
141
opts = qemu_opts_parse_noisily(qemu_find_opts("numa"),
142
optarg, true);
143
diff --git a/qemu-doc.texi b/qemu-doc.texi
144
index XXXXXXX..XXXXXXX 100644
145
--- a/qemu-doc.texi
146
+++ b/qemu-doc.texi
147
@@ -XXX,XX +XXX,XX @@ The ``--net dump'' argument is now replaced with the
148
``-object filter-dump'' argument which works in combination
149
with the modern ``-netdev`` backends instead.
150
151
-@subsection -hdachs (since 2.10.0)
152
-
153
-The ``-hdachs'' argument is now a synonym for setting
154
-the ``cyls'', ``heads'', ``secs'', and ``trans'' properties
155
-on the ``ide-hd'' device using the ``-device'' argument.
156
-The new syntax allows different settings to be provided
157
-per disk.
158
-
159
@subsection -usbdevice (since 2.10.0)
160
161
The ``-usbdevice DEV'' argument is now a synonym for setting
162
diff --git a/qemu-options.hx b/qemu-options.hx
163
index XXXXXXX..XXXXXXX 100644
164
--- a/qemu-options.hx
165
+++ b/qemu-options.hx
166
@@ -XXX,XX +XXX,XX @@ of available connectors of a given interface type.
167
@item media=@var{media}
168
This option defines the type of the media: disk or cdrom.
169
@item cyls=@var{c},heads=@var{h},secs=@var{s}[,trans=@var{t}]
170
-These options have the same definition as they have in @option{-hdachs}.
171
-These parameters are deprecated, use the corresponding parameters
172
+Force disk physical geometry and the optional BIOS translation (trans=none or
173
+lba). These parameters are deprecated, use the corresponding parameters
174
of @code{-device} instead.
175
@item snapshot=@var{snapshot}
176
@var{snapshot} is "on" or "off" and controls snapshot mode for the given drive
177
@@ -XXX,XX +XXX,XX @@ the raw disk image you use is not written back. You can however force
178
the write back by pressing @key{C-a s} (@pxref{disk_images}).
179
ETEXI
180
181
-DEF("hdachs", HAS_ARG, QEMU_OPTION_hdachs, \
182
- "-hdachs c,h,s[,t]\n" \
183
- " force hard disk 0 physical geometry and the optional BIOS\n" \
184
- " translation (t=none or lba) (usually QEMU can guess them)\n",
185
- QEMU_ARCH_ALL)
186
-STEXI
187
-@item -hdachs @var{c},@var{h},@var{s},[,@var{t}]
188
-@findex -hdachs
189
-Force hard disk 0 physical geometry (1 <= @var{c} <= 16383, 1 <=
190
-@var{h} <= 16, 1 <= @var{s} <= 63) and optionally force the BIOS
191
-translation mode (@var{t}=none, lba or auto). Usually QEMU can guess
192
-all those parameters. This option is deprecated, please use
193
-@code{-device ide-hd,cyls=c,heads=h,secs=s,...} instead.
194
-ETEXI
195
-
196
DEF("fsdev", HAS_ARG, QEMU_OPTION_fsdev,
197
"-fsdev fsdriver,id=id[,path=path,][security_model={mapped-xattr|mapped-file|passthrough|none}]\n"
198
" [,writeout=immediate][,readonly][,socket=socket|sock_fd=sock_fd][,fmode=fmode][,dmode=dmode]\n"
199
--
200
2.13.6
201
202
diff view generated by jsdifflib
1
From: Thomas Huth <thuth@redhat.com>
1
From: Thomas Huth <thuth@redhat.com>
2
2
3
It's annoying to see this debug message every time you use vvfat.
3
Looks like we forgot to announce the deprecation of these options in
4
Disable it with the DLOG() macro by default, as it is done with the
4
the corresponding chapter of the qemu-doc text, so let's do that now.
5
other debug messages in this file.
6
5
7
Signed-off-by: Thomas Huth <thuth@redhat.com>
6
Signed-off-by: Thomas Huth <thuth@redhat.com>
8
Reviewed-by: John Snow <jsnow@redhat.com>
7
Reviewed-by: John Snow <jsnow@redhat.com>
8
Reviewed-by: Markus Armbruster <armbru@redhat.com>
9
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
9
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
10
---
10
---
11
block/vvfat.c | 4 ++--
11
qemu-doc.texi | 15 +++++++++++++++
12
1 file changed, 2 insertions(+), 2 deletions(-)
12
1 file changed, 15 insertions(+)
13
13
14
diff --git a/block/vvfat.c b/block/vvfat.c
14
diff --git a/qemu-doc.texi b/qemu-doc.texi
15
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
16
--- a/block/vvfat.c
16
--- a/qemu-doc.texi
17
+++ b/block/vvfat.c
17
+++ b/qemu-doc.texi
18
@@ -XXX,XX +XXX,XX @@ static int vvfat_open(BlockDriverState *bs, QDict *options, int flags,
18
@@ -XXX,XX +XXX,XX @@ longer be directly supported in QEMU.
19
s->fat2 = NULL;
19
The ``-drive if=scsi'' argument is replaced by the
20
s->downcase_short_names = 1;
20
``-device BUS-TYPE'' argument combined with ``-drive if=none''.
21
21
22
- fprintf(stderr, "vvfat %s chs %d,%d,%d\n",
22
+@subsection -drive cyls=...,heads=...,secs=...,trans=... (since 2.10.0)
23
- dirname, cyls, heads, secs);
23
+
24
+ DLOG(fprintf(stderr, "vvfat %s chs %d,%d,%d\n",
24
+The drive geometry arguments are replaced by the geometry arguments
25
+ dirname, cyls, heads, secs));
25
+that can be specified with the ``-device'' parameter.
26
26
+
27
s->sector_count = cyls * heads * secs - s->offset_to_bootsector;
27
+@subsection -drive serial=... (since 2.10.0)
28
28
+
29
+The drive serial argument is replaced by the serial argument
30
+that can be specified with the ``-device'' parameter.
31
+
32
+@subsection -drive addr=... (since 2.10.0)
33
+
34
+The drive addr argument is replaced by the addr argument
35
+that can be specified with the ``-device'' parameter.
36
+
37
@subsection -net dump (since 2.10.0)
38
39
The ``--net dump'' argument is now replaced with the
29
--
40
--
30
2.13.6
41
2.13.6
31
42
32
43
diff view generated by jsdifflib
New patch
1
From: Fam Zheng <famz@redhat.com>
1
2
3
Signed-off-by: Fam Zheng <famz@redhat.com>
4
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
5
---
6
include/block/block_int.h | 1 -
7
block/io.c | 18 ------------------
8
2 files changed, 19 deletions(-)
9
10
diff --git a/include/block/block_int.h b/include/block/block_int.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/include/block/block_int.h
13
+++ b/include/block/block_int.h
14
@@ -XXX,XX +XXX,XX @@ bool blk_dev_is_tray_open(BlockBackend *blk);
15
bool blk_dev_is_medium_locked(BlockBackend *blk);
16
17
void bdrv_set_dirty(BlockDriverState *bs, int64_t offset, int64_t bytes);
18
-bool bdrv_requests_pending(BlockDriverState *bs);
19
20
void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out);
21
void bdrv_undo_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *in);
22
diff --git a/block/io.c b/block/io.c
23
index XXXXXXX..XXXXXXX 100644
24
--- a/block/io.c
25
+++ b/block/io.c
26
@@ -XXX,XX +XXX,XX @@ void bdrv_disable_copy_on_read(BlockDriverState *bs)
27
assert(old >= 1);
28
}
29
30
-/* Check if any requests are in-flight (including throttled requests) */
31
-bool bdrv_requests_pending(BlockDriverState *bs)
32
-{
33
- BdrvChild *child;
34
-
35
- if (atomic_read(&bs->in_flight)) {
36
- return true;
37
- }
38
-
39
- QLIST_FOREACH(child, &bs->children, next) {
40
- if (bdrv_requests_pending(child->bs)) {
41
- return true;
42
- }
43
- }
44
-
45
- return false;
46
-}
47
-
48
typedef struct {
49
Coroutine *co;
50
BlockDriverState *bs;
51
--
52
2.13.6
53
54
diff view generated by jsdifflib
New patch
1
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2
Reviewed-by: Fam Zheng <famz@redhat.com>
3
---
4
block/io.c | 6 ++++++
5
1 file changed, 6 insertions(+)
1
6
7
diff --git a/block/io.c b/block/io.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/block/io.c
10
+++ b/block/io.c
11
@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_begin(void)
12
BdrvNextIterator it;
13
GSList *aio_ctxs = NULL, *ctx;
14
15
+ /* BDRV_POLL_WHILE() for a node can only be called from its own I/O thread
16
+ * or the main loop AioContext. We potentially use BDRV_POLL_WHILE() on
17
+ * nodes in several different AioContexts, so make sure we're in the main
18
+ * context. */
19
+ assert(qemu_get_current_aio_context() == qemu_get_aio_context());
20
+
21
block_job_pause_all();
22
23
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
24
--
25
2.13.6
26
27
diff view generated by jsdifflib
New patch
1
bdrv_drained_begin() doesn't increase bs->quiesce_counter recursively
2
and also doesn't notify other parent nodes of children, which both mean
3
that the child nodes are not actually drained, and bdrv_drained_begin()
4
is providing useful functionality only on a single node.
1
5
6
To keep things consistent, we also shouldn't call the block driver
7
callbacks recursively.
8
9
A proper recursive drain version that provides an actually working
10
drained section for child nodes will be introduced later.
11
12
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
13
Reviewed-by: Fam Zheng <famz@redhat.com>
14
---
15
block/io.c | 16 +++++++++-------
16
1 file changed, 9 insertions(+), 7 deletions(-)
17
18
diff --git a/block/io.c b/block/io.c
19
index XXXXXXX..XXXXXXX 100644
20
--- a/block/io.c
21
+++ b/block/io.c
22
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
23
}
24
25
/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
26
-static void bdrv_drain_invoke(BlockDriverState *bs, bool begin)
27
+static void bdrv_drain_invoke(BlockDriverState *bs, bool begin, bool recursive)
28
{
29
BdrvChild *child, *tmp;
30
BdrvCoDrainData data = { .bs = bs, .done = false, .begin = begin};
31
@@ -XXX,XX +XXX,XX @@ static void bdrv_drain_invoke(BlockDriverState *bs, bool begin)
32
bdrv_coroutine_enter(bs, data.co);
33
BDRV_POLL_WHILE(bs, !data.done);
34
35
- QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) {
36
- bdrv_drain_invoke(child->bs, begin);
37
+ if (recursive) {
38
+ QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) {
39
+ bdrv_drain_invoke(child->bs, begin, true);
40
+ }
41
}
42
}
43
44
@@ -XXX,XX +XXX,XX @@ void bdrv_drained_begin(BlockDriverState *bs)
45
bdrv_parent_drained_begin(bs);
46
}
47
48
- bdrv_drain_invoke(bs, true);
49
+ bdrv_drain_invoke(bs, true, false);
50
bdrv_drain_recurse(bs);
51
}
52
53
@@ -XXX,XX +XXX,XX @@ void bdrv_drained_end(BlockDriverState *bs)
54
}
55
56
/* Re-enable things in child-to-parent order */
57
- bdrv_drain_invoke(bs, false);
58
+ bdrv_drain_invoke(bs, false, false);
59
bdrv_parent_drained_end(bs);
60
aio_enable_external(bdrv_get_aio_context(bs));
61
}
62
@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_begin(void)
63
aio_context_acquire(aio_context);
64
aio_disable_external(aio_context);
65
bdrv_parent_drained_begin(bs);
66
- bdrv_drain_invoke(bs, true);
67
+ bdrv_drain_invoke(bs, true, true);
68
aio_context_release(aio_context);
69
70
if (!g_slist_find(aio_ctxs, aio_context)) {
71
@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_end(void)
72
73
/* Re-enable things in child-to-parent order */
74
aio_context_acquire(aio_context);
75
- bdrv_drain_invoke(bs, false);
76
+ bdrv_drain_invoke(bs, false, true);
77
bdrv_parent_drained_end(bs);
78
aio_enable_external(aio_context);
79
aio_context_release(aio_context);
80
--
81
2.13.6
82
83
diff view generated by jsdifflib
New patch
1
The existing test is for bdrv_drain_all_begin/end() only. Generalise the
2
test case so that it can be run for the other variants as well. At the
3
moment this is only bdrv_drain_begin/end(), but in a while, we'll add
4
another one.
1
5
6
Also, add a backing file to the test node to test whether the operations
7
work recursively.
8
9
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
10
---
11
tests/test-bdrv-drain.c | 69 ++++++++++++++++++++++++++++++++++++++++++++-----
12
1 file changed, 62 insertions(+), 7 deletions(-)
13
14
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/tests/test-bdrv-drain.c
17
+++ b/tests/test-bdrv-drain.c
18
@@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_test = {
19
20
.bdrv_co_drain_begin = bdrv_test_co_drain_begin,
21
.bdrv_co_drain_end = bdrv_test_co_drain_end,
22
+
23
+ .bdrv_child_perm = bdrv_format_default_perms,
24
};
25
26
static void aio_ret_cb(void *opaque, int ret)
27
@@ -XXX,XX +XXX,XX @@ static void aio_ret_cb(void *opaque, int ret)
28
*aio_ret = ret;
29
}
30
31
-static void test_drv_cb_drain_all(void)
32
+enum drain_type {
33
+ BDRV_DRAIN_ALL,
34
+ BDRV_DRAIN,
35
+};
36
+
37
+static void do_drain_begin(enum drain_type drain_type, BlockDriverState *bs)
38
+{
39
+ switch (drain_type) {
40
+ case BDRV_DRAIN_ALL: bdrv_drain_all_begin(); break;
41
+ case BDRV_DRAIN: bdrv_drained_begin(bs); break;
42
+ default: g_assert_not_reached();
43
+ }
44
+}
45
+
46
+static void do_drain_end(enum drain_type drain_type, BlockDriverState *bs)
47
+{
48
+ switch (drain_type) {
49
+ case BDRV_DRAIN_ALL: bdrv_drain_all_end(); break;
50
+ case BDRV_DRAIN: bdrv_drained_end(bs); break;
51
+ default: g_assert_not_reached();
52
+ }
53
+}
54
+
55
+static void test_drv_cb_common(enum drain_type drain_type, bool recursive)
56
{
57
BlockBackend *blk;
58
- BlockDriverState *bs;
59
- BDRVTestState *s;
60
+ BlockDriverState *bs, *backing;
61
+ BDRVTestState *s, *backing_s;
62
BlockAIOCB *acb;
63
int aio_ret;
64
65
@@ -XXX,XX +XXX,XX @@ static void test_drv_cb_drain_all(void)
66
s = bs->opaque;
67
blk_insert_bs(blk, bs, &error_abort);
68
69
+ backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
70
+ backing_s = backing->opaque;
71
+ bdrv_set_backing_hd(bs, backing, &error_abort);
72
+
73
/* Simple bdrv_drain_all_begin/end pair, check that CBs are called */
74
g_assert_cmpint(s->drain_count, ==, 0);
75
- bdrv_drain_all_begin();
76
+ g_assert_cmpint(backing_s->drain_count, ==, 0);
77
+
78
+ do_drain_begin(drain_type, bs);
79
+
80
g_assert_cmpint(s->drain_count, ==, 1);
81
- bdrv_drain_all_end();
82
+ g_assert_cmpint(backing_s->drain_count, ==, !!recursive);
83
+
84
+ do_drain_end(drain_type, bs);
85
+
86
g_assert_cmpint(s->drain_count, ==, 0);
87
+ g_assert_cmpint(backing_s->drain_count, ==, 0);
88
89
/* Now do the same while a request is pending */
90
aio_ret = -EINPROGRESS;
91
@@ -XXX,XX +XXX,XX @@ static void test_drv_cb_drain_all(void)
92
g_assert_cmpint(aio_ret, ==, -EINPROGRESS);
93
94
g_assert_cmpint(s->drain_count, ==, 0);
95
- bdrv_drain_all_begin();
96
+ g_assert_cmpint(backing_s->drain_count, ==, 0);
97
+
98
+ do_drain_begin(drain_type, bs);
99
+
100
g_assert_cmpint(aio_ret, ==, 0);
101
g_assert_cmpint(s->drain_count, ==, 1);
102
- bdrv_drain_all_end();
103
+ g_assert_cmpint(backing_s->drain_count, ==, !!recursive);
104
+
105
+ do_drain_end(drain_type, bs);
106
+
107
g_assert_cmpint(s->drain_count, ==, 0);
108
+ g_assert_cmpint(backing_s->drain_count, ==, 0);
109
110
+ bdrv_unref(backing);
111
bdrv_unref(bs);
112
blk_unref(blk);
113
}
114
115
+static void test_drv_cb_drain_all(void)
116
+{
117
+ test_drv_cb_common(BDRV_DRAIN_ALL, true);
118
+}
119
+
120
+static void test_drv_cb_drain(void)
121
+{
122
+ test_drv_cb_common(BDRV_DRAIN, false);
123
+}
124
+
125
int main(int argc, char **argv)
126
{
127
bdrv_init();
128
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
129
g_test_init(&argc, &argv, NULL);
130
131
g_test_add_func("/bdrv-drain/driver-cb/drain_all", test_drv_cb_drain_all);
132
+ g_test_add_func("/bdrv-drain/driver-cb/drain", test_drv_cb_drain);
133
134
return g_test_run();
135
}
136
--
137
2.13.6
138
139
diff view generated by jsdifflib
New patch
1
This is currently only working correctly for bdrv_drain(), not for
2
bdrv_drain_all(). Leave a comment for the drain_all case; we'll address
3
it later.
1
4
5
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
6
---
7
tests/test-bdrv-drain.c | 45 +++++++++++++++++++++++++++++++++++++++++++++
8
1 file changed, 45 insertions(+)
9
10
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tests/test-bdrv-drain.c
13
+++ b/tests/test-bdrv-drain.c
14
@@ -XXX,XX +XXX,XX @@ static void test_drv_cb_drain(void)
15
test_drv_cb_common(BDRV_DRAIN, false);
16
}
17
18
+static void test_quiesce_common(enum drain_type drain_type, bool recursive)
19
+{
20
+ BlockBackend *blk;
21
+ BlockDriverState *bs, *backing;
22
+
23
+ blk = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
24
+ bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
25
+ &error_abort);
26
+ blk_insert_bs(blk, bs, &error_abort);
27
+
28
+ backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
29
+ bdrv_set_backing_hd(bs, backing, &error_abort);
30
+
31
+ g_assert_cmpint(bs->quiesce_counter, ==, 0);
32
+ g_assert_cmpint(backing->quiesce_counter, ==, 0);
33
+
34
+ do_drain_begin(drain_type, bs);
35
+
36
+ g_assert_cmpint(bs->quiesce_counter, ==, 1);
37
+ g_assert_cmpint(backing->quiesce_counter, ==, !!recursive);
38
+
39
+ do_drain_end(drain_type, bs);
40
+
41
+ g_assert_cmpint(bs->quiesce_counter, ==, 0);
42
+ g_assert_cmpint(backing->quiesce_counter, ==, 0);
43
+
44
+ bdrv_unref(backing);
45
+ bdrv_unref(bs);
46
+ blk_unref(blk);
47
+}
48
+
49
+static void test_quiesce_drain_all(void)
50
+{
51
+ // XXX drain_all doesn't quiesce
52
+ //test_quiesce_common(BDRV_DRAIN_ALL, true);
53
+}
54
+
55
+static void test_quiesce_drain(void)
56
+{
57
+ test_quiesce_common(BDRV_DRAIN, false);
58
+}
59
+
60
int main(int argc, char **argv)
61
{
62
bdrv_init();
63
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
64
g_test_add_func("/bdrv-drain/driver-cb/drain_all", test_drv_cb_drain_all);
65
g_test_add_func("/bdrv-drain/driver-cb/drain", test_drv_cb_drain);
66
67
+ g_test_add_func("/bdrv-drain/quiesce/drain_all", test_quiesce_drain_all);
68
+ g_test_add_func("/bdrv-drain/quiesce/drain", test_quiesce_drain);
69
+
70
return g_test_run();
71
}
72
--
73
2.13.6
74
75
diff view generated by jsdifflib
New patch
1
Block jobs already paused themselves when their main BlockBackend
2
entered a drained section. This is not good enough: We also want to
3
pause a block job and may not submit new requests if, for example, the
4
mirror target node should be drained.
1
5
6
This implements .drained_begin/end callbacks in child_job in order to
7
consider all block nodes related to the job, and removes the
8
BlockBackend callbacks which are unnecessary now because the root of the
9
job's main BlockBackend is always referenced with a child_job, too.
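
A minimal sketch of the resulting behaviour (illustrative; the node and driver names are made up): a job that registers both its source and target through block_job_add_bdrv() is now paused whenever either node enters a drained section.

    job = block_job_create("job0", &my_job_driver, src, 0, BLK_PERM_ALL,
                           0, 0, NULL, NULL, &error_abort);
    block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);

    bdrv_drained_begin(target);  /* child_job_drained_begin() pauses the job */
    /* no new job requests are submitted while target stays drained */
    bdrv_drained_end(target);    /* child_job_drained_end() resumes the job */
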
10
11
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
12
---
13
blockjob.c | 22 +++++++++-------------
14
1 file changed, 9 insertions(+), 13 deletions(-)
15
16
diff --git a/blockjob.c b/blockjob.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/blockjob.c
19
+++ b/blockjob.c
20
@@ -XXX,XX +XXX,XX @@ static char *child_job_get_parent_desc(BdrvChild *c)
21
job->id);
22
}
23
24
-static const BdrvChildRole child_job = {
25
- .get_parent_desc = child_job_get_parent_desc,
26
- .stay_at_node = true,
27
-};
28
-
29
-static void block_job_drained_begin(void *opaque)
30
+static void child_job_drained_begin(BdrvChild *c)
31
{
32
- BlockJob *job = opaque;
33
+ BlockJob *job = c->opaque;
34
block_job_pause(job);
35
}
36
37
-static void block_job_drained_end(void *opaque)
38
+static void child_job_drained_end(BdrvChild *c)
39
{
40
- BlockJob *job = opaque;
41
+ BlockJob *job = c->opaque;
42
block_job_resume(job);
43
}
44
45
-static const BlockDevOps block_job_dev_ops = {
46
- .drained_begin = block_job_drained_begin,
47
- .drained_end = block_job_drained_end,
48
+static const BdrvChildRole child_job = {
49
+ .get_parent_desc = child_job_get_parent_desc,
50
+ .drained_begin = child_job_drained_begin,
51
+ .drained_end = child_job_drained_end,
52
+ .stay_at_node = true,
53
};
54
55
void block_job_remove_all_bdrv(BlockJob *job)
56
@@ -XXX,XX +XXX,XX @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
57
block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
58
bs->job = job;
59
60
- blk_set_dev_ops(blk, &block_job_dev_ops, job);
61
bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);
62
63
QLIST_INSERT_HEAD(&block_jobs, job, job_list);
64
--
65
2.13.6
66
67
diff view generated by jsdifflib
New patch
1
Block jobs must be paused if any of the involved nodes are drained.
1
2
3
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
4
---
5
tests/test-bdrv-drain.c | 121 ++++++++++++++++++++++++++++++++++++++++++++++++
6
1 file changed, 121 insertions(+)
7
8
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tests/test-bdrv-drain.c
11
+++ b/tests/test-bdrv-drain.c
12
@@ -XXX,XX +XXX,XX @@
13
14
#include "qemu/osdep.h"
15
#include "block/block.h"
16
+#include "block/blockjob_int.h"
17
#include "sysemu/block-backend.h"
18
#include "qapi/error.h"
19
20
@@ -XXX,XX +XXX,XX @@ static void test_quiesce_drain(void)
21
test_quiesce_common(BDRV_DRAIN, false);
22
}
23
24
+
25
+typedef struct TestBlockJob {
26
+ BlockJob common;
27
+ bool should_complete;
28
+} TestBlockJob;
29
+
30
+static void test_job_completed(BlockJob *job, void *opaque)
31
+{
32
+ block_job_completed(job, 0);
33
+}
34
+
35
+static void coroutine_fn test_job_start(void *opaque)
36
+{
37
+ TestBlockJob *s = opaque;
38
+
39
+ while (!s->should_complete) {
40
+ block_job_sleep_ns(&s->common, 100000);
41
+ }
42
+
43
+ block_job_defer_to_main_loop(&s->common, test_job_completed, NULL);
44
+}
45
+
46
+static void test_job_complete(BlockJob *job, Error **errp)
47
+{
48
+ TestBlockJob *s = container_of(job, TestBlockJob, common);
49
+ s->should_complete = true;
50
+}
51
+
52
+BlockJobDriver test_job_driver = {
53
+ .instance_size = sizeof(TestBlockJob),
54
+ .start = test_job_start,
55
+ .complete = test_job_complete,
56
+};
57
+
58
+static void test_blockjob_common(enum drain_type drain_type)
59
+{
60
+ BlockBackend *blk_src, *blk_target;
61
+ BlockDriverState *src, *target;
62
+ BlockJob *job;
63
+ int ret;
64
+
65
+ src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR,
66
+ &error_abort);
67
+ blk_src = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
68
+ blk_insert_bs(blk_src, src, &error_abort);
69
+
70
+ target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
71
+ &error_abort);
72
+ blk_target = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
73
+ blk_insert_bs(blk_target, target, &error_abort);
74
+
75
+ job = block_job_create("job0", &test_job_driver, src, 0, BLK_PERM_ALL, 0,
76
+ 0, NULL, NULL, &error_abort);
77
+ block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
78
+ block_job_start(job);
79
+
80
+ g_assert_cmpint(job->pause_count, ==, 0);
81
+ g_assert_false(job->paused);
82
+ g_assert_false(job->busy); /* We're in block_job_sleep_ns() */
83
+
84
+ do_drain_begin(drain_type, src);
85
+
86
+ if (drain_type == BDRV_DRAIN_ALL) {
87
+ /* bdrv_drain_all() drains both src and target, and involves an
88
+ * additional block_job_pause_all() */
89
+ g_assert_cmpint(job->pause_count, ==, 3);
90
+ } else {
91
+ g_assert_cmpint(job->pause_count, ==, 1);
92
+ }
93
+ /* XXX We don't wait until the job is actually paused. Is this okay? */
94
+ /* g_assert_true(job->paused); */
95
+ g_assert_false(job->busy); /* The job is paused */
96
+
97
+ do_drain_end(drain_type, src);
98
+
99
+ g_assert_cmpint(job->pause_count, ==, 0);
100
+ g_assert_false(job->paused);
101
+ g_assert_false(job->busy); /* We're in block_job_sleep_ns() */
102
+
103
+ do_drain_begin(drain_type, target);
104
+
105
+ if (drain_type == BDRV_DRAIN_ALL) {
106
+ /* bdrv_drain_all() drains both src and target, and involves an
107
+ * additional block_job_pause_all() */
108
+ g_assert_cmpint(job->pause_count, ==, 3);
109
+ } else {
110
+ g_assert_cmpint(job->pause_count, ==, 1);
111
+ }
112
+ /* XXX We don't wait until the job is actually paused. Is this okay? */
113
+ /* g_assert_true(job->paused); */
114
+ g_assert_false(job->busy); /* The job is paused */
115
+
116
+ do_drain_end(drain_type, target);
117
+
118
+ g_assert_cmpint(job->pause_count, ==, 0);
119
+ g_assert_false(job->paused);
120
+ g_assert_false(job->busy); /* We're in block_job_sleep_ns() */
121
+
122
+ ret = block_job_complete_sync(job, &error_abort);
123
+ g_assert_cmpint(ret, ==, 0);
124
+
125
+ blk_unref(blk_src);
126
+ blk_unref(blk_target);
127
+ bdrv_unref(src);
128
+ bdrv_unref(target);
129
+}
130
+
131
+static void test_blockjob_drain_all(void)
132
+{
133
+ test_blockjob_common(BDRV_DRAIN_ALL);
134
+}
135
+
136
+static void test_blockjob_drain(void)
137
+{
138
+ test_blockjob_common(BDRV_DRAIN);
139
+}
140
+
141
int main(int argc, char **argv)
142
{
143
bdrv_init();
144
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
145
g_test_add_func("/bdrv-drain/quiesce/drain_all", test_quiesce_drain_all);
146
g_test_add_func("/bdrv-drain/quiesce/drain", test_quiesce_drain);
147
148
+ g_test_add_func("/bdrv-drain/blockjob/drain_all", test_blockjob_drain_all);
149
+ g_test_add_func("/bdrv-drain/blockjob/drain", test_blockjob_drain);
150
+
151
return g_test_run();
152
}
153
--
154
2.13.6
155
156
diff view generated by jsdifflib
New patch
1
Block jobs are already paused using the BdrvChildRole drain callbacks,
2
so we don't need an additional block_job_pause_all() call.
1
3
4
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
5
---
6
block/io.c | 4 ----
7
tests/test-bdrv-drain.c | 10 ++++------
8
2 files changed, 4 insertions(+), 10 deletions(-)
9
10
diff --git a/block/io.c b/block/io.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/block/io.c
13
+++ b/block/io.c
14
@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_begin(void)
15
* context. */
16
assert(qemu_get_current_aio_context() == qemu_get_aio_context());
17
18
- block_job_pause_all();
19
-
20
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
21
AioContext *aio_context = bdrv_get_aio_context(bs);
22
23
@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_end(void)
24
aio_enable_external(aio_context);
25
aio_context_release(aio_context);
26
}
27
-
28
- block_job_resume_all();
29
}
30
31
void bdrv_drain_all(void)
32
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/tests/test-bdrv-drain.c
35
+++ b/tests/test-bdrv-drain.c
36
@@ -XXX,XX +XXX,XX @@ static void test_blockjob_common(enum drain_type drain_type)
37
do_drain_begin(drain_type, src);
38
39
if (drain_type == BDRV_DRAIN_ALL) {
40
- /* bdrv_drain_all() drains both src and target, and involves an
41
- * additional block_job_pause_all() */
42
- g_assert_cmpint(job->pause_count, ==, 3);
43
+ /* bdrv_drain_all() drains both src and target */
44
+ g_assert_cmpint(job->pause_count, ==, 2);
45
} else {
46
g_assert_cmpint(job->pause_count, ==, 1);
47
}
48
@@ -XXX,XX +XXX,XX @@ static void test_blockjob_common(enum drain_type drain_type)
49
do_drain_begin(drain_type, target);
50
51
if (drain_type == BDRV_DRAIN_ALL) {
52
- /* bdrv_drain_all() drains both src and target, and involves an
53
- * additional block_job_pause_all() */
54
- g_assert_cmpint(job->pause_count, ==, 3);
55
+ /* bdrv_drain_all() drains both src and target */
56
+ g_assert_cmpint(job->pause_count, ==, 2);
57
} else {
58
g_assert_cmpint(job->pause_count, ==, 1);
59
}
60
--
61
2.13.6
62
63
diff view generated by jsdifflib
New patch
1
bdrv_do_drained_begin() restricts the call of parent callbacks and
2
aio_disable_external() to the outermost drain section, but the block
3
driver callbacks are always called. bdrv_do_drained_end() must match
4
this behaviour, otherwise nodes stay drained even if begin/end calls
5
were balanced.
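
As a sketch of the intended semantics after this patch (counter values assume no other users of bs):

    bdrv_drained_begin(bs);  /* 0 -> 1: parents, aio_disable_external(), driver cb */
    bdrv_drained_begin(bs);  /* 1 -> 2: driver cb only (not the outermost section) */
    bdrv_drained_end(bs);    /* 2 -> 1: driver cb only                             */
    bdrv_drained_end(bs);    /* 1 -> 0: driver cb, parents, aio_enable_external()  */
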
1
6
7
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
8
---
9
block/io.c | 12 +++++++-----
10
1 file changed, 7 insertions(+), 5 deletions(-)
11
12
diff --git a/block/io.c b/block/io.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/block/io.c
15
+++ b/block/io.c
16
@@ -XXX,XX +XXX,XX @@ void bdrv_drained_begin(BlockDriverState *bs)
17
18
void bdrv_drained_end(BlockDriverState *bs)
19
{
20
+ int old_quiesce_counter;
21
+
22
if (qemu_in_coroutine()) {
23
bdrv_co_yield_to_drain(bs, false);
24
return;
25
}
26
assert(bs->quiesce_counter > 0);
27
- if (atomic_fetch_dec(&bs->quiesce_counter) > 1) {
28
- return;
29
- }
30
+ old_quiesce_counter = atomic_fetch_dec(&bs->quiesce_counter);
31
32
/* Re-enable things in child-to-parent order */
33
bdrv_drain_invoke(bs, false, false);
34
- bdrv_parent_drained_end(bs);
35
- aio_enable_external(bdrv_get_aio_context(bs));
36
+ if (old_quiesce_counter == 1) {
37
+ bdrv_parent_drained_end(bs);
38
+ aio_enable_external(bdrv_get_aio_context(bs));
39
+ }
40
}
41
42
/*
43
--
44
2.13.6
45
46
diff view generated by jsdifflib
1
From: Max Reitz <mreitz@redhat.com>
2
3
The test directory should be filtered before the image format, otherwise
4
the test will fail if the image format is part of the test directory,
5
like so:
6
7
[...]
8
-can't open: Could not open 'TEST_DIR/t.IMGFMT': Is a directory
9
+can't open: Could not open '/tmp/test-IMGFMT/t.IMGFMT': Is a directory
10
[...]
11
12
Signed-off-by: Max Reitz <mreitz@redhat.com>
13
Reviewed-by: John Snow <jsnow@redhat.com>
14
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
1
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
15
---
2
---
16
tests/qemu-iotests/226 | 4 ++--
3
tests/test-bdrv-drain.c | 57 +++++++++++++++++++++++++++++++++++++++++++++++++
17
1 file changed, 2 insertions(+), 2 deletions(-)
4
1 file changed, 57 insertions(+)
18
5
19
diff --git a/tests/qemu-iotests/226 b/tests/qemu-iotests/226
6
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
20
index XXXXXXX..XXXXXXX 100755
7
index XXXXXXX..XXXXXXX 100644
21
--- a/tests/qemu-iotests/226
8
--- a/tests/test-bdrv-drain.c
22
+++ b/tests/qemu-iotests/226
9
+++ b/tests/test-bdrv-drain.c
23
@@ -XXX,XX +XXX,XX @@ for PROTO in "file" "host_device" "host_cdrom"; do
10
@@ -XXX,XX +XXX,XX @@ static void aio_ret_cb(void *opaque, int ret)
24
echo "=== Testing with driver:$PROTO ==="
11
enum drain_type {
25
echo
12
BDRV_DRAIN_ALL,
26
echo "== Testing RO =="
13
BDRV_DRAIN,
27
- $QEMU_IO -c "open -r -o driver=$PROTO,filename=$TEST_IMG" 2>&1 | _filter_imgfmt | _filter_testdir
14
+ DRAIN_TYPE_MAX,
28
+ $QEMU_IO -c "open -r -o driver=$PROTO,filename=$TEST_IMG" 2>&1 | _filter_testdir | _filter_imgfmt
15
};
29
$QEMU_IO -c "open -r -o driver=$PROTO,filename=/dev/null" 2>&1 | _filter_imgfmt
16
30
echo "== Testing RW =="
17
static void do_drain_begin(enum drain_type drain_type, BlockDriverState *bs)
31
- $QEMU_IO -c "open -o driver=$PROTO,filename=$TEST_IMG" 2>&1 | _filter_imgfmt | _filter_testdir
18
@@ -XXX,XX +XXX,XX @@ static void test_quiesce_drain(void)
32
+ $QEMU_IO -c "open -o driver=$PROTO,filename=$TEST_IMG" 2>&1 | _filter_testdir | _filter_imgfmt
19
test_quiesce_common(BDRV_DRAIN, false);
33
$QEMU_IO -c "open -o driver=$PROTO,filename=/dev/null" 2>&1 | _filter_imgfmt
20
}
34
done
21
22
+static void test_nested(void)
23
+{
24
+ BlockBackend *blk;
25
+ BlockDriverState *bs, *backing;
26
+ BDRVTestState *s, *backing_s;
27
+ enum drain_type outer, inner;
28
+
29
+ blk = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
30
+ bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
31
+ &error_abort);
32
+ s = bs->opaque;
33
+ blk_insert_bs(blk, bs, &error_abort);
34
+
35
+ backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
36
+ backing_s = backing->opaque;
37
+ bdrv_set_backing_hd(bs, backing, &error_abort);
38
+
39
+ for (outer = 0; outer < DRAIN_TYPE_MAX; outer++) {
40
+ for (inner = 0; inner < DRAIN_TYPE_MAX; inner++) {
41
+ /* XXX bdrv_drain_all() doesn't increase the quiesce_counter */
42
+ int bs_quiesce = (outer != BDRV_DRAIN_ALL) +
43
+ (inner != BDRV_DRAIN_ALL);
44
+ int backing_quiesce = 0;
45
+ int backing_cb_cnt = (outer != BDRV_DRAIN) +
46
+ (inner != BDRV_DRAIN);
47
+
48
+ g_assert_cmpint(bs->quiesce_counter, ==, 0);
49
+ g_assert_cmpint(backing->quiesce_counter, ==, 0);
50
+ g_assert_cmpint(s->drain_count, ==, 0);
51
+ g_assert_cmpint(backing_s->drain_count, ==, 0);
52
+
53
+ do_drain_begin(outer, bs);
54
+ do_drain_begin(inner, bs);
55
+
56
+ g_assert_cmpint(bs->quiesce_counter, ==, bs_quiesce);
57
+ g_assert_cmpint(backing->quiesce_counter, ==, backing_quiesce);
58
+ g_assert_cmpint(s->drain_count, ==, 2);
59
+ g_assert_cmpint(backing_s->drain_count, ==, backing_cb_cnt);
60
+
61
+ do_drain_end(inner, bs);
62
+ do_drain_end(outer, bs);
63
+
64
+ g_assert_cmpint(bs->quiesce_counter, ==, 0);
65
+ g_assert_cmpint(backing->quiesce_counter, ==, 0);
66
+ g_assert_cmpint(s->drain_count, ==, 0);
67
+ g_assert_cmpint(backing_s->drain_count, ==, 0);
68
+ }
69
+ }
70
+
71
+ bdrv_unref(backing);
72
+ bdrv_unref(bs);
73
+ blk_unref(blk);
74
+}
75
+
76
77
typedef struct TestBlockJob {
78
BlockJob common;
79
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
80
g_test_add_func("/bdrv-drain/quiesce/drain_all", test_quiesce_drain_all);
81
g_test_add_func("/bdrv-drain/quiesce/drain", test_quiesce_drain);
82
83
+ g_test_add_func("/bdrv-drain/nested", test_nested);
84
+
85
g_test_add_func("/bdrv-drain/blockjob/drain_all", test_blockjob_drain_all);
86
g_test_add_func("/bdrv-drain/blockjob/drain", test_blockjob_drain);
35
87
36
--
88
--
37
2.13.6
89
2.13.6
38
90
39
91
diff view generated by jsdifflib
New patch
1
1
This is in preparation for subtree drains, i.e. drained sections that
2
affect not only a single node, but recursively all child nodes, too.
3
4
Calling the parent callbacks for drain is pointless when we just came
5
from that parent node recursively and leads to multiple increases of
6
bs->quiesce_counter in a single drain call. Don't do it.
7
8
In order for this to work correctly, the parent callback must be called
9
for every bdrv_drain_begin/end() call, not only for the outermost one:
10
11
If we have a node N with two parents A and B, recursive draining of A
12
should cause the quiesce_counter of B to increase because its child N is
13
drained independently of B. If now B is recursively drained, too, A must
14
increase its quiesce_counter because N is drained independently of A
15
only now, even if N is going from quiesce_counter 1 to 2.
16
17
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
18
---
19
include/block/block.h | 4 ++--
20
block.c | 13 +++++++++----
21
block/io.c | 47 ++++++++++++++++++++++++++++++++++-------------
22
3 files changed, 45 insertions(+), 19 deletions(-)
23
24
diff --git a/include/block/block.h b/include/block/block.h
25
index XXXXXXX..XXXXXXX 100644
26
--- a/include/block/block.h
27
+++ b/include/block/block.h
28
@@ -XXX,XX +XXX,XX @@ void bdrv_io_unplug(BlockDriverState *bs);
29
* Begin a quiesced section of all users of @bs. This is part of
30
* bdrv_drained_begin.
31
*/
32
-void bdrv_parent_drained_begin(BlockDriverState *bs);
33
+void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore);
34
35
/**
36
* bdrv_parent_drained_end:
37
@@ -XXX,XX +XXX,XX @@ void bdrv_parent_drained_begin(BlockDriverState *bs);
38
* End a quiesced section of all users of @bs. This is part of
39
* bdrv_drained_end.
40
*/
41
-void bdrv_parent_drained_end(BlockDriverState *bs);
42
+void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore);
43
44
/**
45
* bdrv_drained_begin:
46
diff --git a/block.c b/block.c
47
index XXXXXXX..XXXXXXX 100644
48
--- a/block.c
49
+++ b/block.c
50
@@ -XXX,XX +XXX,XX @@ static void bdrv_replace_child_noperm(BdrvChild *child,
51
BlockDriverState *new_bs)
52
{
53
BlockDriverState *old_bs = child->bs;
54
+ int i;
55
56
if (old_bs && new_bs) {
57
assert(bdrv_get_aio_context(old_bs) == bdrv_get_aio_context(new_bs));
58
}
59
if (old_bs) {
60
if (old_bs->quiesce_counter && child->role->drained_end) {
61
- child->role->drained_end(child);
62
+ for (i = 0; i < old_bs->quiesce_counter; i++) {
63
+ child->role->drained_end(child);
64
+ }
65
}
66
if (child->role->detach) {
67
child->role->detach(child);
68
@@ -XXX,XX +XXX,XX @@ static void bdrv_replace_child_noperm(BdrvChild *child,
69
if (new_bs) {
70
QLIST_INSERT_HEAD(&new_bs->parents, child, next_parent);
71
if (new_bs->quiesce_counter && child->role->drained_begin) {
72
- child->role->drained_begin(child);
73
+ for (i = 0; i < new_bs->quiesce_counter; i++) {
74
+ child->role->drained_begin(child);
75
+ }
76
}
77
78
if (child->role->attach) {
79
@@ -XXX,XX +XXX,XX @@ void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
80
AioContext *ctx = bdrv_get_aio_context(bs);
81
82
aio_disable_external(ctx);
83
- bdrv_parent_drained_begin(bs);
84
+ bdrv_parent_drained_begin(bs, NULL);
85
bdrv_drain(bs); /* ensure there are no in-flight requests */
86
87
while (aio_poll(ctx, false)) {
88
@@ -XXX,XX +XXX,XX @@ void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
89
*/
90
aio_context_acquire(new_context);
91
bdrv_attach_aio_context(bs, new_context);
92
- bdrv_parent_drained_end(bs);
93
+ bdrv_parent_drained_end(bs, NULL);
94
aio_enable_external(ctx);
95
aio_context_release(new_context);
96
}
97
diff --git a/block/io.c b/block/io.c
98
index XXXXXXX..XXXXXXX 100644
99
--- a/block/io.c
100
+++ b/block/io.c
101
@@ -XXX,XX +XXX,XX @@
102
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
103
int64_t offset, int bytes, BdrvRequestFlags flags);
104
105
-void bdrv_parent_drained_begin(BlockDriverState *bs)
106
+void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
107
{
108
BdrvChild *c, *next;
109
110
QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
111
+ if (c == ignore) {
112
+ continue;
113
+ }
114
if (c->role->drained_begin) {
115
c->role->drained_begin(c);
116
}
117
}
118
}
119
120
-void bdrv_parent_drained_end(BlockDriverState *bs)
121
+void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
122
{
123
BdrvChild *c, *next;
124
125
QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
126
+ if (c == ignore) {
127
+ continue;
128
+ }
129
if (c->role->drained_end) {
130
c->role->drained_end(c);
131
}
132
@@ -XXX,XX +XXX,XX @@ typedef struct {
133
BlockDriverState *bs;
134
bool done;
135
bool begin;
136
+ BdrvChild *parent;
137
} BdrvCoDrainData;
138
139
static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
140
@@ -XXX,XX +XXX,XX @@ static bool bdrv_drain_recurse(BlockDriverState *bs)
141
return waited;
142
}
143
144
+static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent);
145
+static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent);
146
+
147
static void bdrv_co_drain_bh_cb(void *opaque)
148
{
149
BdrvCoDrainData *data = opaque;
150
@@ -XXX,XX +XXX,XX @@ static void bdrv_co_drain_bh_cb(void *opaque)
151
152
bdrv_dec_in_flight(bs);
153
if (data->begin) {
154
- bdrv_drained_begin(bs);
155
+ bdrv_do_drained_begin(bs, data->parent);
156
} else {
157
- bdrv_drained_end(bs);
158
+ bdrv_do_drained_end(bs, data->parent);
159
}
160
161
data->done = true;
162
@@ -XXX,XX +XXX,XX @@ static void bdrv_co_drain_bh_cb(void *opaque)
163
}
164
165
static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
166
- bool begin)
167
+ bool begin, BdrvChild *parent)
168
{
169
BdrvCoDrainData data;
170
171
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
172
.bs = bs,
173
.done = false,
174
.begin = begin,
175
+ .parent = parent,
176
};
177
bdrv_inc_in_flight(bs);
178
aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
179
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
180
assert(data.done);
181
}
182
183
-void bdrv_drained_begin(BlockDriverState *bs)
184
+static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent)
185
{
186
if (qemu_in_coroutine()) {
187
- bdrv_co_yield_to_drain(bs, true);
188
+ bdrv_co_yield_to_drain(bs, true, parent);
189
return;
190
}
191
192
/* Stop things in parent-to-child order */
193
if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
194
aio_disable_external(bdrv_get_aio_context(bs));
195
- bdrv_parent_drained_begin(bs);
196
}
197
198
+ bdrv_parent_drained_begin(bs, parent);
199
bdrv_drain_invoke(bs, true, false);
200
bdrv_drain_recurse(bs);
201
}
202
203
-void bdrv_drained_end(BlockDriverState *bs)
204
+void bdrv_drained_begin(BlockDriverState *bs)
205
+{
206
+ bdrv_do_drained_begin(bs, NULL);
207
+}
208
+
209
+static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
210
{
211
int old_quiesce_counter;
212
213
if (qemu_in_coroutine()) {
214
- bdrv_co_yield_to_drain(bs, false);
215
+ bdrv_co_yield_to_drain(bs, false, parent);
216
return;
217
}
218
assert(bs->quiesce_counter > 0);
219
@@ -XXX,XX +XXX,XX @@ void bdrv_drained_end(BlockDriverState *bs)
220
221
/* Re-enable things in child-to-parent order */
222
bdrv_drain_invoke(bs, false, false);
223
+ bdrv_parent_drained_end(bs, parent);
224
if (old_quiesce_counter == 1) {
225
- bdrv_parent_drained_end(bs);
226
aio_enable_external(bdrv_get_aio_context(bs));
227
}
228
}
229
230
+void bdrv_drained_end(BlockDriverState *bs)
231
+{
232
+ bdrv_do_drained_end(bs, NULL);
233
+}
234
+
235
/*
236
* Wait for pending requests to complete on a single BlockDriverState subtree,
237
* and suspend block driver's internal I/O until next request arrives.
238
@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_begin(void)
239
/* Stop things in parent-to-child order */
240
aio_context_acquire(aio_context);
241
aio_disable_external(aio_context);
242
- bdrv_parent_drained_begin(bs);
243
+ bdrv_parent_drained_begin(bs, NULL);
244
bdrv_drain_invoke(bs, true, true);
245
aio_context_release(aio_context);
246
247
@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_end(void)
248
/* Re-enable things in child-to-parent order */
249
aio_context_acquire(aio_context);
250
bdrv_drain_invoke(bs, false, true);
251
- bdrv_parent_drained_end(bs);
252
+ bdrv_parent_drained_end(bs, NULL);
253
aio_enable_external(aio_context);
254
aio_context_release(aio_context);
255
}
256
--
257
2.13.6
258
259
New patch
1
bdrv_drained_begin() waits for the completion of requests in the whole
2
subtree, but it only actually keeps its immediate bs parameter quiesced
3
until bdrv_drained_end().
1
4
5
Add a version that keeps the whole subtree drained. As of this commit,
6
graph changes cannot be allowed during a subtree drained section, but
7
this will be fixed soon.
8
9
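For illustration, a minimal usage sketch assuming nothing beyond the two new
functions declared below; do_something_with_quiesced_subtree() is a
hypothetical placeholder for whatever work needs the subtree to be quiet:

    /* Sketch only: quiesce bs and every node below it, not just bs itself. */
    void example_subtree_drain(BlockDriverState *bs)
    {
        bdrv_subtree_drained_begin(bs);

        /* No new requests are processed anywhere in the subtree here and
         * all in-flight requests have completed. */
        do_something_with_quiesced_subtree(bs);    /* hypothetical helper */

        bdrv_subtree_drained_end(bs);
    }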
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
10
---
11
include/block/block.h | 13 +++++++++++++
12
block/io.c | 54 ++++++++++++++++++++++++++++++++++++++++-----------
13
2 files changed, 56 insertions(+), 11 deletions(-)
14
15
diff --git a/include/block/block.h b/include/block/block.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/include/block/block.h
18
+++ b/include/block/block.h
19
@@ -XXX,XX +XXX,XX @@ void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore);
20
void bdrv_drained_begin(BlockDriverState *bs);
21
22
/**
23
+ * Like bdrv_drained_begin, but recursively begins a quiesced section for
24
+ * exclusive access to all child nodes as well.
25
+ *
26
+ * Graph changes are not allowed during a subtree drain section.
27
+ */
28
+void bdrv_subtree_drained_begin(BlockDriverState *bs);
29
+
30
+/**
31
* bdrv_drained_end:
32
*
33
* End a quiescent section started by bdrv_drained_begin().
34
*/
35
void bdrv_drained_end(BlockDriverState *bs);
36
37
+/**
38
+ * End a quiescent section started by bdrv_subtree_drained_begin().
39
+ */
40
+void bdrv_subtree_drained_end(BlockDriverState *bs);
41
+
42
void bdrv_add_child(BlockDriverState *parent, BlockDriverState *child,
43
Error **errp);
44
void bdrv_del_child(BlockDriverState *parent, BdrvChild *child, Error **errp);
45
diff --git a/block/io.c b/block/io.c
46
index XXXXXXX..XXXXXXX 100644
47
--- a/block/io.c
48
+++ b/block/io.c
49
@@ -XXX,XX +XXX,XX @@ typedef struct {
50
BlockDriverState *bs;
51
bool done;
52
bool begin;
53
+ bool recursive;
54
BdrvChild *parent;
55
} BdrvCoDrainData;
56
57
@@ -XXX,XX +XXX,XX @@ static bool bdrv_drain_recurse(BlockDriverState *bs)
58
return waited;
59
}
60
61
-static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent);
62
-static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent);
63
+static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
64
+ BdrvChild *parent);
65
+static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
66
+ BdrvChild *parent);
67
68
static void bdrv_co_drain_bh_cb(void *opaque)
69
{
70
@@ -XXX,XX +XXX,XX @@ static void bdrv_co_drain_bh_cb(void *opaque)
71
72
bdrv_dec_in_flight(bs);
73
if (data->begin) {
74
- bdrv_do_drained_begin(bs, data->parent);
75
+ bdrv_do_drained_begin(bs, data->recursive, data->parent);
76
} else {
77
- bdrv_do_drained_end(bs, data->parent);
78
+ bdrv_do_drained_end(bs, data->recursive, data->parent);
79
}
80
81
data->done = true;
82
@@ -XXX,XX +XXX,XX @@ static void bdrv_co_drain_bh_cb(void *opaque)
83
}
84
85
static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
86
- bool begin, BdrvChild *parent)
87
+ bool begin, bool recursive,
88
+ BdrvChild *parent)
89
{
90
BdrvCoDrainData data;
91
92
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
93
.bs = bs,
94
.done = false,
95
.begin = begin,
96
+ .recursive = recursive,
97
.parent = parent,
98
};
99
bdrv_inc_in_flight(bs);
100
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
101
assert(data.done);
102
}
103
104
-static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent)
105
+static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
106
+ BdrvChild *parent)
107
{
108
+ BdrvChild *child, *next;
109
+
110
if (qemu_in_coroutine()) {
111
- bdrv_co_yield_to_drain(bs, true, parent);
112
+ bdrv_co_yield_to_drain(bs, true, recursive, parent);
113
return;
114
}
115
116
@@ -XXX,XX +XXX,XX @@ static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent)
117
bdrv_parent_drained_begin(bs, parent);
118
bdrv_drain_invoke(bs, true, false);
119
bdrv_drain_recurse(bs);
120
+
121
+ if (recursive) {
122
+ QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
123
+ bdrv_do_drained_begin(child->bs, true, child);
124
+ }
125
+ }
126
}
127
128
void bdrv_drained_begin(BlockDriverState *bs)
129
{
130
- bdrv_do_drained_begin(bs, NULL);
131
+ bdrv_do_drained_begin(bs, false, NULL);
132
+}
133
+
134
+void bdrv_subtree_drained_begin(BlockDriverState *bs)
135
+{
136
+ bdrv_do_drained_begin(bs, true, NULL);
137
}
138
139
-static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
140
+static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
141
+ BdrvChild *parent)
142
{
143
+ BdrvChild *child, *next;
144
int old_quiesce_counter;
145
146
if (qemu_in_coroutine()) {
147
- bdrv_co_yield_to_drain(bs, false, parent);
148
+ bdrv_co_yield_to_drain(bs, false, recursive, parent);
149
return;
150
}
151
assert(bs->quiesce_counter > 0);
152
@@ -XXX,XX +XXX,XX @@ static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
153
if (old_quiesce_counter == 1) {
154
aio_enable_external(bdrv_get_aio_context(bs));
155
}
156
+
157
+ if (recursive) {
158
+ QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
159
+ bdrv_do_drained_end(child->bs, true, child);
160
+ }
161
+ }
162
}
163
164
void bdrv_drained_end(BlockDriverState *bs)
165
{
166
- bdrv_do_drained_end(bs, NULL);
167
+ bdrv_do_drained_end(bs, false, NULL);
168
+}
169
+
170
+void bdrv_subtree_drained_end(BlockDriverState *bs)
171
+{
172
+ bdrv_do_drained_end(bs, true, NULL);
173
}
174
175
/*
176
--
177
2.13.6
178
179
New patch
1
Add a subtree drain version to the existing test cases.
1
2
3
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
4
---
5
tests/test-bdrv-drain.c | 27 ++++++++++++++++++++++++++-
6
1 file changed, 26 insertions(+), 1 deletion(-)
7
8
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tests/test-bdrv-drain.c
11
+++ b/tests/test-bdrv-drain.c
12
@@ -XXX,XX +XXX,XX @@ static void aio_ret_cb(void *opaque, int ret)
13
enum drain_type {
14
BDRV_DRAIN_ALL,
15
BDRV_DRAIN,
16
+ BDRV_SUBTREE_DRAIN,
17
DRAIN_TYPE_MAX,
18
};
19
20
@@ -XXX,XX +XXX,XX @@ static void do_drain_begin(enum drain_type drain_type, BlockDriverState *bs)
21
switch (drain_type) {
22
case BDRV_DRAIN_ALL: bdrv_drain_all_begin(); break;
23
case BDRV_DRAIN: bdrv_drained_begin(bs); break;
24
+ case BDRV_SUBTREE_DRAIN: bdrv_subtree_drained_begin(bs); break;
25
default: g_assert_not_reached();
26
}
27
}
28
@@ -XXX,XX +XXX,XX @@ static void do_drain_end(enum drain_type drain_type, BlockDriverState *bs)
29
switch (drain_type) {
30
case BDRV_DRAIN_ALL: bdrv_drain_all_end(); break;
31
case BDRV_DRAIN: bdrv_drained_end(bs); break;
32
+ case BDRV_SUBTREE_DRAIN: bdrv_subtree_drained_end(bs); break;
33
default: g_assert_not_reached();
34
}
35
}
36
@@ -XXX,XX +XXX,XX @@ static void test_drv_cb_drain(void)
37
test_drv_cb_common(BDRV_DRAIN, false);
38
}
39
40
+static void test_drv_cb_drain_subtree(void)
41
+{
42
+ test_drv_cb_common(BDRV_SUBTREE_DRAIN, true);
43
+}
44
+
45
static void test_quiesce_common(enum drain_type drain_type, bool recursive)
46
{
47
BlockBackend *blk;
48
@@ -XXX,XX +XXX,XX @@ static void test_quiesce_drain(void)
49
test_quiesce_common(BDRV_DRAIN, false);
50
}
51
52
+static void test_quiesce_drain_subtree(void)
53
+{
54
+ test_quiesce_common(BDRV_SUBTREE_DRAIN, true);
55
+}
56
+
57
static void test_nested(void)
58
{
59
BlockBackend *blk;
60
@@ -XXX,XX +XXX,XX @@ static void test_nested(void)
61
/* XXX bdrv_drain_all() doesn't increase the quiesce_counter */
62
int bs_quiesce = (outer != BDRV_DRAIN_ALL) +
63
(inner != BDRV_DRAIN_ALL);
64
- int backing_quiesce = 0;
65
+ int backing_quiesce = (outer == BDRV_SUBTREE_DRAIN) +
66
+ (inner == BDRV_SUBTREE_DRAIN);
67
int backing_cb_cnt = (outer != BDRV_DRAIN) +
68
(inner != BDRV_DRAIN);
69
70
@@ -XXX,XX +XXX,XX @@ static void test_blockjob_drain(void)
71
test_blockjob_common(BDRV_DRAIN);
72
}
73
74
+static void test_blockjob_drain_subtree(void)
75
+{
76
+ test_blockjob_common(BDRV_SUBTREE_DRAIN);
77
+}
78
+
79
int main(int argc, char **argv)
80
{
81
bdrv_init();
82
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
83
84
g_test_add_func("/bdrv-drain/driver-cb/drain_all", test_drv_cb_drain_all);
85
g_test_add_func("/bdrv-drain/driver-cb/drain", test_drv_cb_drain);
86
+ g_test_add_func("/bdrv-drain/driver-cb/drain_subtree",
87
+ test_drv_cb_drain_subtree);
88
89
g_test_add_func("/bdrv-drain/quiesce/drain_all", test_quiesce_drain_all);
90
g_test_add_func("/bdrv-drain/quiesce/drain", test_quiesce_drain);
91
+ g_test_add_func("/bdrv-drain/quiesce/drain_subtree",
92
+ test_quiesce_drain_subtree);
93
94
g_test_add_func("/bdrv-drain/nested", test_nested);
95
96
g_test_add_func("/bdrv-drain/blockjob/drain_all", test_blockjob_drain_all);
97
g_test_add_func("/bdrv-drain/blockjob/drain", test_blockjob_drain);
98
+ g_test_add_func("/bdrv-drain/blockjob/drain_subtree",
99
+ test_blockjob_drain_subtree);
100
101
return g_test_run();
102
}
103
--
104
2.13.6
105
106
New patch
1
If bdrv_do_drained_begin/end() are called in coroutine context, they
2
first use a bottom half (BH) to get out of the coroutine context. Call some existing
3
tests again from a coroutine to cover this code path.
1
4
5
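As a reminder of the code path that these tests exercise, here is a
simplified sketch (not the exact code from the earlier patch) of the bounce
pattern used by bdrv_co_yield_to_drain(): the coroutine schedules a bottom
half in the node's AioContext, yields, and the BH performs the drain outside
of coroutine context before waking the coroutine again:

    /* Sketch only; assumes the usual QEMU block layer and coroutine headers. */
    typedef struct {
        BlockDriverState *bs;
        Coroutine *co;
        bool done;
        bool begin;
    } SketchDrainData;

    static void sketch_drain_bh(void *opaque)
    {
        SketchDrainData *d = opaque;

        /* We are outside of coroutine context now, so polling is allowed. */
        if (d->begin) {
            bdrv_drained_begin(d->bs);
        } else {
            bdrv_drained_end(d->bs);
        }
        d->done = true;
        aio_co_wake(d->co);               /* re-enter the waiting coroutine */
    }

    static void coroutine_fn sketch_yield_to_drain(BlockDriverState *bs,
                                                   bool begin)
    {
        SketchDrainData d = {
            .bs    = bs,
            .co    = qemu_coroutine_self(),
            .done  = false,
            .begin = begin,
        };

        aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
                                sketch_drain_bh, &d);
        qemu_coroutine_yield();           /* resumed by sketch_drain_bh() */
        assert(d.done);
    }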
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
6
---
7
tests/test-bdrv-drain.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++++
8
1 file changed, 59 insertions(+)
9
10
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tests/test-bdrv-drain.c
13
+++ b/tests/test-bdrv-drain.c
14
@@ -XXX,XX +XXX,XX @@ static void aio_ret_cb(void *opaque, int ret)
15
*aio_ret = ret;
16
}
17
18
+typedef struct CallInCoroutineData {
19
+ void (*entry)(void);
20
+ bool done;
21
+} CallInCoroutineData;
22
+
23
+static coroutine_fn void call_in_coroutine_entry(void *opaque)
24
+{
25
+ CallInCoroutineData *data = opaque;
26
+
27
+ data->entry();
28
+ data->done = true;
29
+}
30
+
31
+static void call_in_coroutine(void (*entry)(void))
32
+{
33
+ Coroutine *co;
34
+ CallInCoroutineData data = {
35
+ .entry = entry,
36
+ .done = false,
37
+ };
38
+
39
+ co = qemu_coroutine_create(call_in_coroutine_entry, &data);
40
+ qemu_coroutine_enter(co);
41
+ while (!data.done) {
42
+ aio_poll(qemu_get_aio_context(), true);
43
+ }
44
+}
45
+
46
enum drain_type {
47
BDRV_DRAIN_ALL,
48
BDRV_DRAIN,
49
@@ -XXX,XX +XXX,XX @@ static void test_drv_cb_drain_subtree(void)
50
test_drv_cb_common(BDRV_SUBTREE_DRAIN, true);
51
}
52
53
+static void test_drv_cb_co_drain(void)
54
+{
55
+ call_in_coroutine(test_drv_cb_drain);
56
+}
57
+
58
+static void test_drv_cb_co_drain_subtree(void)
59
+{
60
+ call_in_coroutine(test_drv_cb_drain_subtree);
61
+}
62
+
63
static void test_quiesce_common(enum drain_type drain_type, bool recursive)
64
{
65
BlockBackend *blk;
66
@@ -XXX,XX +XXX,XX @@ static void test_quiesce_drain_subtree(void)
67
test_quiesce_common(BDRV_SUBTREE_DRAIN, true);
68
}
69
70
+static void test_quiesce_co_drain(void)
71
+{
72
+ call_in_coroutine(test_quiesce_drain);
73
+}
74
+
75
+static void test_quiesce_co_drain_subtree(void)
76
+{
77
+ call_in_coroutine(test_quiesce_drain_subtree);
78
+}
79
+
80
static void test_nested(void)
81
{
82
BlockBackend *blk;
83
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
84
g_test_add_func("/bdrv-drain/driver-cb/drain_subtree",
85
test_drv_cb_drain_subtree);
86
87
+ // XXX bdrv_drain_all() doesn't work in coroutine context
88
+ g_test_add_func("/bdrv-drain/driver-cb/co/drain", test_drv_cb_co_drain);
89
+ g_test_add_func("/bdrv-drain/driver-cb/co/drain_subtree",
90
+ test_drv_cb_co_drain_subtree);
91
+
92
+
93
g_test_add_func("/bdrv-drain/quiesce/drain_all", test_quiesce_drain_all);
94
g_test_add_func("/bdrv-drain/quiesce/drain", test_quiesce_drain);
95
g_test_add_func("/bdrv-drain/quiesce/drain_subtree",
96
test_quiesce_drain_subtree);
97
98
+ // XXX bdrv_drain_all() doesn't work in coroutine context
99
+ g_test_add_func("/bdrv-drain/quiesce/co/drain", test_quiesce_co_drain);
100
+ g_test_add_func("/bdrv-drain/quiesce/co/drain_subtree",
101
+ test_quiesce_co_drain_subtree);
102
+
103
g_test_add_func("/bdrv-drain/nested", test_nested);
104
105
g_test_add_func("/bdrv-drain/blockjob/drain_all", test_blockjob_drain_all);
106
--
107
2.13.6
108
109
1
From: Max Reitz <mreitz@redhat.com>
1
Test that drain sections are correctly propagated through the graph.
2
2
3
223 tests persistent dirty bitmaps, which are not supported with
4
compat=0.10, so that image option is declared unsupported for this test.
5
6
Signed-off-by: Max Reitz <mreitz@redhat.com>
7
Tested-by: John Snow <jsnow@redhat.com>
8
Reviewed-by: John Snow <jsnow@redhat.com>
9
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
3
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
10
---
4
---
11
tests/qemu-iotests/223 | 2 ++
5
tests/test-bdrv-drain.c | 74 +++++++++++++++++++++++++++++++++++++++++++++++++
12
1 file changed, 2 insertions(+)
6
1 file changed, 74 insertions(+)
13
7
14
diff --git a/tests/qemu-iotests/223 b/tests/qemu-iotests/223
8
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
15
index XXXXXXX..XXXXXXX 100755
9
index XXXXXXX..XXXXXXX 100644
16
--- a/tests/qemu-iotests/223
10
--- a/tests/test-bdrv-drain.c
17
+++ b/tests/qemu-iotests/223
11
+++ b/tests/test-bdrv-drain.c
18
@@ -XXX,XX +XXX,XX @@ trap "_cleanup; exit \$status" 0 1 2 3 15
12
@@ -XXX,XX +XXX,XX @@ static void test_nested(void)
19
_supported_fmt qcow2
13
blk_unref(blk);
20
_supported_proto file # uses NBD as well
14
}
21
_supported_os Linux
15
22
+# Persistent dirty bitmaps require compat=1.1
16
+static void test_multiparent(void)
23
+_unsupported_imgopts 'compat=0.10'
17
+{
24
18
+ BlockBackend *blk_a, *blk_b;
25
function do_run_qemu()
19
+ BlockDriverState *bs_a, *bs_b, *backing;
26
{
20
+ BDRVTestState *a_s, *b_s, *backing_s;
21
+
22
+ blk_a = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
23
+ bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
24
+ &error_abort);
25
+ a_s = bs_a->opaque;
26
+ blk_insert_bs(blk_a, bs_a, &error_abort);
27
+
28
+ blk_b = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
29
+ bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
30
+ &error_abort);
31
+ b_s = bs_b->opaque;
32
+ blk_insert_bs(blk_b, bs_b, &error_abort);
33
+
34
+ backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
35
+ backing_s = backing->opaque;
36
+ bdrv_set_backing_hd(bs_a, backing, &error_abort);
37
+ bdrv_set_backing_hd(bs_b, backing, &error_abort);
38
+
39
+ g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
40
+ g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
41
+ g_assert_cmpint(backing->quiesce_counter, ==, 0);
42
+ g_assert_cmpint(a_s->drain_count, ==, 0);
43
+ g_assert_cmpint(b_s->drain_count, ==, 0);
44
+ g_assert_cmpint(backing_s->drain_count, ==, 0);
45
+
46
+ do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
47
+
48
+ g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
49
+ g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
50
+ g_assert_cmpint(backing->quiesce_counter, ==, 1);
51
+ g_assert_cmpint(a_s->drain_count, ==, 1);
52
+ g_assert_cmpint(b_s->drain_count, ==, 1);
53
+ g_assert_cmpint(backing_s->drain_count, ==, 1);
54
+
55
+ do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b);
56
+
57
+ g_assert_cmpint(bs_a->quiesce_counter, ==, 2);
58
+ g_assert_cmpint(bs_b->quiesce_counter, ==, 2);
59
+ g_assert_cmpint(backing->quiesce_counter, ==, 2);
60
+ g_assert_cmpint(a_s->drain_count, ==, 2);
61
+ g_assert_cmpint(b_s->drain_count, ==, 2);
62
+ g_assert_cmpint(backing_s->drain_count, ==, 2);
63
+
64
+ do_drain_end(BDRV_SUBTREE_DRAIN, bs_b);
65
+
66
+ g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
67
+ g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
68
+ g_assert_cmpint(backing->quiesce_counter, ==, 1);
69
+ g_assert_cmpint(a_s->drain_count, ==, 1);
70
+ g_assert_cmpint(b_s->drain_count, ==, 1);
71
+ g_assert_cmpint(backing_s->drain_count, ==, 1);
72
+
73
+ do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
74
+
75
+ g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
76
+ g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
77
+ g_assert_cmpint(backing->quiesce_counter, ==, 0);
78
+ g_assert_cmpint(a_s->drain_count, ==, 0);
79
+ g_assert_cmpint(b_s->drain_count, ==, 0);
80
+ g_assert_cmpint(backing_s->drain_count, ==, 0);
81
+
82
+ bdrv_unref(backing);
83
+ bdrv_unref(bs_a);
84
+ bdrv_unref(bs_b);
85
+ blk_unref(blk_a);
86
+ blk_unref(blk_b);
87
+}
88
+
89
90
typedef struct TestBlockJob {
91
BlockJob common;
92
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
93
test_quiesce_co_drain_subtree);
94
95
g_test_add_func("/bdrv-drain/nested", test_nested);
96
+ g_test_add_func("/bdrv-drain/multiparent", test_multiparent);
97
98
g_test_add_func("/bdrv-drain/blockjob/drain_all", test_blockjob_drain_all);
99
g_test_add_func("/bdrv-drain/blockjob/drain", test_blockjob_drain);
27
--
100
--
28
2.13.6
101
2.13.6
29
102
30
103
1
The test case uses block devices with driver=file, which causes the test
1
We need to remember how many of the drain sections covering a node
2
to fail after commit 230ff73904 added a deprecation warning for this.
2
were recursive (i.e. subtree drain rather than node drain), so that they
3
Fix the test case to use driver=host_device and update the reference
3
can be correctly applied when children are added or removed during the
4
output accordingly.
4
drained section.
5
6
With this change, it is safe to modify the graph even inside a
7
bdrv_subtree_drained_begin/end() section.
5
8
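In other words, once the recursive counter is applied on attach and
unapplied on detach, something like the following becomes valid inside a
subtree drained section (a sketch in the spirit of the commit message above;
bdrv_set_backing_hd() is used as in the tests, and new_backing is assumed to
be an already opened node):

    /* Sketch only: change the graph while the subtree rooted at bs is drained.
     * The attach callback drains new_backing as many times as bs currently
     * has recursive (subtree) drains, and the detach callback undoes that. */
    void example_graph_change_while_drained(BlockDriverState *bs,
                                            BlockDriverState *new_backing)
    {
        bdrv_subtree_drained_begin(bs);

        /* Attaching or detaching children is now safe in here. */
        bdrv_set_backing_hd(bs, new_backing, &error_abort);

        bdrv_subtree_drained_end(bs);
    }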
6
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
9
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
7
---
10
---
8
tests/qemu-iotests/149 | 2 +-
11
include/block/block.h | 2 --
9
tests/qemu-iotests/149.out | 344 ++++++++++++++++++++++-----------------------
12
include/block/block_int.h | 5 +++++
10
2 files changed, 173 insertions(+), 173 deletions(-)
13
block.c | 32 +++++++++++++++++++++++++++++---
11
14
block/io.c | 28 ++++++++++++++++++++++++----
12
diff --git a/tests/qemu-iotests/149 b/tests/qemu-iotests/149
15
4 files changed, 58 insertions(+), 9 deletions(-)
13
index XXXXXXX..XXXXXXX 100755
16
14
--- a/tests/qemu-iotests/149
17
diff --git a/include/block/block.h b/include/block/block.h
15
+++ b/tests/qemu-iotests/149
18
index XXXXXXX..XXXXXXX 100644
16
@@ -XXX,XX +XXX,XX @@ def qemu_io_image_args(config, dev=False):
19
--- a/include/block/block.h
17
if dev:
20
+++ b/include/block/block.h
18
return [
21
@@ -XXX,XX +XXX,XX @@ void bdrv_drained_begin(BlockDriverState *bs);
19
"--image-opts",
22
/**
20
- "driver=file,filename=%s" % config.device_path()]
23
* Like bdrv_drained_begin, but recursively begins a quiesced section for
21
+ "driver=host_device,filename=%s" % config.device_path()]
24
* exclusive access to all child nodes as well.
22
else:
25
- *
23
return [
26
- * Graph changes are not allowed during a subtree drain section.
24
"--object",
27
*/
25
diff --git a/tests/qemu-iotests/149.out b/tests/qemu-iotests/149.out
28
void bdrv_subtree_drained_begin(BlockDriverState *bs);
26
index XXXXXXX..XXXXXXX 100644
29
27
--- a/tests/qemu-iotests/149.out
30
diff --git a/include/block/block_int.h b/include/block/block_int.h
28
+++ b/tests/qemu-iotests/149.out
31
index XXXXXXX..XXXXXXX 100644
29
@@ -XXX,XX +XXX,XX @@ sudo cryptsetup -q -v luksFormat --cipher aes-xts-plain64 --key-size 512 --hash
32
--- a/include/block/block_int.h
30
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha1.img qiotest-145-aes-256-xts-plain64-sha1
33
+++ b/include/block/block_int.h
31
# Write test pattern 0xa7
34
@@ -XXX,XX +XXX,XX @@ struct BlockDriverState {
32
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha1
35
33
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha1
36
/* Accessed with atomic ops. */
34
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha1
37
int quiesce_counter;
35
wrote 10485760/10485760 bytes at offset 104857600
38
+ int recursive_quiesce_counter;
36
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
39
+
37
40
unsigned int write_gen; /* Current data generation */
38
# Write test pattern 0x13
41
39
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha1
42
/* Protected by reqs_lock. */
40
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha1
43
@@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
41
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha1
44
int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
42
wrote 10485760/10485760 bytes at offset 3298534883328
45
BdrvRequestFlags flags);
43
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
46
44
47
+void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent);
45
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
48
+void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent);
46
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha1.img qiotest-145-aes-256-xts-plain64-sha1
49
+
47
# Read test pattern 0x91
50
int get_tmp_filename(char *filename, int size);
48
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha1
51
BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
49
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha1
52
const char *filename);
50
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha1
53
diff --git a/block.c b/block.c
51
read 10485760/10485760 bytes at offset 104857600
54
index XXXXXXX..XXXXXXX 100644
52
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
55
--- a/block.c
53
56
+++ b/block.c
54
# Read test pattern 0x5e
57
@@ -XXX,XX +XXX,XX @@ static void bdrv_child_cb_drained_end(BdrvChild *child)
55
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha1
58
bdrv_drained_end(bs);
56
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha1
59
}
57
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha1
60
58
read 10485760/10485760 bytes at offset 3298534883328
61
+static void bdrv_child_cb_attach(BdrvChild *child)
59
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
62
+{
60
63
+ BlockDriverState *bs = child->opaque;
61
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/luks-aes-256-xts-plain64-sha1.img', fmt=luks size=439804651
64
+ bdrv_apply_subtree_drain(child, bs);
62
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha1.img qiotest-145-aes-256-xts-plain64-sha1
65
+}
63
# Write test pattern 0xa7
66
+
64
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha1
67
+static void bdrv_child_cb_detach(BdrvChild *child)
65
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha1
68
+{
66
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha1
69
+ BlockDriverState *bs = child->opaque;
67
wrote 10485760/10485760 bytes at offset 104857600
70
+ bdrv_unapply_subtree_drain(child, bs);
68
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
71
+}
69
72
+
70
# Write test pattern 0x13
73
static int bdrv_child_cb_inactivate(BdrvChild *child)
71
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha1
74
{
72
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha1
75
BlockDriverState *bs = child->opaque;
73
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha1
76
@@ -XXX,XX +XXX,XX @@ const BdrvChildRole child_file = {
74
wrote 10485760/10485760 bytes at offset 3298534883328
77
.inherit_options = bdrv_inherited_options,
75
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
78
.drained_begin = bdrv_child_cb_drained_begin,
76
79
.drained_end = bdrv_child_cb_drained_end,
77
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
80
+ .attach = bdrv_child_cb_attach,
78
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha1.img qiotest-145-aes-256-xts-plain64-sha1
81
+ .detach = bdrv_child_cb_detach,
79
# Read test pattern 0x91
82
.inactivate = bdrv_child_cb_inactivate,
80
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha1
83
};
81
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha1
84
82
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha1
85
@@ -XXX,XX +XXX,XX @@ const BdrvChildRole child_format = {
83
read 10485760/10485760 bytes at offset 104857600
86
.inherit_options = bdrv_inherited_fmt_options,
84
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
87
.drained_begin = bdrv_child_cb_drained_begin,
85
88
.drained_end = bdrv_child_cb_drained_end,
86
# Read test pattern 0x5e
89
+ .attach = bdrv_child_cb_attach,
87
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha1
90
+ .detach = bdrv_child_cb_detach,
88
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha1
91
.inactivate = bdrv_child_cb_inactivate,
89
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha1
92
};
90
read 10485760/10485760 bytes at offset 3298534883328
93
91
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
94
@@ -XXX,XX +XXX,XX @@ static void bdrv_backing_attach(BdrvChild *c)
92
95
parent->backing_blocker);
93
@@ -XXX,XX +XXX,XX @@ sudo cryptsetup -q -v luksFormat --cipher twofish-xts-plain64 --key-size 512 --h
96
bdrv_op_unblock(backing_hd, BLOCK_OP_TYPE_BACKUP_TARGET,
94
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-twofish-256-xts-plain64-sha1.img qiotest-145-twofish-256-xts-plain64-sha1
97
parent->backing_blocker);
95
# Write test pattern 0xa7
98
+
96
sudo chown UID:GID /dev/mapper/qiotest-145-twofish-256-xts-plain64-sha1
99
+ bdrv_child_cb_attach(c);
97
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-twofish-256-xts-plain64-sha1
100
}
98
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-twofish-256-xts-plain64-sha1
101
99
wrote 10485760/10485760 bytes at offset 104857600
102
static void bdrv_backing_detach(BdrvChild *c)
100
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
103
@@ -XXX,XX +XXX,XX @@ static void bdrv_backing_detach(BdrvChild *c)
101
104
bdrv_op_unblock_all(c->bs, parent->backing_blocker);
102
# Write test pattern 0x13
105
error_free(parent->backing_blocker);
103
sudo chown UID:GID /dev/mapper/qiotest-145-twofish-256-xts-plain64-sha1
106
parent->backing_blocker = NULL;
104
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-twofish-256-xts-plain64-sha1
107
+
105
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-twofish-256-xts-plain64-sha1
108
+ bdrv_child_cb_detach(c);
106
wrote 10485760/10485760 bytes at offset 3298534883328
109
}
107
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
110
108
111
/*
109
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
112
@@ -XXX,XX +XXX,XX @@ static void bdrv_replace_child_noperm(BdrvChild *child,
110
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-twofish-256-xts-plain64-sha1.img qiotest-145-twofish-256-xts-plain64-sha1
113
assert(bdrv_get_aio_context(old_bs) == bdrv_get_aio_context(new_bs));
111
# Read test pattern 0x91
114
}
112
sudo chown UID:GID /dev/mapper/qiotest-145-twofish-256-xts-plain64-sha1
115
if (old_bs) {
113
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-twofish-256-xts-plain64-sha1
116
+ /* Detach first so that the recursive drain sections coming from @child
114
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-twofish-256-xts-plain64-sha1
117
+ * are already gone and we only end the drain sections that came from
115
read 10485760/10485760 bytes at offset 104857600
118
+ * elsewhere. */
116
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
119
+ if (child->role->detach) {
117
120
+ child->role->detach(child);
118
# Read test pattern 0x5e
121
+ }
119
sudo chown UID:GID /dev/mapper/qiotest-145-twofish-256-xts-plain64-sha1
122
if (old_bs->quiesce_counter && child->role->drained_end) {
120
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-twofish-256-xts-plain64-sha1
123
for (i = 0; i < old_bs->quiesce_counter; i++) {
121
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-twofish-256-xts-plain64-sha1
124
child->role->drained_end(child);
122
read 10485760/10485760 bytes at offset 3298534883328
125
}
123
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
126
}
124
127
- if (child->role->detach) {
125
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/luks-twofish-256-xts-plain64-sha1.img', fmt=luks size=43980
128
- child->role->detach(child);
126
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-twofish-256-xts-plain64-sha1.img qiotest-145-twofish-256-xts-plain64-sha1
129
- }
127
# Write test pattern 0xa7
130
QLIST_REMOVE(child, next_parent);
128
sudo chown UID:GID /dev/mapper/qiotest-145-twofish-256-xts-plain64-sha1
131
}
129
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-twofish-256-xts-plain64-sha1
132
130
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-twofish-256-xts-plain64-sha1
133
@@ -XXX,XX +XXX,XX @@ static void bdrv_replace_child_noperm(BdrvChild *child,
131
wrote 10485760/10485760 bytes at offset 104857600
134
}
132
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
135
}
133
136
134
# Write test pattern 0x13
137
+ /* Attach only after starting new drained sections, so that recursive
135
sudo chown UID:GID /dev/mapper/qiotest-145-twofish-256-xts-plain64-sha1
138
+ * drain sections coming from @child don't get an extra .drained_begin
136
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-twofish-256-xts-plain64-sha1
139
+ * callback. */
137
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-twofish-256-xts-plain64-sha1
140
if (child->role->attach) {
138
wrote 10485760/10485760 bytes at offset 3298534883328
141
child->role->attach(child);
139
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
142
}
140
143
diff --git a/block/io.c b/block/io.c
141
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
144
index XXXXXXX..XXXXXXX 100644
142
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-twofish-256-xts-plain64-sha1.img qiotest-145-twofish-256-xts-plain64-sha1
145
--- a/block/io.c
143
# Read test pattern 0x91
146
+++ b/block/io.c
144
sudo chown UID:GID /dev/mapper/qiotest-145-twofish-256-xts-plain64-sha1
147
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
145
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-twofish-256-xts-plain64-sha1
148
assert(data.done);
146
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-twofish-256-xts-plain64-sha1
149
}
147
read 10485760/10485760 bytes at offset 104857600
150
148
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
151
-static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
149
152
- BdrvChild *parent)
150
# Read test pattern 0x5e
153
+void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
151
sudo chown UID:GID /dev/mapper/qiotest-145-twofish-256-xts-plain64-sha1
154
+ BdrvChild *parent)
152
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-twofish-256-xts-plain64-sha1
155
{
153
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-twofish-256-xts-plain64-sha1
156
BdrvChild *child, *next;
154
read 10485760/10485760 bytes at offset 3298534883328
157
155
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
158
@@ -XXX,XX +XXX,XX @@ static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
156
159
bdrv_drain_recurse(bs);
157
@@ -XXX,XX +XXX,XX @@ sudo cryptsetup -q -v luksFormat --cipher serpent-xts-plain64 --key-size 512 --h
160
158
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-serpent-256-xts-plain64-sha1.img qiotest-145-serpent-256-xts-plain64-sha1
161
if (recursive) {
159
# Write test pattern 0xa7
162
+ bs->recursive_quiesce_counter++;
160
sudo chown UID:GID /dev/mapper/qiotest-145-serpent-256-xts-plain64-sha1
163
QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
161
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-serpent-256-xts-plain64-sha1
164
bdrv_do_drained_begin(child->bs, true, child);
162
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-serpent-256-xts-plain64-sha1
165
}
163
wrote 10485760/10485760 bytes at offset 104857600
166
@@ -XXX,XX +XXX,XX @@ void bdrv_subtree_drained_begin(BlockDriverState *bs)
164
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
167
bdrv_do_drained_begin(bs, true, NULL);
165
168
}
166
# Write test pattern 0x13
169
167
sudo chown UID:GID /dev/mapper/qiotest-145-serpent-256-xts-plain64-sha1
170
-static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
168
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-serpent-256-xts-plain64-sha1
171
- BdrvChild *parent)
169
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-serpent-256-xts-plain64-sha1
172
+void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
170
wrote 10485760/10485760 bytes at offset 3298534883328
173
+ BdrvChild *parent)
171
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
174
{
172
175
BdrvChild *child, *next;
173
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
176
int old_quiesce_counter;
174
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-serpent-256-xts-plain64-sha1.img qiotest-145-serpent-256-xts-plain64-sha1
177
@@ -XXX,XX +XXX,XX @@ static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
175
# Read test pattern 0x91
178
}
176
sudo chown UID:GID /dev/mapper/qiotest-145-serpent-256-xts-plain64-sha1
179
177
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-serpent-256-xts-plain64-sha1
180
if (recursive) {
178
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-serpent-256-xts-plain64-sha1
181
+ bs->recursive_quiesce_counter--;
179
read 10485760/10485760 bytes at offset 104857600
182
QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
180
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
183
bdrv_do_drained_end(child->bs, true, child);
181
184
}
182
# Read test pattern 0x5e
185
@@ -XXX,XX +XXX,XX @@ void bdrv_subtree_drained_end(BlockDriverState *bs)
183
sudo chown UID:GID /dev/mapper/qiotest-145-serpent-256-xts-plain64-sha1
186
bdrv_do_drained_end(bs, true, NULL);
184
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-serpent-256-xts-plain64-sha1
187
}
185
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-serpent-256-xts-plain64-sha1
188
186
read 10485760/10485760 bytes at offset 3298534883328
189
+void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
187
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
190
+{
188
191
+ int i;
189
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/luks-serpent-256-xts-plain64-sha1.img', fmt=luks size=43980
192
+
190
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-serpent-256-xts-plain64-sha1.img qiotest-145-serpent-256-xts-plain64-sha1
193
+ for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
191
# Write test pattern 0xa7
194
+ bdrv_do_drained_begin(child->bs, true, child);
192
sudo chown UID:GID /dev/mapper/qiotest-145-serpent-256-xts-plain64-sha1
195
+ }
193
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-serpent-256-xts-plain64-sha1
196
+}
194
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-serpent-256-xts-plain64-sha1
197
+
195
wrote 10485760/10485760 bytes at offset 104857600
198
+void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
196
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
199
+{
197
200
+ int i;
198
# Write test pattern 0x13
201
+
199
sudo chown UID:GID /dev/mapper/qiotest-145-serpent-256-xts-plain64-sha1
202
+ for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
200
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-serpent-256-xts-plain64-sha1
203
+ bdrv_do_drained_end(child->bs, true, child);
201
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-serpent-256-xts-plain64-sha1
204
+ }
202
wrote 10485760/10485760 bytes at offset 3298534883328
205
+}
203
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
206
+
204
207
/*
205
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
208
* Wait for pending requests to complete on a single BlockDriverState subtree,
206
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-serpent-256-xts-plain64-sha1.img qiotest-145-serpent-256-xts-plain64-sha1
209
* and suspend block driver's internal I/O until next request arrives.
207
# Read test pattern 0x91
208
sudo chown UID:GID /dev/mapper/qiotest-145-serpent-256-xts-plain64-sha1
209
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-serpent-256-xts-plain64-sha1
210
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-serpent-256-xts-plain64-sha1
211
read 10485760/10485760 bytes at offset 104857600
212
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
213
214
# Read test pattern 0x5e
215
sudo chown UID:GID /dev/mapper/qiotest-145-serpent-256-xts-plain64-sha1
216
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-serpent-256-xts-plain64-sha1
217
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-serpent-256-xts-plain64-sha1
218
read 10485760/10485760 bytes at offset 3298534883328
219
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
220
221
@@ -XXX,XX +XXX,XX @@ sudo cryptsetup -q -v luksFormat --cipher cast5-cbc-plain64 --key-size 128 --has
222
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-cast5-128-cbc-plain64-sha1.img qiotest-145-cast5-128-cbc-plain64-sha1
223
# Write test pattern 0xa7
224
sudo chown UID:GID /dev/mapper/qiotest-145-cast5-128-cbc-plain64-sha1
225
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-cast5-128-cbc-plain64-sha1
226
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-cast5-128-cbc-plain64-sha1
227
wrote 10485760/10485760 bytes at offset 104857600
228
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
229
230
# Write test pattern 0x13
231
sudo chown UID:GID /dev/mapper/qiotest-145-cast5-128-cbc-plain64-sha1
232
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-cast5-128-cbc-plain64-sha1
233
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-cast5-128-cbc-plain64-sha1
234
wrote 10485760/10485760 bytes at offset 3298534883328
235
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
236
237
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
238
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-cast5-128-cbc-plain64-sha1.img qiotest-145-cast5-128-cbc-plain64-sha1
239
# Read test pattern 0x91
240
sudo chown UID:GID /dev/mapper/qiotest-145-cast5-128-cbc-plain64-sha1
241
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-cast5-128-cbc-plain64-sha1
242
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-cast5-128-cbc-plain64-sha1
243
read 10485760/10485760 bytes at offset 104857600
244
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
245
246
# Read test pattern 0x5e
247
sudo chown UID:GID /dev/mapper/qiotest-145-cast5-128-cbc-plain64-sha1
248
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-cast5-128-cbc-plain64-sha1
249
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-cast5-128-cbc-plain64-sha1
250
read 10485760/10485760 bytes at offset 3298534883328
251
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
252
253
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/luks-cast5-128-cbc-plain64-sha1.img', fmt=luks size=4398046
254
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-cast5-128-cbc-plain64-sha1.img qiotest-145-cast5-128-cbc-plain64-sha1
255
# Write test pattern 0xa7
256
sudo chown UID:GID /dev/mapper/qiotest-145-cast5-128-cbc-plain64-sha1
257
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-cast5-128-cbc-plain64-sha1
258
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-cast5-128-cbc-plain64-sha1
259
wrote 10485760/10485760 bytes at offset 104857600
260
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
261
262
# Write test pattern 0x13
263
sudo chown UID:GID /dev/mapper/qiotest-145-cast5-128-cbc-plain64-sha1
264
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-cast5-128-cbc-plain64-sha1
265
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-cast5-128-cbc-plain64-sha1
266
wrote 10485760/10485760 bytes at offset 3298534883328
267
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
268
269
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
270
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-cast5-128-cbc-plain64-sha1.img qiotest-145-cast5-128-cbc-plain64-sha1
271
# Read test pattern 0x91
272
sudo chown UID:GID /dev/mapper/qiotest-145-cast5-128-cbc-plain64-sha1
273
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-cast5-128-cbc-plain64-sha1
274
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-cast5-128-cbc-plain64-sha1
275
read 10485760/10485760 bytes at offset 104857600
276
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
277
278
# Read test pattern 0x5e
279
sudo chown UID:GID /dev/mapper/qiotest-145-cast5-128-cbc-plain64-sha1
280
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-cast5-128-cbc-plain64-sha1
281
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-cast5-128-cbc-plain64-sha1
282
read 10485760/10485760 bytes at offset 3298534883328
283
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
284
285
@@ -XXX,XX +XXX,XX @@ sudo cryptsetup -q -v luksFormat --cipher aes-cbc-plain --key-size 256 --hash sh
286
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-plain-sha1.img qiotest-145-aes-256-cbc-plain-sha1
287
# Write test pattern 0xa7
288
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain-sha1
289
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain-sha1
290
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain-sha1
291
wrote 10485760/10485760 bytes at offset 104857600
292
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
293
294
# Write test pattern 0x13
295
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain-sha1
296
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain-sha1
297
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain-sha1
298
wrote 10485760/10485760 bytes at offset 3298534883328
299
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
300
301
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
302
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-plain-sha1.img qiotest-145-aes-256-cbc-plain-sha1
303
# Read test pattern 0x91
304
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain-sha1
305
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain-sha1
306
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain-sha1
307
read 10485760/10485760 bytes at offset 104857600
308
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
309
310
# Read test pattern 0x5e
311
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain-sha1
312
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain-sha1
313
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain-sha1
314
read 10485760/10485760 bytes at offset 3298534883328
315
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
316
317
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/luks-aes-256-cbc-plain-sha1.img', fmt=luks size=43980465111
318
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-plain-sha1.img qiotest-145-aes-256-cbc-plain-sha1
319
# Write test pattern 0xa7
320
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain-sha1
321
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain-sha1
322
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain-sha1
323
wrote 10485760/10485760 bytes at offset 104857600
324
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
325
326
# Write test pattern 0x13
327
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain-sha1
328
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain-sha1
329
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain-sha1
330
wrote 10485760/10485760 bytes at offset 3298534883328
331
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
332
333
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
334
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-plain-sha1.img qiotest-145-aes-256-cbc-plain-sha1
335
# Read test pattern 0x91
336
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain-sha1
337
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain-sha1
338
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain-sha1
339
read 10485760/10485760 bytes at offset 104857600
340
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
341
342
# Read test pattern 0x5e
343
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain-sha1
344
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain-sha1
345
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain-sha1
346
read 10485760/10485760 bytes at offset 3298534883328
347
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
348
349
@@ -XXX,XX +XXX,XX @@ sudo cryptsetup -q -v luksFormat --cipher aes-cbc-plain64 --key-size 256 --hash
350
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-plain64-sha1.img qiotest-145-aes-256-cbc-plain64-sha1
351
# Write test pattern 0xa7
352
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain64-sha1
353
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha1
354
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha1
355
wrote 10485760/10485760 bytes at offset 104857600
356
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
357
358
# Write test pattern 0x13
359
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain64-sha1
360
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha1
361
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha1
362
wrote 10485760/10485760 bytes at offset 3298534883328
363
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
364
365
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
366
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-plain64-sha1.img qiotest-145-aes-256-cbc-plain64-sha1
367
# Read test pattern 0x91
368
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain64-sha1
369
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha1
370
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha1
371
read 10485760/10485760 bytes at offset 104857600
372
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
373
374
# Read test pattern 0x5e
375
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain64-sha1
376
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha1
377
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha1
378
read 10485760/10485760 bytes at offset 3298534883328
379
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
380
381
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/luks-aes-256-cbc-plain64-sha1.img', fmt=luks size=439804651
382
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-plain64-sha1.img qiotest-145-aes-256-cbc-plain64-sha1
383
# Write test pattern 0xa7
384
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain64-sha1
385
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha1
386
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha1
387
wrote 10485760/10485760 bytes at offset 104857600
388
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
389
390
# Write test pattern 0x13
391
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain64-sha1
392
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha1
393
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha1
394
wrote 10485760/10485760 bytes at offset 3298534883328
395
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
396
397
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
398
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-plain64-sha1.img qiotest-145-aes-256-cbc-plain64-sha1
399
# Read test pattern 0x91
400
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain64-sha1
401
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha1
402
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha1
403
read 10485760/10485760 bytes at offset 104857600
404
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
405
406
# Read test pattern 0x5e
407
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain64-sha1
408
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha1
409
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha1
410
read 10485760/10485760 bytes at offset 3298534883328
411
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
412
413
@@ -XXX,XX +XXX,XX @@ sudo cryptsetup -q -v luksFormat --cipher aes-cbc-essiv:sha256 --key-size 256 --
414
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-essiv-sha256-sha1.img qiotest-145-aes-256-cbc-essiv-sha256-sha1
415
# Write test pattern 0xa7
416
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-essiv-sha256-sha1
417
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-sha256-sha1
418
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-sha256-sha1
419
wrote 10485760/10485760 bytes at offset 104857600
420
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
421
422
# Write test pattern 0x13
423
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-essiv-sha256-sha1
424
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-sha256-sha1
425
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-sha256-sha1
426
wrote 10485760/10485760 bytes at offset 3298534883328
427
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
428
429
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
430
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-essiv-sha256-sha1.img qiotest-145-aes-256-cbc-essiv-sha256-sha1
431
# Read test pattern 0x91
432
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-essiv-sha256-sha1
433
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-sha256-sha1
434
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-sha256-sha1
435
read 10485760/10485760 bytes at offset 104857600
436
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
437
438
# Read test pattern 0x5e
439
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-essiv-sha256-sha1
440
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-sha256-sha1
441
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-sha256-sha1
442
read 10485760/10485760 bytes at offset 3298534883328
443
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
444
445
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/luks-aes-256-cbc-essiv-sha256-sha1.img', fmt=luks size=4398
446
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-essiv-sha256-sha1.img qiotest-145-aes-256-cbc-essiv-sha256-sha1
447
# Write test pattern 0xa7
448
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-essiv-sha256-sha1
449
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-sha256-sha1
450
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-sha256-sha1
451
wrote 10485760/10485760 bytes at offset 104857600
452
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
453
454
# Write test pattern 0x13
455
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-essiv-sha256-sha1
456
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-sha256-sha1
457
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-sha256-sha1
458
wrote 10485760/10485760 bytes at offset 3298534883328
459
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
460
461
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
462
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-essiv-sha256-sha1.img qiotest-145-aes-256-cbc-essiv-sha256-sha1
463
# Read test pattern 0x91
464
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-essiv-sha256-sha1
465
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-sha256-sha1
466
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-sha256-sha1
467
read 10485760/10485760 bytes at offset 104857600
468
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
469
470
# Read test pattern 0x5e
471
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-essiv-sha256-sha1
472
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-sha256-sha1
473
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-sha256-sha1
474
read 10485760/10485760 bytes at offset 3298534883328
475
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
476
477
@@ -XXX,XX +XXX,XX @@ sudo cryptsetup -q -v luksFormat --cipher aes-xts-essiv:sha256 --key-size 512 --
478
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-essiv-sha256-sha1.img qiotest-145-aes-256-xts-essiv-sha256-sha1
479
# Write test pattern 0xa7
480
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-essiv-sha256-sha1
481
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-essiv-sha256-sha1
482
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-essiv-sha256-sha1
483
wrote 10485760/10485760 bytes at offset 104857600
484
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
485
486
# Write test pattern 0x13
487
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-essiv-sha256-sha1
488
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-essiv-sha256-sha1
489
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-essiv-sha256-sha1
490
wrote 10485760/10485760 bytes at offset 3298534883328
491
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
492
493
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
494
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-essiv-sha256-sha1.img qiotest-145-aes-256-xts-essiv-sha256-sha1
495
# Read test pattern 0x91
496
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-essiv-sha256-sha1
497
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-essiv-sha256-sha1
498
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-essiv-sha256-sha1
499
read 10485760/10485760 bytes at offset 104857600
500
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
501
502
# Read test pattern 0x5e
503
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-essiv-sha256-sha1
504
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-essiv-sha256-sha1
505
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-essiv-sha256-sha1
506
read 10485760/10485760 bytes at offset 3298534883328
507
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
508
509
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/luks-aes-256-xts-essiv-sha256-sha1.img', fmt=luks size=4398
510
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-essiv-sha256-sha1.img qiotest-145-aes-256-xts-essiv-sha256-sha1
511
# Write test pattern 0xa7
512
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-essiv-sha256-sha1
513
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-essiv-sha256-sha1
514
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-essiv-sha256-sha1
515
wrote 10485760/10485760 bytes at offset 104857600
516
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
517
518
# Write test pattern 0x13
519
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-essiv-sha256-sha1
520
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-essiv-sha256-sha1
521
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-essiv-sha256-sha1
522
wrote 10485760/10485760 bytes at offset 3298534883328
523
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
524
525
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
526
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-essiv-sha256-sha1.img qiotest-145-aes-256-xts-essiv-sha256-sha1
527
# Read test pattern 0x91
528
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-essiv-sha256-sha1
529
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-essiv-sha256-sha1
530
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-essiv-sha256-sha1
531
read 10485760/10485760 bytes at offset 104857600
532
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
533
534
# Read test pattern 0x5e
535
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-essiv-sha256-sha1
536
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-essiv-sha256-sha1
537
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-essiv-sha256-sha1
538
read 10485760/10485760 bytes at offset 3298534883328
539
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
540
541
@@ -XXX,XX +XXX,XX @@ sudo cryptsetup -q -v luksFormat --cipher aes-xts-plain64 --key-size 256 --hash
542
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-128-xts-plain64-sha256-sha1.img qiotest-145-aes-128-xts-plain64-sha256-sha1
543
# Write test pattern 0xa7
544
sudo chown UID:GID /dev/mapper/qiotest-145-aes-128-xts-plain64-sha256-sha1
545
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-128-xts-plain64-sha256-sha1
546
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-128-xts-plain64-sha256-sha1
547
wrote 10485760/10485760 bytes at offset 104857600
548
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
549
550
# Write test pattern 0x13
551
sudo chown UID:GID /dev/mapper/qiotest-145-aes-128-xts-plain64-sha256-sha1
552
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-128-xts-plain64-sha256-sha1
553
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-128-xts-plain64-sha256-sha1
554
wrote 10485760/10485760 bytes at offset 3298534883328
555
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
556
557
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
558
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-128-xts-plain64-sha256-sha1.img qiotest-145-aes-128-xts-plain64-sha256-sha1
559
# Read test pattern 0x91
560
sudo chown UID:GID /dev/mapper/qiotest-145-aes-128-xts-plain64-sha256-sha1
561
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-128-xts-plain64-sha256-sha1
562
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-128-xts-plain64-sha256-sha1
563
read 10485760/10485760 bytes at offset 104857600
564
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
565
566
# Read test pattern 0x5e
567
sudo chown UID:GID /dev/mapper/qiotest-145-aes-128-xts-plain64-sha256-sha1
568
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-128-xts-plain64-sha256-sha1
569
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-128-xts-plain64-sha256-sha1
570
read 10485760/10485760 bytes at offset 3298534883328
571
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
572
573
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/luks-aes-128-xts-plain64-sha256-sha1.img', fmt=luks size=43
574
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-128-xts-plain64-sha256-sha1.img qiotest-145-aes-128-xts-plain64-sha256-sha1
575
# Write test pattern 0xa7
576
sudo chown UID:GID /dev/mapper/qiotest-145-aes-128-xts-plain64-sha256-sha1
577
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-128-xts-plain64-sha256-sha1
578
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-128-xts-plain64-sha256-sha1
579
wrote 10485760/10485760 bytes at offset 104857600
580
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
581
582
# Write test pattern 0x13
583
sudo chown UID:GID /dev/mapper/qiotest-145-aes-128-xts-plain64-sha256-sha1
584
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-128-xts-plain64-sha256-sha1
585
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-128-xts-plain64-sha256-sha1
586
wrote 10485760/10485760 bytes at offset 3298534883328
587
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
588
589
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
590
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-128-xts-plain64-sha256-sha1.img qiotest-145-aes-128-xts-plain64-sha256-sha1
591
# Read test pattern 0x91
592
sudo chown UID:GID /dev/mapper/qiotest-145-aes-128-xts-plain64-sha256-sha1
593
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-128-xts-plain64-sha256-sha1
594
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-128-xts-plain64-sha256-sha1
595
read 10485760/10485760 bytes at offset 104857600
596
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
597
598
# Read test pattern 0x5e
599
sudo chown UID:GID /dev/mapper/qiotest-145-aes-128-xts-plain64-sha256-sha1
600
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-128-xts-plain64-sha256-sha1
601
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-128-xts-plain64-sha256-sha1
602
read 10485760/10485760 bytes at offset 3298534883328
603
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
604
605
@@ -XXX,XX +XXX,XX @@ sudo cryptsetup -q -v luksFormat --cipher aes-xts-plain64 --key-size 384 --hash
606
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-192-xts-plain64-sha256-sha1.img qiotest-145-aes-192-xts-plain64-sha256-sha1
607
# Write test pattern 0xa7
608
sudo chown UID:GID /dev/mapper/qiotest-145-aes-192-xts-plain64-sha256-sha1
609
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-192-xts-plain64-sha256-sha1
610
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-192-xts-plain64-sha256-sha1
611
wrote 10485760/10485760 bytes at offset 104857600
612
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
613
614
# Write test pattern 0x13
615
sudo chown UID:GID /dev/mapper/qiotest-145-aes-192-xts-plain64-sha256-sha1
616
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-192-xts-plain64-sha256-sha1
617
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-192-xts-plain64-sha256-sha1
618
wrote 10485760/10485760 bytes at offset 3298534883328
619
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
620
621
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
622
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-192-xts-plain64-sha256-sha1.img qiotest-145-aes-192-xts-plain64-sha256-sha1
623
# Read test pattern 0x91
624
sudo chown UID:GID /dev/mapper/qiotest-145-aes-192-xts-plain64-sha256-sha1
625
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-192-xts-plain64-sha256-sha1
626
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-192-xts-plain64-sha256-sha1
627
read 10485760/10485760 bytes at offset 104857600
628
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
629
630
# Read test pattern 0x5e
631
sudo chown UID:GID /dev/mapper/qiotest-145-aes-192-xts-plain64-sha256-sha1
632
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-192-xts-plain64-sha256-sha1
633
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-192-xts-plain64-sha256-sha1
634
read 10485760/10485760 bytes at offset 3298534883328
635
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
636
637
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/luks-aes-192-xts-plain64-sha256-sha1.img', fmt=luks size=43
638
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-192-xts-plain64-sha256-sha1.img qiotest-145-aes-192-xts-plain64-sha256-sha1
639
# Write test pattern 0xa7
640
sudo chown UID:GID /dev/mapper/qiotest-145-aes-192-xts-plain64-sha256-sha1
641
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-192-xts-plain64-sha256-sha1
642
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-192-xts-plain64-sha256-sha1
643
wrote 10485760/10485760 bytes at offset 104857600
644
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
645
646
# Write test pattern 0x13
647
sudo chown UID:GID /dev/mapper/qiotest-145-aes-192-xts-plain64-sha256-sha1
648
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-192-xts-plain64-sha256-sha1
649
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-192-xts-plain64-sha256-sha1
650
wrote 10485760/10485760 bytes at offset 3298534883328
651
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
652
653
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
654
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-192-xts-plain64-sha256-sha1.img qiotest-145-aes-192-xts-plain64-sha256-sha1
655
# Read test pattern 0x91
656
sudo chown UID:GID /dev/mapper/qiotest-145-aes-192-xts-plain64-sha256-sha1
657
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-192-xts-plain64-sha256-sha1
658
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-192-xts-plain64-sha256-sha1
659
read 10485760/10485760 bytes at offset 104857600
660
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
661
662
# Read test pattern 0x5e
663
sudo chown UID:GID /dev/mapper/qiotest-145-aes-192-xts-plain64-sha256-sha1
664
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-192-xts-plain64-sha256-sha1
665
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-192-xts-plain64-sha256-sha1
666
read 10485760/10485760 bytes at offset 3298534883328
667
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
668
669
@@ -XXX,XX +XXX,XX @@ sudo cryptsetup -q -v luksFormat --cipher twofish-xts-plain64 --key-size 256 --h
670
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-twofish-128-xts-plain64-sha1.img qiotest-145-twofish-128-xts-plain64-sha1
671
# Write test pattern 0xa7
672
sudo chown UID:GID /dev/mapper/qiotest-145-twofish-128-xts-plain64-sha1
673
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-twofish-128-xts-plain64-sha1
674
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-twofish-128-xts-plain64-sha1
675
wrote 10485760/10485760 bytes at offset 104857600
676
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
677
678
# Write test pattern 0x13
679
sudo chown UID:GID /dev/mapper/qiotest-145-twofish-128-xts-plain64-sha1
680
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-twofish-128-xts-plain64-sha1
681
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-twofish-128-xts-plain64-sha1
682
wrote 10485760/10485760 bytes at offset 3298534883328
683
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
684
685
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
686
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-twofish-128-xts-plain64-sha1.img qiotest-145-twofish-128-xts-plain64-sha1
687
# Read test pattern 0x91
688
sudo chown UID:GID /dev/mapper/qiotest-145-twofish-128-xts-plain64-sha1
689
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-twofish-128-xts-plain64-sha1
690
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-twofish-128-xts-plain64-sha1
691
read 10485760/10485760 bytes at offset 104857600
692
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
693
694
# Read test pattern 0x5e
695
sudo chown UID:GID /dev/mapper/qiotest-145-twofish-128-xts-plain64-sha1
696
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-twofish-128-xts-plain64-sha1
697
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-twofish-128-xts-plain64-sha1
698
read 10485760/10485760 bytes at offset 3298534883328
699
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
700
701
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/luks-twofish-128-xts-plain64-sha1.img', fmt=luks size=43980
702
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-twofish-128-xts-plain64-sha1.img qiotest-145-twofish-128-xts-plain64-sha1
703
# Write test pattern 0xa7
704
sudo chown UID:GID /dev/mapper/qiotest-145-twofish-128-xts-plain64-sha1
705
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-twofish-128-xts-plain64-sha1
706
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-twofish-128-xts-plain64-sha1
707
wrote 10485760/10485760 bytes at offset 104857600
708
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
709
710
# Write test pattern 0x13
711
sudo chown UID:GID /dev/mapper/qiotest-145-twofish-128-xts-plain64-sha1
712
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-twofish-128-xts-plain64-sha1
713
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-twofish-128-xts-plain64-sha1
714
wrote 10485760/10485760 bytes at offset 3298534883328
715
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
716
717
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
718
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-twofish-128-xts-plain64-sha1.img qiotest-145-twofish-128-xts-plain64-sha1
719
# Read test pattern 0x91
720
sudo chown UID:GID /dev/mapper/qiotest-145-twofish-128-xts-plain64-sha1
721
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-twofish-128-xts-plain64-sha1
722
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-twofish-128-xts-plain64-sha1
723
read 10485760/10485760 bytes at offset 104857600
724
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
725
726
# Read test pattern 0x5e
727
sudo chown UID:GID /dev/mapper/qiotest-145-twofish-128-xts-plain64-sha1
728
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-twofish-128-xts-plain64-sha1
729
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-twofish-128-xts-plain64-sha1
730
read 10485760/10485760 bytes at offset 3298534883328
731
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
732
733
@@ -XXX,XX +XXX,XX @@ sudo cryptsetup -q -v luksFormat --cipher serpent-xts-plain64 --key-size 256 --h
734
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-serpent-128-xts-plain64-sha1.img qiotest-145-serpent-128-xts-plain64-sha1
735
# Write test pattern 0xa7
736
sudo chown UID:GID /dev/mapper/qiotest-145-serpent-128-xts-plain64-sha1
737
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-serpent-128-xts-plain64-sha1
738
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-serpent-128-xts-plain64-sha1
739
wrote 10485760/10485760 bytes at offset 104857600
740
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
741
742
# Write test pattern 0x13
743
sudo chown UID:GID /dev/mapper/qiotest-145-serpent-128-xts-plain64-sha1
744
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-serpent-128-xts-plain64-sha1
745
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-serpent-128-xts-plain64-sha1
746
wrote 10485760/10485760 bytes at offset 3298534883328
747
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
748
749
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
750
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-serpent-128-xts-plain64-sha1.img qiotest-145-serpent-128-xts-plain64-sha1
751
# Read test pattern 0x91
752
sudo chown UID:GID /dev/mapper/qiotest-145-serpent-128-xts-plain64-sha1
753
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-serpent-128-xts-plain64-sha1
754
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-serpent-128-xts-plain64-sha1
755
read 10485760/10485760 bytes at offset 104857600
756
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
757
758
# Read test pattern 0x5e
759
sudo chown UID:GID /dev/mapper/qiotest-145-serpent-128-xts-plain64-sha1
760
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-serpent-128-xts-plain64-sha1
761
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-serpent-128-xts-plain64-sha1
762
read 10485760/10485760 bytes at offset 3298534883328
763
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
764
765
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/luks-serpent-128-xts-plain64-sha1.img', fmt=luks size=43980
766
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-serpent-128-xts-plain64-sha1.img qiotest-145-serpent-128-xts-plain64-sha1
767
# Write test pattern 0xa7
768
sudo chown UID:GID /dev/mapper/qiotest-145-serpent-128-xts-plain64-sha1
769
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-serpent-128-xts-plain64-sha1
770
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-serpent-128-xts-plain64-sha1
771
wrote 10485760/10485760 bytes at offset 104857600
772
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
773
774
# Write test pattern 0x13
775
sudo chown UID:GID /dev/mapper/qiotest-145-serpent-128-xts-plain64-sha1
776
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-serpent-128-xts-plain64-sha1
777
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-serpent-128-xts-plain64-sha1
778
wrote 10485760/10485760 bytes at offset 3298534883328
779
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
780
781
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
782
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-serpent-128-xts-plain64-sha1.img qiotest-145-serpent-128-xts-plain64-sha1
783
# Read test pattern 0x91
784
sudo chown UID:GID /dev/mapper/qiotest-145-serpent-128-xts-plain64-sha1
785
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-serpent-128-xts-plain64-sha1
786
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-serpent-128-xts-plain64-sha1
787
read 10485760/10485760 bytes at offset 104857600
788
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
789
790
# Read test pattern 0x5e
791
sudo chown UID:GID /dev/mapper/qiotest-145-serpent-128-xts-plain64-sha1
792
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-serpent-128-xts-plain64-sha1
793
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-serpent-128-xts-plain64-sha1
794
read 10485760/10485760 bytes at offset 3298534883328
795
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
796
797
@@ -XXX,XX +XXX,XX @@ sudo cryptsetup -q -v luksFormat --cipher serpent-xts-plain64 --key-size 384 --h
798
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-serpent-192-xts-plain64-sha1.img qiotest-145-serpent-192-xts-plain64-sha1
799
# Write test pattern 0xa7
800
sudo chown UID:GID /dev/mapper/qiotest-145-serpent-192-xts-plain64-sha1
801
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-serpent-192-xts-plain64-sha1
802
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-serpent-192-xts-plain64-sha1
803
wrote 10485760/10485760 bytes at offset 104857600
804
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
805
806
# Write test pattern 0x13
807
sudo chown UID:GID /dev/mapper/qiotest-145-serpent-192-xts-plain64-sha1
808
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-serpent-192-xts-plain64-sha1
809
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-serpent-192-xts-plain64-sha1
810
wrote 10485760/10485760 bytes at offset 3298534883328
811
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
812
813
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
814
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-serpent-192-xts-plain64-sha1.img qiotest-145-serpent-192-xts-plain64-sha1
815
# Read test pattern 0x91
816
sudo chown UID:GID /dev/mapper/qiotest-145-serpent-192-xts-plain64-sha1
817
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-serpent-192-xts-plain64-sha1
818
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-serpent-192-xts-plain64-sha1
819
read 10485760/10485760 bytes at offset 104857600
820
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
821
822
# Read test pattern 0x5e
823
sudo chown UID:GID /dev/mapper/qiotest-145-serpent-192-xts-plain64-sha1
824
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-serpent-192-xts-plain64-sha1
825
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-serpent-192-xts-plain64-sha1
826
read 10485760/10485760 bytes at offset 3298534883328
827
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
828
829
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/luks-serpent-192-xts-plain64-sha1.img', fmt=luks size=43980
830
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-serpent-192-xts-plain64-sha1.img qiotest-145-serpent-192-xts-plain64-sha1
831
# Write test pattern 0xa7
832
sudo chown UID:GID /dev/mapper/qiotest-145-serpent-192-xts-plain64-sha1
833
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-serpent-192-xts-plain64-sha1
834
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-serpent-192-xts-plain64-sha1
835
wrote 10485760/10485760 bytes at offset 104857600
836
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
837
838
# Write test pattern 0x13
839
sudo chown UID:GID /dev/mapper/qiotest-145-serpent-192-xts-plain64-sha1
840
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-serpent-192-xts-plain64-sha1
841
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-serpent-192-xts-plain64-sha1
842
wrote 10485760/10485760 bytes at offset 3298534883328
843
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
844
845
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
846
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-serpent-192-xts-plain64-sha1.img qiotest-145-serpent-192-xts-plain64-sha1
847
# Read test pattern 0x91
848
sudo chown UID:GID /dev/mapper/qiotest-145-serpent-192-xts-plain64-sha1
849
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-serpent-192-xts-plain64-sha1
850
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-serpent-192-xts-plain64-sha1
851
read 10485760/10485760 bytes at offset 104857600
852
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
853
854
# Read test pattern 0x5e
855
sudo chown UID:GID /dev/mapper/qiotest-145-serpent-192-xts-plain64-sha1
856
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-serpent-192-xts-plain64-sha1
857
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-serpent-192-xts-plain64-sha1
858
read 10485760/10485760 bytes at offset 3298534883328
859
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
860
861
@@ -XXX,XX +XXX,XX @@ sudo cryptsetup -q -v luksFormat --cipher aes-xts-plain64 --key-size 512 --hash
862
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha224.img qiotest-145-aes-256-xts-plain64-sha224
863
# Write test pattern 0xa7
864
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha224
865
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha224
866
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha224
867
wrote 10485760/10485760 bytes at offset 104857600
868
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
869
870
# Write test pattern 0x13
871
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha224
872
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha224
873
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha224
874
wrote 10485760/10485760 bytes at offset 3298534883328
875
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
876
877
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
878
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha224.img qiotest-145-aes-256-xts-plain64-sha224
879
# Read test pattern 0x91
880
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha224
881
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha224
882
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha224
883
read 10485760/10485760 bytes at offset 104857600
884
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
885
886
# Read test pattern 0x5e
887
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha224
888
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha224
889
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha224
890
read 10485760/10485760 bytes at offset 3298534883328
891
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
892
893
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/luks-aes-256-xts-plain64-sha224.img', fmt=luks size=4398046
894
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha224.img qiotest-145-aes-256-xts-plain64-sha224
895
# Write test pattern 0xa7
896
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha224
897
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha224
898
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha224
899
wrote 10485760/10485760 bytes at offset 104857600
900
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
901
902
# Write test pattern 0x13
903
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha224
904
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha224
905
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha224
906
wrote 10485760/10485760 bytes at offset 3298534883328
907
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
908
909
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
910
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha224.img qiotest-145-aes-256-xts-plain64-sha224
911
# Read test pattern 0x91
912
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha224
913
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha224
914
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha224
915
read 10485760/10485760 bytes at offset 104857600
916
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
917
918
# Read test pattern 0x5e
919
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha224
920
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha224
921
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha224
922
read 10485760/10485760 bytes at offset 3298534883328
923
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
924
925
@@ -XXX,XX +XXX,XX @@ sudo cryptsetup -q -v luksFormat --cipher aes-xts-plain64 --key-size 512 --hash
926
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha256.img qiotest-145-aes-256-xts-plain64-sha256
927
# Write test pattern 0xa7
928
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha256
929
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha256
930
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha256
931
wrote 10485760/10485760 bytes at offset 104857600
932
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
933
934
# Write test pattern 0x13
935
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha256
936
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha256
937
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha256
938
wrote 10485760/10485760 bytes at offset 3298534883328
939
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
940
941
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
942
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha256.img qiotest-145-aes-256-xts-plain64-sha256
943
# Read test pattern 0x91
944
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha256
945
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha256
946
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha256
947
read 10485760/10485760 bytes at offset 104857600
948
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
949
950
# Read test pattern 0x5e
951
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha256
952
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha256
953
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha256
954
read 10485760/10485760 bytes at offset 3298534883328
955
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
956
957
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/luks-aes-256-xts-plain64-sha256.img', fmt=luks size=4398046
958
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha256.img qiotest-145-aes-256-xts-plain64-sha256
959
# Write test pattern 0xa7
960
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha256
961
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha256
962
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha256
963
wrote 10485760/10485760 bytes at offset 104857600
964
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
965
966
# Write test pattern 0x13
967
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha256
968
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha256
969
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha256
970
wrote 10485760/10485760 bytes at offset 3298534883328
971
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
972
973
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
974
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha256.img qiotest-145-aes-256-xts-plain64-sha256
975
# Read test pattern 0x91
976
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha256
977
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha256
978
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha256
979
read 10485760/10485760 bytes at offset 104857600
980
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
981
982
# Read test pattern 0x5e
983
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha256
984
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha256
985
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha256
986
read 10485760/10485760 bytes at offset 3298534883328
987
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
988
989
@@ -XXX,XX +XXX,XX @@ sudo cryptsetup -q -v luksFormat --cipher aes-xts-plain64 --key-size 512 --hash
990
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha384.img qiotest-145-aes-256-xts-plain64-sha384
991
# Write test pattern 0xa7
992
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha384
993
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha384
994
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha384
995
wrote 10485760/10485760 bytes at offset 104857600
996
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
997
998
# Write test pattern 0x13
999
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha384
1000
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha384
1001
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha384
1002
wrote 10485760/10485760 bytes at offset 3298534883328
1003
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1004
1005
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
1006
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha384.img qiotest-145-aes-256-xts-plain64-sha384
1007
# Read test pattern 0x91
1008
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha384
1009
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha384
1010
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha384
1011
read 10485760/10485760 bytes at offset 104857600
1012
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1013
1014
# Read test pattern 0x5e
1015
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha384
1016
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha384
1017
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha384
1018
read 10485760/10485760 bytes at offset 3298534883328
1019
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1020
1021
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/luks-aes-256-xts-plain64-sha384.img', fmt=luks size=4398046
1022
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha384.img qiotest-145-aes-256-xts-plain64-sha384
1023
# Write test pattern 0xa7
1024
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha384
1025
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha384
1026
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha384
1027
wrote 10485760/10485760 bytes at offset 104857600
1028
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1029
1030
# Write test pattern 0x13
1031
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha384
1032
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha384
1033
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha384
1034
wrote 10485760/10485760 bytes at offset 3298534883328
1035
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1036
1037
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
1038
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha384.img qiotest-145-aes-256-xts-plain64-sha384
1039
# Read test pattern 0x91
1040
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha384
1041
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha384
1042
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha384
1043
read 10485760/10485760 bytes at offset 104857600
1044
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1045
1046
# Read test pattern 0x5e
1047
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha384
1048
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha384
1049
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha384
1050
read 10485760/10485760 bytes at offset 3298534883328
1051
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1052
1053
@@ -XXX,XX +XXX,XX @@ sudo cryptsetup -q -v luksFormat --cipher aes-xts-plain64 --key-size 512 --hash
1054
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha512.img qiotest-145-aes-256-xts-plain64-sha512
1055
# Write test pattern 0xa7
1056
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha512
1057
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha512
1058
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha512
1059
wrote 10485760/10485760 bytes at offset 104857600
1060
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1061
1062
# Write test pattern 0x13
1063
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha512
1064
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha512
1065
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha512
1066
wrote 10485760/10485760 bytes at offset 3298534883328
1067
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1068
1069
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
1070
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha512.img qiotest-145-aes-256-xts-plain64-sha512
1071
# Read test pattern 0x91
1072
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha512
1073
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha512
1074
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha512
1075
read 10485760/10485760 bytes at offset 104857600
1076
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1077
1078
# Read test pattern 0x5e
1079
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha512
1080
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha512
1081
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha512
1082
read 10485760/10485760 bytes at offset 3298534883328
1083
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1084
1085
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/luks-aes-256-xts-plain64-sha512.img', fmt=luks size=4398046
1086
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha512.img qiotest-145-aes-256-xts-plain64-sha512
1087
# Write test pattern 0xa7
1088
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha512
1089
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha512
1090
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha512
1091
wrote 10485760/10485760 bytes at offset 104857600
1092
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1093
1094
# Write test pattern 0x13
1095
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha512
1096
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha512
1097
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha512
1098
wrote 10485760/10485760 bytes at offset 3298534883328
1099
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1100
1101
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
1102
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-sha512.img qiotest-145-aes-256-xts-plain64-sha512
1103
# Read test pattern 0x91
1104
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha512
1105
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha512
1106
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha512
1107
read 10485760/10485760 bytes at offset 104857600
1108
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1109
1110
# Read test pattern 0x5e
1111
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-sha512
1112
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha512
1113
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-sha512
1114
read 10485760/10485760 bytes at offset 3298534883328
1115
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1116
1117
@@ -XXX,XX +XXX,XX @@ sudo cryptsetup -q -v luksFormat --cipher aes-xts-plain64 --key-size 512 --hash
1118
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-ripemd160.img qiotest-145-aes-256-xts-plain64-ripemd160
1119
# Write test pattern 0xa7
1120
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-ripemd160
1121
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-ripemd160
1122
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-ripemd160
1123
wrote 10485760/10485760 bytes at offset 104857600
1124
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1125
1126
# Write test pattern 0x13
1127
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-ripemd160
1128
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-ripemd160
1129
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-ripemd160
1130
wrote 10485760/10485760 bytes at offset 3298534883328
1131
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1132
1133
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
1134
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-ripemd160.img qiotest-145-aes-256-xts-plain64-ripemd160
1135
# Read test pattern 0x91
1136
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-ripemd160
1137
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-ripemd160
1138
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-ripemd160
1139
read 10485760/10485760 bytes at offset 104857600
1140
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1141
1142
# Read test pattern 0x5e
1143
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-ripemd160
1144
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-ripemd160
1145
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-ripemd160
1146
read 10485760/10485760 bytes at offset 3298534883328
1147
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1148
1149
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/luks-aes-256-xts-plain64-ripemd160.img', fmt=luks size=4398
1150
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-ripemd160.img qiotest-145-aes-256-xts-plain64-ripemd160
1151
# Write test pattern 0xa7
1152
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-ripemd160
1153
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-ripemd160
1154
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-ripemd160
1155
wrote 10485760/10485760 bytes at offset 104857600
1156
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1157
1158
# Write test pattern 0x13
1159
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-ripemd160
1160
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-ripemd160
1161
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-ripemd160
1162
wrote 10485760/10485760 bytes at offset 3298534883328
1163
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1164
1165
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
1166
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain64-ripemd160.img qiotest-145-aes-256-xts-plain64-ripemd160
1167
# Read test pattern 0x91
1168
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-ripemd160
1169
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-ripemd160
1170
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-ripemd160
1171
read 10485760/10485760 bytes at offset 104857600
1172
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1173
1174
# Read test pattern 0x5e
1175
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain64-ripemd160
1176
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-ripemd160
1177
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain64-ripemd160
1178
read 10485760/10485760 bytes at offset 3298534883328
1179
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1180
1181
@@ -XXX,XX +XXX,XX @@ sudo cryptsetup -q -v luksFormat --cipher aes-xts-plain --key-size 512 --hash sh
1182
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain-sha1-pwslot3.img qiotest-145-aes-256-xts-plain-sha1-pwslot3
1183
# Write test pattern 0xa7
1184
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwslot3
1185
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwslot3
1186
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwslot3
1187
wrote 10485760/10485760 bytes at offset 104857600
1188
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1189
1190
# Write test pattern 0x13
1191
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwslot3
1192
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwslot3
1193
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwslot3
1194
wrote 10485760/10485760 bytes at offset 3298534883328
1195
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1196
1197
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
1198
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain-sha1-pwslot3.img qiotest-145-aes-256-xts-plain-sha1-pwslot3
1199
# Read test pattern 0x91
1200
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwslot3
1201
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwslot3
1202
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwslot3
1203
read 10485760/10485760 bytes at offset 104857600
1204
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1205
1206
# Read test pattern 0x5e
1207
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwslot3
1208
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwslot3
1209
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwslot3
1210
read 10485760/10485760 bytes at offset 3298534883328
1211
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1212
1213
@@ -XXX,XX +XXX,XX @@ sudo cryptsetup -q -v luksAddKey TEST_DIR/luks-aes-256-xts-plain-sha1-pwallslots
1214
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain-sha1-pwallslots.img qiotest-145-aes-256-xts-plain-sha1-pwallslots
1215
# Write test pattern 0xa7
1216
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwallslots
1217
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwallslots
1218
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwallslots
1219
wrote 10485760/10485760 bytes at offset 104857600
1220
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1221
1222
# Write test pattern 0x13
1223
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwallslots
1224
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwallslots
1225
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwallslots
1226
wrote 10485760/10485760 bytes at offset 3298534883328
1227
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1228
1229
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
1230
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain-sha1-pwallslots.img qiotest-145-aes-256-xts-plain-sha1-pwallslots
1231
# Read test pattern 0x91
1232
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwallslots
1233
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwallslots
1234
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwallslots
1235
read 10485760/10485760 bytes at offset 104857600
1236
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1237
1238
# Read test pattern 0x5e
1239
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwallslots
1240
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwallslots
1241
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwallslots
1242
read 10485760/10485760 bytes at offset 3298534883328
1243
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1244
1245
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/luks-aes-256-xts-plain-sha1-pwallslots.img', fmt=luks size=
1246
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain-sha1-pwallslots.img qiotest-145-aes-256-xts-plain-sha1-pwallslots
1247
# Write test pattern 0xa7
1248
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwallslots
1249
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwallslots
1250
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwallslots
1251
wrote 10485760/10485760 bytes at offset 104857600
1252
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1253
1254
# Write test pattern 0x13
1255
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwallslots
1256
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwallslots
1257
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwallslots
1258
wrote 10485760/10485760 bytes at offset 3298534883328
1259
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1260
1261
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
1262
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-xts-plain-sha1-pwallslots.img qiotest-145-aes-256-xts-plain-sha1-pwallslots
1263
# Read test pattern 0x91
1264
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwallslots
1265
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwallslots
1266
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwallslots
1267
read 10485760/10485760 bytes at offset 104857600
1268
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1269
1270
# Read test pattern 0x5e
1271
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwallslots
1272
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwallslots
1273
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-xts-plain-sha1-pwallslots
1274
read 10485760/10485760 bytes at offset 3298534883328
1275
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1276
1277
@@ -XXX,XX +XXX,XX @@ sudo cryptsetup -q -v luksFormat --cipher aes-cbc-essiv:sha256 --key-size 256 --
1278
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img qiotest-145-aes-256-cbc-essiv-auto-sha1
1279
# Write test pattern 0xa7
1280
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
1281
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
1282
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
1283
wrote 10485760/10485760 bytes at offset 104857600
1284
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1285
1286
# Write test pattern 0x13
1287
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
1288
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
1289
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
1290
wrote 10485760/10485760 bytes at offset 3298534883328
1291
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1292
1293
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
1294
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img qiotest-145-aes-256-cbc-essiv-auto-sha1
1295
# Read test pattern 0x91
1296
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
1297
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
1298
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
1299
read 10485760/10485760 bytes at offset 104857600
1300
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1301
1302
# Read test pattern 0x5e
1303
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
1304
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
1305
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
1306
read 10485760/10485760 bytes at offset 3298534883328
1307
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1308
1309
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img', fmt=luks size=439804
1310
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img qiotest-145-aes-256-cbc-essiv-auto-sha1
1311
# Write test pattern 0xa7
1312
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
1313
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
1314
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
1315
wrote 10485760/10485760 bytes at offset 104857600
1316
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1317
1318
# Write test pattern 0x13
1319
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
1320
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
1321
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
1322
wrote 10485760/10485760 bytes at offset 3298534883328
1323
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1324
1325
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
1326
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img qiotest-145-aes-256-cbc-essiv-auto-sha1
1327
# Read test pattern 0x91
1328
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
1329
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
1330
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
1331
read 10485760/10485760 bytes at offset 104857600
1332
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1333
1334
# Read test pattern 0x5e
1335
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
1336
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
1337
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
1338
read 10485760/10485760 bytes at offset 3298534883328
1339
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1340
1341
@@ -XXX,XX +XXX,XX @@ sudo cryptsetup -q -v luksFormat --cipher aes-cbc-plain64:sha256 --key-size 256
1342
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img qiotest-145-aes-256-cbc-plain64-sha256-sha1
1343
# Write test pattern 0xa7
1344
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
1345
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
1346
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
1347
wrote 10485760/10485760 bytes at offset 104857600
1348
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1349
1350
# Write test pattern 0x13
1351
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
1352
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
1353
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
1354
wrote 10485760/10485760 bytes at offset 3298534883328
1355
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1356
1357
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
1358
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img qiotest-145-aes-256-cbc-plain64-sha256-sha1
1359
# Read test pattern 0x91
1360
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
1361
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
1362
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
1363
read 10485760/10485760 bytes at offset 104857600
1364
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1365
1366
# Read test pattern 0x5e
1367
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
1368
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
1369
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
1370
read 10485760/10485760 bytes at offset 3298534883328
1371
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1372
1373
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img', fmt=luks size=43
1374
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img qiotest-145-aes-256-cbc-plain64-sha256-sha1
1375
# Write test pattern 0xa7
1376
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
1377
-qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
1378
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
1379
wrote 10485760/10485760 bytes at offset 104857600
1380
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1381
1382
# Write test pattern 0x13
1383
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
1384
-qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
1385
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
1386
wrote 10485760/10485760 bytes at offset 3298534883328
1387
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1388
1389
@@ -XXX,XX +XXX,XX @@ wrote 10485760/10485760 bytes at offset 3298534883328
1390
sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img qiotest-145-aes-256-cbc-plain64-sha256-sha1
1391
# Read test pattern 0x91
1392
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
1393
-qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
1394
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
1395
read 10485760/10485760 bytes at offset 104857600
1396
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1397
1398
# Read test pattern 0x5e
1399
sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
1400
-qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
1401
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=host_device,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
1402
read 10485760/10485760 bytes at offset 3298534883328
1403
10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
1404
1405
--
210
--
1406
2.13.6
211
2.13.6
1407
212
1408
213
1
From: Peter Lieven <pl@kamp.de>
2
3
The min_sparse convert parameter can overflow (e.g. -S 1024G)
4
in the conversion from int64_t to int, resulting in a negative
5
min_sparse parameter. Avoid this by limiting the valid parameters
6
to sane values. In fact, anything exceeding the convert buffer size
7
is also pointless. While at it, also forbid values that are not a
8
multiple of 512 to avoid undesired behaviour. For instance, values
9
between 1 and 511 were legal, but resulted in full allocation.
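
As an illustration of the overflow, here is a standalone C sketch (editor-added,
not the actual qemu-img code; it assumes min_sparse is kept as a 512-byte sector
count in an int, and BDRV_SECTOR_SIZE is simply redefined locally with QEMU's
value) showing how -S 1024G can turn negative once the sector count is narrowed
to int:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BDRV_SECTOR_SIZE 512LL   /* same value as QEMU's macro */

    int main(void)
    {
        int64_t sval = 1024LL << 30;               /* -S 1024G: 2^40 bytes */
        int64_t sectors = sval / BDRV_SECTOR_SIZE; /* 2^31 sectors */
        int min_sparse = (int)sectors;             /* out of range for int:
                                                    * implementation-defined,
                                                    * INT_MIN (negative) on
                                                    * common targets */

        printf("sectors=%" PRId64 " min_sparse=%d\n", sectors, min_sparse);
        return 0;
    }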
10
11
Cc: qemu-stable@nongnu.org
12
Signed-off-by: Peter Lieven <pl@kamp.de>
13
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
1
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
14
---
2
---
15
qemu-img.c | 16 +++++++++++-----
3
tests/test-bdrv-drain.c | 80 +++++++++++++++++++++++++++++++++++++++++++++++++
16
1 file changed, 11 insertions(+), 5 deletions(-)
4
1 file changed, 80 insertions(+)
17
5
18
diff --git a/qemu-img.c b/qemu-img.c
6
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
19
index XXXXXXX..XXXXXXX 100644
7
index XXXXXXX..XXXXXXX 100644
20
--- a/qemu-img.c
8
--- a/tests/test-bdrv-drain.c
21
+++ b/qemu-img.c
9
+++ b/tests/test-bdrv-drain.c
22
@@ -XXX,XX +XXX,XX @@ static int convert_do_copy(ImgConvertState *s)
10
@@ -XXX,XX +XXX,XX @@ static void test_multiparent(void)
23
return s->ret;
11
blk_unref(blk_b);
24
}
12
}
25
13
26
+#define MAX_BUF_SECTORS 32768
14
+static void test_graph_change(void)
15
+{
16
+ BlockBackend *blk_a, *blk_b;
17
+ BlockDriverState *bs_a, *bs_b, *backing;
18
+ BDRVTestState *a_s, *b_s, *backing_s;
27
+
19
+
28
static int img_convert(int argc, char **argv)
20
+ blk_a = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
29
{
21
+ bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
30
int c, bs_i, flags, src_flags = 0;
22
+ &error_abort);
31
@@ -XXX,XX +XXX,XX @@ static int img_convert(int argc, char **argv)
23
+ a_s = bs_a->opaque;
32
int64_t sval;
24
+ blk_insert_bs(blk_a, bs_a, &error_abort);
33
25
+
34
sval = cvtnum(optarg);
26
+ blk_b = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
35
- if (sval < 0) {
27
+ bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
36
- error_report("Invalid minimum zero buffer size for sparse output specified");
28
+ &error_abort);
37
+ if (sval < 0 || sval & (BDRV_SECTOR_SIZE - 1) ||
29
+ b_s = bs_b->opaque;
38
+ sval / BDRV_SECTOR_SIZE > MAX_BUF_SECTORS) {
30
+ blk_insert_bs(blk_b, bs_b, &error_abort);
39
+ error_report("Invalid buffer size for sparse output specified. "
31
+
40
+ "Valid sizes are multiples of %llu up to %llu. Select "
32
+ backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
41
+ "0 to disable sparse detection (fully allocates output).",
33
+ backing_s = backing->opaque;
42
+ BDRV_SECTOR_SIZE, MAX_BUF_SECTORS * BDRV_SECTOR_SIZE);
34
+ bdrv_set_backing_hd(bs_a, backing, &error_abort);
43
goto fail_getopt;
35
+
44
}
36
+ g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
45
37
+ g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
46
@@ -XXX,XX +XXX,XX @@ static int img_convert(int argc, char **argv)
38
+ g_assert_cmpint(backing->quiesce_counter, ==, 0);
47
}
39
+ g_assert_cmpint(a_s->drain_count, ==, 0);
48
40
+ g_assert_cmpint(b_s->drain_count, ==, 0);
49
/* increase bufsectors from the default 4096 (2M) if opt_transfer
41
+ g_assert_cmpint(backing_s->drain_count, ==, 0);
50
- * or discard_alignment of the out_bs is greater. Limit to 32768 (16MB)
42
+
51
- * as maximum. */
43
+ do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
52
- s.buf_sectors = MIN(32768,
44
+ do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
53
+ * or discard_alignment of the out_bs is greater. Limit to
45
+ do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
54
+ * MAX_BUF_SECTORS as maximum which is currently 32768 (16MB). */
46
+ do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b);
55
+ s.buf_sectors = MIN(MAX_BUF_SECTORS,
47
+ do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b);
56
MAX(s.buf_sectors,
48
+
57
MAX(out_bs->bl.opt_transfer >> BDRV_SECTOR_BITS,
49
+ bdrv_set_backing_hd(bs_b, backing, &error_abort);
58
out_bs->bl.pdiscard_alignment >>
50
+ g_assert_cmpint(bs_a->quiesce_counter, ==, 5);
51
+ g_assert_cmpint(bs_b->quiesce_counter, ==, 5);
52
+ g_assert_cmpint(backing->quiesce_counter, ==, 5);
53
+ g_assert_cmpint(a_s->drain_count, ==, 5);
54
+ g_assert_cmpint(b_s->drain_count, ==, 5);
55
+ g_assert_cmpint(backing_s->drain_count, ==, 5);
56
+
57
+ bdrv_set_backing_hd(bs_b, NULL, &error_abort);
58
+ g_assert_cmpint(bs_a->quiesce_counter, ==, 3);
59
+ g_assert_cmpint(bs_b->quiesce_counter, ==, 2);
60
+ g_assert_cmpint(backing->quiesce_counter, ==, 3);
61
+ g_assert_cmpint(a_s->drain_count, ==, 3);
62
+ g_assert_cmpint(b_s->drain_count, ==, 2);
63
+ g_assert_cmpint(backing_s->drain_count, ==, 3);
64
+
65
+ bdrv_set_backing_hd(bs_b, backing, &error_abort);
66
+ g_assert_cmpint(bs_a->quiesce_counter, ==, 5);
67
+ g_assert_cmpint(bs_b->quiesce_counter, ==, 5);
68
+ g_assert_cmpint(backing->quiesce_counter, ==, 5);
69
+ g_assert_cmpint(a_s->drain_count, ==, 5);
70
+ g_assert_cmpint(b_s->drain_count, ==, 5);
71
+ g_assert_cmpint(backing_s->drain_count, ==, 5);
72
+
73
+ do_drain_end(BDRV_SUBTREE_DRAIN, bs_b);
74
+ do_drain_end(BDRV_SUBTREE_DRAIN, bs_b);
75
+ do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
76
+ do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
77
+ do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
78
+
79
+ g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
80
+ g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
81
+ g_assert_cmpint(backing->quiesce_counter, ==, 0);
82
+ g_assert_cmpint(a_s->drain_count, ==, 0);
83
+ g_assert_cmpint(b_s->drain_count, ==, 0);
84
+ g_assert_cmpint(backing_s->drain_count, ==, 0);
85
+
86
+ bdrv_unref(backing);
87
+ bdrv_unref(bs_a);
88
+ bdrv_unref(bs_b);
89
+ blk_unref(blk_a);
90
+ blk_unref(blk_b);
91
+}
92
+
93
94
typedef struct TestBlockJob {
95
BlockJob common;
96
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
97
98
g_test_add_func("/bdrv-drain/nested", test_nested);
99
g_test_add_func("/bdrv-drain/multiparent", test_multiparent);
100
+ g_test_add_func("/bdrv-drain/graph-change", test_graph_change);
101
102
g_test_add_func("/bdrv-drain/blockjob/drain_all", test_blockjob_drain_all);
103
g_test_add_func("/bdrv-drain/blockjob/drain", test_blockjob_drain);
59
--
104
--
60
2.13.6
105
2.13.6
61
106
62
107
New patch
1
Since commit bde70715, base is the only node that is reopened in
2
commit_start(). This means that the code, which still involves an
3
explicit BlockReopenQueue, can now be simplified by using bdrv_reopen().
1
4
5
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
6
Reviewed-by: Fam Zheng <famz@redhat.com>
7
---
8
block/commit.c | 8 +-------
9
1 file changed, 1 insertion(+), 7 deletions(-)
10
11
diff --git a/block/commit.c b/block/commit.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/block/commit.c
14
+++ b/block/commit.c
15
@@ -XXX,XX +XXX,XX @@ void commit_start(const char *job_id, BlockDriverState *bs,
16
const char *filter_node_name, Error **errp)
17
{
18
CommitBlockJob *s;
19
- BlockReopenQueue *reopen_queue = NULL;
20
int orig_base_flags;
21
BlockDriverState *iter;
22
BlockDriverState *commit_top_bs = NULL;
23
@@ -XXX,XX +XXX,XX @@ void commit_start(const char *job_id, BlockDriverState *bs,
24
/* convert base to r/w, if necessary */
25
orig_base_flags = bdrv_get_flags(base);
26
if (!(orig_base_flags & BDRV_O_RDWR)) {
27
- reopen_queue = bdrv_reopen_queue(reopen_queue, base, NULL,
28
- orig_base_flags | BDRV_O_RDWR);
29
- }
30
-
31
- if (reopen_queue) {
32
- bdrv_reopen_multiple(bdrv_get_aio_context(bs), reopen_queue, &local_err);
33
+ bdrv_reopen(base, orig_base_flags | BDRV_O_RDWR, &local_err);
34
if (local_err != NULL) {
35
error_propagate(errp, local_err);
36
goto fail;
37
--
38
2.13.6
39
40
1
From: Stefan Weil <sw@weilnetz.de>
1
The bdrv_reopen*() implementation doesn't like it if the graph is
2
changed between queuing nodes for reopen and actually reopening them
3
(one of the reasons is that queuing can be recursive).
2
4
3
Signed-off-by: Stefan Weil <sw@weilnetz.de>
5
So instead of draining the device only in bdrv_reopen_multiple(),
4
Reviewed-by: John Snow <jsnow@redhat.com>
6
require that callers have already drained all affected nodes, and assert this
5
Reviewed-by: Jeff Cody <jcody@redhat.com>
7
in bdrv_reopen_queue().
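
To make the new contract concrete, a schematic caller might look like this
(a hypothetical helper added for illustration, not code from this series; it
assumes the QEMU block-layer internals shown in the hunks below, and error
handling is trimmed):

    /* Hypothetical helper: keep the subtree drained across both queuing and
     * reopening so the graph cannot change in between (bdrv_reopen_queue()
     * now asserts bs->quiesce_counter > 0). */
    static int reopen_with_drain(BlockDriverState *bs, QDict *opts, int flags,
                                 Error **errp)
    {
        BlockReopenQueue *queue;
        int ret;

        bdrv_subtree_drained_begin(bs);

        queue = bdrv_reopen_queue(NULL, bs, opts, flags);
        ret = bdrv_reopen_multiple(bdrv_get_aio_context(bs), queue, errp);

        bdrv_subtree_drained_end(bs);
        return ret;
    }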
8
6
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
9
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
10
Reviewed-by: Fam Zheng <famz@redhat.com>
7
---
11
---
8
block.c | 2 +-
12
block.c | 23 ++++++++++++++++-------
9
block/backup.c | 4 ++--
13
block/replication.c | 6 ++++++
10
block/curl.c | 2 +-
14
qemu-io-cmds.c | 3 +++
11
block/gluster.c | 2 +-
15
3 files changed, 25 insertions(+), 7 deletions(-)
12
block/vhdx.c | 2 +-
13
5 files changed, 6 insertions(+), 6 deletions(-)
14
16
15
diff --git a/block.c b/block.c
17
diff --git a/block.c b/block.c
16
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
17
--- a/block.c
19
--- a/block.c
18
+++ b/block.c
20
+++ b/block.c
21
@@ -XXX,XX +XXX,XX @@ BlockDriverState *bdrv_open(const char *filename, const char *reference,
22
* returns a pointer to bs_queue, which is either the newly allocated
23
* bs_queue, or the existing bs_queue being used.
24
*
25
+ * bs must be drained between bdrv_reopen_queue() and bdrv_reopen_multiple().
26
*/
27
static BlockReopenQueue *bdrv_reopen_queue_child(BlockReopenQueue *bs_queue,
28
BlockDriverState *bs,
29
@@ -XXX,XX +XXX,XX @@ static BlockReopenQueue *bdrv_reopen_queue_child(BlockReopenQueue *bs_queue,
30
BdrvChild *child;
31
QDict *old_options, *explicit_options;
32
33
+ /* Make sure that the caller remembered to use a drained section. This is
34
+ * important to avoid graph changes between the recursive queuing here and
35
+ * bdrv_reopen_multiple(). */
36
+ assert(bs->quiesce_counter > 0);
37
+
38
if (bs_queue == NULL) {
39
bs_queue = g_new0(BlockReopenQueue, 1);
40
QSIMPLEQ_INIT(bs_queue);
19
@@ -XXX,XX +XXX,XX @@ BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
41
@@ -XXX,XX +XXX,XX @@ BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
42
* If all devices prepare successfully, then the changes are committed
43
* to all devices.
20
*
44
*
21
* Reopens all BDS specified in the queue, with the appropriate
45
+ * All affected nodes must be drained between bdrv_reopen_queue() and
22
* flags. All devices are prepared for reopen, and failure of any
46
+ * bdrv_reopen_multiple().
23
- * device will cause all device changes to be abandonded, and intermediate
47
*/
24
+ * device will cause all device changes to be abandoned, and intermediate
48
int bdrv_reopen_multiple(AioContext *ctx, BlockReopenQueue *bs_queue, Error **errp)
25
* data cleaned up.
49
{
26
*
50
@@ -XXX,XX +XXX,XX @@ int bdrv_reopen_multiple(AioContext *ctx, BlockReopenQueue *bs_queue, Error **er
27
* If all devices prepare successfully, then the changes are committed
51
28
diff --git a/block/backup.c b/block/backup.c
52
assert(bs_queue != NULL);
53
54
- aio_context_release(ctx);
55
- bdrv_drain_all_begin();
56
- aio_context_acquire(ctx);
57
-
58
QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
59
+ assert(bs_entry->state.bs->quiesce_counter > 0);
60
if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
61
error_propagate(errp, local_err);
62
goto cleanup;
63
@@ -XXX,XX +XXX,XX @@ cleanup:
64
}
65
g_free(bs_queue);
66
67
- bdrv_drain_all_end();
68
-
69
return ret;
70
}
71
72
@@ -XXX,XX +XXX,XX @@ int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
73
{
74
int ret = -1;
75
Error *local_err = NULL;
76
- BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, NULL, bdrv_flags);
77
+ BlockReopenQueue *queue;
78
79
+ bdrv_subtree_drained_begin(bs);
80
+
81
+ queue = bdrv_reopen_queue(NULL, bs, NULL, bdrv_flags);
82
ret = bdrv_reopen_multiple(bdrv_get_aio_context(bs), queue, &local_err);
83
if (local_err != NULL) {
84
error_propagate(errp, local_err);
85
}
86
+
87
+ bdrv_subtree_drained_end(bs);
88
+
89
return ret;
90
}
91
92
diff --git a/block/replication.c b/block/replication.c
29
index XXXXXXX..XXXXXXX 100644
93
index XXXXXXX..XXXXXXX 100644
30
--- a/block/backup.c
94
--- a/block/replication.c
31
+++ b/block/backup.c
95
+++ b/block/replication.c
32
@@ -XXX,XX +XXX,XX @@ static void cow_request_end(CowRequest *req)
96
@@ -XXX,XX +XXX,XX @@ static void reopen_backing_file(BlockDriverState *bs, bool writable,
97
new_secondary_flags = s->orig_secondary_flags;
98
}
99
100
+ bdrv_subtree_drained_begin(s->hidden_disk->bs);
101
+ bdrv_subtree_drained_begin(s->secondary_disk->bs);
102
+
103
if (orig_hidden_flags != new_hidden_flags) {
104
reopen_queue = bdrv_reopen_queue(reopen_queue, s->hidden_disk->bs, NULL,
105
new_hidden_flags);
106
@@ -XXX,XX +XXX,XX @@ static void reopen_backing_file(BlockDriverState *bs, bool writable,
107
reopen_queue, &local_err);
108
error_propagate(errp, local_err);
109
}
110
+
111
+ bdrv_subtree_drained_end(s->hidden_disk->bs);
112
+ bdrv_subtree_drained_end(s->secondary_disk->bs);
33
}
113
}
34
114
35
/* Copy range to target with a bounce buffer and return the bytes copied. If
115
static void backup_job_cleanup(BlockDriverState *bs)
36
- * error occured, return a negative error number */
116
diff --git a/qemu-io-cmds.c b/qemu-io-cmds.c
37
+ * error occurred, return a negative error number */
38
static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
39
int64_t start,
40
int64_t end,
41
@@ -XXX,XX +XXX,XX @@ fail:
42
43
}
44
45
-/* Copy range to target and return the bytes copied. If error occured, return a
46
+/* Copy range to target and return the bytes copied. If error occurred, return a
47
* negative error number. */
48
static int coroutine_fn backup_cow_with_offload(BackupBlockJob *job,
49
int64_t start,
50
diff --git a/block/curl.c b/block/curl.c
51
index XXXXXXX..XXXXXXX 100644
117
index XXXXXXX..XXXXXXX 100644
52
--- a/block/curl.c
118
--- a/qemu-io-cmds.c
53
+++ b/block/curl.c
119
+++ b/qemu-io-cmds.c
54
@@ -XXX,XX +XXX,XX @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
120
@@ -XXX,XX +XXX,XX @@ static int reopen_f(BlockBackend *blk, int argc, char **argv)
55
}
121
opts = qopts ? qemu_opts_to_qdict(qopts, NULL) : NULL;
56
/* Prior CURL 7.19.4 return value of 0 could mean that the file size is not
122
qemu_opts_reset(&reopen_opts);
57
* know or the size is zero. From 7.19.4 CURL returns -1 if size is not
123
58
- * known and zero if it is realy zero-length file. */
124
+ bdrv_subtree_drained_begin(bs);
59
+ * known and zero if it is really zero-length file. */
125
brq = bdrv_reopen_queue(NULL, bs, opts, flags);
60
#if LIBCURL_VERSION_NUM >= 0x071304
126
bdrv_reopen_multiple(bdrv_get_aio_context(bs), brq, &local_err);
61
if (d < 0) {
127
+ bdrv_subtree_drained_end(bs);
62
pstrcpy(state->errmsg, CURL_ERROR_SIZE,
128
+
63
diff --git a/block/gluster.c b/block/gluster.c
129
if (local_err) {
64
index XXXXXXX..XXXXXXX 100644
130
error_report_err(local_err);
65
--- a/block/gluster.c
131
} else {
66
+++ b/block/gluster.c
67
@@ -XXX,XX +XXX,XX @@ static int qemu_gluster_has_zero_init(BlockDriverState *bs)
68
* If @start is in a trailing hole or beyond EOF, return -ENXIO.
69
* If we can't find out, return a negative errno other than -ENXIO.
70
*
71
- * (Shamefully copied from file-posix.c, only miniscule adaptions.)
72
+ * (Shamefully copied from file-posix.c, only minuscule adaptions.)
73
*/
74
static int find_allocation(BlockDriverState *bs, off_t start,
75
off_t *data, off_t *hole)
76
diff --git a/block/vhdx.c b/block/vhdx.c
77
index XXXXXXX..XXXXXXX 100644
78
--- a/block/vhdx.c
79
+++ b/block/vhdx.c
80
@@ -XXX,XX +XXX,XX @@ uint32_t vhdx_checksum_calc(uint32_t crc, uint8_t *buf, size_t size,
81
/* Validates the checksum of the buffer, with an in-place CRC.
82
*
83
* Zero is substituted during crc calculation for the original crc field,
84
- * and the crc field is restored afterwards. But the buffer will be modifed
85
+ * and the crc field is restored afterwards. But the buffer will be modified
86
* during the calculation, so this may not be not suitable for multi-threaded
87
* use.
88
*
89
--
132
--
90
2.13.6
133
2.13.6
91
134
92
135