The following changes since commit ba29883206d92a29ad5a466e679ccfc2ee6132ef:

  Merge remote-tracking branch 'remotes/borntraeger/tags/s390x-20200310' into staging (2020-03-10 16:50:28 +0000)

are available in the Git repository at:

  https://github.com/XanClic/qemu.git tags/pull-block-2020-03-11

for you to fetch changes up to 397f4e9d83e9c0000905f0a988ba1aeda162571c:

  block/block-copy: hide structure definitions (2020-03-11 12:42:30 +0100)

----------------------------------------------------------------
Block patches for the 5.0 softfreeze:
- qemu-img measure for LUKS
- Improve block-copy's performance by reducing inter-request
  dependencies
- Make curl's detection of accept-ranges more robust
- Memleak fixes
- iotest fix

----------------------------------------------------------------
David Edmondson (2):
  block/curl: HTTP header fields allow whitespace around values
  block/curl: HTTP header field names are case insensitive

Eric Blake (1):
  iotests: Fix nonportable use of od --endian

Pan Nengyuan (2):
  block/qcow2: do free crypto_opts in qcow2_close()
  qemu-img: free memory before re-assign

Stefan Hajnoczi (4):
  luks: extract qcrypto_block_calculate_payload_offset()
  luks: implement .bdrv_measure()
  qemu-img: allow qemu-img measure --object without a filename
  iotests: add 288 luks qemu-img measure test

Vladimir Sementsov-Ogievskiy (10):
  block/qcow2-threads: fix qcow2_decompress
  job: refactor progress to separate object
  block/block-copy: fix progress calculation
  block/block-copy: specialcase first copy_range request
  block/block-copy: use block_status
  block/block-copy: factor out find_conflicting_inflight_req
  block/block-copy: refactor interfaces to use bytes instead of end
  block/block-copy: rename start to offset in interfaces
  block/block-copy: reduce intersecting request lock
  block/block-copy: hide structure definitions

 block/backup-top.c               |   6 +-
 block/backup.c                   |  38 ++-
 block/block-copy.c               | 405 ++++++++++++++++++++++++-------
 block/crypto.c                   |  62 +++++
 block/curl.c                     |  32 ++-
 block/qcow2-threads.c            |  12 +-
 block/qcow2.c                    |  75 ++----
 block/trace-events               |   1 +
 blockjob.c                       |  16 +-
 crypto/block.c                   |  36 +++
 include/block/block-copy.h       |  65 +----
 include/crypto/block.h           |  22 ++
 include/qemu/job.h               |  11 +-
 include/qemu/progress_meter.h    |  58 +++++
 job-qmp.c                        |   4 +-
 job.c                            |   6 +-
 qemu-img.c                       |  14 +-
 tests/qemu-iotests/178           |   2 +-
 tests/qemu-iotests/178.out.qcow2 |   8 +-
 tests/qemu-iotests/178.out.raw   |   8 +-
 tests/qemu-iotests/288           |  93 +++++++
 tests/qemu-iotests/288.out       |  30 +++
 tests/qemu-iotests/common.rc     |  22 +-
 tests/qemu-iotests/group         |   1 +
 24 files changed, 749 insertions(+), 278 deletions(-)
 create mode 100644 include/qemu/progress_meter.h
 create mode 100755 tests/qemu-iotests/288
 create mode 100644 tests/qemu-iotests/288.out

--
2.24.1

The following changes since commit fea445e8fe9acea4f775a832815ee22bdf2b0222:

  Merge tag 'pull-maintainer-final-for-real-this-time-200324-1' of https://gitlab.com/stsquad/qemu into staging (2024-03-21 10:31:56 +0000)

are available in the Git repository at:

  https://gitlab.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 9352f80cd926fe2dde7c89b93ee33bb0356ff40e:

  coroutine: reserve 5,000 mappings (2024-03-21 13:14:30 -0400)

----------------------------------------------------------------
Pull request

I was too quick in sending the coroutine pool sizing change for -rc0 and still
needed to address feedback from Daniel Berrangé.

----------------------------------------------------------------
Stefan Hajnoczi (1):
  coroutine: reserve 5,000 mappings

 util/qemu-coroutine.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

--
2.44.0
diff view generated by jsdifflib
Deleted patch
1
From: Stefan Hajnoczi <stefanha@redhat.com>

The qcow2 .bdrv_measure() code calculates the crypto payload offset.
This logic really belongs in crypto/block.c where it can be reused by
other image formats.

The "luks" block driver will need this same logic in order to implement
.bdrv_measure(), so extract the qcrypto_block_calculate_payload_offset()
function now.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-Id: <20200221112522.1497712-2-stefanha@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
 block/qcow2.c          | 74 +++++++++++-------------------------------
 crypto/block.c         | 36 ++++++++++++++++++++
 include/crypto/block.h | 22 +++++++++++++
 3 files changed, 77 insertions(+), 55 deletions(-)
diff --git a/block/qcow2.c b/block/qcow2.c
22
index XXXXXXX..XXXXXXX 100644
23
--- a/block/qcow2.c
24
+++ b/block/qcow2.c
25
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs)
26
return ret;
27
}
28
29
-static ssize_t qcow2_measure_crypto_hdr_init_func(QCryptoBlock *block,
30
- size_t headerlen, void *opaque, Error **errp)
31
-{
32
- size_t *headerlenp = opaque;
33
-
34
- /* Stash away the payload size */
35
- *headerlenp = headerlen;
36
- return 0;
37
-}
38
-
39
-static ssize_t qcow2_measure_crypto_hdr_write_func(QCryptoBlock *block,
40
- size_t offset, const uint8_t *buf, size_t buflen,
41
- void *opaque, Error **errp)
42
-{
43
- /* Discard the bytes, we're not actually writing to an image */
44
- return buflen;
45
-}
46
-
47
-/* Determine the number of bytes for the LUKS payload */
48
-static bool qcow2_measure_luks_headerlen(QemuOpts *opts, size_t *len,
49
- Error **errp)
50
-{
51
- QDict *opts_qdict;
52
- QDict *cryptoopts_qdict;
53
- QCryptoBlockCreateOptions *cryptoopts;
54
- QCryptoBlock *crypto;
55
-
56
- /* Extract "encrypt." options into a qdict */
57
- opts_qdict = qemu_opts_to_qdict(opts, NULL);
58
- qdict_extract_subqdict(opts_qdict, &cryptoopts_qdict, "encrypt.");
59
- qobject_unref(opts_qdict);
60
-
61
- /* Build QCryptoBlockCreateOptions object from qdict */
62
- qdict_put_str(cryptoopts_qdict, "format", "luks");
63
- cryptoopts = block_crypto_create_opts_init(cryptoopts_qdict, errp);
64
- qobject_unref(cryptoopts_qdict);
65
- if (!cryptoopts) {
66
- return false;
67
- }
68
-
69
- /* Fake LUKS creation in order to determine the payload size */
70
- crypto = qcrypto_block_create(cryptoopts, "encrypt.",
71
- qcow2_measure_crypto_hdr_init_func,
72
- qcow2_measure_crypto_hdr_write_func,
73
- len, errp);
74
- qapi_free_QCryptoBlockCreateOptions(cryptoopts);
75
- if (!crypto) {
76
- return false;
77
- }
78
-
79
- qcrypto_block_free(crypto);
80
- return true;
81
-}
82
-
83
static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs,
84
Error **errp)
85
{
86
@@ -XXX,XX +XXX,XX @@ static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs,
87
g_free(optstr);
88
89
if (has_luks) {
90
+ g_autoptr(QCryptoBlockCreateOptions) create_opts = NULL;
91
+ QDict *opts_qdict;
92
+ QDict *cryptoopts;
93
size_t headerlen;
94
95
- if (!qcow2_measure_luks_headerlen(opts, &headerlen, &local_err)) {
96
+ opts_qdict = qemu_opts_to_qdict(opts, NULL);
97
+ qdict_extract_subqdict(opts_qdict, &cryptoopts, "encrypt.");
98
+ qobject_unref(opts_qdict);
99
+
100
+ qdict_put_str(cryptoopts, "format", "luks");
101
+
102
+ create_opts = block_crypto_create_opts_init(cryptoopts, errp);
103
+ qobject_unref(cryptoopts);
104
+ if (!create_opts) {
105
+ goto err;
106
+ }
107
+
108
+ if (!qcrypto_block_calculate_payload_offset(create_opts,
109
+ "encrypt.",
110
+ &headerlen,
111
+ &local_err)) {
112
goto err;
113
}
114
115
diff --git a/crypto/block.c b/crypto/block.c
116
index XXXXXXX..XXXXXXX 100644
117
--- a/crypto/block.c
118
+++ b/crypto/block.c
119
@@ -XXX,XX +XXX,XX @@ QCryptoBlock *qcrypto_block_create(QCryptoBlockCreateOptions *options,
120
}
121
122
123
+static ssize_t qcrypto_block_headerlen_hdr_init_func(QCryptoBlock *block,
124
+ size_t headerlen, void *opaque, Error **errp)
125
+{
126
+ size_t *headerlenp = opaque;
127
+
128
+ /* Stash away the payload size */
129
+ *headerlenp = headerlen;
130
+ return 0;
131
+}
132
+
133
+
134
+static ssize_t qcrypto_block_headerlen_hdr_write_func(QCryptoBlock *block,
135
+ size_t offset, const uint8_t *buf, size_t buflen,
136
+ void *opaque, Error **errp)
137
+{
138
+ /* Discard the bytes, we're not actually writing to an image */
139
+ return buflen;
140
+}
141
+
142
+
143
+bool
144
+qcrypto_block_calculate_payload_offset(QCryptoBlockCreateOptions *create_opts,
145
+ const char *optprefix,
146
+ size_t *len,
147
+ Error **errp)
148
+{
149
+ /* Fake LUKS creation in order to determine the payload size */
150
+ g_autoptr(QCryptoBlock) crypto =
151
+ qcrypto_block_create(create_opts, optprefix,
152
+ qcrypto_block_headerlen_hdr_init_func,
153
+ qcrypto_block_headerlen_hdr_write_func,
154
+ len, errp);
155
+ return crypto != NULL;
156
+}
157
+
158
+
159
QCryptoBlockInfo *qcrypto_block_get_info(QCryptoBlock *block,
160
Error **errp)
161
{
162
diff --git a/include/crypto/block.h b/include/crypto/block.h
163
index XXXXXXX..XXXXXXX 100644
164
--- a/include/crypto/block.h
165
+++ b/include/crypto/block.h
166
@@ -XXX,XX +XXX,XX @@ QCryptoBlock *qcrypto_block_create(QCryptoBlockCreateOptions *options,
167
Error **errp);
168
169
170
+/**
171
+ * qcrypto_block_calculate_payload_offset:
172
+ * @create_opts: the encryption options
173
+ * @optprefix: name prefix for options
174
+ * @len: output for number of header bytes before payload
175
+ * @errp: pointer to a NULL-initialized error object
176
+ *
177
+ * Calculate the number of header bytes before the payload in an encrypted
178
+ * storage volume. The header is an area before the payload that is reserved
179
+ * for encryption metadata.
180
+ *
181
+ * Returns: true on success, false on error
182
+ */
183
+bool
184
+qcrypto_block_calculate_payload_offset(QCryptoBlockCreateOptions *create_opts,
185
+ const char *optprefix,
186
+ size_t *len,
187
+ Error **errp);
188
+
189
+
190
/**
191
* qcrypto_block_get_info:
192
* @block: the block encryption object
193
@@ -XXX,XX +XXX,XX @@ uint64_t qcrypto_block_get_sector_size(QCryptoBlock *block);
194
void qcrypto_block_free(QCryptoBlock *block);
195
196
G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoBlock, qcrypto_block_free)
197
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoBlockCreateOptions,
198
+ qapi_free_QCryptoBlockCreateOptions)
199
200
#endif /* QCRYPTO_BLOCK_H */
201
--
202
2.24.1
203
204
diff view generated by jsdifflib
1
From: Stefan Hajnoczi <stefanha@redhat.com>

Add qemu-img measure support in the "luks" block driver.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-Id: <20200221112522.1497712-3-stefanha@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
 block/crypto.c | 62 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 62 insertions(+)

diff --git a/block/crypto.c b/block/crypto.c
index XXXXXXX..XXXXXXX 100644
--- a/block/crypto.c
+++ b/block/crypto.c
@@ -XXX,XX +XXX,XX @@ static int64_t block_crypto_getlength(BlockDriverState *bs)
 }
 
 
+static BlockMeasureInfo *block_crypto_measure(QemuOpts *opts,
+                                              BlockDriverState *in_bs,
+                                              Error **errp)
+{
+    g_autoptr(QCryptoBlockCreateOptions) create_opts = NULL;
+    Error *local_err = NULL;
+    BlockMeasureInfo *info;
+    uint64_t size;
+    size_t luks_payload_size;
+    QDict *cryptoopts;
+
+    /*
+     * Preallocation mode doesn't affect size requirements but we must consume
+     * the option.
+     */
+    g_free(qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC));
+
+    size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0);
+
+    if (in_bs) {
+        int64_t ssize = bdrv_getlength(in_bs);
+
+        if (ssize < 0) {
+            error_setg_errno(&local_err, -ssize,
+                             "Unable to get image virtual_size");
+            goto err;
+        }
+
+        size = ssize;
+    }
+
+    cryptoopts = qemu_opts_to_qdict_filtered(opts, NULL,
+                                             &block_crypto_create_opts_luks, true);
+    qdict_put_str(cryptoopts, "format", "luks");
+    create_opts = block_crypto_create_opts_init(cryptoopts, &local_err);
+    qobject_unref(cryptoopts);
+    if (!create_opts) {
+        goto err;
+    }
+
+    if (!qcrypto_block_calculate_payload_offset(create_opts, NULL,
+                                                &luks_payload_size,
+                                                &local_err)) {
+        goto err;
+    }
+
+    /*
+     * Unallocated blocks are still encrypted so allocation status makes no
+     * difference to the file size.
+     */
+    info = g_new(BlockMeasureInfo, 1);
+    info->fully_allocated = luks_payload_size + size;
+    info->required = luks_payload_size + size;
+    return info;
+
+err:
+    error_propagate(errp, local_err);
+    return NULL;
+}
+
+
 static int block_crypto_probe_luks(const uint8_t *buf,
                                    int buf_size,
                                    const char *filename) {
@@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_crypto_luks = {
     .bdrv_co_preadv     = block_crypto_co_preadv,
     .bdrv_co_pwritev    = block_crypto_co_pwritev,
     .bdrv_getlength     = block_crypto_getlength,
+    .bdrv_measure       = block_crypto_measure,
     .bdrv_get_info      = block_crypto_get_info_luks,
     .bdrv_get_specific_info = block_crypto_get_specific_info_luks,

--
2.24.1

From: Stefan Hajnoczi <stefanha@redhat.com>

Daniel P. Berrangé <berrange@redhat.com> pointed out that the coroutine
pool size heuristic is very conservative. Instead of halving
max_map_count, he suggested reserving 5,000 mappings for non-coroutine
users based on observations of guests he has access to.

Fixes: 86a637e48104 ("coroutine: cap per-thread local pool size")
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Message-id: 20240320181232.1464819-1-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 util/qemu-coroutine.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c
index XXXXXXX..XXXXXXX 100644
--- a/util/qemu-coroutine.c
+++ b/util/qemu-coroutine.c
@@ -XXX,XX +XXX,XX @@ static unsigned int get_global_pool_hard_max_size(void)
                             NULL) &&
         qemu_strtoi(contents, NULL, 10, &max_map_count) == 0) {
         /*
-         * This is a conservative upper bound that avoids exceeding
-         * max_map_count. Leave half for non-coroutine users like library
-         * dependencies, vhost-user, etc. Each coroutine takes up 2 VMAs so
-         * halve the amount again.
+         * This is an upper bound that avoids exceeding max_map_count. Leave a
+         * fixed amount for non-coroutine users like library dependencies,
+         * vhost-user, etc. Each coroutine takes up 2 VMAs so halve the
+         * remaining amount.
          */
-        return max_map_count / 4;
+        if (max_map_count > 5000) {
+            return (max_map_count - 5000) / 2;
+        } else {
+            /* Disable the global pool but threads still have local pools */
+            return 0;
+        }
     }
 #endif

--
2.44.0
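To make the new sizing rule concrete, the arithmetic can be tried out with a
small standalone program (not part of the patch; the 5,000 reservation and the
halving mirror the hunk above, everything else is illustrative):

#include <stdio.h>

/*
 * Mirror of the patch's new sizing rule: reserve 5,000 mappings for
 * non-coroutine users, then halve the rest because each coroutine
 * consumes two VMAs (stack plus guard page).
 */
static unsigned int pool_hard_max_size(int max_map_count)
{
    if (max_map_count > 5000) {
        return (max_map_count - 5000) / 2;
    }
    /* Disable the global pool; threads still have local pools. */
    return 0;
}

int main(void)
{
    int max_map_count = 65530;   /* Linux default for vm.max_map_count */
    FILE *f = fopen("/proc/sys/vm/max_map_count", "r");

    if (f) {
        if (fscanf(f, "%d", &max_map_count) != 1) {
            max_map_count = 65530;
        }
        fclose(f);
    }

    /* With the default 65530 this prints 30265 (the old formula gave 16382). */
    printf("global coroutine pool hard max: %u\n",
           pool_hard_max_size(max_map_count));
    return 0;
}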
diff view generated by jsdifflib
Deleted patch
1
From: Stefan Hajnoczi <stefanha@redhat.com>

In most qemu-img sub-commands the --object option only makes sense when
there is a filename. qemu-img measure is an exception because objects
may be referenced from the image creation options instead of an existing
image file. Allow --object without a filename.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-Id: <20200221112522.1497712-4-stefanha@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
 qemu-img.c                       | 6 ++----
 tests/qemu-iotests/178           | 2 +-
 tests/qemu-iotests/178.out.qcow2 | 8 ++++----
 tests/qemu-iotests/178.out.raw   | 8 ++++----
 4 files changed, 11 insertions(+), 13 deletions(-)
19
diff --git a/qemu-img.c b/qemu-img.c
20
index XXXXXXX..XXXXXXX 100644
21
--- a/qemu-img.c
22
+++ b/qemu-img.c
23
@@ -XXX,XX +XXX,XX @@ static int img_measure(int argc, char **argv)
24
filename = argv[optind];
25
}
26
27
- if (!filename &&
28
- (object_opts || image_opts || fmt || snapshot_name || sn_opts)) {
29
- error_report("--object, --image-opts, -f, and -l "
30
- "require a filename argument.");
31
+ if (!filename && (image_opts || fmt || snapshot_name || sn_opts)) {
32
+ error_report("--image-opts, -f, and -l require a filename argument.");
33
goto out;
34
}
35
if (filename && img_size != UINT64_MAX) {
36
diff --git a/tests/qemu-iotests/178 b/tests/qemu-iotests/178
37
index XXXXXXX..XXXXXXX 100755
38
--- a/tests/qemu-iotests/178
39
+++ b/tests/qemu-iotests/178
40
@@ -XXX,XX +XXX,XX @@ _make_test_img 1G
41
$QEMU_IMG measure # missing arguments
42
$QEMU_IMG measure --size 2G "$TEST_IMG" # only one allowed
43
$QEMU_IMG measure "$TEST_IMG" a # only one filename allowed
44
-$QEMU_IMG measure --object secret,id=sec0,data=MTIzNDU2,format=base64 # missing filename
45
+$QEMU_IMG measure --object secret,id=sec0,data=MTIzNDU2,format=base64 # size or filename needed
46
$QEMU_IMG measure --image-opts # missing filename
47
$QEMU_IMG measure -f qcow2 # missing filename
48
$QEMU_IMG measure -l snap1 # missing filename
49
diff --git a/tests/qemu-iotests/178.out.qcow2 b/tests/qemu-iotests/178.out.qcow2
50
index XXXXXXX..XXXXXXX 100644
51
--- a/tests/qemu-iotests/178.out.qcow2
52
+++ b/tests/qemu-iotests/178.out.qcow2
53
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
54
qemu-img: Either --size N or one filename must be specified.
55
qemu-img: --size N cannot be used together with a filename.
56
qemu-img: At most one filename argument is allowed.
57
-qemu-img: --object, --image-opts, -f, and -l require a filename argument.
58
-qemu-img: --object, --image-opts, -f, and -l require a filename argument.
59
-qemu-img: --object, --image-opts, -f, and -l require a filename argument.
60
-qemu-img: --object, --image-opts, -f, and -l require a filename argument.
61
+qemu-img: Either --size N or one filename must be specified.
62
+qemu-img: --image-opts, -f, and -l require a filename argument.
63
+qemu-img: --image-opts, -f, and -l require a filename argument.
64
+qemu-img: --image-opts, -f, and -l require a filename argument.
65
qemu-img: Invalid option list: ,
66
qemu-img: Invalid parameter 'snapshot.foo'
67
qemu-img: Failed in parsing snapshot param 'snapshot.foo'
68
diff --git a/tests/qemu-iotests/178.out.raw b/tests/qemu-iotests/178.out.raw
69
index XXXXXXX..XXXXXXX 100644
70
--- a/tests/qemu-iotests/178.out.raw
71
+++ b/tests/qemu-iotests/178.out.raw
72
@@ -XXX,XX +XXX,XX @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
73
qemu-img: Either --size N or one filename must be specified.
74
qemu-img: --size N cannot be used together with a filename.
75
qemu-img: At most one filename argument is allowed.
76
-qemu-img: --object, --image-opts, -f, and -l require a filename argument.
77
-qemu-img: --object, --image-opts, -f, and -l require a filename argument.
78
-qemu-img: --object, --image-opts, -f, and -l require a filename argument.
79
-qemu-img: --object, --image-opts, -f, and -l require a filename argument.
80
+qemu-img: Either --size N or one filename must be specified.
81
+qemu-img: --image-opts, -f, and -l require a filename argument.
82
+qemu-img: --image-opts, -f, and -l require a filename argument.
83
+qemu-img: --image-opts, -f, and -l require a filename argument.
84
qemu-img: Invalid option list: ,
85
qemu-img: Invalid parameter 'snapshot.foo'
86
qemu-img: Failed in parsing snapshot param 'snapshot.foo'
87
--
88
2.24.1
89
90
diff view generated by jsdifflib
Deleted patch
1
From: Stefan Hajnoczi <stefanha@redhat.com>

This test exercises the block/crypto.c "luks" block driver
.bdrv_measure() code.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-Id: <20200221112522.1497712-5-stefanha@redhat.com>
[mreitz: Renamed test from 282 to 288]
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
 tests/qemu-iotests/288     | 93 ++++++++++++++++++++++++++++++++++++++
 tests/qemu-iotests/288.out | 30 ++++++++++++
 tests/qemu-iotests/group   |  1 +
 3 files changed, 124 insertions(+)
 create mode 100755 tests/qemu-iotests/288
 create mode 100644 tests/qemu-iotests/288.out
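For orientation, the sizes expected in 288.out below decompose as the virtual
size plus the LUKS header: 1075810304 = 1073741824 (1 GiB) + 2068480 bytes of
payload offset, which is exactly the luks_payload_size + size sum computed by
block_crypto_measure() in the previous patch. The fully allocated size is the
same number because unallocated blocks are still encrypted.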
19
diff --git a/tests/qemu-iotests/288 b/tests/qemu-iotests/288
20
new file mode 100755
21
index XXXXXXX..XXXXXXX
22
--- /dev/null
23
+++ b/tests/qemu-iotests/288
24
@@ -XXX,XX +XXX,XX @@
25
+#!/usr/bin/env bash
26
+#
27
+# qemu-img measure tests for LUKS images
28
+#
29
+# Copyright (C) 2020 Red Hat, Inc.
30
+#
31
+# This program is free software; you can redistribute it and/or modify
32
+# it under the terms of the GNU General Public License as published by
33
+# the Free Software Foundation; either version 2 of the License, or
34
+# (at your option) any later version.
35
+#
36
+# This program is distributed in the hope that it will be useful,
37
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
38
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
39
+# GNU General Public License for more details.
40
+#
41
+# You should have received a copy of the GNU General Public License
42
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
43
+#
44
+
45
+# creator
46
+owner=stefanha@redhat.com
47
+
48
+seq=`basename $0`
49
+echo "QA output created by $seq"
50
+
51
+status=1 # failure is the default!
52
+
53
+_cleanup()
54
+{
55
+ _cleanup_test_img
56
+ rm -f "$TEST_IMG.converted"
57
+}
58
+trap "_cleanup; exit \$status" 0 1 2 3 15
59
+
60
+# get standard environment, filters and checks
61
+. ./common.rc
62
+. ./common.filter
63
+. ./common.pattern
64
+
65
+_supported_fmt luks
66
+_supported_proto file
67
+_supported_os Linux
68
+
69
+SECRET=secret,id=sec0,data=passphrase
70
+
71
+echo "== measure 1G image file =="
72
+echo
73
+
74
+$QEMU_IMG measure --object "$SECRET" \
75
+     -O "$IMGFMT" \
76
+         -o key-secret=sec0,iter-time=10 \
77
+         --size 1G
78
+
79
+echo
80
+echo "== create 1G image file (size should be no greater than measured) =="
81
+echo
82
+
83
+_make_test_img 1G
84
+stat -c "image file size in bytes: %s" "$TEST_IMG_FILE"
85
+
86
+echo
87
+echo "== modified 1G image file (size should be no greater than measured) =="
88
+echo
89
+
90
+$QEMU_IO --object "$SECRET" --image-opts "$TEST_IMG" -c "write -P 0x51 0x10000 0x400" | _filter_qemu_io | _filter_testdir
91
+stat -c "image file size in bytes: %s" "$TEST_IMG_FILE"
92
+
93
+echo
94
+echo "== measure preallocation=falloc 1G image file =="
95
+echo
96
+
97
+$QEMU_IMG measure --object "$SECRET" \
98
+     -O "$IMGFMT" \
99
+         -o key-secret=sec0,iter-time=10,preallocation=falloc \
100
+         --size 1G
101
+
102
+echo
103
+echo "== measure with input image file =="
104
+echo
105
+
106
+IMGFMT=raw IMGKEYSECRET= IMGOPTS= _make_test_img 1G | _filter_imgfmt
107
+QEMU_IO_OPTIONS= IMGOPTSSYNTAX= $QEMU_IO -f raw -c "write -P 0x51 0x10000 0x400" "$TEST_IMG_FILE" | _filter_qemu_io | _filter_testdir
108
+$QEMU_IMG measure --object "$SECRET" \
109
+     -O "$IMGFMT" \
110
+         -o key-secret=sec0,iter-time=10 \
111
+         -f raw \
112
+         "$TEST_IMG_FILE"
113
+
114
+# success, all done
115
+echo "*** done"
116
+rm -f $seq.full
117
+status=0
118
diff --git a/tests/qemu-iotests/288.out b/tests/qemu-iotests/288.out
119
new file mode 100644
120
index XXXXXXX..XXXXXXX
121
--- /dev/null
122
+++ b/tests/qemu-iotests/288.out
123
@@ -XXX,XX +XXX,XX @@
124
+QA output created by 288
125
+== measure 1G image file ==
126
+
127
+required size: 1075810304
128
+fully allocated size: 1075810304
129
+
130
+== create 1G image file (size should be no greater than measured) ==
131
+
132
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
133
+image file size in bytes: 1075810304
134
+
135
+== modified 1G image file (size should be no greater than measured) ==
136
+
137
+wrote 1024/1024 bytes at offset 65536
138
+1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
139
+image file size in bytes: 1075810304
140
+
141
+== measure preallocation=falloc 1G image file ==
142
+
143
+required size: 1075810304
144
+fully allocated size: 1075810304
145
+
146
+== measure with input image file ==
147
+
148
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
149
+wrote 1024/1024 bytes at offset 65536
150
+1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
151
+required size: 1075810304
152
+fully allocated size: 1075810304
153
+*** done
154
diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group
155
index XXXXXXX..XXXXXXX 100644
156
--- a/tests/qemu-iotests/group
157
+++ b/tests/qemu-iotests/group
158
@@ -XXX,XX +XXX,XX @@
159
283 auto quick
160
284 rw
161
286 rw quick
162
+288 quick
163
--
164
2.24.1
165
166
diff view generated by jsdifflib
Deleted patch
1
From: David Edmondson <david.edmondson@oracle.com>

RFC 7230 section 3.2 indicates that whitespace is permitted between
the field name and field value and after the field value.

Signed-off-by: David Edmondson <david.edmondson@oracle.com>
Message-Id: <20200224101310.101169-2-david.edmondson@oracle.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
 block/curl.c | 31 +++++++++++++++++++++++++++----
 1 file changed, 27 insertions(+), 4 deletions(-)
14
diff --git a/block/curl.c b/block/curl.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/block/curl.c
17
+++ b/block/curl.c
18
@@ -XXX,XX +XXX,XX @@ static size_t curl_header_cb(void *ptr, size_t size, size_t nmemb, void *opaque)
19
{
20
BDRVCURLState *s = opaque;
21
size_t realsize = size * nmemb;
22
- const char *accept_line = "Accept-Ranges: bytes";
23
+ const char *header = (char *)ptr;
24
+ const char *end = header + realsize;
25
+ const char *accept_ranges = "Accept-Ranges:";
26
+ const char *bytes = "bytes";
27
28
- if (realsize >= strlen(accept_line)
29
- && strncmp((char *)ptr, accept_line, strlen(accept_line)) == 0) {
30
- s->accept_range = true;
31
+ if (realsize >= strlen(accept_ranges)
32
+ && strncmp(header, accept_ranges, strlen(accept_ranges)) == 0) {
33
+
34
+ char *p = strchr(header, ':') + 1;
35
+
36
+ /* Skip whitespace between the header name and value. */
37
+ while (p < end && *p && g_ascii_isspace(*p)) {
38
+ p++;
39
+ }
40
+
41
+ if (end - p >= strlen(bytes)
42
+ && strncmp(p, bytes, strlen(bytes)) == 0) {
43
+
44
+ /* Check that there is nothing but whitespace after the value. */
45
+ p += strlen(bytes);
46
+ while (p < end && *p && g_ascii_isspace(*p)) {
47
+ p++;
48
+ }
49
+
50
+ if (p == end || !*p) {
51
+ s->accept_range = true;
52
+ }
53
+ }
54
}
55
56
return realsize;
57
--
58
2.24.1
59
60
diff view generated by jsdifflib
Deleted patch
1
From: David Edmondson <david.edmondson@oracle.com>
2
1
3
RFC 7230 section 3.2 indicates that HTTP header field names are case
4
insensitive.
5
6
Signed-off-by: David Edmondson <david.edmondson@oracle.com>
7
Message-Id: <20200224101310.101169-3-david.edmondson@oracle.com>
8
Reviewed-by: Max Reitz <mreitz@redhat.com>
9
Signed-off-by: Max Reitz <mreitz@redhat.com>
10
---
11
block/curl.c | 5 +++--
12
1 file changed, 3 insertions(+), 2 deletions(-)
13
14
diff --git a/block/curl.c b/block/curl.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/block/curl.c
17
+++ b/block/curl.c
18
@@ -XXX,XX +XXX,XX @@ static size_t curl_header_cb(void *ptr, size_t size, size_t nmemb, void *opaque)
19
size_t realsize = size * nmemb;
20
const char *header = (char *)ptr;
21
const char *end = header + realsize;
22
- const char *accept_ranges = "Accept-Ranges:";
23
+ const char *accept_ranges = "accept-ranges:";
24
const char *bytes = "bytes";
25
26
if (realsize >= strlen(accept_ranges)
27
- && strncmp(header, accept_ranges, strlen(accept_ranges)) == 0) {
28
+ && g_ascii_strncasecmp(header, accept_ranges,
29
+ strlen(accept_ranges)) == 0) {
30
31
char *p = strchr(header, ':') + 1;
32
33
--
34
2.24.1
35
36
diff view generated by jsdifflib
Deleted patch
1
From: Eric Blake <eblake@redhat.com>

Tests 261 and 272 fail on RHEL 7 with coreutils 8.22, since od
--endian was not added until coreutils 8.23. Fix this by manually
constructing the final value one byte at a time.

Fixes: fc8ba423
Reported-by: Andrey Shinkevich <andrey.shinkevich@virtuozzo.com>
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-Id: <20200226125424.481840-1-eblake@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
 tests/qemu-iotests/common.rc | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)
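The same byte-assembly arithmetic, shown as a standalone C program for clarity
(the example bytes 0xfe 0xff match the "=> 65534" and "=> 65279" comments in
the hunk below; nothing here is part of the patch itself):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint8_t bytes[2] = { 0xfe, 0xff };   /* file contents at the offset */
    uint64_t le = 0, be = 0;

    for (int i = 0; i < 2; i++) {
        le |= (uint64_t)bytes[i] << (8 * i);   /* little endian: low byte first */
        be = (be << 8) | bytes[i];             /* big endian: high byte first */
    }

    printf("le=%llu be=%llu\n",                /* prints le=65534 be=65279 */
           (unsigned long long)le, (unsigned long long)be);
    return 0;
}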
17
diff --git a/tests/qemu-iotests/common.rc b/tests/qemu-iotests/common.rc
18
index XXXXXXX..XXXXXXX 100644
19
--- a/tests/qemu-iotests/common.rc
20
+++ b/tests/qemu-iotests/common.rc
21
@@ -XXX,XX +XXX,XX @@ poke_file()
22
# peek_file_le 'test.img' 512 2 => 65534
23
peek_file_le()
24
{
25
- # Wrap in echo $() to strip spaces
26
- echo $(od -j"$2" -N"$3" --endian=little -An -vtu"$3" "$1")
27
+ local val=0 shift=0 byte
28
+
29
+ # coreutils' od --endian is not portable, so manually assemble bytes.
30
+ for byte in $(od -j"$2" -N"$3" -An -v -tu1 "$1"); do
31
+ val=$(( val | (byte << shift) ))
32
+ shift=$((shift + 8))
33
+ done
34
+ printf %llu $val
35
}
36
37
# peek_file_be 'test.img' 512 2 => 65279
38
peek_file_be()
39
{
40
- # Wrap in echo $() to strip spaces
41
- echo $(od -j"$2" -N"$3" --endian=big -An -vtu"$3" "$1")
42
+ local val=0 byte
43
+
44
+ # coreutils' od --endian is not portable, so manually assemble bytes.
45
+ for byte in $(od -j"$2" -N"$3" -An -v -tu1 "$1"); do
46
+ val=$(( (val << 8) | byte ))
47
+ done
48
+ printf %llu $val
49
}
50
51
-# peek_file_raw 'test.img' 512 2 => '\xff\xfe'
52
+# peek_file_raw 'test.img' 512 2 => '\xff\xfe'. Do not use if the raw data
53
+# is likely to contain \0 or trailing \n.
54
peek_file_raw()
55
{
56
dd if="$1" bs=1 skip="$2" count="$3" status=none
57
--
58
2.24.1
59
60
diff view generated by jsdifflib
Deleted patch
1
From: Pan Nengyuan <pannengyuan@huawei.com>
2
1
3
'crypto_opts' is never freed in qcow2_close(); this patch fixes the leak shown in the stack below:
4
5
Direct leak of 24 byte(s) in 1 object(s) allocated from:
6
#0 0x7f0edd81f970 in __interceptor_calloc (/lib64/libasan.so.5+0xef970)
7
#1 0x7f0edc6d149d in g_malloc0 (/lib64/libglib-2.0.so.0+0x5249d)
8
#2 0x55d7eaede63d in qobject_input_start_struct /mnt/sdb/qemu-new/qemu_test/qemu/qapi/qobject-input-visitor.c:295
9
#3 0x55d7eaed78b8 in visit_start_struct /mnt/sdb/qemu-new/qemu_test/qemu/qapi/qapi-visit-core.c:49
10
#4 0x55d7eaf5140b in visit_type_QCryptoBlockOpenOptions qapi/qapi-visit-crypto.c:290
11
#5 0x55d7eae43af3 in block_crypto_open_opts_init /mnt/sdb/qemu-new/qemu_test/qemu/block/crypto.c:163
12
#6 0x55d7eacd2924 in qcow2_update_options_prepare /mnt/sdb/qemu-new/qemu_test/qemu/block/qcow2.c:1148
13
#7 0x55d7eacd33f7 in qcow2_update_options /mnt/sdb/qemu-new/qemu_test/qemu/block/qcow2.c:1232
14
#8 0x55d7eacd9680 in qcow2_do_open /mnt/sdb/qemu-new/qemu_test/qemu/block/qcow2.c:1512
15
#9 0x55d7eacdc55e in qcow2_open_entry /mnt/sdb/qemu-new/qemu_test/qemu/block/qcow2.c:1792
16
#10 0x55d7eacdc8fe in qcow2_open /mnt/sdb/qemu-new/qemu_test/qemu/block/qcow2.c:1819
17
#11 0x55d7eac3742d in bdrv_open_driver /mnt/sdb/qemu-new/qemu_test/qemu/block.c:1317
18
#12 0x55d7eac3e990 in bdrv_open_common /mnt/sdb/qemu-new/qemu_test/qemu/block.c:1575
19
#13 0x55d7eac4442c in bdrv_open_inherit /mnt/sdb/qemu-new/qemu_test/qemu/block.c:3126
20
#14 0x55d7eac45c3f in bdrv_open /mnt/sdb/qemu-new/qemu_test/qemu/block.c:3219
21
#15 0x55d7ead8e8a4 in blk_new_open /mnt/sdb/qemu-new/qemu_test/qemu/block/block-backend.c:397
22
#16 0x55d7eacde74c in qcow2_co_create /mnt/sdb/qemu-new/qemu_test/qemu/block/qcow2.c:3534
23
#17 0x55d7eacdfa6d in qcow2_co_create_opts /mnt/sdb/qemu-new/qemu_test/qemu/block/qcow2.c:3668
24
#18 0x55d7eac1c678 in bdrv_create_co_entry /mnt/sdb/qemu-new/qemu_test/qemu/block.c:485
25
#19 0x55d7eb0024d2 in coroutine_trampoline /mnt/sdb/qemu-new/qemu_test/qemu/util/coroutine-ucontext.c:115
26
27
Reported-by: Euler Robot <euler.robot@huawei.com>
28
Signed-off-by: Pan Nengyuan <pannengyuan@huawei.com>
29
Reviewed-by: Max Reitz <mreitz@redhat.com>
30
Message-Id: <20200227012950.12256-2-pannengyuan@huawei.com>
31
Signed-off-by: Max Reitz <mreitz@redhat.com>
32
---
33
block/qcow2.c | 1 +
34
1 file changed, 1 insertion(+)
35
36
diff --git a/block/qcow2.c b/block/qcow2.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/block/qcow2.c
39
+++ b/block/qcow2.c
40
@@ -XXX,XX +XXX,XX @@ static void qcow2_close(BlockDriverState *bs)
41
42
qcrypto_block_free(s->crypto);
43
s->crypto = NULL;
44
+ qapi_free_QCryptoBlockOpenOptions(s->crypto_opts);
45
46
g_free(s->unknown_header_fields);
47
cleanup_unknown_header_ext(bs);
48
--
49
2.24.1
50
51
diff view generated by jsdifflib
Deleted patch
1
From: Pan Nengyuan <pannengyuan@huawei.com>
2
1
3
collect_image_check() is called twice in img_check(); the second call allocates filename/format again without freeing the memory from the first call.
It is not a big deal since the process will exit anyway, but it keeps the code clean and removes the warning spotted by ASAN.
5
6
Reported-by: Euler Robot <euler.robot@huawei.com>
7
Signed-off-by: Pan Nengyuan <pannengyuan@huawei.com>
8
Message-Id: <20200227012950.12256-3-pannengyuan@huawei.com>
9
Signed-off-by: Max Reitz <mreitz@redhat.com>
10
---
11
qemu-img.c | 2 ++
12
1 file changed, 2 insertions(+)
13
14
diff --git a/qemu-img.c b/qemu-img.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/qemu-img.c
17
+++ b/qemu-img.c
18
@@ -XXX,XX +XXX,XX @@ static int img_check(int argc, char **argv)
19
check->corruptions_fixed);
20
}
21
22
+ qapi_free_ImageCheck(check);
23
+ check = g_new0(ImageCheck, 1);
24
ret = collect_image_check(bs, check, filename, fmt, 0);
25
26
check->leaks_fixed = leaks_fixed;
27
--
28
2.24.1
29
30
diff view generated by jsdifflib
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>

On the success path we return whatever inflate() returned instead of 0.
That happens to work for Z_STREAM_END, which is positive, but it is
definitely broken for Z_BUF_ERROR.

While at it, switch to an errno return code, to be closer to the
qcow2_compress API (and to the usual expectations).

Invert the if condition so that it reads positively, and drop the dead
initialization of ret.

Cc: qemu-stable@nongnu.org # v4.0
Fixes: 341926ab83e2b
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20200302150930.16218-1-vsementsov@virtuozzo.com>
Reviewed-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Ján Tomko <jtomko@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
 block/qcow2-threads.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)
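The corrected success condition boils down to the small helper below (a sketch
restating the mapping the patch introduces, not the in-tree code):

#include <errno.h>
#include <stdio.h>
#include <zlib.h>

/*
 * Decompression succeeds only when the destination buffer was completely
 * filled. Z_BUF_ERROR is acceptable because the compressed input size is
 * only known with one-sector precision, so inflate() may stop early.
 */
static int map_inflate_result(int zret, unsigned int avail_out)
{
    if ((zret == Z_STREAM_END || zret == Z_BUF_ERROR) && avail_out == 0) {
        return 0;
    }
    return -EIO;
}

int main(void)
{
    printf("%d %d %d\n",
           map_inflate_result(Z_STREAM_END, 0),   /* 0: full output produced */
           map_inflate_result(Z_BUF_ERROR, 0),    /* 0: output full, input short */
           map_inflate_result(Z_BUF_ERROR, 16));  /* -EIO: output not filled */
    return 0;
}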
24
diff --git a/block/qcow2-threads.c b/block/qcow2-threads.c
25
index XXXXXXX..XXXXXXX 100644
26
--- a/block/qcow2-threads.c
27
+++ b/block/qcow2-threads.c
28
@@ -XXX,XX +XXX,XX @@ static ssize_t qcow2_compress(void *dest, size_t dest_size,
29
* @src - source buffer, @src_size bytes
30
*
31
* Returns: 0 on success
32
- * -1 on fail
33
+ * -EIO on fail
34
*/
35
static ssize_t qcow2_decompress(void *dest, size_t dest_size,
36
const void *src, size_t src_size)
37
{
38
- int ret = 0;
39
+ int ret;
40
z_stream strm;
41
42
memset(&strm, 0, sizeof(strm));
43
@@ -XXX,XX +XXX,XX @@ static ssize_t qcow2_decompress(void *dest, size_t dest_size,
44
45
ret = inflateInit2(&strm, -12);
46
if (ret != Z_OK) {
47
- return -1;
48
+ return -EIO;
49
}
50
51
ret = inflate(&strm, Z_FINISH);
52
- if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) || strm.avail_out != 0) {
53
+ if ((ret == Z_STREAM_END || ret == Z_BUF_ERROR) && strm.avail_out == 0) {
54
/*
55
* We approve Z_BUF_ERROR because we need @dest buffer to be filled, but
56
* @src buffer may be processed partly (because in qcow2 we know size of
57
* compressed data with precision of one sector)
58
*/
59
- ret = -1;
60
+ ret = 0;
61
+ } else {
62
+ ret = -EIO;
63
}
64
65
inflateEnd(&strm);
66
--
67
2.24.1
68
69
diff view generated by jsdifflib
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>

We need the progress meter as a separate object so that it can be passed
to the block-copy object in the next commit.

Cc: qemu-stable@nongnu.org
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Andrey Shinkevich <andrey.shinkevich@virtuozzo.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-Id: <20200311103004.7649-2-vsementsov@virtuozzo.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
 blockjob.c                    | 16 +++++-----
 include/qemu/job.h            | 11 ++-----
 include/qemu/progress_meter.h | 58 +++++++++++++++++++++++++++++++++++
 job-qmp.c                     |  4 +--
 job.c                         |  6 ++--
 qemu-img.c                    |  6 ++--
 6 files changed, 76 insertions(+), 25 deletions(-)
 create mode 100644 include/qemu/progress_meter.h
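To illustrate how the three helpers in the new header are meant to be combined,
here is a standalone demo that re-declares them; in QEMU they live in
include/qemu/progress_meter.h and jobs drive them through the existing
job_progress_*() wrappers, so the surrounding loop is purely illustrative:

#include <stdint.h>
#include <stdio.h>

typedef struct ProgressMeter {
    uint64_t current;   /* work done so far, arbitrary units */
    uint64_t total;     /* estimated value of current at completion */
} ProgressMeter;

static inline void progress_work_done(ProgressMeter *pm, uint64_t done)
{
    pm->current += done;
}

static inline void progress_set_remaining(ProgressMeter *pm, uint64_t remaining)
{
    pm->total = pm->current + remaining;
}

int main(void)
{
    ProgressMeter pm = { 0 };

    progress_set_remaining(&pm, 1000);   /* total = 0 + 1000 */

    for (int i = 0; i < 4; i++) {
        progress_work_done(&pm, 100);    /* current += 100 */
    }

    /* Skipping work shrinks what remains without touching current: */
    progress_set_remaining(&pm, 200);    /* total = 400 + 200 = 600 */

    printf("progress: %llu/%llu\n",
           (unsigned long long)pm.current, (unsigned long long)pm.total);
    return 0;
}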
22
diff --git a/blockjob.c b/blockjob.c
23
index XXXXXXX..XXXXXXX 100644
24
--- a/blockjob.c
25
+++ b/blockjob.c
26
@@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
27
info->device = g_strdup(job->job.id);
28
info->busy = atomic_read(&job->job.busy);
29
info->paused = job->job.pause_count > 0;
30
- info->offset = job->job.progress_current;
31
- info->len = job->job.progress_total;
32
+ info->offset = job->job.progress.current;
33
+ info->len = job->job.progress.total;
34
info->speed = job->speed;
35
info->io_status = job->iostatus;
36
info->ready = job_is_ready(&job->job),
37
@@ -XXX,XX +XXX,XX @@ static void block_job_event_cancelled(Notifier *n, void *opaque)
38
39
qapi_event_send_block_job_cancelled(job_type(&job->job),
40
job->job.id,
41
- job->job.progress_total,
42
- job->job.progress_current,
43
+ job->job.progress.total,
44
+ job->job.progress.current,
45
job->speed);
46
}
47
48
@@ -XXX,XX +XXX,XX @@ static void block_job_event_completed(Notifier *n, void *opaque)
49
50
qapi_event_send_block_job_completed(job_type(&job->job),
51
job->job.id,
52
- job->job.progress_total,
53
- job->job.progress_current,
54
+ job->job.progress.total,
55
+ job->job.progress.current,
56
job->speed,
57
!!msg,
58
msg);
59
@@ -XXX,XX +XXX,XX @@ static void block_job_event_ready(Notifier *n, void *opaque)
60
61
qapi_event_send_block_job_ready(job_type(&job->job),
62
job->job.id,
63
- job->job.progress_total,
64
- job->job.progress_current,
65
+ job->job.progress.total,
66
+ job->job.progress.current,
67
job->speed);
68
}
69
70
diff --git a/include/qemu/job.h b/include/qemu/job.h
71
index XXXXXXX..XXXXXXX 100644
72
--- a/include/qemu/job.h
73
+++ b/include/qemu/job.h
74
@@ -XXX,XX +XXX,XX @@
75
76
#include "qapi/qapi-types-job.h"
77
#include "qemu/queue.h"
78
+#include "qemu/progress_meter.h"
79
#include "qemu/coroutine.h"
80
#include "block/aio.h"
81
82
@@ -XXX,XX +XXX,XX @@ typedef struct Job {
83
/** True if this job should automatically dismiss itself */
84
bool auto_dismiss;
85
86
- /**
87
- * Current progress. The unit is arbitrary as long as the ratio between
88
- * progress_current and progress_total represents the estimated percentage
89
- * of work already done.
90
- */
91
- int64_t progress_current;
92
-
93
- /** Estimated progress_current value at the completion of the job */
94
- int64_t progress_total;
95
+ ProgressMeter progress;
96
97
/**
98
* Return code from @run and/or @prepare callback(s).
99
diff --git a/include/qemu/progress_meter.h b/include/qemu/progress_meter.h
100
new file mode 100644
101
index XXXXXXX..XXXXXXX
102
--- /dev/null
103
+++ b/include/qemu/progress_meter.h
104
@@ -XXX,XX +XXX,XX @@
105
+/*
106
+ * Helper functionality for some process progress tracking.
107
+ *
108
+ * Copyright (c) 2011 IBM Corp.
109
+ * Copyright (c) 2012, 2018 Red Hat, Inc.
110
+ * Copyright (c) 2020 Virtuozzo International GmbH
111
+ *
112
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
113
+ * of this software and associated documentation files (the "Software"), to deal
114
+ * in the Software without restriction, including without limitation the rights
115
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
116
+ * copies of the Software, and to permit persons to whom the Software is
117
+ * furnished to do so, subject to the following conditions:
118
+ *
119
+ * The above copyright notice and this permission notice shall be included in
120
+ * all copies or substantial portions of the Software.
121
+ *
122
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
123
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
124
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
125
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
126
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
127
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
128
+ * THE SOFTWARE.
129
+ */
130
+
131
+#ifndef QEMU_PROGRESS_METER_H
132
+#define QEMU_PROGRESS_METER_H
133
+
134
+typedef struct ProgressMeter {
135
+ /**
136
+ * Current progress. The unit is arbitrary as long as the ratio between
137
+ * current and total represents the estimated percentage
138
+ * of work already done.
139
+ */
140
+ uint64_t current;
141
+
142
+ /** Estimated current value at the completion of the process */
143
+ uint64_t total;
144
+} ProgressMeter;
145
+
146
+static inline void progress_work_done(ProgressMeter *pm, uint64_t done)
147
+{
148
+ pm->current += done;
149
+}
150
+
151
+static inline void progress_set_remaining(ProgressMeter *pm, uint64_t remaining)
152
+{
153
+ pm->total = pm->current + remaining;
154
+}
155
+
156
+static inline void progress_increase_remaining(ProgressMeter *pm,
157
+ uint64_t delta)
158
+{
159
+ pm->total += delta;
160
+}
161
+
162
+#endif /* QEMU_PROGRESS_METER_H */
163
diff --git a/job-qmp.c b/job-qmp.c
164
index XXXXXXX..XXXXXXX 100644
165
--- a/job-qmp.c
166
+++ b/job-qmp.c
167
@@ -XXX,XX +XXX,XX @@ static JobInfo *job_query_single(Job *job, Error **errp)
168
.id = g_strdup(job->id),
169
.type = job_type(job),
170
.status = job->status,
171
- .current_progress = job->progress_current,
172
- .total_progress = job->progress_total,
173
+ .current_progress = job->progress.current,
174
+ .total_progress = job->progress.total,
175
.has_error = !!job->err,
176
.error = job->err ? \
177
g_strdup(error_get_pretty(job->err)) : NULL,
178
diff --git a/job.c b/job.c
179
index XXXXXXX..XXXXXXX 100644
180
--- a/job.c
181
+++ b/job.c
182
@@ -XXX,XX +XXX,XX @@ void job_unref(Job *job)
183
184
void job_progress_update(Job *job, uint64_t done)
185
{
186
- job->progress_current += done;
187
+ progress_work_done(&job->progress, done);
188
}
189
190
void job_progress_set_remaining(Job *job, uint64_t remaining)
191
{
192
- job->progress_total = job->progress_current + remaining;
193
+ progress_set_remaining(&job->progress, remaining);
194
}
195
196
void job_progress_increase_remaining(Job *job, uint64_t delta)
197
{
198
- job->progress_total += delta;
199
+ progress_increase_remaining(&job->progress, delta);
200
}
201
202
void job_event_cancelled(Job *job)
203
diff --git a/qemu-img.c b/qemu-img.c
204
index XXXXXXX..XXXXXXX 100644
205
--- a/qemu-img.c
206
+++ b/qemu-img.c
207
@@ -XXX,XX +XXX,XX @@ static void run_block_job(BlockJob *job, Error **errp)
208
do {
209
float progress = 0.0f;
210
aio_poll(aio_context, true);
211
- if (job->job.progress_total) {
212
- progress = (float)job->job.progress_current /
213
- job->job.progress_total * 100.f;
214
+ if (job->job.progress.total) {
215
+ progress = (float)job->job.progress.current /
216
+ job->job.progress.total * 100.f;
217
}
218
qemu_progress_print(progress, 0);
219
} while (!job_is_ready(&job->job) && !job_is_completed(&job->job));
220
--
221
2.24.1
222
223
diff view generated by jsdifflib
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>

Assume we have two regions, A and B, where region B is in flight now,
and region A is not yet touched, but it is unallocated and should be
skipped.

Correspondingly, as progress we have

    total = A + B
    current = 0

If we reset unallocated region A and call progress_reset_callback,
it will calculate 0 bytes dirty in the bitmap and call
job_progress_set_remaining, which will set

    total = current + 0 = 0 + 0 = 0

So, B bytes are actually removed from the total accounting. When the job
finishes we'll have

    total = 0
    current = B

which is not good.

This is because we didn't consider in-flight bytes: when calculating the
remaining amount, we should have set (in_flight + dirty_bytes) as
remaining, not only dirty_bytes.

To fix it, refactor the progress calculation, moving it into block-copy
itself instead of fixing the callback. And, of course, track the
in-flight byte count.

We still have to keep one callback, to maintain the backup job's
bytes_read calculation, but it will go away soon, when we turn the whole
backup process into one block_copy call.

Cc: qemu-stable@nongnu.org
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Andrey Shinkevich <andrey.shinkevich@virtuozzo.com>
Message-Id: <20200311103004.7649-3-vsementsov@virtuozzo.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
 block/backup.c             | 13 ++-----------
 block/block-copy.c         | 16 ++++++++++++----
 include/block/block-copy.h | 15 +++++----------
 3 files changed, 19 insertions(+), 25 deletions(-)
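Plugging concrete numbers into the scenario above (purely illustrative), with
A = 1 MiB unallocated and B = 1 MiB in flight:

    old behaviour, when A is reset:
        total = current + dirty = 0 + 0 = 0
        after B completes: current = 1 MiB, total = 0      (current > total)

    with in-flight bytes tracked:
        total = current + (dirty + in_flight) = 0 + (0 + 1 MiB) = 1 MiB
        after B completes: current = 1 MiB, total = 1 MiB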
48
49
diff --git a/block/backup.c b/block/backup.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/block/backup.c
52
+++ b/block/backup.c
53
@@ -XXX,XX +XXX,XX @@ static void backup_progress_bytes_callback(int64_t bytes, void *opaque)
54
BackupBlockJob *s = opaque;
55
56
s->bytes_read += bytes;
57
- job_progress_update(&s->common.job, bytes);
58
-}
59
-
60
-static void backup_progress_reset_callback(void *opaque)
61
-{
62
- BackupBlockJob *s = opaque;
63
- uint64_t estimate = bdrv_get_dirty_count(s->bcs->copy_bitmap);
64
-
65
- job_progress_set_remaining(&s->common.job, estimate);
66
}
67
68
static int coroutine_fn backup_do_cow(BackupBlockJob *job,
69
@@ -XXX,XX +XXX,XX @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
70
job->cluster_size = cluster_size;
71
job->len = len;
72
73
- block_copy_set_callbacks(bcs, backup_progress_bytes_callback,
74
- backup_progress_reset_callback, job);
75
+ block_copy_set_progress_callback(bcs, backup_progress_bytes_callback, job);
76
+ block_copy_set_progress_meter(bcs, &job->common.job.progress);
77
78
/* Required permissions are already taken by backup-top target */
79
block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
80
diff --git a/block/block-copy.c b/block/block-copy.c
81
index XXXXXXX..XXXXXXX 100644
82
--- a/block/block-copy.c
83
+++ b/block/block-copy.c
84
@@ -XXX,XX +XXX,XX @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
85
return s;
86
}
87
88
-void block_copy_set_callbacks(
89
+void block_copy_set_progress_callback(
90
BlockCopyState *s,
91
ProgressBytesCallbackFunc progress_bytes_callback,
92
- ProgressResetCallbackFunc progress_reset_callback,
93
void *progress_opaque)
94
{
95
s->progress_bytes_callback = progress_bytes_callback;
96
- s->progress_reset_callback = progress_reset_callback;
97
s->progress_opaque = progress_opaque;
98
}
99
100
+void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
101
+{
102
+ s->progress = pm;
103
+}
104
+
105
/*
106
* block_copy_do_copy
107
*
108
@@ -XXX,XX +XXX,XX @@ int64_t block_copy_reset_unallocated(BlockCopyState *s,
109
110
if (!ret) {
111
bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
112
- s->progress_reset_callback(s->progress_opaque);
113
+ progress_set_remaining(s->progress,
114
+ bdrv_get_dirty_count(s->copy_bitmap) +
115
+ s->in_flight_bytes);
116
}
117
118
*count = bytes;
119
@@ -XXX,XX +XXX,XX @@ int coroutine_fn block_copy(BlockCopyState *s,
120
trace_block_copy_process(s, start);
121
122
bdrv_reset_dirty_bitmap(s->copy_bitmap, start, chunk_end - start);
123
+ s->in_flight_bytes += chunk_end - start;
124
125
co_get_from_shres(s->mem, chunk_end - start);
126
ret = block_copy_do_copy(s, start, chunk_end, error_is_read);
127
co_put_to_shres(s->mem, chunk_end - start);
128
+ s->in_flight_bytes -= chunk_end - start;
129
if (ret < 0) {
130
bdrv_set_dirty_bitmap(s->copy_bitmap, start, chunk_end - start);
131
break;
132
}
133
134
+ progress_work_done(s->progress, chunk_end - start);
135
s->progress_bytes_callback(chunk_end - start, s->progress_opaque);
136
start = chunk_end;
137
ret = 0;
138
diff --git a/include/block/block-copy.h b/include/block/block-copy.h
139
index XXXXXXX..XXXXXXX 100644
140
--- a/include/block/block-copy.h
141
+++ b/include/block/block-copy.h
142
@@ -XXX,XX +XXX,XX @@ typedef struct BlockCopyInFlightReq {
143
} BlockCopyInFlightReq;
144
145
typedef void (*ProgressBytesCallbackFunc)(int64_t bytes, void *opaque);
146
-typedef void (*ProgressResetCallbackFunc)(void *opaque);
147
typedef struct BlockCopyState {
148
/*
149
* BdrvChild objects are not owned or managed by block-copy. They are
150
@@ -XXX,XX +XXX,XX @@ typedef struct BlockCopyState {
151
BdrvChild *source;
152
BdrvChild *target;
153
BdrvDirtyBitmap *copy_bitmap;
154
+ int64_t in_flight_bytes;
155
int64_t cluster_size;
156
bool use_copy_range;
157
int64_t copy_size;
158
@@ -XXX,XX +XXX,XX @@ typedef struct BlockCopyState {
159
*/
160
bool skip_unallocated;
161
162
+ ProgressMeter *progress;
163
/* progress_bytes_callback: called when some copying progress is done. */
164
ProgressBytesCallbackFunc progress_bytes_callback;
165
-
166
- /*
167
- * progress_reset_callback: called when some bytes reset from copy_bitmap
168
- * (see @skip_unallocated above). The callee is assumed to recalculate how
169
- * many bytes remain based on the dirty bit count of copy_bitmap.
170
- */
171
- ProgressResetCallbackFunc progress_reset_callback;
172
void *progress_opaque;
173
174
SharedResource *mem;
175
@@ -XXX,XX +XXX,XX @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
176
BdrvRequestFlags write_flags,
177
Error **errp);
178
179
-void block_copy_set_callbacks(
180
+void block_copy_set_progress_callback(
181
BlockCopyState *s,
182
ProgressBytesCallbackFunc progress_bytes_callback,
183
- ProgressResetCallbackFunc progress_reset_callback,
184
void *progress_opaque);
185
186
+void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm);
187
+
188
void block_copy_state_free(BlockCopyState *s);
189
190
int64_t block_copy_reset_unallocated(BlockCopyState *s,
191
--
192
2.24.1
193
194
diff view generated by jsdifflib
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
1
3
In block_copy_do_copy we fallback to read+write if copy_range failed.
4
In this case copy_size is larger than defined for buffered IO, and
5
there is corresponding commit. Still, backup copies data cluster by
6
cluster, and most of requests are limited to one cluster anyway, so the
7
only source of this one bad-limited request is copy-before-write
8
operation.
9
10
Further patch will move backup to use block_copy directly, than for
11
cases where copy_range is not supported, first request will be
12
oversized in each backup. It's not good, let's change it now.
13
14
Fix is simple: just limit first copy_range request like buffer-based
15
request. If it succeed, set larger copy_range limit.
16
17
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
18
Reviewed-by: Andrey Shinkevich <andrey.shinkevich@virtuozzo.com>
19
Reviewed-by: Max Reitz <mreitz@redhat.com>
20
Message-Id: <20200311103004.7649-4-vsementsov@virtuozzo.com>
21
Signed-off-by: Max Reitz <mreitz@redhat.com>
22
---
23
block/block-copy.c | 41 +++++++++++++++++++++++++++++++----------
24
1 file changed, 31 insertions(+), 10 deletions(-)
25
26
diff --git a/block/block-copy.c b/block/block-copy.c
27
index XXXXXXX..XXXXXXX 100644
28
--- a/block/block-copy.c
29
+++ b/block/block-copy.c
30
@@ -XXX,XX +XXX,XX @@ void block_copy_state_free(BlockCopyState *s)
31
g_free(s);
32
}
33
34
+static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
35
+{
36
+ return MIN_NON_ZERO(INT_MAX,
37
+ MIN_NON_ZERO(source->bs->bl.max_transfer,
38
+ target->bs->bl.max_transfer));
39
+}
40
+
41
BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
42
int64_t cluster_size,
43
BdrvRequestFlags write_flags, Error **errp)
44
{
45
BlockCopyState *s;
46
BdrvDirtyBitmap *copy_bitmap;
47
- uint32_t max_transfer =
48
- MIN_NON_ZERO(INT_MAX,
49
- MIN_NON_ZERO(source->bs->bl.max_transfer,
50
- target->bs->bl.max_transfer));
51
52
copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
53
errp);
54
@@ -XXX,XX +XXX,XX @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
55
.mem = shres_create(BLOCK_COPY_MAX_MEM),
56
};
57
58
- if (max_transfer < cluster_size) {
59
+ if (block_copy_max_transfer(source, target) < cluster_size) {
60
/*
61
* copy_range does not respect max_transfer. We don't want to bother
62
* with requests smaller than block-copy cluster size, so fallback to
63
@@ -XXX,XX +XXX,XX @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
64
s->copy_size = cluster_size;
65
} else {
66
/*
67
- * copy_range does not respect max_transfer (it's a TODO), so we factor
68
- * that in here.
69
+ * We enable copy-range, but keep small copy_size, until first
70
+ * successful copy_range (look at block_copy_do_copy).
71
*/
72
s->use_copy_range = true;
73
- s->copy_size = MIN(MAX(cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
74
- QEMU_ALIGN_DOWN(max_transfer, cluster_size));
75
+ s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
76
}
77
78
QLIST_INIT(&s->inflight_reqs);
79
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
80
s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
81
/* Fallback to read+write with allocated buffer */
82
} else {
83
+ if (s->use_copy_range) {
84
+ /*
85
+ * Successful copy-range. Now increase copy_size. copy_range
86
+ * does not respect max_transfer (it's a TODO), so we factor
87
+ * that in here.
88
+ *
89
+ * Note: we double-check s->use_copy_range for the case when
90
+ * parallel block-copy request unsets it during previous
91
+ * bdrv_co_copy_range call.
92
+ */
93
+ s->copy_size =
94
+ MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
95
+ QEMU_ALIGN_DOWN(block_copy_max_transfer(s->source,
96
+ s->target),
97
+ s->cluster_size));
98
+ }
99
goto out;
100
}
101
}
102
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
103
/*
104
* In case of failed copy_range request above, we may proceed with buffered
105
* request larger than BLOCK_COPY_MAX_BUFFER. Still, further requests will
106
- * be properly limited, so don't care too much.
107
+ * be properly limited, so don't care too much. Moreover the most likely
108
+ * case (copy_range is unsupported for the configuration, so the very first
109
+ * copy_range request fails) is handled by setting large copy_size only
110
+ * after first successful copy_range.
111
*/
112
113
bounce_buffer = qemu_blockalign(s->source->bs, nbytes);
114
--
115
2.24.1
116
117
diff view generated by jsdifflib
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>

Use bdrv_block_status_above to choose an effective chunk size and to
handle zeroes effectively.

This replaces checking only whether a region is allocated or not, and
drops the old code path for it. Assistance by the backup job is dropped
too, as caching block-status information is more difficult than just
caching is-allocated information in our dirty bitmap, and the backup job
is not a good place for this caching anyway.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Andrey Shinkevich <andrey.shinkevich@virtuozzo.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-Id: <20200311103004.7649-5-vsementsov@virtuozzo.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
 block/block-copy.c | 73 +++++++++++++++++++++++++++++++++++++---------
 block/trace-events |  1 +
 2 files changed, 61 insertions(+), 13 deletions(-)
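The clamping that block_copy_block_status() applies to the byte count returned
by bdrv_block_status_above() can be seen in a standalone toy version below;
the cluster size, alignment macros and sample numbers are illustrative, only
the three-way rule mirrors the hunk that follows:

#include <stdint.h>
#include <stdio.h>

#define CLUSTER          65536
#define ALIGN_DOWN(n, a) ((n) / (a) * (a))
#define ALIGN_UP(n, a)   ALIGN_DOWN((n) + (a) - 1, (a))

static int64_t clamp_status_bytes(int64_t num, int64_t offset, int64_t image_len)
{
    if (num < CLUSTER) {
        return CLUSTER;                 /* too small: just copy one cluster */
    }
    if (offset + num == image_len) {
        return ALIGN_UP(num, CLUSTER);  /* last chunk may cover a short tail */
    }
    return ALIGN_DOWN(num, CLUSTER);    /* keep requests cluster-aligned */
}

int main(void)
{
    printf("%lld\n", (long long)clamp_status_bytes(1000, 0, 10 * CLUSTER));     /* 65536 */
    printf("%lld\n", (long long)clamp_status_bytes(200000, 0, 10 * CLUSTER));   /* 196608 */
    printf("%lld\n", (long long)clamp_status_bytes(70000, 585360, 655360));     /* 131072 */
    return 0;
}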
21
22
diff --git a/block/block-copy.c b/block/block-copy.c
23
index XXXXXXX..XXXXXXX 100644
24
--- a/block/block-copy.c
25
+++ b/block/block-copy.c
26
@@ -XXX,XX +XXX,XX @@ void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
27
*/
28
static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
29
int64_t start, int64_t end,
30
- bool *error_is_read)
31
+ bool zeroes, bool *error_is_read)
32
{
33
int ret;
34
int nbytes = MIN(end, s->len) - start;
35
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
36
assert(QEMU_IS_ALIGNED(end, s->cluster_size));
37
assert(end < s->len || end == QEMU_ALIGN_UP(s->len, s->cluster_size));
38
39
+ if (zeroes) {
40
+ ret = bdrv_co_pwrite_zeroes(s->target, start, nbytes, s->write_flags &
41
+ ~BDRV_REQ_WRITE_COMPRESSED);
42
+ if (ret < 0) {
43
+ trace_block_copy_write_zeroes_fail(s, start, ret);
44
+ if (error_is_read) {
45
+ *error_is_read = false;
46
+ }
47
+ }
48
+ return ret;
49
+ }
50
+
51
if (s->use_copy_range) {
52
ret = bdrv_co_copy_range(s->source, start, s->target, start, nbytes,
53
0, s->write_flags);
54
@@ -XXX,XX +XXX,XX @@ out:
55
return ret;
56
}
57
58
+static int block_copy_block_status(BlockCopyState *s, int64_t offset,
59
+ int64_t bytes, int64_t *pnum)
60
+{
61
+ int64_t num;
62
+ BlockDriverState *base;
63
+ int ret;
64
+
65
+ if (s->skip_unallocated && s->source->bs->backing) {
66
+ base = s->source->bs->backing->bs;
67
+ } else {
68
+ base = NULL;
69
+ }
70
+
71
+ ret = bdrv_block_status_above(s->source->bs, base, offset, bytes, &num,
72
+ NULL, NULL);
73
+ if (ret < 0 || num < s->cluster_size) {
74
+ /*
75
+ * On error or if failed to obtain large enough chunk just fallback to
76
+ * copy one cluster.
77
+ */
78
+ num = s->cluster_size;
79
+ ret = BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_DATA;
80
+ } else if (offset + num == s->len) {
81
+ num = QEMU_ALIGN_UP(num, s->cluster_size);
82
+ } else {
83
+ num = QEMU_ALIGN_DOWN(num, s->cluster_size);
84
+ }
85
+
86
+ *pnum = num;
87
+ return ret;
88
+}
89
+
90
/*
91
* Check if the cluster starting at offset is allocated or not.
92
* return via pnum the number of contiguous clusters sharing this allocation.
93
@@ -XXX,XX +XXX,XX @@ int coroutine_fn block_copy(BlockCopyState *s,
94
{
95
int ret = 0;
96
int64_t end = bytes + start; /* bytes */
97
- int64_t status_bytes;
98
BlockCopyInFlightReq req;
99
100
/*
101
@@ -XXX,XX +XXX,XX @@ int coroutine_fn block_copy(BlockCopyState *s,
102
block_copy_inflight_req_begin(s, &req, start, end);
103
104
while (start < end) {
105
- int64_t next_zero, chunk_end;
106
+ int64_t next_zero, chunk_end, status_bytes;
107
108
if (!bdrv_dirty_bitmap_get(s->copy_bitmap, start)) {
109
trace_block_copy_skip(s, start);
110
@@ -XXX,XX +XXX,XX @@ int coroutine_fn block_copy(BlockCopyState *s,
111
chunk_end = next_zero;
112
}
113
114
- if (s->skip_unallocated) {
115
- ret = block_copy_reset_unallocated(s, start, &status_bytes);
116
- if (ret == 0) {
117
- trace_block_copy_skip_range(s, start, status_bytes);
118
- start += status_bytes;
119
- continue;
120
- }
121
- /* Clamp to known allocated region */
122
- chunk_end = MIN(chunk_end, start + status_bytes);
123
+ ret = block_copy_block_status(s, start, chunk_end - start,
124
+ &status_bytes);
125
+ if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) {
126
+ bdrv_reset_dirty_bitmap(s->copy_bitmap, start, status_bytes);
127
+ progress_set_remaining(s->progress,
128
+ bdrv_get_dirty_count(s->copy_bitmap) +
129
+ s->in_flight_bytes);
130
+ trace_block_copy_skip_range(s, start, status_bytes);
131
+ start += status_bytes;
132
+ continue;
133
}
134
135
+ chunk_end = MIN(chunk_end, start + status_bytes);
136
+
137
trace_block_copy_process(s, start);
138
139
bdrv_reset_dirty_bitmap(s->copy_bitmap, start, chunk_end - start);
140
s->in_flight_bytes += chunk_end - start;
141
142
co_get_from_shres(s->mem, chunk_end - start);
143
- ret = block_copy_do_copy(s, start, chunk_end, error_is_read);
144
+ ret = block_copy_do_copy(s, start, chunk_end, ret & BDRV_BLOCK_ZERO,
145
+ error_is_read);
146
co_put_to_shres(s->mem, chunk_end - start);
147
s->in_flight_bytes -= chunk_end - start;
148
if (ret < 0) {
149
diff --git a/block/trace-events b/block/trace-events
150
index XXXXXXX..XXXXXXX 100644
151
--- a/block/trace-events
152
+++ b/block/trace-events
153
@@ -XXX,XX +XXX,XX @@ block_copy_process(void *bcs, int64_t start) "bcs %p start %"PRId64
154
block_copy_copy_range_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
155
block_copy_read_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
156
block_copy_write_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
157
+block_copy_write_zeroes_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
158
159
# ../blockdev.c
160
qmp_block_job_cancel(void *job) "job %p"
161
--
162
2.24.1
163
164
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
1
3
Split out find_conflicting_inflight_req() so that it can be used separately.
4
5
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
6
Reviewed-by: Andrey Shinkevich <andrey.shinkevich@virtuozzo.com>
7
Reviewed-by: Max Reitz <mreitz@redhat.com>
8
Message-Id: <20200311103004.7649-6-vsementsov@virtuozzo.com>
9
Signed-off-by: Max Reitz <mreitz@redhat.com>
10
---
11
block/block-copy.c | 31 +++++++++++++++++++------------
12
1 file changed, 19 insertions(+), 12 deletions(-)
13
14
diff --git a/block/block-copy.c b/block/block-copy.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/block/block-copy.c
17
+++ b/block/block-copy.c
18
@@ -XXX,XX +XXX,XX @@
19
#define BLOCK_COPY_MAX_BUFFER (1 * MiB)
20
#define BLOCK_COPY_MAX_MEM (128 * MiB)
21
22
+static BlockCopyInFlightReq *find_conflicting_inflight_req(BlockCopyState *s,
23
+ int64_t start,
24
+ int64_t end)
25
+{
26
+ BlockCopyInFlightReq *req;
27
+
28
+ QLIST_FOREACH(req, &s->inflight_reqs, list) {
29
+ if (end > req->start_byte && start < req->end_byte) {
30
+ return req;
31
+ }
32
+ }
33
+
34
+ return NULL;
35
+}
36
+
37
static void coroutine_fn block_copy_wait_inflight_reqs(BlockCopyState *s,
38
int64_t start,
39
int64_t end)
40
{
41
BlockCopyInFlightReq *req;
42
- bool waited;
43
-
44
- do {
45
- waited = false;
46
- QLIST_FOREACH(req, &s->inflight_reqs, list) {
47
- if (end > req->start_byte && start < req->end_byte) {
48
- qemu_co_queue_wait(&req->wait_queue, NULL);
49
- waited = true;
50
- break;
51
- }
52
- }
53
- } while (waited);
54
+
55
+ while ((req = find_conflicting_inflight_req(s, start, end))) {
56
+ qemu_co_queue_wait(&req->wait_queue, NULL);
57
+ }
58
}
59
60
static void block_copy_inflight_req_begin(BlockCopyState *s,
61
--
62
2.24.1
63
64
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
1
3
We have a lot of "chunk_end - start" invocations; let's switch to a
4
bytes/cur_bytes scheme instead.
5
6
While at it, improve the checks on block_copy_do_copy() parameters so
7
that calculating nbytes cannot overflow, and use int64_t for bytes in
8
block_copy() for consistency.
9
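
As an aside, this is the shape of the strengthened parameter checks,
written out as a self-contained sketch with plain asserts and manual
alignment arithmetic instead of the QEMU_IS_ALIGNED/QEMU_ALIGN_UP
macros; the sketch_ name is illustrative only, the real asserts are in
block_copy_do_copy() in the hunks below.

#include <assert.h>
#include <limits.h>
#include <stdint.h>

void sketch_check_copy_params(int64_t offset, int64_t bytes,
                              int64_t cluster_size, int64_t len)
{
    int64_t aligned_len = ((len + cluster_size - 1) / cluster_size) * cluster_size;
    int64_t nbytes;

    /* offset + bytes must not overflow int64_t ... */
    assert(offset >= 0 && bytes > 0 && INT64_MAX - offset >= bytes);
    /* ... both must be cluster-aligned ... */
    assert(offset % cluster_size == 0 && bytes % cluster_size == 0);
    /* ... and the request may exceed len only to cover the final partial
     * cluster of an image whose length is not cluster-aligned. */
    assert(offset < len);
    assert(offset + bytes <= len || offset + bytes == aligned_len);

    /* The byte count actually passed to the I/O calls must fit in an int. */
    nbytes = (offset + bytes < len ? offset + bytes : len) - offset;
    assert(nbytes < INT_MAX);
}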
10
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
11
Reviewed-by: Andrey Shinkevich <andrey.shinkevich@virtuozzo.com>
12
Reviewed-by: Max Reitz <mreitz@redhat.com>
13
Message-Id: <20200311103004.7649-7-vsementsov@virtuozzo.com>
14
Signed-off-by: Max Reitz <mreitz@redhat.com>
15
---
16
block/block-copy.c | 78 ++++++++++++++++++++------------------
17
include/block/block-copy.h | 6 +--
18
2 files changed, 44 insertions(+), 40 deletions(-)
19
20
diff --git a/block/block-copy.c b/block/block-copy.c
21
index XXXXXXX..XXXXXXX 100644
22
--- a/block/block-copy.c
23
+++ b/block/block-copy.c
24
@@ -XXX,XX +XXX,XX @@
25
26
static BlockCopyInFlightReq *find_conflicting_inflight_req(BlockCopyState *s,
27
int64_t start,
28
- int64_t end)
29
+ int64_t bytes)
30
{
31
BlockCopyInFlightReq *req;
32
33
QLIST_FOREACH(req, &s->inflight_reqs, list) {
34
- if (end > req->start_byte && start < req->end_byte) {
35
+ if (start + bytes > req->start && start < req->start + req->bytes) {
36
return req;
37
}
38
}
39
@@ -XXX,XX +XXX,XX @@ static BlockCopyInFlightReq *find_conflicting_inflight_req(BlockCopyState *s,
40
41
static void coroutine_fn block_copy_wait_inflight_reqs(BlockCopyState *s,
42
int64_t start,
43
- int64_t end)
44
+ int64_t bytes)
45
{
46
BlockCopyInFlightReq *req;
47
48
- while ((req = find_conflicting_inflight_req(s, start, end))) {
49
+ while ((req = find_conflicting_inflight_req(s, start, bytes))) {
50
qemu_co_queue_wait(&req->wait_queue, NULL);
51
}
52
}
53
54
static void block_copy_inflight_req_begin(BlockCopyState *s,
55
BlockCopyInFlightReq *req,
56
- int64_t start, int64_t end)
57
+ int64_t start, int64_t bytes)
58
{
59
- req->start_byte = start;
60
- req->end_byte = end;
61
+ req->start = start;
62
+ req->bytes = bytes;
63
qemu_co_queue_init(&req->wait_queue);
64
QLIST_INSERT_HEAD(&s->inflight_reqs, req, list);
65
}
66
@@ -XXX,XX +XXX,XX @@ void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
67
/*
68
* block_copy_do_copy
69
*
70
- * Do copy of cluser-aligned chunk. @end is allowed to exceed s->len only to
71
- * cover last cluster when s->len is not aligned to clusters.
72
+ * Do copy of cluster-aligned chunk. Requested region is allowed to exceed
73
+ * s->len only to cover last cluster when s->len is not aligned to clusters.
74
*
75
* No sync here: nor bitmap neighter intersecting requests handling, only copy.
76
*
77
* Returns 0 on success.
78
*/
79
static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
80
- int64_t start, int64_t end,
81
+ int64_t start, int64_t bytes,
82
bool zeroes, bool *error_is_read)
83
{
84
int ret;
85
- int nbytes = MIN(end, s->len) - start;
86
+ int64_t nbytes = MIN(start + bytes, s->len) - start;
87
void *bounce_buffer = NULL;
88
89
+ assert(start >= 0 && bytes > 0 && INT64_MAX - start >= bytes);
90
assert(QEMU_IS_ALIGNED(start, s->cluster_size));
91
- assert(QEMU_IS_ALIGNED(end, s->cluster_size));
92
- assert(end < s->len || end == QEMU_ALIGN_UP(s->len, s->cluster_size));
93
+ assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
94
+ assert(start < s->len);
95
+ assert(start + bytes <= s->len ||
96
+ start + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
97
+ assert(nbytes < INT_MAX);
98
99
if (zeroes) {
100
ret = bdrv_co_pwrite_zeroes(s->target, start, nbytes, s->write_flags &
101
@@ -XXX,XX +XXX,XX @@ int64_t block_copy_reset_unallocated(BlockCopyState *s,
102
}
103
104
int coroutine_fn block_copy(BlockCopyState *s,
105
- int64_t start, uint64_t bytes,
106
+ int64_t start, int64_t bytes,
107
bool *error_is_read)
108
{
109
int ret = 0;
110
- int64_t end = bytes + start; /* bytes */
111
BlockCopyInFlightReq req;
112
113
/*
114
@@ -XXX,XX +XXX,XX @@ int coroutine_fn block_copy(BlockCopyState *s,
115
bdrv_get_aio_context(s->target->bs));
116
117
assert(QEMU_IS_ALIGNED(start, s->cluster_size));
118
- assert(QEMU_IS_ALIGNED(end, s->cluster_size));
119
+ assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
120
121
block_copy_wait_inflight_reqs(s, start, bytes);
122
- block_copy_inflight_req_begin(s, &req, start, end);
123
+ block_copy_inflight_req_begin(s, &req, start, bytes);
124
125
- while (start < end) {
126
- int64_t next_zero, chunk_end, status_bytes;
127
+ while (bytes) {
128
+ int64_t next_zero, cur_bytes, status_bytes;
129
130
if (!bdrv_dirty_bitmap_get(s->copy_bitmap, start)) {
131
trace_block_copy_skip(s, start);
132
start += s->cluster_size;
133
+ bytes -= s->cluster_size;
134
continue; /* already copied */
135
}
136
137
- chunk_end = MIN(end, start + s->copy_size);
138
+ cur_bytes = MIN(bytes, s->copy_size);
139
140
next_zero = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, start,
141
- chunk_end - start);
142
+ cur_bytes);
143
if (next_zero >= 0) {
144
assert(next_zero > start); /* start is dirty */
145
- assert(next_zero < chunk_end); /* no need to do MIN() */
146
- chunk_end = next_zero;
147
+ assert(next_zero < start + cur_bytes); /* no need to do MIN() */
148
+ cur_bytes = next_zero - start;
149
}
150
151
- ret = block_copy_block_status(s, start, chunk_end - start,
152
- &status_bytes);
153
+ ret = block_copy_block_status(s, start, cur_bytes, &status_bytes);
154
if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) {
155
bdrv_reset_dirty_bitmap(s->copy_bitmap, start, status_bytes);
156
progress_set_remaining(s->progress,
157
@@ -XXX,XX +XXX,XX @@ int coroutine_fn block_copy(BlockCopyState *s,
158
s->in_flight_bytes);
159
trace_block_copy_skip_range(s, start, status_bytes);
160
start += status_bytes;
161
+ bytes -= status_bytes;
162
continue;
163
}
164
165
- chunk_end = MIN(chunk_end, start + status_bytes);
166
+ cur_bytes = MIN(cur_bytes, status_bytes);
167
168
trace_block_copy_process(s, start);
169
170
- bdrv_reset_dirty_bitmap(s->copy_bitmap, start, chunk_end - start);
171
- s->in_flight_bytes += chunk_end - start;
172
+ bdrv_reset_dirty_bitmap(s->copy_bitmap, start, cur_bytes);
173
+ s->in_flight_bytes += cur_bytes;
174
175
- co_get_from_shres(s->mem, chunk_end - start);
176
- ret = block_copy_do_copy(s, start, chunk_end, ret & BDRV_BLOCK_ZERO,
177
+ co_get_from_shres(s->mem, cur_bytes);
178
+ ret = block_copy_do_copy(s, start, cur_bytes, ret & BDRV_BLOCK_ZERO,
179
error_is_read);
180
- co_put_to_shres(s->mem, chunk_end - start);
181
- s->in_flight_bytes -= chunk_end - start;
182
+ co_put_to_shres(s->mem, cur_bytes);
183
+ s->in_flight_bytes -= cur_bytes;
184
if (ret < 0) {
185
- bdrv_set_dirty_bitmap(s->copy_bitmap, start, chunk_end - start);
186
+ bdrv_set_dirty_bitmap(s->copy_bitmap, start, cur_bytes);
187
break;
188
}
189
190
- progress_work_done(s->progress, chunk_end - start);
191
- s->progress_bytes_callback(chunk_end - start, s->progress_opaque);
192
- start = chunk_end;
193
- ret = 0;
194
+ progress_work_done(s->progress, cur_bytes);
195
+ s->progress_bytes_callback(cur_bytes, s->progress_opaque);
196
+ start += cur_bytes;
197
+ bytes -= cur_bytes;
198
}
199
200
block_copy_inflight_req_end(&req);
201
diff --git a/include/block/block-copy.h b/include/block/block-copy.h
202
index XXXXXXX..XXXXXXX 100644
203
--- a/include/block/block-copy.h
204
+++ b/include/block/block-copy.h
205
@@ -XXX,XX +XXX,XX @@
206
#include "qemu/co-shared-resource.h"
207
208
typedef struct BlockCopyInFlightReq {
209
- int64_t start_byte;
210
- int64_t end_byte;
211
+ int64_t start;
212
+ int64_t bytes;
213
QLIST_ENTRY(BlockCopyInFlightReq) list;
214
CoQueue wait_queue; /* coroutines blocked on this request */
215
} BlockCopyInFlightReq;
216
@@ -XXX,XX +XXX,XX @@ void block_copy_state_free(BlockCopyState *s);
217
int64_t block_copy_reset_unallocated(BlockCopyState *s,
218
int64_t offset, int64_t *count);
219
220
-int coroutine_fn block_copy(BlockCopyState *s, int64_t start, uint64_t bytes,
221
+int coroutine_fn block_copy(BlockCopyState *s, int64_t start, int64_t bytes,
222
bool *error_is_read);
223
224
#endif /* BLOCK_COPY_H */
225
--
226
2.24.1
227
228
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
1
3
The offset/bytes pair is the more usual naming in the block layer; let's use it.
4
5
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
6
Reviewed-by: Andrey Shinkevich <andrey.shinkevich@virtuozzo.com>
7
Reviewed-by: Max Reitz <mreitz@redhat.com>
8
Message-Id: <20200311103004.7649-8-vsementsov@virtuozzo.com>
9
Signed-off-by: Max Reitz <mreitz@redhat.com>
10
---
11
block/block-copy.c | 82 +++++++++++++++++++-------------------
12
include/block/block-copy.h | 4 +-
13
2 files changed, 43 insertions(+), 43 deletions(-)
14
15
diff --git a/block/block-copy.c b/block/block-copy.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/block/block-copy.c
18
+++ b/block/block-copy.c
19
@@ -XXX,XX +XXX,XX @@
20
#define BLOCK_COPY_MAX_MEM (128 * MiB)
21
22
static BlockCopyInFlightReq *find_conflicting_inflight_req(BlockCopyState *s,
23
- int64_t start,
24
+ int64_t offset,
25
int64_t bytes)
26
{
27
BlockCopyInFlightReq *req;
28
29
QLIST_FOREACH(req, &s->inflight_reqs, list) {
30
- if (start + bytes > req->start && start < req->start + req->bytes) {
31
+ if (offset + bytes > req->offset && offset < req->offset + req->bytes) {
32
return req;
33
}
34
}
35
@@ -XXX,XX +XXX,XX @@ static BlockCopyInFlightReq *find_conflicting_inflight_req(BlockCopyState *s,
36
}
37
38
static void coroutine_fn block_copy_wait_inflight_reqs(BlockCopyState *s,
39
- int64_t start,
40
+ int64_t offset,
41
int64_t bytes)
42
{
43
BlockCopyInFlightReq *req;
44
45
- while ((req = find_conflicting_inflight_req(s, start, bytes))) {
46
+ while ((req = find_conflicting_inflight_req(s, offset, bytes))) {
47
qemu_co_queue_wait(&req->wait_queue, NULL);
48
}
49
}
50
51
static void block_copy_inflight_req_begin(BlockCopyState *s,
52
BlockCopyInFlightReq *req,
53
- int64_t start, int64_t bytes)
54
+ int64_t offset, int64_t bytes)
55
{
56
- req->start = start;
57
+ req->offset = offset;
58
req->bytes = bytes;
59
qemu_co_queue_init(&req->wait_queue);
60
QLIST_INSERT_HEAD(&s->inflight_reqs, req, list);
61
@@ -XXX,XX +XXX,XX @@ void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
62
* Returns 0 on success.
63
*/
64
static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
65
- int64_t start, int64_t bytes,
66
+ int64_t offset, int64_t bytes,
67
bool zeroes, bool *error_is_read)
68
{
69
int ret;
70
- int64_t nbytes = MIN(start + bytes, s->len) - start;
71
+ int64_t nbytes = MIN(offset + bytes, s->len) - offset;
72
void *bounce_buffer = NULL;
73
74
- assert(start >= 0 && bytes > 0 && INT64_MAX - start >= bytes);
75
- assert(QEMU_IS_ALIGNED(start, s->cluster_size));
76
+ assert(offset >= 0 && bytes > 0 && INT64_MAX - offset >= bytes);
77
+ assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
78
assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
79
- assert(start < s->len);
80
- assert(start + bytes <= s->len ||
81
- start + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
82
+ assert(offset < s->len);
83
+ assert(offset + bytes <= s->len ||
84
+ offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
85
assert(nbytes < INT_MAX);
86
87
if (zeroes) {
88
- ret = bdrv_co_pwrite_zeroes(s->target, start, nbytes, s->write_flags &
89
+ ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags &
90
~BDRV_REQ_WRITE_COMPRESSED);
91
if (ret < 0) {
92
- trace_block_copy_write_zeroes_fail(s, start, ret);
93
+ trace_block_copy_write_zeroes_fail(s, offset, ret);
94
if (error_is_read) {
95
*error_is_read = false;
96
}
97
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
98
}
99
100
if (s->use_copy_range) {
101
- ret = bdrv_co_copy_range(s->source, start, s->target, start, nbytes,
102
+ ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
103
0, s->write_flags);
104
if (ret < 0) {
105
- trace_block_copy_copy_range_fail(s, start, ret);
106
+ trace_block_copy_copy_range_fail(s, offset, ret);
107
s->use_copy_range = false;
108
s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
109
/* Fallback to read+write with allocated buffer */
110
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
111
112
bounce_buffer = qemu_blockalign(s->source->bs, nbytes);
113
114
- ret = bdrv_co_pread(s->source, start, nbytes, bounce_buffer, 0);
115
+ ret = bdrv_co_pread(s->source, offset, nbytes, bounce_buffer, 0);
116
if (ret < 0) {
117
- trace_block_copy_read_fail(s, start, ret);
118
+ trace_block_copy_read_fail(s, offset, ret);
119
if (error_is_read) {
120
*error_is_read = true;
121
}
122
goto out;
123
}
124
125
- ret = bdrv_co_pwrite(s->target, start, nbytes, bounce_buffer,
126
+ ret = bdrv_co_pwrite(s->target, offset, nbytes, bounce_buffer,
127
s->write_flags);
128
if (ret < 0) {
129
- trace_block_copy_write_fail(s, start, ret);
130
+ trace_block_copy_write_fail(s, offset, ret);
131
if (error_is_read) {
132
*error_is_read = false;
133
}
134
@@ -XXX,XX +XXX,XX @@ int64_t block_copy_reset_unallocated(BlockCopyState *s,
135
}
136
137
int coroutine_fn block_copy(BlockCopyState *s,
138
- int64_t start, int64_t bytes,
139
+ int64_t offset, int64_t bytes,
140
bool *error_is_read)
141
{
142
int ret = 0;
143
@@ -XXX,XX +XXX,XX @@ int coroutine_fn block_copy(BlockCopyState *s,
144
assert(bdrv_get_aio_context(s->source->bs) ==
145
bdrv_get_aio_context(s->target->bs));
146
147
- assert(QEMU_IS_ALIGNED(start, s->cluster_size));
148
+ assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
149
assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
150
151
- block_copy_wait_inflight_reqs(s, start, bytes);
152
- block_copy_inflight_req_begin(s, &req, start, bytes);
153
+ block_copy_wait_inflight_reqs(s, offset, bytes);
154
+ block_copy_inflight_req_begin(s, &req, offset, bytes);
155
156
while (bytes) {
157
int64_t next_zero, cur_bytes, status_bytes;
158
159
- if (!bdrv_dirty_bitmap_get(s->copy_bitmap, start)) {
160
- trace_block_copy_skip(s, start);
161
- start += s->cluster_size;
162
+ if (!bdrv_dirty_bitmap_get(s->copy_bitmap, offset)) {
163
+ trace_block_copy_skip(s, offset);
164
+ offset += s->cluster_size;
165
bytes -= s->cluster_size;
166
continue; /* already copied */
167
}
168
169
cur_bytes = MIN(bytes, s->copy_size);
170
171
- next_zero = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, start,
172
+ next_zero = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, offset,
173
cur_bytes);
174
if (next_zero >= 0) {
175
- assert(next_zero > start); /* start is dirty */
176
- assert(next_zero < start + cur_bytes); /* no need to do MIN() */
177
- cur_bytes = next_zero - start;
178
+ assert(next_zero > offset); /* offset is dirty */
179
+ assert(next_zero < offset + cur_bytes); /* no need to do MIN() */
180
+ cur_bytes = next_zero - offset;
181
}
182
183
- ret = block_copy_block_status(s, start, cur_bytes, &status_bytes);
184
+ ret = block_copy_block_status(s, offset, cur_bytes, &status_bytes);
185
if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) {
186
- bdrv_reset_dirty_bitmap(s->copy_bitmap, start, status_bytes);
187
+ bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, status_bytes);
188
progress_set_remaining(s->progress,
189
bdrv_get_dirty_count(s->copy_bitmap) +
190
s->in_flight_bytes);
191
- trace_block_copy_skip_range(s, start, status_bytes);
192
- start += status_bytes;
193
+ trace_block_copy_skip_range(s, offset, status_bytes);
194
+ offset += status_bytes;
195
bytes -= status_bytes;
196
continue;
197
}
198
199
cur_bytes = MIN(cur_bytes, status_bytes);
200
201
- trace_block_copy_process(s, start);
202
+ trace_block_copy_process(s, offset);
203
204
- bdrv_reset_dirty_bitmap(s->copy_bitmap, start, cur_bytes);
205
+ bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, cur_bytes);
206
s->in_flight_bytes += cur_bytes;
207
208
co_get_from_shres(s->mem, cur_bytes);
209
- ret = block_copy_do_copy(s, start, cur_bytes, ret & BDRV_BLOCK_ZERO,
210
+ ret = block_copy_do_copy(s, offset, cur_bytes, ret & BDRV_BLOCK_ZERO,
211
error_is_read);
212
co_put_to_shres(s->mem, cur_bytes);
213
s->in_flight_bytes -= cur_bytes;
214
if (ret < 0) {
215
- bdrv_set_dirty_bitmap(s->copy_bitmap, start, cur_bytes);
216
+ bdrv_set_dirty_bitmap(s->copy_bitmap, offset, cur_bytes);
217
break;
218
}
219
220
progress_work_done(s->progress, cur_bytes);
221
s->progress_bytes_callback(cur_bytes, s->progress_opaque);
222
- start += cur_bytes;
223
+ offset += cur_bytes;
224
bytes -= cur_bytes;
225
}
226
227
diff --git a/include/block/block-copy.h b/include/block/block-copy.h
228
index XXXXXXX..XXXXXXX 100644
229
--- a/include/block/block-copy.h
230
+++ b/include/block/block-copy.h
231
@@ -XXX,XX +XXX,XX @@
232
#include "qemu/co-shared-resource.h"
233
234
typedef struct BlockCopyInFlightReq {
235
- int64_t start;
236
+ int64_t offset;
237
int64_t bytes;
238
QLIST_ENTRY(BlockCopyInFlightReq) list;
239
CoQueue wait_queue; /* coroutines blocked on this request */
240
@@ -XXX,XX +XXX,XX @@ void block_copy_state_free(BlockCopyState *s);
241
int64_t block_copy_reset_unallocated(BlockCopyState *s,
242
int64_t offset, int64_t *count);
243
244
-int coroutine_fn block_copy(BlockCopyState *s, int64_t start, int64_t bytes,
245
+int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes,
246
bool *error_is_read);
247
248
#endif /* BLOCK_COPY_H */
249
--
250
2.24.1
251
252
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
1
3
Currently, a block_copy operation locks the whole requested region. But
4
there is no reason to lock clusters that are already copied; doing so
5
only disturbs other parallel block_copy requests.
6
7
Let's instead do the following:
8
9
Lock only the sub-region we are going to operate on. Then, after
10
copying all dirty sub-regions, wait for intersecting block-copy
11
requests; if they failed, retry these newly dirtied
12
clusters.
13
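
Condensed into a self-contained sketch, the resulting retry loop in
block_copy() looks roughly as follows. The helper bodies here are
trivial stubs standing in for block_copy_dirty_clusters() and
block_copy_wait_one(); they exist only so the sketch compiles, and the
real implementations are in the hunks below.

#include <stdbool.h>

/* Stand-in for block_copy_dirty_clusters():
 * 1 = made progress, 0 = nothing dirty found, <0 = error. */
static int sketch_copy_dirty_clusters(long offset, long bytes)
{
    (void)offset; (void)bytes;
    return 0;
}

/* Stand-in for block_copy_wait_one():
 * true = we waited for an intersecting in-flight request. */
static bool sketch_wait_one_intersecting_req(long offset, long bytes)
{
    (void)offset; (void)bytes;
    return false;
}

int sketch_block_copy(long offset, long bytes)
{
    int ret;

    do {
        ret = sketch_copy_dirty_clusters(offset, bytes);
        if (ret == 0) {
            /* Nothing dirty: wait for one intersecting in-flight request,
             * if any; it may fail and re-dirty part of our region. */
            ret = sketch_wait_one_intersecting_req(offset, bytes);
        }
        /*
         * Retry while ret > 0: either we made progress (we yielded, so
         * failed parallel requests may have set new dirty bits), or we
         * waited for an intersecting request that may have failed.
         */
    } while (ret > 0);

    return ret;   /* 0 on success, negative errno if our own I/O failed */
}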
14
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
15
Reviewed-by: Andrey Shinkevich <andrey.shinkevich@virtuozzo.com>
16
Message-Id: <20200311103004.7649-9-vsementsov@virtuozzo.com>
17
Signed-off-by: Max Reitz <mreitz@redhat.com>
18
---
19
block/block-copy.c | 129 ++++++++++++++++++++++++++++++++++++---------
20
1 file changed, 105 insertions(+), 24 deletions(-)
21
22
diff --git a/block/block-copy.c b/block/block-copy.c
23
index XXXXXXX..XXXXXXX 100644
24
--- a/block/block-copy.c
25
+++ b/block/block-copy.c
26
@@ -XXX,XX +XXX,XX @@ static BlockCopyInFlightReq *find_conflicting_inflight_req(BlockCopyState *s,
27
return NULL;
28
}
29
30
-static void coroutine_fn block_copy_wait_inflight_reqs(BlockCopyState *s,
31
- int64_t offset,
32
- int64_t bytes)
33
+/*
34
+ * If there are no intersecting requests return false. Otherwise, wait for the
35
+ * first found intersecting request to finish and return true.
36
+ */
37
+static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
38
+ int64_t bytes)
39
{
40
- BlockCopyInFlightReq *req;
41
+ BlockCopyInFlightReq *req = find_conflicting_inflight_req(s, offset, bytes);
42
43
- while ((req = find_conflicting_inflight_req(s, offset, bytes))) {
44
- qemu_co_queue_wait(&req->wait_queue, NULL);
45
+ if (!req) {
46
+ return false;
47
}
48
+
49
+ qemu_co_queue_wait(&req->wait_queue, NULL);
50
+
51
+ return true;
52
}
53
54
+/* Called only on full-dirty region */
55
static void block_copy_inflight_req_begin(BlockCopyState *s,
56
BlockCopyInFlightReq *req,
57
int64_t offset, int64_t bytes)
58
{
59
+ assert(!find_conflicting_inflight_req(s, offset, bytes));
60
+
61
+ bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
62
+ s->in_flight_bytes += bytes;
63
+
64
req->offset = offset;
65
req->bytes = bytes;
66
qemu_co_queue_init(&req->wait_queue);
67
QLIST_INSERT_HEAD(&s->inflight_reqs, req, list);
68
}
69
70
-static void coroutine_fn block_copy_inflight_req_end(BlockCopyInFlightReq *req)
71
+/*
72
+ * block_copy_inflight_req_shrink
73
+ *
74
+ * Drop the tail of the request to be handled later. Set dirty bits back and
75
+ * wake up all requests waiting for us (some of them may no longer intersect
77
+ * with the shrunk request)
77
+ */
78
+static void coroutine_fn block_copy_inflight_req_shrink(BlockCopyState *s,
79
+ BlockCopyInFlightReq *req, int64_t new_bytes)
80
{
81
+ if (new_bytes == req->bytes) {
82
+ return;
83
+ }
84
+
85
+ assert(new_bytes > 0 && new_bytes < req->bytes);
86
+
87
+ s->in_flight_bytes -= req->bytes - new_bytes;
88
+ bdrv_set_dirty_bitmap(s->copy_bitmap,
89
+ req->offset + new_bytes, req->bytes - new_bytes);
90
+
91
+ req->bytes = new_bytes;
92
+ qemu_co_queue_restart_all(&req->wait_queue);
93
+}
94
+
95
+static void coroutine_fn block_copy_inflight_req_end(BlockCopyState *s,
96
+ BlockCopyInFlightReq *req,
97
+ int ret)
98
+{
99
+ s->in_flight_bytes -= req->bytes;
100
+ if (ret < 0) {
101
+ bdrv_set_dirty_bitmap(s->copy_bitmap, req->offset, req->bytes);
102
+ }
103
QLIST_REMOVE(req, list);
104
qemu_co_queue_restart_all(&req->wait_queue);
105
}
106
@@ -XXX,XX +XXX,XX @@ int64_t block_copy_reset_unallocated(BlockCopyState *s,
107
return ret;
108
}
109
110
-int coroutine_fn block_copy(BlockCopyState *s,
111
- int64_t offset, int64_t bytes,
112
- bool *error_is_read)
113
+/*
114
+ * block_copy_dirty_clusters
115
+ *
116
+ * Copy dirty clusters in @offset/@bytes range.
117
+ * Returns 1 if dirty clusters found and successfully copied, 0 if no dirty
118
+ * clusters found and -errno on failure.
119
+ */
120
+static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
121
+ int64_t offset, int64_t bytes,
122
+ bool *error_is_read)
123
{
124
int ret = 0;
125
- BlockCopyInFlightReq req;
126
+ bool found_dirty = false;
127
128
/*
129
* block_copy() user is responsible for keeping source and target in same
130
@@ -XXX,XX +XXX,XX @@ int coroutine_fn block_copy(BlockCopyState *s,
131
assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
132
assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
133
134
- block_copy_wait_inflight_reqs(s, offset, bytes);
135
- block_copy_inflight_req_begin(s, &req, offset, bytes);
136
-
137
while (bytes) {
138
+ BlockCopyInFlightReq req;
139
int64_t next_zero, cur_bytes, status_bytes;
140
141
if (!bdrv_dirty_bitmap_get(s->copy_bitmap, offset)) {
142
@@ -XXX,XX +XXX,XX @@ int coroutine_fn block_copy(BlockCopyState *s,
143
continue; /* already copied */
144
}
145
146
+ found_dirty = true;
147
+
148
cur_bytes = MIN(bytes, s->copy_size);
149
150
next_zero = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, offset,
151
@@ -XXX,XX +XXX,XX @@ int coroutine_fn block_copy(BlockCopyState *s,
152
assert(next_zero < offset + cur_bytes); /* no need to do MIN() */
153
cur_bytes = next_zero - offset;
154
}
155
+ block_copy_inflight_req_begin(s, &req, offset, cur_bytes);
156
157
ret = block_copy_block_status(s, offset, cur_bytes, &status_bytes);
158
+ assert(ret >= 0); /* never fail */
159
+ cur_bytes = MIN(cur_bytes, status_bytes);
160
+ block_copy_inflight_req_shrink(s, &req, cur_bytes);
161
if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) {
162
- bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, status_bytes);
163
+ block_copy_inflight_req_end(s, &req, 0);
164
progress_set_remaining(s->progress,
165
bdrv_get_dirty_count(s->copy_bitmap) +
166
s->in_flight_bytes);
167
@@ -XXX,XX +XXX,XX @@ int coroutine_fn block_copy(BlockCopyState *s,
168
continue;
169
}
170
171
- cur_bytes = MIN(cur_bytes, status_bytes);
172
-
173
trace_block_copy_process(s, offset);
174
175
- bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, cur_bytes);
176
- s->in_flight_bytes += cur_bytes;
177
-
178
co_get_from_shres(s->mem, cur_bytes);
179
ret = block_copy_do_copy(s, offset, cur_bytes, ret & BDRV_BLOCK_ZERO,
180
error_is_read);
181
co_put_to_shres(s->mem, cur_bytes);
182
- s->in_flight_bytes -= cur_bytes;
183
+ block_copy_inflight_req_end(s, &req, ret);
184
if (ret < 0) {
185
- bdrv_set_dirty_bitmap(s->copy_bitmap, offset, cur_bytes);
186
- break;
187
+ return ret;
188
}
189
190
progress_work_done(s->progress, cur_bytes);
191
@@ -XXX,XX +XXX,XX @@ int coroutine_fn block_copy(BlockCopyState *s,
192
bytes -= cur_bytes;
193
}
194
195
- block_copy_inflight_req_end(&req);
196
+ return found_dirty;
197
+}
198
+
199
+/*
200
+ * block_copy
201
+ *
202
+ * Copy the requested region according to the dirty bitmap.
203
+ * Collaborate with parallel block_copy requests: if they succeed, that helps
204
+ * us. If they fail, we will retry the not-yet-copied regions. So, if we
205
+ * return an error, it means that some I/O operation failed in the context of
206
+ * _this_ block_copy call, not in some parallel operation.
207
+ */
208
+int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes,
209
+ bool *error_is_read)
210
+{
211
+ int ret;
212
+
213
+ do {
214
+ ret = block_copy_dirty_clusters(s, offset, bytes, error_is_read);
215
+
216
+ if (ret == 0) {
217
+ ret = block_copy_wait_one(s, offset, bytes);
218
+ }
219
+
220
+ /*
221
+ * We retry in two cases:
222
+ * 1. Some progress done
223
+ * Something was copied, which means that there were yield points
224
+ * and some new dirty bits may have appeared (due to failed parallel
225
+ * block-copy requests).
226
+ * 2. We have waited for some intersecting block-copy request
227
+ * It may have failed and produced new dirty bits.
228
+ */
229
+ } while (ret > 0);
230
231
return ret;
232
}
233
--
234
2.24.1
235
236
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
1
3
Hide the structure definitions and add an explicit API instead, to keep
4
an eye on the scope of the shared fields.
5
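
The pattern used here is the usual opaque-type one: the header keeps
only a forward declaration plus accessors, and the full struct lives in
the .c file. A minimal, self-contained sketch (the Sketch* names are
illustrative stand-ins, not the QEMU types; the real accessors are
block_copy_dirty_bitmap() and block_copy_set_skip_unallocated() in the
hunks below):

#include <stdbool.h>

/* What would live in the header: forward declaration and accessors only. */
typedef struct SketchCopyState SketchCopyState;
bool sketch_get_skip_unallocated(SketchCopyState *s);
void sketch_set_skip_unallocated(SketchCopyState *s, bool skip);

/* What would live in the .c file: the full definition stays private. */
struct SketchCopyState {
    bool skip_unallocated;
    /* ... all other fields remain invisible to users of the header ... */
};

bool sketch_get_skip_unallocated(SketchCopyState *s)
{
    return s->skip_unallocated;
}

void sketch_set_skip_unallocated(SketchCopyState *s, bool skip)
{
    s->skip_unallocated = skip;
}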
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Andrey Shinkevich <andrey.shinkevich@virtuozzo.com>
8
Reviewed-by: Max Reitz <mreitz@redhat.com>
9
Message-Id: <20200311103004.7649-10-vsementsov@virtuozzo.com>
10
Signed-off-by: Max Reitz <mreitz@redhat.com>
11
---
12
block/backup-top.c | 6 ++--
13
block/backup.c | 25 ++++++++--------
14
block/block-copy.c | 59 ++++++++++++++++++++++++++++++++++++++
15
include/block/block-copy.h | 52 +++------------------------------
16
4 files changed, 80 insertions(+), 62 deletions(-)
17
18
diff --git a/block/backup-top.c b/block/backup-top.c
19
index XXXXXXX..XXXXXXX 100644
20
--- a/block/backup-top.c
21
+++ b/block/backup-top.c
22
@@ -XXX,XX +XXX,XX @@ typedef struct BDRVBackupTopState {
23
BlockCopyState *bcs;
24
BdrvChild *target;
25
bool active;
26
+ int64_t cluster_size;
27
} BDRVBackupTopState;
28
29
static coroutine_fn int backup_top_co_preadv(
30
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int backup_top_cbw(BlockDriverState *bs, uint64_t offset,
31
return 0;
32
}
33
34
- off = QEMU_ALIGN_DOWN(offset, s->bcs->cluster_size);
35
- end = QEMU_ALIGN_UP(offset + bytes, s->bcs->cluster_size);
36
+ off = QEMU_ALIGN_DOWN(offset, s->cluster_size);
37
+ end = QEMU_ALIGN_UP(offset + bytes, s->cluster_size);
38
39
return block_copy(s->bcs, off, end - off, NULL);
40
}
41
@@ -XXX,XX +XXX,XX @@ BlockDriverState *bdrv_backup_top_append(BlockDriverState *source,
42
goto fail;
43
}
44
45
+ state->cluster_size = cluster_size;
46
state->bcs = block_copy_state_new(top->backing, state->target,
47
cluster_size, write_flags, &local_err);
48
if (local_err) {
49
diff --git a/block/backup.c b/block/backup.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/block/backup.c
52
+++ b/block/backup.c
53
@@ -XXX,XX +XXX,XX @@ static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
54
55
if (ret < 0 && job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS) {
56
/* If we failed and synced, merge in the bits we didn't copy: */
57
- bdrv_dirty_bitmap_merge_internal(bm, job->bcs->copy_bitmap,
58
+ bdrv_dirty_bitmap_merge_internal(bm, block_copy_dirty_bitmap(job->bcs),
59
NULL, true);
60
}
61
}
62
@@ -XXX,XX +XXX,XX @@ void backup_do_checkpoint(BlockJob *job, Error **errp)
63
return;
64
}
65
66
- bdrv_set_dirty_bitmap(backup_job->bcs->copy_bitmap, 0, backup_job->len);
67
+ bdrv_set_dirty_bitmap(block_copy_dirty_bitmap(backup_job->bcs), 0,
68
+ backup_job->len);
69
}
70
71
static BlockErrorAction backup_error_action(BackupBlockJob *job,
72
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_loop(BackupBlockJob *job)
73
BdrvDirtyBitmapIter *bdbi;
74
int ret = 0;
75
76
- bdbi = bdrv_dirty_iter_new(job->bcs->copy_bitmap);
77
+ bdbi = bdrv_dirty_iter_new(block_copy_dirty_bitmap(job->bcs));
78
while ((offset = bdrv_dirty_iter_next(bdbi)) != -1) {
79
do {
80
if (yield_and_check(job)) {
81
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_loop(BackupBlockJob *job)
82
return ret;
83
}
84
85
-static void backup_init_copy_bitmap(BackupBlockJob *job)
86
+static void backup_init_bcs_bitmap(BackupBlockJob *job)
87
{
88
bool ret;
89
uint64_t estimate;
90
+ BdrvDirtyBitmap *bcs_bitmap = block_copy_dirty_bitmap(job->bcs);
91
92
if (job->sync_mode == MIRROR_SYNC_MODE_BITMAP) {
93
- ret = bdrv_dirty_bitmap_merge_internal(job->bcs->copy_bitmap,
94
- job->sync_bitmap,
95
+ ret = bdrv_dirty_bitmap_merge_internal(bcs_bitmap, job->sync_bitmap,
96
NULL, true);
97
assert(ret);
98
} else {
99
@@ -XXX,XX +XXX,XX @@ static void backup_init_copy_bitmap(BackupBlockJob *job)
100
* We can't hog the coroutine to initialize this thoroughly.
101
* Set a flag and resume work when we are able to yield safely.
102
*/
103
- job->bcs->skip_unallocated = true;
104
+ block_copy_set_skip_unallocated(job->bcs, true);
105
}
106
- bdrv_set_dirty_bitmap(job->bcs->copy_bitmap, 0, job->len);
107
+ bdrv_set_dirty_bitmap(bcs_bitmap, 0, job->len);
108
}
109
110
- estimate = bdrv_get_dirty_count(job->bcs->copy_bitmap);
111
+ estimate = bdrv_get_dirty_count(bcs_bitmap);
112
job_progress_set_remaining(&job->common.job, estimate);
113
}
114
115
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_run(Job *job, Error **errp)
116
BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
117
int ret = 0;
118
119
- backup_init_copy_bitmap(s);
120
+ backup_init_bcs_bitmap(s);
121
122
if (s->sync_mode == MIRROR_SYNC_MODE_TOP) {
123
int64_t offset = 0;
124
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_run(Job *job, Error **errp)
125
126
offset += count;
127
}
128
- s->bcs->skip_unallocated = false;
129
+ block_copy_set_skip_unallocated(s->bcs, false);
130
}
131
132
if (s->sync_mode == MIRROR_SYNC_MODE_NONE) {
133
/*
134
- * All bits are set in copy_bitmap to allow any cluster to be copied.
135
+ * All bits are set in bcs bitmap to allow any cluster to be copied.
136
* This does not actually require them to be copied.
137
*/
138
while (!job_is_cancelled(job)) {
139
diff --git a/block/block-copy.c b/block/block-copy.c
140
index XXXXXXX..XXXXXXX 100644
141
--- a/block/block-copy.c
142
+++ b/block/block-copy.c
143
@@ -XXX,XX +XXX,XX @@
144
#define BLOCK_COPY_MAX_BUFFER (1 * MiB)
145
#define BLOCK_COPY_MAX_MEM (128 * MiB)
146
147
+typedef struct BlockCopyInFlightReq {
148
+ int64_t offset;
149
+ int64_t bytes;
150
+ QLIST_ENTRY(BlockCopyInFlightReq) list;
151
+ CoQueue wait_queue; /* coroutines blocked on this request */
152
+} BlockCopyInFlightReq;
153
+
154
+typedef struct BlockCopyState {
155
+ /*
156
+ * BdrvChild objects are not owned or managed by block-copy. They are
157
+ * provided by block-copy user and user is responsible for appropriate
158
+ * permissions on these children.
159
+ */
160
+ BdrvChild *source;
161
+ BdrvChild *target;
162
+ BdrvDirtyBitmap *copy_bitmap;
163
+ int64_t in_flight_bytes;
164
+ int64_t cluster_size;
165
+ bool use_copy_range;
166
+ int64_t copy_size;
167
+ uint64_t len;
168
+ QLIST_HEAD(, BlockCopyInFlightReq) inflight_reqs;
169
+
170
+ BdrvRequestFlags write_flags;
171
+
172
+ /*
173
+ * skip_unallocated:
174
+ *
175
+ * Used by sync=top jobs, which first scan the source node for unallocated
176
+ * areas and clear them in the copy_bitmap. During this process, the bitmap
177
+ * is thus not fully initialized: It may still have bits set for areas that
178
+ * are unallocated and should actually not be copied.
179
+ *
180
+ * This is indicated by skip_unallocated.
181
+ *
182
+ * In this case, block_copy() will query the source’s allocation status,
183
+ * skip unallocated regions, clear them in the copy_bitmap, and invoke
184
+ * block_copy_reset_unallocated() every time it does.
185
+ */
186
+ bool skip_unallocated;
187
+
188
+ ProgressMeter *progress;
189
+ /* progress_bytes_callback: called when some copying progress is done. */
190
+ ProgressBytesCallbackFunc progress_bytes_callback;
191
+ void *progress_opaque;
192
+
193
+ SharedResource *mem;
194
+} BlockCopyState;
195
+
196
static BlockCopyInFlightReq *find_conflicting_inflight_req(BlockCopyState *s,
197
int64_t offset,
198
int64_t bytes)
199
@@ -XXX,XX +XXX,XX @@ int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes,
200
201
return ret;
202
}
203
+
204
+BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s)
205
+{
206
+ return s->copy_bitmap;
207
+}
208
+
209
+void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip)
210
+{
211
+ s->skip_unallocated = skip;
212
+}
213
diff --git a/include/block/block-copy.h b/include/block/block-copy.h
214
index XXXXXXX..XXXXXXX 100644
215
--- a/include/block/block-copy.h
216
+++ b/include/block/block-copy.h
217
@@ -XXX,XX +XXX,XX @@
218
#include "block/block.h"
219
#include "qemu/co-shared-resource.h"
220
221
-typedef struct BlockCopyInFlightReq {
222
- int64_t offset;
223
- int64_t bytes;
224
- QLIST_ENTRY(BlockCopyInFlightReq) list;
225
- CoQueue wait_queue; /* coroutines blocked on this request */
226
-} BlockCopyInFlightReq;
227
-
228
typedef void (*ProgressBytesCallbackFunc)(int64_t bytes, void *opaque);
229
-typedef struct BlockCopyState {
230
- /*
231
- * BdrvChild objects are not owned or managed by block-copy. They are
232
- * provided by block-copy user and user is responsible for appropriate
233
- * permissions on these children.
234
- */
235
- BdrvChild *source;
236
- BdrvChild *target;
237
- BdrvDirtyBitmap *copy_bitmap;
238
- int64_t in_flight_bytes;
239
- int64_t cluster_size;
240
- bool use_copy_range;
241
- int64_t copy_size;
242
- uint64_t len;
243
- QLIST_HEAD(, BlockCopyInFlightReq) inflight_reqs;
244
-
245
- BdrvRequestFlags write_flags;
246
-
247
- /*
248
- * skip_unallocated:
249
- *
250
- * Used by sync=top jobs, which first scan the source node for unallocated
251
- * areas and clear them in the copy_bitmap. During this process, the bitmap
252
- * is thus not fully initialized: It may still have bits set for areas that
253
- * are unallocated and should actually not be copied.
254
- *
255
- * This is indicated by skip_unallocated.
256
- *
257
- * In this case, block_copy() will query the source’s allocation status,
258
- * skip unallocated regions, clear them in the copy_bitmap, and invoke
259
- * block_copy_reset_unallocated() every time it does.
260
- */
261
- bool skip_unallocated;
262
-
263
- ProgressMeter *progress;
264
- /* progress_bytes_callback: called when some copying progress is done. */
265
- ProgressBytesCallbackFunc progress_bytes_callback;
266
- void *progress_opaque;
267
-
268
- SharedResource *mem;
269
-} BlockCopyState;
270
+typedef struct BlockCopyState BlockCopyState;
271
272
BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
273
int64_t cluster_size,
274
@@ -XXX,XX +XXX,XX @@ int64_t block_copy_reset_unallocated(BlockCopyState *s,
275
int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes,
276
bool *error_is_read);
277
278
+BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s);
279
+void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip);
280
+
281
#endif /* BLOCK_COPY_H */
282
--
283
2.24.1
284
285