1 | The following changes since commit 848a6caa88b9f082c89c9b41afa975761262981d: | 1 | The following changes since commit 813bac3d8d70d85cb7835f7945eb9eed84c2d8d0: |
---|---|---|---|
2 | 2 | ||
3 | Merge tag 'migration-20230602-pull-request' of https://gitlab.com/juan.quintela/qemu into staging (2023-06-02 17:33:29 -0700) | 3 | Merge tag '2023q3-bsd-user-pull-request' of https://gitlab.com/bsdimp/qemu into staging (2023-08-29 08:58:00 -0400) |
4 | 4 | ||
5 | are available in the Git repository at: | 5 | are available in the Git repository at: |
6 | 6 | ||
7 | https://gitlab.com/hreitz/qemu.git tags/pull-block-2023-06-05 | 7 | https://gitlab.com/stefanha/qemu.git tags/block-pull-request |
8 | 8 | ||
9 | for you to fetch changes up to 42a2890a76f4783cd1c212f27856edcf2b5e8a75: | 9 | for you to fetch changes up to 87ec6f55af38e29be5b2b65a8acf84da73e06d06: |
10 | 10 | ||
11 | qcow2: add discard-no-unref option (2023-06-05 13:15:42 +0200) | 11 | aio-posix: zero out io_uring sqe user_data (2023-08-30 07:39:59 -0400) |
12 | 12 | ||
13 | ---------------------------------------------------------------- | 13 | ---------------------------------------------------------------- |
14 | Block patches | 14 | Pull request |
15 | 15 | ||
16 | - Fix padding of unaligned vectored requests to match the host alignment | 16 | v3: |
17 | for vectors with 1023 or 1024 buffers | 17 | - Drop UFS emulation due to CI failures |
18 | - Refactor and fix bugs in parallels's image check functionality | 18 | - Add "aio-posix: zero out io_uring sqe user_data" |
19 | - Add an option to the qcow2 driver to retain (qcow2-level) allocations | ||
20 | on discard requests from the guest (while still forwarding the discard | ||
21 | to the lower level and marking the range as zero) | ||
22 | 19 | ||
23 | ---------------------------------------------------------------- | 20 | ---------------------------------------------------------------- |
24 | Alexander Ivanov (12): | ||
25 | parallels: Out of image offset in BAT leads to image inflation | ||
26 | parallels: Fix high_off calculation in parallels_co_check() | ||
27 | parallels: Fix image_end_offset and data_end after out-of-image check | ||
28 | parallels: create parallels_set_bat_entry_helper() to assign BAT value | ||
29 | parallels: Use generic infrastructure for BAT writing in | ||
30 | parallels_co_check() | ||
31 | parallels: Move check of unclean image to a separate function | ||
32 | parallels: Move check of cluster outside image to a separate function | ||
33 | parallels: Fix statistics calculation | ||
34 | parallels: Move check of leaks to a separate function | ||
35 | parallels: Move statistic collection to a separate function | ||
36 | parallels: Replace qemu_co_mutex_lock by WITH_QEMU_LOCK_GUARD | ||
37 | parallels: Incorrect condition in out-of-image check | ||
38 | 21 | ||
39 | Hanna Czenczek (4): | 22 | Andrey Drobyshev (3): |
40 | util/iov: Make qiov_slice() public | 23 | block: add subcluster_size field to BlockDriverInfo |
41 | block: Collapse padded I/O vecs exceeding IOV_MAX | 24 | block/io: align requests to subcluster_size |
42 | util/iov: Remove qemu_iovec_init_extended() | 25 | tests/qemu-iotests/197: add testcase for CoR with subclusters |
43 | iotests/iov-padding: New test | ||
44 | 26 | ||
45 | Jean-Louis Dupond (1): | 27 | Fabiano Rosas (1): |
46 | qcow2: add discard-no-unref option | 28 | block-migration: Ensure we don't crash during migration cleanup |
47 | 29 | ||
48 | qapi/block-core.json | 12 ++ | 30 | Stefan Hajnoczi (1): |
49 | block/qcow2.h | 3 + | 31 | aio-posix: zero out io_uring sqe user_data |
50 | include/qemu/iov.h | 8 +- | 32 | |
51 | block/io.c | 166 ++++++++++++++++++-- | 33 | include/block/block-common.h | 5 ++++ |
52 | block/parallels.c | 190 ++++++++++++++++------- | 34 | include/block/block-io.h | 8 +++--- |
53 | block/qcow2-cluster.c | 32 +++- | 35 | block.c | 7 +++++ |
54 | block/qcow2.c | 18 +++ | 36 | block/io.c | 50 ++++++++++++++++++------------------ |
55 | util/iov.c | 89 ++--------- | 37 | block/mirror.c | 8 +++--- |
56 | qemu-options.hx | 12 ++ | 38 | block/qcow2.c | 1 + |
57 | tests/qemu-iotests/tests/iov-padding | 85 ++++++++++ | 39 | migration/block.c | 11 ++++++-- |
58 | tests/qemu-iotests/tests/iov-padding.out | 59 +++++++ | 40 | util/fdmon-io_uring.c | 2 ++ |
59 | 11 files changed, 523 insertions(+), 151 deletions(-) | 41 | tests/qemu-iotests/197 | 29 +++++++++++++++++++++ |
60 | create mode 100755 tests/qemu-iotests/tests/iov-padding | 42 | tests/qemu-iotests/197.out | 24 +++++++++++++++++ |
61 | create mode 100644 tests/qemu-iotests/tests/iov-padding.out | 43 | 10 files changed, 110 insertions(+), 35 deletions(-) |
62 | 44 | ||
63 | -- | 45 | -- |
64 | 2.40.1 | 46 | 2.41.0 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | We want to inline qemu_iovec_init_extended() in block/io.c for padding | ||
2 | requests, and having access to qiov_slice() is useful for this. As a | ||
3 | public function, it is renamed to qemu_iovec_slice(). | ||
4 | 1 | ||
5 | (We will need to count the number of I/O vector elements of a slice | ||
6 | there, and then later process this slice. Without qiov_slice(), we | ||
7 | would need to call qemu_iovec_subvec_niov(), and all further | ||
8 | IOV-processing functions may need to skip prefixing elements to | ||
9 | accomodate for a qiov_offset. Because qemu_iovec_subvec_niov() | ||
10 | internally calls qiov_slice(), we can just have the block/io.c code call | ||
11 | qiov_slice() itself, thus get the number of elements, and also create an | ||
12 | iovec array with the superfluous prefixing elements stripped, so the | ||
13 | following processing functions no longer need to skip them.) | ||
14 | |||
15 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
16 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
17 | Signed-off-by: Hanna Czenczek <hreitz@redhat.com> | ||
18 | Message-Id: <20230411173418.19549-2-hreitz@redhat.com> | ||
19 | --- | ||
20 | include/qemu/iov.h | 3 +++ | ||
21 | util/iov.c | 14 +++++++------- | ||
22 | 2 files changed, 10 insertions(+), 7 deletions(-) | ||
23 | |||
24 | diff --git a/include/qemu/iov.h b/include/qemu/iov.h | ||
25 | index XXXXXXX..XXXXXXX 100644 | ||
26 | --- a/include/qemu/iov.h | ||
27 | +++ b/include/qemu/iov.h | ||
28 | @@ -XXX,XX +XXX,XX @@ int qemu_iovec_init_extended( | ||
29 | void *tail_buf, size_t tail_len); | ||
30 | void qemu_iovec_init_slice(QEMUIOVector *qiov, QEMUIOVector *source, | ||
31 | size_t offset, size_t len); | ||
32 | +struct iovec *qemu_iovec_slice(QEMUIOVector *qiov, | ||
33 | + size_t offset, size_t len, | ||
34 | + size_t *head, size_t *tail, int *niov); | ||
35 | int qemu_iovec_subvec_niov(QEMUIOVector *qiov, size_t offset, size_t len); | ||
36 | void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len); | ||
37 | void qemu_iovec_concat(QEMUIOVector *dst, | ||
38 | diff --git a/util/iov.c b/util/iov.c | ||
39 | index XXXXXXX..XXXXXXX 100644 | ||
40 | --- a/util/iov.c | ||
41 | +++ b/util/iov.c | ||
42 | @@ -XXX,XX +XXX,XX @@ static struct iovec *iov_skip_offset(struct iovec *iov, size_t offset, | ||
43 | } | ||
44 | |||
45 | /* | ||
46 | - * qiov_slice | ||
47 | + * qemu_iovec_slice | ||
48 | * | ||
49 | * Find subarray of iovec's, containing requested range. @head would | ||
50 | * be offset in first iov (returned by the function), @tail would be | ||
51 | * count of extra bytes in last iovec (returned iov + @niov - 1). | ||
52 | */ | ||
53 | -static struct iovec *qiov_slice(QEMUIOVector *qiov, | ||
54 | - size_t offset, size_t len, | ||
55 | - size_t *head, size_t *tail, int *niov) | ||
56 | +struct iovec *qemu_iovec_slice(QEMUIOVector *qiov, | ||
57 | + size_t offset, size_t len, | ||
58 | + size_t *head, size_t *tail, int *niov) | ||
59 | { | ||
60 | struct iovec *iov, *end_iov; | ||
61 | |||
62 | @@ -XXX,XX +XXX,XX @@ int qemu_iovec_subvec_niov(QEMUIOVector *qiov, size_t offset, size_t len) | ||
63 | size_t head, tail; | ||
64 | int niov; | ||
65 | |||
66 | - qiov_slice(qiov, offset, len, &head, &tail, &niov); | ||
67 | + qemu_iovec_slice(qiov, offset, len, &head, &tail, &niov); | ||
68 | |||
69 | return niov; | ||
70 | } | ||
71 | @@ -XXX,XX +XXX,XX @@ int qemu_iovec_init_extended( | ||
72 | } | ||
73 | |||
74 | if (mid_len) { | ||
75 | - mid_iov = qiov_slice(mid_qiov, mid_offset, mid_len, | ||
76 | - &mid_head, &mid_tail, &mid_niov); | ||
77 | + mid_iov = qemu_iovec_slice(mid_qiov, mid_offset, mid_len, | ||
78 | + &mid_head, &mid_tail, &mid_niov); | ||
79 | } | ||
80 | |||
81 | total_niov = !!head_len + mid_niov + !!tail_len; | ||
82 | -- | ||
83 | 2.40.1 | diff view generated by jsdifflib |
1 | From: Alexander Ivanov <alexander.ivanov@virtuozzo.com> | 1 | From: Fabiano Rosas <farosas@suse.de> |
---|---|---|---|
2 | 2 | ||
3 | We will add more and more checks so we need a better code structure in | 3 | We can fail the blk_insert_bs() at init_blk_migration(), leaving the |
4 | parallels_co_check. Let each check performs in a separate loop in a | 4 | BlkMigDevState without a dirty_bitmap and BlockDriverState. Account |
5 | separate helper. | 5 | for the possibly missing elements when doing cleanup. |
6 | 6 | ||
7 | Signed-off-by: Alexander Ivanov <alexander.ivanov@virtuozzo.com> | 7 | Fix the following crashes: |
8 | Reviewed-by: Denis V. Lunev <den@openvz.org> | 8 | |
9 | Message-Id: <20230424093147.197643-8-alexander.ivanov@virtuozzo.com> | 9 | Thread 1 "qemu-system-x86" received signal SIGSEGV, Segmentation fault. |
10 | Reviewed-by: Hanna Czenczek <hreitz@redhat.com> | 10 | 0x0000555555ec83ef in bdrv_release_dirty_bitmap (bitmap=0x0) at ../block/dirty-bitmap.c:359 |
11 | Signed-off-by: Hanna Czenczek <hreitz@redhat.com> | 11 | 359 BlockDriverState *bs = bitmap->bs; |
12 | #0 0x0000555555ec83ef in bdrv_release_dirty_bitmap (bitmap=0x0) at ../block/dirty-bitmap.c:359 | ||
13 | #1 0x0000555555bba331 in unset_dirty_tracking () at ../migration/block.c:371 | ||
14 | #2 0x0000555555bbad98 in block_migration_cleanup_bmds () at ../migration/block.c:681 | ||
15 | |||
16 | Thread 1 "qemu-system-x86" received signal SIGSEGV, Segmentation fault. | ||
17 | 0x0000555555e971ff in bdrv_op_unblock (bs=0x0, op=BLOCK_OP_TYPE_BACKUP_SOURCE, reason=0x0) at ../block.c:7073 | ||
18 | 7073 QLIST_FOREACH_SAFE(blocker, &bs->op_blockers[op], list, next) { | ||
19 | #0 0x0000555555e971ff in bdrv_op_unblock (bs=0x0, op=BLOCK_OP_TYPE_BACKUP_SOURCE, reason=0x0) at ../block.c:7073 | ||
20 | #1 0x0000555555e9734a in bdrv_op_unblock_all (bs=0x0, reason=0x0) at ../block.c:7095 | ||
21 | #2 0x0000555555bbae13 in block_migration_cleanup_bmds () at ../migration/block.c:690 | ||
22 | |||
23 | Signed-off-by: Fabiano Rosas <farosas@suse.de> | ||
24 | Message-id: 20230731203338.27581-1-farosas@suse.de | ||
25 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | --- | 26 | --- |
13 | block/parallels.c | 75 +++++++++++++++++++++++++++++++---------------- | 27 | migration/block.c | 11 +++++++++-- |
14 | 1 file changed, 49 insertions(+), 26 deletions(-) | 28 | 1 file changed, 9 insertions(+), 2 deletions(-) |
15 | 29 | ||
16 | diff --git a/block/parallels.c b/block/parallels.c | 30 | diff --git a/migration/block.c b/migration/block.c |
17 | index XXXXXXX..XXXXXXX 100644 | 31 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/block/parallels.c | 32 | --- a/migration/block.c |
19 | +++ b/block/parallels.c | 33 | +++ b/migration/block.c |
20 | @@ -XXX,XX +XXX,XX @@ static void parallels_check_unclean(BlockDriverState *bs, | 34 | @@ -XXX,XX +XXX,XX @@ static void unset_dirty_tracking(void) |
35 | BlkMigDevState *bmds; | ||
36 | |||
37 | QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) { | ||
38 | - bdrv_release_dirty_bitmap(bmds->dirty_bitmap); | ||
39 | + if (bmds->dirty_bitmap) { | ||
40 | + bdrv_release_dirty_bitmap(bmds->dirty_bitmap); | ||
41 | + } | ||
21 | } | 42 | } |
22 | } | 43 | } |
23 | 44 | ||
24 | +static int coroutine_fn GRAPH_RDLOCK | 45 | @@ -XXX,XX +XXX,XX @@ static int64_t get_remaining_dirty(void) |
25 | +parallels_check_outside_image(BlockDriverState *bs, BdrvCheckResult *res, | 46 | static void block_migration_cleanup_bmds(void) |
26 | + BdrvCheckMode fix) | 47 | { |
27 | +{ | 48 | BlkMigDevState *bmds; |
28 | + BDRVParallelsState *s = bs->opaque; | 49 | + BlockDriverState *bs; |
29 | + uint32_t i; | 50 | AioContext *ctx; |
30 | + int64_t off, high_off, size; | 51 | |
52 | unset_dirty_tracking(); | ||
53 | |||
54 | while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) { | ||
55 | QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry); | ||
56 | - bdrv_op_unblock_all(blk_bs(bmds->blk), bmds->blocker); | ||
31 | + | 57 | + |
32 | + size = bdrv_getlength(bs->file->bs); | 58 | + bs = blk_bs(bmds->blk); |
33 | + if (size < 0) { | 59 | + if (bs) { |
34 | + res->check_errors++; | 60 | + bdrv_op_unblock_all(bs, bmds->blocker); |
35 | + return size; | ||
36 | + } | ||
37 | + | ||
38 | + high_off = 0; | ||
39 | + for (i = 0; i < s->bat_size; i++) { | ||
40 | + off = bat2sect(s, i) << BDRV_SECTOR_BITS; | ||
41 | + if (off > size) { | ||
42 | + fprintf(stderr, "%s cluster %u is outside image\n", | ||
43 | + fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR", i); | ||
44 | + res->corruptions++; | ||
45 | + if (fix & BDRV_FIX_ERRORS) { | ||
46 | + parallels_set_bat_entry(s, i, 0); | ||
47 | + res->corruptions_fixed++; | ||
48 | + } | ||
49 | + continue; | ||
50 | + } | 61 | + } |
51 | + if (high_off < off) { | 62 | error_free(bmds->blocker); |
52 | + high_off = off; | 63 | |
53 | + } | 64 | /* Save ctx, because bmds->blk can disappear during blk_unref. */ |
54 | + } | ||
55 | + | ||
56 | + if (high_off == 0) { | ||
57 | + res->image_end_offset = s->data_end << BDRV_SECTOR_BITS; | ||
58 | + } else { | ||
59 | + res->image_end_offset = high_off + s->cluster_size; | ||
60 | + s->data_end = res->image_end_offset >> BDRV_SECTOR_BITS; | ||
61 | + } | ||
62 | + | ||
63 | + return 0; | ||
64 | +} | ||
65 | + | ||
66 | static int coroutine_fn GRAPH_RDLOCK | ||
67 | parallels_co_check(BlockDriverState *bs, BdrvCheckResult *res, | ||
68 | BdrvCheckMode fix) | ||
69 | { | ||
70 | BDRVParallelsState *s = bs->opaque; | ||
71 | - int64_t size, prev_off, high_off; | ||
72 | - int ret = 0; | ||
73 | + int64_t size, prev_off; | ||
74 | + int ret; | ||
75 | uint32_t i; | ||
76 | |||
77 | size = bdrv_getlength(bs->file->bs); | ||
78 | @@ -XXX,XX +XXX,XX @@ parallels_co_check(BlockDriverState *bs, BdrvCheckResult *res, | ||
79 | |||
80 | parallels_check_unclean(bs, res, fix); | ||
81 | |||
82 | + ret = parallels_check_outside_image(bs, res, fix); | ||
83 | + if (ret < 0) { | ||
84 | + goto out; | ||
85 | + } | ||
86 | + | ||
87 | res->bfi.total_clusters = s->bat_size; | ||
88 | res->bfi.compressed_clusters = 0; /* compression is not supported */ | ||
89 | |||
90 | - high_off = 0; | ||
91 | prev_off = 0; | ||
92 | for (i = 0; i < s->bat_size; i++) { | ||
93 | int64_t off = bat2sect(s, i) << BDRV_SECTOR_BITS; | ||
94 | @@ -XXX,XX +XXX,XX @@ parallels_co_check(BlockDriverState *bs, BdrvCheckResult *res, | ||
95 | continue; | ||
96 | } | ||
97 | |||
98 | - /* cluster outside the image */ | ||
99 | - if (off > size) { | ||
100 | - fprintf(stderr, "%s cluster %u is outside image\n", | ||
101 | - fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR", i); | ||
102 | - res->corruptions++; | ||
103 | - if (fix & BDRV_FIX_ERRORS) { | ||
104 | - parallels_set_bat_entry(s, i, 0); | ||
105 | - res->corruptions_fixed++; | ||
106 | - } | ||
107 | - prev_off = 0; | ||
108 | - continue; | ||
109 | - } | ||
110 | - | ||
111 | res->bfi.allocated_clusters++; | ||
112 | - if (off > high_off) { | ||
113 | - high_off = off; | ||
114 | - } | ||
115 | |||
116 | if (prev_off != 0 && (prev_off + s->cluster_size) != off) { | ||
117 | res->bfi.fragmented_clusters++; | ||
118 | @@ -XXX,XX +XXX,XX @@ parallels_co_check(BlockDriverState *bs, BdrvCheckResult *res, | ||
119 | prev_off = off; | ||
120 | } | ||
121 | |||
122 | - if (high_off == 0) { | ||
123 | - res->image_end_offset = s->data_end << BDRV_SECTOR_BITS; | ||
124 | - } else { | ||
125 | - res->image_end_offset = high_off + s->cluster_size; | ||
126 | - s->data_end = res->image_end_offset >> BDRV_SECTOR_BITS; | ||
127 | - } | ||
128 | - | ||
129 | if (size > res->image_end_offset) { | ||
130 | int64_t count; | ||
131 | count = DIV_ROUND_UP(size - res->image_end_offset, s->cluster_size); | ||
132 | -- | 65 | -- |
133 | 2.40.1 | 66 | 2.41.0 | diff view generated by jsdifflib |
1 | From: Jean-Louis Dupond <jean-louis@dupond.be> | 1 | From: Andrey Drobyshev <andrey.drobyshev@virtuozzo.com> |
---|---|---|---|
2 | 2 | ||
3 | When we for example have a sparse qcow2 image and discard: unmap is enabled, | 3 | This is going to be used in the subsequent commit as requests alignment |
4 | there can be a lot of fragmentation in the image after some time. Especially on VM's | 4 | (in particular, during copy-on-read). This value only makes sense for |
5 | that do a lot of writes/deletes. | 5 | the formats which support subclusters (currently QCOW2 only). If this |
6 | This causes the qcow2 image to grow even over 110% of its virtual size, | 6 | field isn't set by driver's own bdrv_get_info() implementation, we |
7 | because the free gaps in the image get too small to allocate new | 7 | simply set it equal to the cluster size thus treating each cluster as |
8 | continuous clusters. So it allocates new space at the end of the image. | 8 | having a single subcluster. |
9 | 9 | ||
10 | Disabling discard is not an option, as discard is needed to keep the | 10 | Reviewed-by: Eric Blake <eblake@redhat.com> |
11 | incremental backup size as low as possible. Without discard, the | 11 | Reviewed-by: Denis V. Lunev <den@openvz.org> |
12 | incremental backups would become large, as qemu thinks it's just dirty | 12 | Signed-off-by: Andrey Drobyshev <andrey.drobyshev@virtuozzo.com> |
13 | blocks but it doesn't know the blocks are unneeded. | 13 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> |
14 | So we need to avoid fragmentation but also 'empty' the unneeded blocks in | 14 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
15 | the image to have a small incremental backup. | 15 | Message-ID: <20230711172553.234055-2-andrey.drobyshev@virtuozzo.com> |
16 | --- | ||
17 | include/block/block-common.h | 5 +++++ | ||
18 | block.c | 7 +++++++ | ||
19 | block/qcow2.c | 1 + | ||
20 | 3 files changed, 13 insertions(+) | ||
16 | 21 | ||
17 | In addition, we also want to send the discards further down the stack, so | 22 | diff --git a/include/block/block-common.h b/include/block/block-common.h |
18 | the underlying blocks are still discarded. | ||
19 | |||
20 | Therefor we introduce a new qcow2 option "discard-no-unref". | ||
21 | When setting this option to true, discards will no longer have the qcow2 | ||
22 | driver relinquish cluster allocations. Other than that, the request is | ||
23 | handled as normal: All clusters in range are marked as zero, and, if | ||
24 | pass-discard-request is true, it is passed further down the stack. | ||
25 | The only difference is that the now-zero clusters are preallocated | ||
26 | instead of being unallocated. | ||
27 | This will avoid fragmentation on the qcow2 image. | ||
28 | |||
29 | Fixes: https://gitlab.com/qemu-project/qemu/-/issues/1621 | ||
30 | Signed-off-by: Jean-Louis Dupond <jean-louis@dupond.be> | ||
31 | Message-Id: <20230605084523.34134-2-jean-louis@dupond.be> | ||
32 | Reviewed-by: Hanna Czenczek <hreitz@redhat.com> | ||
33 | Signed-off-by: Hanna Czenczek <hreitz@redhat.com> | ||
34 | --- | ||
35 | qapi/block-core.json | 12 ++++++++++++ | ||
36 | block/qcow2.h | 3 +++ | ||
37 | block/qcow2-cluster.c | 32 ++++++++++++++++++++++++++++---- | ||
38 | block/qcow2.c | 18 ++++++++++++++++++ | ||
39 | qemu-options.hx | 12 ++++++++++++ | ||
40 | 5 files changed, 73 insertions(+), 4 deletions(-) | ||
41 | |||
42 | diff --git a/qapi/block-core.json b/qapi/block-core.json | ||
43 | index XXXXXXX..XXXXXXX 100644 | 23 | index XXXXXXX..XXXXXXX 100644 |
44 | --- a/qapi/block-core.json | 24 | --- a/include/block/block-common.h |
45 | +++ b/qapi/block-core.json | 25 | +++ b/include/block/block-common.h |
46 | @@ -XXX,XX +XXX,XX @@ | 26 | @@ -XXX,XX +XXX,XX @@ typedef struct BlockZoneWps { |
47 | # @pass-discard-other: whether discard requests for the data source | 27 | typedef struct BlockDriverInfo { |
48 | # should be issued on other occasions where a cluster gets freed | 28 | /* in bytes, 0 if irrelevant */ |
49 | # | 29 | int cluster_size; |
50 | +# @discard-no-unref: when enabled, discards from the guest will not cause | 30 | + /* |
51 | +# cluster allocations to be relinquished. This prevents qcow2 fragmentation | 31 | + * A fraction of cluster_size, if supported (currently QCOW2 only); if |
52 | +# that would be caused by such discards. Besides potential | 32 | + * disabled or unsupported, set equal to cluster_size. |
53 | +# performance degradation, such fragmentation can lead to increased | 33 | + */ |
54 | +# allocation of clusters past the end of the image file, | 34 | + int subcluster_size; |
55 | +# resulting in image files whose file length can grow much larger | 35 | /* offset at which the VM state can be saved (0 if not possible) */ |
56 | +# than their guest disk size would suggest. | 36 | int64_t vm_state_offset; |
57 | +# If image file length is of concern (e.g. when storing qcow2 | 37 | bool is_dirty; |
58 | +# images directly on block devices), you should consider enabling | 38 | diff --git a/block.c b/block.c |
59 | +# this option. (since 8.1) | ||
60 | +# | ||
61 | # @overlap-check: which overlap checks to perform for writes to the | ||
62 | # image, defaults to 'cached' (since 2.2) | ||
63 | # | ||
64 | @@ -XXX,XX +XXX,XX @@ | ||
65 | '*pass-discard-request': 'bool', | ||
66 | '*pass-discard-snapshot': 'bool', | ||
67 | '*pass-discard-other': 'bool', | ||
68 | + '*discard-no-unref': 'bool', | ||
69 | '*overlap-check': 'Qcow2OverlapChecks', | ||
70 | '*cache-size': 'int', | ||
71 | '*l2-cache-size': 'int', | ||
72 | diff --git a/block/qcow2.h b/block/qcow2.h | ||
73 | index XXXXXXX..XXXXXXX 100644 | 39 | index XXXXXXX..XXXXXXX 100644 |
74 | --- a/block/qcow2.h | 40 | --- a/block.c |
75 | +++ b/block/qcow2.h | 41 | +++ b/block.c |
76 | @@ -XXX,XX +XXX,XX @@ | 42 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) |
77 | #define QCOW2_OPT_DISCARD_REQUEST "pass-discard-request" | ||
78 | #define QCOW2_OPT_DISCARD_SNAPSHOT "pass-discard-snapshot" | ||
79 | #define QCOW2_OPT_DISCARD_OTHER "pass-discard-other" | ||
80 | +#define QCOW2_OPT_DISCARD_NO_UNREF "discard-no-unref" | ||
81 | #define QCOW2_OPT_OVERLAP "overlap-check" | ||
82 | #define QCOW2_OPT_OVERLAP_TEMPLATE "overlap-check.template" | ||
83 | #define QCOW2_OPT_OVERLAP_MAIN_HEADER "overlap-check.main-header" | ||
84 | @@ -XXX,XX +XXX,XX @@ typedef struct BDRVQcow2State { | ||
85 | |||
86 | bool discard_passthrough[QCOW2_DISCARD_MAX]; | ||
87 | |||
88 | + bool discard_no_unref; | ||
89 | + | ||
90 | int overlap_check; /* bitmask of Qcow2MetadataOverlap values */ | ||
91 | bool signaled_corruption; | ||
92 | |||
93 | diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c | ||
94 | index XXXXXXX..XXXXXXX 100644 | ||
95 | --- a/block/qcow2-cluster.c | ||
96 | +++ b/block/qcow2-cluster.c | ||
97 | @@ -XXX,XX +XXX,XX @@ static int discard_in_l2_slice(BlockDriverState *bs, uint64_t offset, | ||
98 | uint64_t new_l2_bitmap = old_l2_bitmap; | ||
99 | QCow2ClusterType cluster_type = | ||
100 | qcow2_get_cluster_type(bs, old_l2_entry); | ||
101 | + bool keep_reference = (cluster_type != QCOW2_CLUSTER_COMPRESSED) && | ||
102 | + !full_discard && | ||
103 | + (s->discard_no_unref && | ||
104 | + type == QCOW2_DISCARD_REQUEST); | ||
105 | |||
106 | /* | ||
107 | * If full_discard is true, the cluster should not read back as zeroes, | ||
108 | @@ -XXX,XX +XXX,XX @@ static int discard_in_l2_slice(BlockDriverState *bs, uint64_t offset, | ||
109 | new_l2_entry = new_l2_bitmap = 0; | ||
110 | } else if (bs->backing || qcow2_cluster_is_allocated(cluster_type)) { | ||
111 | if (has_subclusters(s)) { | ||
112 | - new_l2_entry = 0; | ||
113 | + if (keep_reference) { | ||
114 | + new_l2_entry = old_l2_entry; | ||
115 | + } else { | ||
116 | + new_l2_entry = 0; | ||
117 | + } | ||
118 | new_l2_bitmap = QCOW_L2_BITMAP_ALL_ZEROES; | ||
119 | } else { | ||
120 | - new_l2_entry = s->qcow_version >= 3 ? QCOW_OFLAG_ZERO : 0; | ||
121 | + if (s->qcow_version >= 3) { | ||
122 | + if (keep_reference) { | ||
123 | + new_l2_entry |= QCOW_OFLAG_ZERO; | ||
124 | + } else { | ||
125 | + new_l2_entry = QCOW_OFLAG_ZERO; | ||
126 | + } | ||
127 | + } else { | ||
128 | + new_l2_entry = 0; | ||
129 | + } | ||
130 | } | ||
131 | } | ||
132 | |||
133 | @@ -XXX,XX +XXX,XX @@ static int discard_in_l2_slice(BlockDriverState *bs, uint64_t offset, | ||
134 | if (has_subclusters(s)) { | ||
135 | set_l2_bitmap(s, l2_slice, l2_index + i, new_l2_bitmap); | ||
136 | } | ||
137 | - /* Then decrease the refcount */ | ||
138 | - qcow2_free_any_cluster(bs, old_l2_entry, type); | ||
139 | + if (!keep_reference) { | ||
140 | + /* Then decrease the refcount */ | ||
141 | + qcow2_free_any_cluster(bs, old_l2_entry, type); | ||
142 | + } else if (s->discard_passthrough[type] && | ||
143 | + (cluster_type == QCOW2_CLUSTER_NORMAL || | ||
144 | + cluster_type == QCOW2_CLUSTER_ZERO_ALLOC)) { | ||
145 | + /* If we keep the reference, pass on the discard still */ | ||
146 | + bdrv_pdiscard(s->data_file, old_l2_entry & L2E_OFFSET_MASK, | ||
147 | + s->cluster_size); | ||
148 | + } | ||
149 | } | 43 | } |
150 | 44 | memset(bdi, 0, sizeof(*bdi)); | |
151 | qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); | 45 | ret = drv->bdrv_co_get_info(bs, bdi); |
46 | + if (bdi->subcluster_size == 0) { | ||
47 | + /* | ||
48 | + * If the driver left this unset, subclusters are not supported. | ||
49 | + * Then it is safe to treat each cluster as having only one subcluster. | ||
50 | + */ | ||
51 | + bdi->subcluster_size = bdi->cluster_size; | ||
52 | + } | ||
53 | if (ret < 0) { | ||
54 | return ret; | ||
55 | } | ||
152 | diff --git a/block/qcow2.c b/block/qcow2.c | 56 | diff --git a/block/qcow2.c b/block/qcow2.c |
153 | index XXXXXXX..XXXXXXX 100644 | 57 | index XXXXXXX..XXXXXXX 100644 |
154 | --- a/block/qcow2.c | 58 | --- a/block/qcow2.c |
155 | +++ b/block/qcow2.c | 59 | +++ b/block/qcow2.c |
156 | @@ -XXX,XX +XXX,XX @@ static const char *const mutable_opts[] = { | 60 | @@ -XXX,XX +XXX,XX @@ qcow2_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) |
157 | QCOW2_OPT_DISCARD_REQUEST, | 61 | { |
158 | QCOW2_OPT_DISCARD_SNAPSHOT, | 62 | BDRVQcow2State *s = bs->opaque; |
159 | QCOW2_OPT_DISCARD_OTHER, | 63 | bdi->cluster_size = s->cluster_size; |
160 | + QCOW2_OPT_DISCARD_NO_UNREF, | 64 | + bdi->subcluster_size = s->subcluster_size; |
161 | QCOW2_OPT_OVERLAP, | 65 | bdi->vm_state_offset = qcow2_vm_state_offset(s); |
162 | QCOW2_OPT_OVERLAP_TEMPLATE, | 66 | bdi->is_dirty = s->incompatible_features & QCOW2_INCOMPAT_DIRTY; |
163 | QCOW2_OPT_OVERLAP_MAIN_HEADER, | 67 | return 0; |
164 | @@ -XXX,XX +XXX,XX @@ static QemuOptsList qcow2_runtime_opts = { | ||
165 | .type = QEMU_OPT_BOOL, | ||
166 | .help = "Generate discard requests when other clusters are freed", | ||
167 | }, | ||
168 | + { | ||
169 | + .name = QCOW2_OPT_DISCARD_NO_UNREF, | ||
170 | + .type = QEMU_OPT_BOOL, | ||
171 | + .help = "Do not unreference discarded clusters", | ||
172 | + }, | ||
173 | { | ||
174 | .name = QCOW2_OPT_OVERLAP, | ||
175 | .type = QEMU_OPT_STRING, | ||
176 | @@ -XXX,XX +XXX,XX @@ typedef struct Qcow2ReopenState { | ||
177 | bool use_lazy_refcounts; | ||
178 | int overlap_check; | ||
179 | bool discard_passthrough[QCOW2_DISCARD_MAX]; | ||
180 | + bool discard_no_unref; | ||
181 | uint64_t cache_clean_interval; | ||
182 | QCryptoBlockOpenOptions *crypto_opts; /* Disk encryption runtime options */ | ||
183 | } Qcow2ReopenState; | ||
184 | @@ -XXX,XX +XXX,XX @@ static int qcow2_update_options_prepare(BlockDriverState *bs, | ||
185 | r->discard_passthrough[QCOW2_DISCARD_OTHER] = | ||
186 | qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false); | ||
187 | |||
188 | + r->discard_no_unref = qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_NO_UNREF, | ||
189 | + false); | ||
190 | + if (r->discard_no_unref && s->qcow_version < 3) { | ||
191 | + error_setg(errp, | ||
192 | + "discard-no-unref is only supported since qcow2 version 3"); | ||
193 | + ret = -EINVAL; | ||
194 | + goto fail; | ||
195 | + } | ||
196 | + | ||
197 | switch (s->crypt_method_header) { | ||
198 | case QCOW_CRYPT_NONE: | ||
199 | if (encryptfmt) { | ||
200 | @@ -XXX,XX +XXX,XX @@ static void qcow2_update_options_commit(BlockDriverState *bs, | ||
201 | s->discard_passthrough[i] = r->discard_passthrough[i]; | ||
202 | } | ||
203 | |||
204 | + s->discard_no_unref = r->discard_no_unref; | ||
205 | + | ||
206 | if (s->cache_clean_interval != r->cache_clean_interval) { | ||
207 | cache_clean_timer_del(bs); | ||
208 | s->cache_clean_interval = r->cache_clean_interval; | ||
209 | diff --git a/qemu-options.hx b/qemu-options.hx | ||
210 | index XXXXXXX..XXXXXXX 100644 | ||
211 | --- a/qemu-options.hx | ||
212 | +++ b/qemu-options.hx | ||
213 | @@ -XXX,XX +XXX,XX @@ SRST | ||
214 | issued on other occasions where a cluster gets freed | ||
215 | (on/off; default: off) | ||
216 | |||
217 | + ``discard-no-unref`` | ||
218 | + When enabled, discards from the guest will not cause cluster | ||
219 | + allocations to be relinquished. This prevents qcow2 fragmentation | ||
220 | + that would be caused by such discards. Besides potential | ||
221 | + performance degradation, such fragmentation can lead to increased | ||
222 | + allocation of clusters past the end of the image file, | ||
223 | + resulting in image files whose file length can grow much larger | ||
224 | + than their guest disk size would suggest. | ||
225 | + If image file length is of concern (e.g. when storing qcow2 | ||
226 | + images directly on block devices), you should consider enabling | ||
227 | + this option. | ||
228 | + | ||
229 | ``overlap-check`` | ||
230 | Which overlap checks to perform for writes to the image | ||
231 | (none/constant/cached/all; default: cached). For details or | ||
232 | -- | 68 | -- |
233 | 2.40.1 | 69 | 2.41.0 | diff view generated by jsdifflib |
1 | When processing vectored guest requests that are not aligned to the | 1 | From: Andrey Drobyshev <andrey.drobyshev@virtuozzo.com> |
---|---|---|---|
2 | storage request alignment, we pad them by adding head and/or tail | 2 | |
3 | buffers for a read-modify-write cycle. | 3 | When target image is using subclusters, and we align the request during |
4 | 4 | copy-on-read, it makes sense to align to subcluster_size rather than | |
5 | The guest can submit I/O vectors up to IOV_MAX (1024) in length, but | 5 | cluster_size. Otherwise we end up with unnecessary allocations. |
6 | with this padding, the vector can exceed that limit. As of | 6 | |
7 | 4c002cef0e9abe7135d7916c51abce47f7fc1ee2 ("util/iov: make | 7 | This commit renames bdrv_round_to_clusters() to bdrv_round_to_subclusters() |
8 | qemu_iovec_init_extended() honest"), we refuse to pad vectors beyond the | 8 | and utilizes subcluster_size field of BlockDriverInfo to make necessary |
9 | limit, instead returning an error to the guest. | 9 | alignments. It affects copy-on-read as well as mirror job (which is |
10 | 10 | using bdrv_round_to_clusters()). | |
11 | To the guest, this appears as a random I/O error. We should not return | 11 | |
12 | an I/O error to the guest when it issued a perfectly valid request. | 12 | This change also fixes the following bug with failing assert (covered by |
13 | 13 | the test in the subsequent commit): | |
14 | Before 4c002cef0e9abe7135d7916c51abce47f7fc1ee2, we just made the vector | 14 | |
15 | longer than IOV_MAX, which generally seems to work (because the guest | 15 | qemu-img create -f qcow2 base.qcow2 64K |
16 | assumes a smaller alignment than we really have, file-posix's | 16 | qemu-img create -f qcow2 -o extended_l2=on,backing_file=base.qcow2,backing_fmt=qcow2 img.qcow2 64K |
17 | raw_co_prw() will generally see bdrv_qiov_is_aligned() return false, and | 17 | qemu-io -c "write -P 0xaa 0 2K" img.qcow2 |
18 | so emulate the request, so that the IOV_MAX does not matter). However, | 18 | qemu-io -C -c "read -P 0x00 2K 62K" img.qcow2 |
19 | that does not seem exactly great. | 19 | |
20 | 20 | qemu-io: ../block/io.c:1236: bdrv_co_do_copy_on_readv: Assertion `skip_bytes < pnum' failed. | |
21 | I see two ways to fix this problem: | 21 | |
22 | 1. We split such long requests into two requests. | 22 | Reviewed-by: Eric Blake <eblake@redhat.com> |
23 | 2. We join some elements of the vector into new buffers to make it | 23 | Reviewed-by: Denis V. Lunev <den@openvz.org> |
24 | shorter. | 24 | Signed-off-by: Andrey Drobyshev <andrey.drobyshev@virtuozzo.com> |
25 | |||
26 | I am wary of (1), because it seems like it may have unintended side | ||
27 | effects. | ||
28 | |||
29 | (2) on the other hand seems relatively simple to implement, with | ||
30 | hopefully few side effects, so this patch does that. | ||
31 | |||
32 | To do this, the use of qemu_iovec_init_extended() in bdrv_pad_request() | ||
33 | is effectively replaced by the new function bdrv_create_padded_qiov(), | ||
34 | which not only wraps the request IOV with padding head/tail, but also | ||
35 | ensures that the resulting vector will not have more than IOV_MAX | ||
36 | elements. Putting that functionality into qemu_iovec_init_extended() is | ||
37 | infeasible because it requires allocating a bounce buffer; doing so | ||
38 | would require many more parameters (buffer alignment, how to initialize | ||
39 | the buffer, and out parameters like the buffer, its length, and the | ||
40 | original elements), which is not reasonable. | ||
41 | |||
42 | Conversely, it is not difficult to move qemu_iovec_init_extended()'s | ||
43 | functionality into bdrv_create_padded_qiov() by using public | ||
44 | qemu_iovec_* functions, so that is what this patch does. | ||
45 | |||
46 | Because bdrv_pad_request() was the only "serious" user of | ||
47 | qemu_iovec_init_extended(), the next patch will remove the latter | ||
48 | function, so the functionality is not implemented twice. | ||
49 | |||
50 | Buglink: https://bugzilla.redhat.com/show_bug.cgi?id=2141964 | ||
51 | Signed-off-by: Hanna Czenczek <hreitz@redhat.com> | ||
52 | Message-Id: <20230411173418.19549-3-hreitz@redhat.com> | ||
53 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | 25 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> |
26 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
27 | Message-ID: <20230711172553.234055-3-andrey.drobyshev@virtuozzo.com> | ||
54 | --- | 28 | --- |
55 | block/io.c | 166 ++++++++++++++++++++++++++++++++++++++++++++++++----- | 29 | include/block/block-io.h | 8 +++---- |
56 | 1 file changed, 151 insertions(+), 15 deletions(-) | 30 | block/io.c | 50 ++++++++++++++++++++-------------------- |
57 | 31 | block/mirror.c | 8 +++---- | |
32 | 3 files changed, 33 insertions(+), 33 deletions(-) | ||
33 | |||
34 | diff --git a/include/block/block-io.h b/include/block/block-io.h | ||
35 | index XXXXXXX..XXXXXXX 100644 | ||
36 | --- a/include/block/block-io.h | ||
37 | +++ b/include/block/block-io.h | ||
38 | @@ -XXX,XX +XXX,XX @@ bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi); | ||
39 | ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs, | ||
40 | Error **errp); | ||
41 | BlockStatsSpecific *bdrv_get_specific_stats(BlockDriverState *bs); | ||
42 | -void bdrv_round_to_clusters(BlockDriverState *bs, | ||
43 | - int64_t offset, int64_t bytes, | ||
44 | - int64_t *cluster_offset, | ||
45 | - int64_t *cluster_bytes); | ||
46 | +void bdrv_round_to_subclusters(BlockDriverState *bs, | ||
47 | + int64_t offset, int64_t bytes, | ||
48 | + int64_t *cluster_offset, | ||
49 | + int64_t *cluster_bytes); | ||
50 | |||
51 | void bdrv_get_backing_filename(BlockDriverState *bs, | ||
52 | char *filename, int filename_size); | ||
58 | diff --git a/block/io.c b/block/io.c | 53 | diff --git a/block/io.c b/block/io.c |
59 | index XXXXXXX..XXXXXXX 100644 | 54 | index XXXXXXX..XXXXXXX 100644 |
60 | --- a/block/io.c | 55 | --- a/block/io.c |
61 | +++ b/block/io.c | 56 | +++ b/block/io.c |
62 | @@ -XXX,XX +XXX,XX @@ out: | 57 | @@ -XXX,XX +XXX,XX @@ BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs) |
63 | * @merge_reads is true for small requests, | 58 | } |
64 | * if @buf_len == @head + bytes + @tail. In this case it is possible that both | 59 | |
65 | * head and tail exist but @buf_len == align and @tail_buf == @buf. | 60 | /** |
66 | + * | 61 | - * Round a region to cluster boundaries |
67 | + * @write is true for write requests, false for read requests. | 62 | + * Round a region to subcluster (if supported) or cluster boundaries |
68 | + * | ||
69 | + * If padding makes the vector too long (exceeding IOV_MAX), then we need to | ||
70 | + * merge existing vector elements into a single one. @collapse_bounce_buf acts | ||
71 | + * as the bounce buffer in such cases. @pre_collapse_qiov has the pre-collapse | ||
72 | + * I/O vector elements so for read requests, the data can be copied back after | ||
73 | + * the read is done. | ||
74 | */ | 63 | */ |
75 | typedef struct BdrvRequestPadding { | 64 | void coroutine_fn GRAPH_RDLOCK |
76 | uint8_t *buf; | 65 | -bdrv_round_to_clusters(BlockDriverState *bs, int64_t offset, int64_t bytes, |
77 | @@ -XXX,XX +XXX,XX @@ typedef struct BdrvRequestPadding { | 66 | - int64_t *cluster_offset, int64_t *cluster_bytes) |
78 | size_t head; | 67 | +bdrv_round_to_subclusters(BlockDriverState *bs, int64_t offset, int64_t bytes, |
79 | size_t tail; | 68 | + int64_t *align_offset, int64_t *align_bytes) |
80 | bool merge_reads; | ||
81 | + bool write; | ||
82 | QEMUIOVector local_qiov; | ||
83 | + | ||
84 | + uint8_t *collapse_bounce_buf; | ||
85 | + size_t collapse_len; | ||
86 | + QEMUIOVector pre_collapse_qiov; | ||
87 | } BdrvRequestPadding; | ||
88 | |||
89 | static bool bdrv_init_padding(BlockDriverState *bs, | ||
90 | int64_t offset, int64_t bytes, | ||
91 | + bool write, | ||
92 | BdrvRequestPadding *pad) | ||
93 | { | 69 | { |
94 | int64_t align = bs->bl.request_alignment; | 70 | BlockDriverInfo bdi; |
95 | @@ -XXX,XX +XXX,XX @@ static bool bdrv_init_padding(BlockDriverState *bs, | 71 | IO_CODE(); |
96 | pad->tail_buf = pad->buf + pad->buf_len - align; | 72 | - if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) { |
73 | - *cluster_offset = offset; | ||
74 | - *cluster_bytes = bytes; | ||
75 | + if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.subcluster_size == 0) { | ||
76 | + *align_offset = offset; | ||
77 | + *align_bytes = bytes; | ||
78 | } else { | ||
79 | - int64_t c = bdi.cluster_size; | ||
80 | - *cluster_offset = QEMU_ALIGN_DOWN(offset, c); | ||
81 | - *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c); | ||
82 | + int64_t c = bdi.subcluster_size; | ||
83 | + *align_offset = QEMU_ALIGN_DOWN(offset, c); | ||
84 | + *align_bytes = QEMU_ALIGN_UP(offset - *align_offset + bytes, c); | ||
97 | } | 85 | } |
98 | |||
99 | + pad->write = write; | ||
100 | + | ||
101 | return true; | ||
102 | } | 86 | } |
103 | 87 | ||
104 | @@ -XXX,XX +XXX,XX @@ zero_mem: | 88 | @@ -XXX,XX +XXX,XX @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes, |
105 | return 0; | 89 | void *bounce_buffer = NULL; |
106 | } | 90 | |
107 | 91 | BlockDriver *drv = bs->drv; | |
108 | -static void bdrv_padding_destroy(BdrvRequestPadding *pad) | 92 | - int64_t cluster_offset; |
109 | +/** | 93 | - int64_t cluster_bytes; |
110 | + * Free *pad's associated buffers, and perform any necessary finalization steps. | 94 | + int64_t align_offset; |
111 | + */ | 95 | + int64_t align_bytes; |
112 | +static void bdrv_padding_finalize(BdrvRequestPadding *pad) | 96 | int64_t skip_bytes; |
113 | { | ||
114 | + if (pad->collapse_bounce_buf) { | ||
115 | + if (!pad->write) { | ||
116 | + /* | ||
117 | + * If padding required elements in the vector to be collapsed into a | ||
118 | + * bounce buffer, copy the bounce buffer content back | ||
119 | + */ | ||
120 | + qemu_iovec_from_buf(&pad->pre_collapse_qiov, 0, | ||
121 | + pad->collapse_bounce_buf, pad->collapse_len); | ||
122 | + } | ||
123 | + qemu_vfree(pad->collapse_bounce_buf); | ||
124 | + qemu_iovec_destroy(&pad->pre_collapse_qiov); | ||
125 | + } | ||
126 | if (pad->buf) { | ||
127 | qemu_vfree(pad->buf); | ||
128 | qemu_iovec_destroy(&pad->local_qiov); | ||
129 | @@ -XXX,XX +XXX,XX @@ static void bdrv_padding_destroy(BdrvRequestPadding *pad) | ||
130 | memset(pad, 0, sizeof(*pad)); | ||
131 | } | ||
132 | |||
133 | +/* | ||
134 | + * Create pad->local_qiov by wrapping @iov in the padding head and tail, while | ||
135 | + * ensuring that the resulting vector will not exceed IOV_MAX elements. | ||
136 | + * | ||
137 | + * To ensure this, when necessary, the first two or three elements of @iov are | ||
138 | + * merged into pad->collapse_bounce_buf and replaced by a reference to that | ||
139 | + * bounce buffer in pad->local_qiov. | ||
140 | + * | ||
141 | + * After performing a read request, the data from the bounce buffer must be | ||
142 | + * copied back into pad->pre_collapse_qiov (e.g. by bdrv_padding_finalize()). | ||
143 | + */ | ||
144 | +static int bdrv_create_padded_qiov(BlockDriverState *bs, | ||
145 | + BdrvRequestPadding *pad, | ||
146 | + struct iovec *iov, int niov, | ||
147 | + size_t iov_offset, size_t bytes) | ||
148 | +{ | ||
149 | + int padded_niov, surplus_count, collapse_count; | ||
150 | + | ||
151 | + /* Assert this invariant */ | ||
152 | + assert(niov <= IOV_MAX); | ||
153 | + | ||
154 | + /* | ||
155 | + * Cannot pad if resulting length would exceed SIZE_MAX. Returning an error | ||
156 | + * to the guest is not ideal, but there is little else we can do. At least | ||
157 | + * this will practically never happen on 64-bit systems. | ||
158 | + */ | ||
159 | + if (SIZE_MAX - pad->head < bytes || | ||
160 | + SIZE_MAX - pad->head - bytes < pad->tail) | ||
161 | + { | ||
162 | + return -EINVAL; | ||
163 | + } | ||
164 | + | ||
165 | + /* Length of the resulting IOV if we just concatenated everything */ | ||
166 | + padded_niov = !!pad->head + niov + !!pad->tail; | ||
167 | + | ||
168 | + qemu_iovec_init(&pad->local_qiov, MIN(padded_niov, IOV_MAX)); | ||
169 | + | ||
170 | + if (pad->head) { | ||
171 | + qemu_iovec_add(&pad->local_qiov, pad->buf, pad->head); | ||
172 | + } | ||
173 | + | ||
174 | + /* | ||
175 | + * If padded_niov > IOV_MAX, we cannot just concatenate everything. | ||
176 | + * Instead, merge the first two or three elements of @iov to reduce the | ||
177 | + * number of vector elements as necessary. | ||
178 | + */ | ||
179 | + if (padded_niov > IOV_MAX) { | ||
180 | + /* | ||
181 | + * Only head and tail can have lead to the number of entries exceeding | ||
182 | + * IOV_MAX, so we can exceed it by the head and tail at most. We need | ||
183 | + * to reduce the number of elements by `surplus_count`, so we merge that | ||
184 | + * many elements plus one into one element. | ||
185 | + */ | ||
186 | + surplus_count = padded_niov - IOV_MAX; | ||
187 | + assert(surplus_count <= !!pad->head + !!pad->tail); | ||
188 | + collapse_count = surplus_count + 1; | ||
189 | + | ||
190 | + /* | ||
191 | + * Move the elements to collapse into `pad->pre_collapse_qiov`, then | ||
192 | + * advance `iov` (and associated variables) by those elements. | ||
193 | + */ | ||
194 | + qemu_iovec_init(&pad->pre_collapse_qiov, collapse_count); | ||
195 | + qemu_iovec_concat_iov(&pad->pre_collapse_qiov, iov, | ||
196 | + collapse_count, iov_offset, SIZE_MAX); | ||
197 | + iov += collapse_count; | ||
198 | + iov_offset = 0; | ||
199 | + niov -= collapse_count; | ||
200 | + bytes -= pad->pre_collapse_qiov.size; | ||
201 | + | ||
202 | + /* | ||
203 | + * Construct the bounce buffer to match the length of the to-collapse | ||
204 | + * vector elements, and for write requests, initialize it with the data | ||
205 | + * from those elements. Then add it to `pad->local_qiov`. | ||
206 | + */ | ||
207 | + pad->collapse_len = pad->pre_collapse_qiov.size; | ||
208 | + pad->collapse_bounce_buf = qemu_blockalign(bs, pad->collapse_len); | ||
209 | + if (pad->write) { | ||
210 | + qemu_iovec_to_buf(&pad->pre_collapse_qiov, 0, | ||
211 | + pad->collapse_bounce_buf, pad->collapse_len); | ||
212 | + } | ||
213 | + qemu_iovec_add(&pad->local_qiov, | ||
214 | + pad->collapse_bounce_buf, pad->collapse_len); | ||
215 | + } | ||
216 | + | ||
217 | + qemu_iovec_concat_iov(&pad->local_qiov, iov, niov, iov_offset, bytes); | ||
218 | + | ||
219 | + if (pad->tail) { | ||
220 | + qemu_iovec_add(&pad->local_qiov, | ||
221 | + pad->buf + pad->buf_len - pad->tail, pad->tail); | ||
222 | + } | ||
223 | + | ||
224 | + assert(pad->local_qiov.niov == MIN(padded_niov, IOV_MAX)); | ||
225 | + return 0; | ||
226 | +} | ||
227 | + | ||
228 | /* | ||
229 | * bdrv_pad_request | ||
230 | * | ||
231 | @@ -XXX,XX +XXX,XX @@ static void bdrv_padding_destroy(BdrvRequestPadding *pad) | ||
232 | * read of padding, bdrv_padding_rmw_read() should be called separately if | ||
233 | * needed. | ||
234 | * | ||
235 | + * @write is true for write requests, false for read requests. | ||
236 | + * | ||
237 | * Request parameters (@qiov, &qiov_offset, &offset, &bytes) are in-out: | ||
238 | * - on function start they represent original request | ||
239 | * - on failure or when padding is not needed they are unchanged | ||
240 | @@ -XXX,XX +XXX,XX @@ static void bdrv_padding_destroy(BdrvRequestPadding *pad) | ||
241 | static int bdrv_pad_request(BlockDriverState *bs, | ||
242 | QEMUIOVector **qiov, size_t *qiov_offset, | ||
243 | int64_t *offset, int64_t *bytes, | ||
244 | + bool write, | ||
245 | BdrvRequestPadding *pad, bool *padded, | ||
246 | BdrvRequestFlags *flags) | ||
247 | { | ||
248 | int ret; | 97 | int ret; |
249 | + struct iovec *sliced_iov; | 98 | int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, |
250 | + int sliced_niov; | 99 | @@ -XXX,XX +XXX,XX @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes, |
251 | + size_t sliced_head, sliced_tail; | 100 | * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which |
252 | 101 | * is one reason we loop rather than doing it all at once. | |
253 | bdrv_check_qiov_request(*offset, *bytes, *qiov, *qiov_offset, &error_abort); | 102 | */ |
254 | 103 | - bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes); | |
255 | - if (!bdrv_init_padding(bs, *offset, *bytes, pad)) { | 104 | - skip_bytes = offset - cluster_offset; |
256 | + if (!bdrv_init_padding(bs, *offset, *bytes, write, pad)) { | 105 | + bdrv_round_to_subclusters(bs, offset, bytes, &align_offset, &align_bytes); |
257 | if (padded) { | 106 | + skip_bytes = offset - align_offset; |
258 | *padded = false; | 107 | |
108 | trace_bdrv_co_do_copy_on_readv(bs, offset, bytes, | ||
109 | - cluster_offset, cluster_bytes); | ||
110 | + align_offset, align_bytes); | ||
111 | |||
112 | - while (cluster_bytes) { | ||
113 | + while (align_bytes) { | ||
114 | int64_t pnum; | ||
115 | |||
116 | if (skip_write) { | ||
117 | ret = 1; /* "already allocated", so nothing will be copied */ | ||
118 | - pnum = MIN(cluster_bytes, max_transfer); | ||
119 | + pnum = MIN(align_bytes, max_transfer); | ||
120 | } else { | ||
121 | - ret = bdrv_is_allocated(bs, cluster_offset, | ||
122 | - MIN(cluster_bytes, max_transfer), &pnum); | ||
123 | + ret = bdrv_is_allocated(bs, align_offset, | ||
124 | + MIN(align_bytes, max_transfer), &pnum); | ||
125 | if (ret < 0) { | ||
126 | /* | ||
127 | * Safe to treat errors in querying allocation as if | ||
128 | * unallocated; we'll probably fail again soon on the | ||
129 | * read, but at least that will set a decent errno. | ||
130 | */ | ||
131 | - pnum = MIN(cluster_bytes, max_transfer); | ||
132 | + pnum = MIN(align_bytes, max_transfer); | ||
133 | } | ||
134 | |||
135 | /* Stop at EOF if the image ends in the middle of the cluster */ | ||
136 | @@ -XXX,XX +XXX,XX @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes, | ||
137 | /* Must copy-on-read; use the bounce buffer */ | ||
138 | pnum = MIN(pnum, MAX_BOUNCE_BUFFER); | ||
139 | if (!bounce_buffer) { | ||
140 | - int64_t max_we_need = MAX(pnum, cluster_bytes - pnum); | ||
141 | + int64_t max_we_need = MAX(pnum, align_bytes - pnum); | ||
142 | int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER); | ||
143 | int64_t bounce_buffer_len = MIN(max_we_need, max_allowed); | ||
144 | |||
145 | @@ -XXX,XX +XXX,XX @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes, | ||
146 | } | ||
147 | qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum); | ||
148 | |||
149 | - ret = bdrv_driver_preadv(bs, cluster_offset, pnum, | ||
150 | + ret = bdrv_driver_preadv(bs, align_offset, pnum, | ||
151 | &local_qiov, 0, 0); | ||
152 | if (ret < 0) { | ||
153 | goto err; | ||
154 | @@ -XXX,XX +XXX,XX @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes, | ||
155 | /* FIXME: Should we (perhaps conditionally) be setting | ||
156 | * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy | ||
157 | * that still correctly reads as zero? */ | ||
158 | - ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum, | ||
159 | + ret = bdrv_co_do_pwrite_zeroes(bs, align_offset, pnum, | ||
160 | BDRV_REQ_WRITE_UNCHANGED); | ||
161 | } else { | ||
162 | /* This does not change the data on the disk, it is not | ||
163 | * necessary to flush even in cache=writethrough mode. | ||
164 | */ | ||
165 | - ret = bdrv_driver_pwritev(bs, cluster_offset, pnum, | ||
166 | + ret = bdrv_driver_pwritev(bs, align_offset, pnum, | ||
167 | &local_qiov, 0, | ||
168 | BDRV_REQ_WRITE_UNCHANGED); | ||
169 | } | ||
170 | @@ -XXX,XX +XXX,XX @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes, | ||
171 | } | ||
259 | } | 172 | } |
260 | return 0; | 173 | |
174 | - cluster_offset += pnum; | ||
175 | - cluster_bytes -= pnum; | ||
176 | + align_offset += pnum; | ||
177 | + align_bytes -= pnum; | ||
178 | progress += pnum - skip_bytes; | ||
179 | skip_bytes = 0; | ||
261 | } | 180 | } |
262 | 181 | diff --git a/block/mirror.c b/block/mirror.c | |
263 | - ret = qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head, | 182 | index XXXXXXX..XXXXXXX 100644 |
264 | - *qiov, *qiov_offset, *bytes, | 183 | --- a/block/mirror.c |
265 | - pad->buf + pad->buf_len - pad->tail, | 184 | +++ b/block/mirror.c |
266 | - pad->tail); | 185 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn mirror_cow_align(MirrorBlockJob *s, int64_t *offset, |
267 | + sliced_iov = qemu_iovec_slice(*qiov, *qiov_offset, *bytes, | 186 | need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity, |
268 | + &sliced_head, &sliced_tail, | 187 | s->cow_bitmap); |
269 | + &sliced_niov); | 188 | if (need_cow) { |
270 | + | 189 | - bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes, |
271 | + /* Guaranteed by bdrv_check_qiov_request() */ | 190 | - &align_offset, &align_bytes); |
272 | + assert(*bytes <= SIZE_MAX); | 191 | + bdrv_round_to_subclusters(blk_bs(s->target), *offset, *bytes, |
273 | + ret = bdrv_create_padded_qiov(bs, pad, sliced_iov, sliced_niov, | 192 | + &align_offset, &align_bytes); |
274 | + sliced_head, *bytes); | ||
275 | if (ret < 0) { | ||
276 | - bdrv_padding_destroy(pad); | ||
277 | + bdrv_padding_finalize(pad); | ||
278 | return ret; | ||
279 | } | 193 | } |
280 | *bytes += pad->head + pad->tail; | 194 | |
281 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_preadv_part(BdrvChild *child, | 195 | if (align_bytes > max_bytes) { |
282 | flags |= BDRV_REQ_COPY_ON_READ; | 196 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn mirror_iteration(MirrorBlockJob *s) |
283 | } | 197 | int64_t target_offset; |
284 | 198 | int64_t target_bytes; | |
285 | - ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad, | 199 | WITH_GRAPH_RDLOCK_GUARD() { |
286 | - NULL, &flags); | 200 | - bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes, |
287 | + ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, false, | 201 | - &target_offset, &target_bytes); |
288 | + &pad, NULL, &flags); | 202 | + bdrv_round_to_subclusters(blk_bs(s->target), offset, io_bytes, |
289 | if (ret < 0) { | 203 | + &target_offset, &target_bytes); |
290 | goto fail; | 204 | } |
291 | } | 205 | if (target_offset == offset && |
292 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_preadv_part(BdrvChild *child, | 206 | target_bytes == io_bytes) { |
293 | bs->bl.request_alignment, | ||
294 | qiov, qiov_offset, flags); | ||
295 | tracked_request_end(&req); | ||
296 | - bdrv_padding_destroy(&pad); | ||
297 | + bdrv_padding_finalize(&pad); | ||
298 | |||
299 | fail: | ||
300 | bdrv_dec_in_flight(bs); | ||
301 | @@ -XXX,XX +XXX,XX @@ bdrv_co_do_zero_pwritev(BdrvChild *child, int64_t offset, int64_t bytes, | ||
302 | /* This flag doesn't make sense for padding or zero writes */ | ||
303 | flags &= ~BDRV_REQ_REGISTERED_BUF; | ||
304 | |||
305 | - padding = bdrv_init_padding(bs, offset, bytes, &pad); | ||
306 | + padding = bdrv_init_padding(bs, offset, bytes, true, &pad); | ||
307 | if (padding) { | ||
308 | assert(!(flags & BDRV_REQ_NO_WAIT)); | ||
309 | bdrv_make_request_serialising(req, align); | ||
310 | @@ -XXX,XX +XXX,XX @@ bdrv_co_do_zero_pwritev(BdrvChild *child, int64_t offset, int64_t bytes, | ||
311 | } | ||
312 | |||
313 | out: | ||
314 | - bdrv_padding_destroy(&pad); | ||
315 | + bdrv_padding_finalize(&pad); | ||
316 | |||
317 | return ret; | ||
318 | } | ||
319 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child, | ||
320 | * bdrv_co_do_zero_pwritev() does aligning by itself, so, we do | ||
321 | * alignment only if there is no ZERO flag. | ||
322 | */ | ||
323 | - ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad, | ||
324 | - &padded, &flags); | ||
325 | + ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, true, | ||
326 | + &pad, &padded, &flags); | ||
327 | if (ret < 0) { | ||
328 | return ret; | ||
329 | } | ||
330 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child, | ||
331 | ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align, | ||
332 | qiov, qiov_offset, flags); | ||
333 | |||
334 | - bdrv_padding_destroy(&pad); | ||
335 | + bdrv_padding_finalize(&pad); | ||
336 | |||
337 | out: | ||
338 | tracked_request_end(&req); | ||
339 | -- | 207 | -- |
340 | 2.40.1 | 208 | 2.41.0 | diff view generated by jsdifflib |
1 | Test that even vectored IO requests with 1024 vector elements that are | 1 | From: Andrey Drobyshev <andrey.drobyshev@virtuozzo.com> |
---|---|---|---|
2 | not aligned to the device's request alignment will succeed. | 2 | |
3 | Add testcase which checks that allocations during copy-on-read are | ||
4 | performed on the subcluster basis when subclusters are enabled in target | ||
5 | image. | ||
6 | |||
7 | This testcase also triggers the following assert with previous commit | ||
8 | not being applied, so we check that as well: | ||
9 | |||
10 | qemu-io: ../block/io.c:1236: bdrv_co_do_copy_on_readv: Assertion `skip_bytes < pnum' failed. | ||
3 | 11 | ||
4 | Reviewed-by: Eric Blake <eblake@redhat.com> | 12 | Reviewed-by: Eric Blake <eblake@redhat.com> |
13 | Reviewed-by: Denis V. Lunev <den@openvz.org> | ||
14 | Signed-off-by: Andrey Drobyshev <andrey.drobyshev@virtuozzo.com> | ||
5 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | 15 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> |
6 | Signed-off-by: Hanna Czenczek <hreitz@redhat.com> | 16 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
7 | Message-Id: <20230411173418.19549-5-hreitz@redhat.com> | 17 | Message-ID: <20230711172553.234055-4-andrey.drobyshev@virtuozzo.com> |
8 | --- | 18 | --- |
9 | tests/qemu-iotests/tests/iov-padding | 85 ++++++++++++++++++++++++ | 19 | tests/qemu-iotests/197 | 29 +++++++++++++++++++++++++++++ |
10 | tests/qemu-iotests/tests/iov-padding.out | 59 ++++++++++++++++ | 20 | tests/qemu-iotests/197.out | 24 ++++++++++++++++++++++++ |
11 | 2 files changed, 144 insertions(+) | 21 | 2 files changed, 53 insertions(+) |
12 | create mode 100755 tests/qemu-iotests/tests/iov-padding | ||
13 | create mode 100644 tests/qemu-iotests/tests/iov-padding.out | ||
14 | 22 | ||
15 | diff --git a/tests/qemu-iotests/tests/iov-padding b/tests/qemu-iotests/tests/iov-padding | 23 | diff --git a/tests/qemu-iotests/197 b/tests/qemu-iotests/197 |
16 | new file mode 100755 | 24 | index XXXXXXX..XXXXXXX 100755 |
17 | index XXXXXXX..XXXXXXX | 25 | --- a/tests/qemu-iotests/197 |
18 | --- /dev/null | 26 | +++ b/tests/qemu-iotests/197 |
19 | +++ b/tests/qemu-iotests/tests/iov-padding | 27 | @@ -XXX,XX +XXX,XX @@ $QEMU_IO -f qcow2 -C -c 'read 0 1024' "$TEST_WRAP" | _filter_qemu_io |
20 | @@ -XXX,XX +XXX,XX @@ | 28 | $QEMU_IO -f qcow2 -c map "$TEST_WRAP" |
21 | +#!/usr/bin/env bash | 29 | _check_test_img |
22 | +# group: rw quick | 30 | |
23 | +# | 31 | +echo |
24 | +# Check the interaction of request padding (to fit alignment restrictions) with | 32 | +echo '=== Copy-on-read with subclusters ===' |
25 | +# vectored I/O from the guest | 33 | +echo |
26 | +# | ||
27 | +# Copyright Red Hat | ||
28 | +# | ||
29 | +# This program is free software; you can redistribute it and/or modify | ||
30 | +# it under the terms of the GNU General Public License as published by | ||
31 | +# the Free Software Foundation; either version 2 of the License, or | ||
32 | +# (at your option) any later version. | ||
33 | +# | ||
34 | +# This program is distributed in the hope that it will be useful, | ||
35 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
36 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
37 | +# GNU General Public License for more details. | ||
38 | +# | ||
39 | +# You should have received a copy of the GNU General Public License | ||
40 | +# along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
41 | +# | ||
42 | + | 34 | + |
43 | +seq=$(basename $0) | 35 | +# Create base and top images 64K (1 cluster) each. Make subclusters enabled |
44 | +echo "QA output created by $seq" | 36 | +# for the top image |
37 | +_make_test_img 64K | ||
38 | +IMGPROTO=file IMGFMT=qcow2 TEST_IMG_FILE="$TEST_WRAP" \ | ||
39 | + _make_test_img --no-opts -o extended_l2=true -F "$IMGFMT" -b "$TEST_IMG" \ | ||
40 | + 64K | _filter_img_create | ||
45 | + | 41 | + |
46 | +status=1 # failure is the default! | 42 | +$QEMU_IO -c "write -P 0xaa 0 64k" "$TEST_IMG" | _filter_qemu_io |
47 | + | 43 | + |
48 | +_cleanup() | 44 | +# Allocate individual subclusters in the top image, and not the whole cluster |
49 | +{ | 45 | +$QEMU_IO -c "write -P 0xbb 28K 2K" -c "write -P 0xcc 34K 2K" "$TEST_WRAP" \ |
50 | + _cleanup_test_img | 46 | + | _filter_qemu_io |
51 | +} | ||
52 | +trap "_cleanup; exit \$status" 0 1 2 3 15 | ||
53 | + | 47 | + |
54 | +# get standard environment, filters and checks | 48 | +# Only 2 subclusters should be allocated in the top image at this point |
55 | +cd .. | 49 | +$QEMU_IMG map "$TEST_WRAP" | _filter_qemu_img_map |
56 | +. ./common.rc | ||
57 | +. ./common.filter | ||
58 | + | 50 | + |
59 | +_supported_fmt raw | 51 | +# Actual copy-on-read operation |
60 | +_supported_proto file | 52 | +$QEMU_IO -C -c "read -P 0xaa 30K 4K" "$TEST_WRAP" | _filter_qemu_io |
61 | + | 53 | + |
62 | +_make_test_img 1M | 54 | +# And here we should have 4 subclusters allocated right in the middle of the |
55 | +# top image. Make sure the whole cluster remains unallocated | ||
56 | +$QEMU_IMG map "$TEST_WRAP" | _filter_qemu_img_map | ||
63 | + | 57 | + |
64 | +IMGSPEC="driver=blkdebug,align=4096,image.driver=file,image.filename=$TEST_IMG" | 58 | +_check_test_img |
65 | + | 59 | + |
66 | +# Four combinations: | 60 | # success, all done |
67 | +# - Offset 4096, length 1023 * 512 + 512: Fully aligned to 4k | 61 | echo '*** done' |
68 | +# - Offset 4096, length 1023 * 512 + 4096: Head is aligned, tail is not | 62 | status=0 |
69 | +# - Offset 512, length 1023 * 512 + 512: Neither head nor tail are aligned | 63 | diff --git a/tests/qemu-iotests/197.out b/tests/qemu-iotests/197.out |
70 | +# - Offset 512, length 1023 * 512 + 4096: Tail is aligned, head is not | 64 | index XXXXXXX..XXXXXXX 100644 |
71 | +for start_offset in 4096 512; do | 65 | --- a/tests/qemu-iotests/197.out |
72 | + for last_element_length in 512 4096; do | 66 | +++ b/tests/qemu-iotests/197.out |
73 | + length=$((1023 * 512 + $last_element_length)) | 67 | @@ -XXX,XX +XXX,XX @@ read 1024/1024 bytes at offset 0 |
68 | 1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
69 | 1 KiB (0x400) bytes allocated at offset 0 bytes (0x0) | ||
70 | No errors were found on the image. | ||
74 | + | 71 | + |
75 | + echo | 72 | +=== Copy-on-read with subclusters === |
76 | + echo "== performing 1024-element vectored requests to image (offset: $start_offset; length: $length) ==" | ||
77 | + | 73 | + |
78 | + # Fill with data for testing | 74 | +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=65536 |
79 | + $QEMU_IO -c 'write -P 1 0 1M' "$TEST_IMG" | _filter_qemu_io | 75 | +Formatting 'TEST_DIR/t.wrap.IMGFMT', fmt=IMGFMT size=65536 backing_file=TEST_DIR/t.IMGFMT backing_fmt=IMGFMT |
80 | + | 76 | +wrote 65536/65536 bytes at offset 0 |
81 | + # 1023 512-byte buffers, and then one with length $last_element_length | 77 | +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) |
82 | + cmd_params="-P 2 $start_offset $(yes 512 | head -n 1023 | tr '\n' ' ') $last_element_length" | 78 | +wrote 2048/2048 bytes at offset 28672 |
83 | + QEMU_IO_OPTIONS="$QEMU_IO_OPTIONS_NO_FMT" $QEMU_IO \ | 79 | +2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) |
84 | + -c "writev $cmd_params" \ | 80 | +wrote 2048/2048 bytes at offset 34816 |
85 | + --image-opts \ | 81 | +2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) |
86 | + "$IMGSPEC" \ | 82 | +Offset Length File |
87 | + | _filter_qemu_io | 83 | +0 0x7000 TEST_DIR/t.IMGFMT |
88 | + | 84 | +0x7000 0x800 TEST_DIR/t.wrap.IMGFMT |
89 | + # Read all patterns -- read the part we just wrote with writev twice, | 85 | +0x7800 0x1000 TEST_DIR/t.IMGFMT |
90 | + # once "normally", and once with a readv, so we see that that works, too | 86 | +0x8800 0x800 TEST_DIR/t.wrap.IMGFMT |
91 | + QEMU_IO_OPTIONS="$QEMU_IO_OPTIONS_NO_FMT" $QEMU_IO \ | 87 | +0x9000 0x7000 TEST_DIR/t.IMGFMT |
92 | + -c "read -P 1 0 $start_offset" \ | 88 | +read 4096/4096 bytes at offset 30720 |
93 | + -c "read -P 2 $start_offset $length" \ | ||
94 | + -c "readv $cmd_params" \ | ||
95 | + -c "read -P 1 $((start_offset + length)) $((1024 * 1024 - length - start_offset))" \ | ||
96 | + --image-opts \ | ||
97 | + "$IMGSPEC" \ | ||
98 | + | _filter_qemu_io | ||
99 | + done | ||
100 | +done | ||
101 | + | ||
102 | +# success, all done | ||
103 | +echo "*** done" | ||
104 | +rm -f $seq.full | ||
105 | +status=0 | ||
106 | diff --git a/tests/qemu-iotests/tests/iov-padding.out b/tests/qemu-iotests/tests/iov-padding.out | ||
107 | new file mode 100644 | ||
108 | index XXXXXXX..XXXXXXX | ||
109 | --- /dev/null | ||
110 | +++ b/tests/qemu-iotests/tests/iov-padding.out | ||
111 | @@ -XXX,XX +XXX,XX @@ | ||
112 | +QA output created by iov-padding | ||
113 | +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 | ||
114 | + | ||
115 | +== performing 1024-element vectored requests to image (offset: 4096; length: 524288) == | ||
116 | +wrote 1048576/1048576 bytes at offset 0 | ||
117 | +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
118 | +wrote 524288/524288 bytes at offset 4096 | ||
119 | +512 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
120 | +read 4096/4096 bytes at offset 0 | ||
121 | +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | 89 | +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) |
122 | +read 524288/524288 bytes at offset 4096 | 90 | +Offset Length File |
123 | +512 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | 91 | +0 0x7000 TEST_DIR/t.IMGFMT |
124 | +read 524288/524288 bytes at offset 4096 | 92 | +0x7000 0x2000 TEST_DIR/t.wrap.IMGFMT |
125 | +512 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | 93 | +0x9000 0x7000 TEST_DIR/t.IMGFMT |
126 | +read 520192/520192 bytes at offset 528384 | 94 | +No errors were found on the image. |
127 | +508 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | 95 | *** done |
128 | + | ||
129 | +== performing 1024-element vectored requests to image (offset: 4096; length: 527872) == | ||
130 | +wrote 1048576/1048576 bytes at offset 0 | ||
131 | +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
132 | +wrote 527872/527872 bytes at offset 4096 | ||
133 | +515.500 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
134 | +read 4096/4096 bytes at offset 0 | ||
135 | +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
136 | +read 527872/527872 bytes at offset 4096 | ||
137 | +515.500 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
138 | +read 527872/527872 bytes at offset 4096 | ||
139 | +515.500 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
140 | +read 516608/516608 bytes at offset 531968 | ||
141 | +504.500 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
142 | + | ||
143 | +== performing 1024-element vectored requests to image (offset: 512; length: 524288) == | ||
144 | +wrote 1048576/1048576 bytes at offset 0 | ||
145 | +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
146 | +wrote 524288/524288 bytes at offset 512 | ||
147 | +512 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
148 | +read 512/512 bytes at offset 0 | ||
149 | +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
150 | +read 524288/524288 bytes at offset 512 | ||
151 | +512 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
152 | +read 524288/524288 bytes at offset 512 | ||
153 | +512 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
154 | +read 523776/523776 bytes at offset 524800 | ||
155 | +511.500 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
156 | + | ||
157 | +== performing 1024-element vectored requests to image (offset: 512; length: 527872) == | ||
158 | +wrote 1048576/1048576 bytes at offset 0 | ||
159 | +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
160 | +wrote 527872/527872 bytes at offset 512 | ||
161 | +515.500 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
162 | +read 512/512 bytes at offset 0 | ||
163 | +512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
164 | +read 527872/527872 bytes at offset 512 | ||
165 | +515.500 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
166 | +read 527872/527872 bytes at offset 512 | ||
167 | +515.500 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
168 | +read 520192/520192 bytes at offset 528384 | ||
169 | +508 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
170 | +*** done | ||
171 | -- | 96 | -- |
172 | 2.40.1 | 97 | 2.41.0 | diff view generated by jsdifflib |
1 | bdrv_pad_request() was the main user of qemu_iovec_init_extended(). | 1 | liburing does not clear sqe->user_data. We must do it ourselves to avoid |
---|---|---|---|
2 | HEAD^ has removed that use, so we can remove qemu_iovec_init_extended() | 2 | undefined behavior in process_cqe() when user_data is used. |
3 | now. | ||
4 | 3 | ||
5 | The only remaining user is qemu_iovec_init_slice(), which can easily | 4 | Note that fdmon-io_uring is currently disabled, so this is a latent bug |
6 | inline the small part it really needs. | 5 | that does not affect users. Let's merge this fix now to make it easier |
6 | to enable fdmon-io_uring in the future (and I'm working on that). | ||
7 | 7 | ||
8 | Note that qemu_iovec_init_extended() offered a memcpy() optimization to | 8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
9 | initialize the new I/O vector. qemu_iovec_concat_iov(), which is used | 9 | Message-ID: <20230426212639.82310-1-stefanha@redhat.com> |
10 | to replace its functionality, does not, but calls qemu_iovec_add() for | 10 | --- |
11 | every single element. If we decide this optimization was important, we | 11 | util/fdmon-io_uring.c | 2 ++ |
12 | will need to re-implement it in qemu_iovec_concat_iov(), which might | 12 | 1 file changed, 2 insertions(+) |
13 | also benefit its pre-existing users. | ||
14 | 13 | ||
15 | Reviewed-by: Eric Blake <eblake@redhat.com> | 14 | diff --git a/util/fdmon-io_uring.c b/util/fdmon-io_uring.c |
16 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
17 | Signed-off-by: Hanna Czenczek <hreitz@redhat.com> | ||
18 | Message-Id: <20230411173418.19549-4-hreitz@redhat.com> | ||
19 | --- | ||
20 | include/qemu/iov.h | 5 --- | ||
21 | util/iov.c | 79 +++++++--------------------------------------- | ||
22 | 2 files changed, 11 insertions(+), 73 deletions(-) | ||
23 | |||
24 | diff --git a/include/qemu/iov.h b/include/qemu/iov.h | ||
25 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
26 | --- a/include/qemu/iov.h | 16 | --- a/util/fdmon-io_uring.c |
27 | +++ b/include/qemu/iov.h | 17 | +++ b/util/fdmon-io_uring.c |
28 | @@ -XXX,XX +XXX,XX @@ static inline void *qemu_iovec_buf(QEMUIOVector *qiov) | 18 | @@ -XXX,XX +XXX,XX @@ static void add_poll_remove_sqe(AioContext *ctx, AioHandler *node) |
29 | 19 | #else | |
30 | void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint); | 20 | io_uring_prep_poll_remove(sqe, node); |
31 | void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov); | 21 | #endif |
32 | -int qemu_iovec_init_extended( | 22 | + io_uring_sqe_set_data(sqe, NULL); |
33 | - QEMUIOVector *qiov, | ||
34 | - void *head_buf, size_t head_len, | ||
35 | - QEMUIOVector *mid_qiov, size_t mid_offset, size_t mid_len, | ||
36 | - void *tail_buf, size_t tail_len); | ||
37 | void qemu_iovec_init_slice(QEMUIOVector *qiov, QEMUIOVector *source, | ||
38 | size_t offset, size_t len); | ||
39 | struct iovec *qemu_iovec_slice(QEMUIOVector *qiov, | ||
40 | diff --git a/util/iov.c b/util/iov.c | ||
41 | index XXXXXXX..XXXXXXX 100644 | ||
42 | --- a/util/iov.c | ||
43 | +++ b/util/iov.c | ||
44 | @@ -XXX,XX +XXX,XX @@ int qemu_iovec_subvec_niov(QEMUIOVector *qiov, size_t offset, size_t len) | ||
45 | return niov; | ||
46 | } | 23 | } |
47 | 24 | ||
48 | -/* | 25 | /* Add a timeout that self-cancels when another cqe becomes ready */ |
49 | - * Compile new iovec, combining @head_buf buffer, sub-qiov of @mid_qiov, | 26 | @@ -XXX,XX +XXX,XX @@ static void add_timeout_sqe(AioContext *ctx, int64_t ns) |
50 | - * and @tail_buf buffer into new qiov. | 27 | |
51 | - */ | 28 | sqe = get_sqe(ctx); |
52 | -int qemu_iovec_init_extended( | 29 | io_uring_prep_timeout(sqe, &ts, 1, 0); |
53 | - QEMUIOVector *qiov, | 30 | + io_uring_sqe_set_data(sqe, NULL); |
54 | - void *head_buf, size_t head_len, | ||
55 | - QEMUIOVector *mid_qiov, size_t mid_offset, size_t mid_len, | ||
56 | - void *tail_buf, size_t tail_len) | ||
57 | -{ | ||
58 | - size_t mid_head, mid_tail; | ||
59 | - int total_niov, mid_niov = 0; | ||
60 | - struct iovec *p, *mid_iov = NULL; | ||
61 | - | ||
62 | - assert(mid_qiov->niov <= IOV_MAX); | ||
63 | - | ||
64 | - if (SIZE_MAX - head_len < mid_len || | ||
65 | - SIZE_MAX - head_len - mid_len < tail_len) | ||
66 | - { | ||
67 | - return -EINVAL; | ||
68 | - } | ||
69 | - | ||
70 | - if (mid_len) { | ||
71 | - mid_iov = qemu_iovec_slice(mid_qiov, mid_offset, mid_len, | ||
72 | - &mid_head, &mid_tail, &mid_niov); | ||
73 | - } | ||
74 | - | ||
75 | - total_niov = !!head_len + mid_niov + !!tail_len; | ||
76 | - if (total_niov > IOV_MAX) { | ||
77 | - return -EINVAL; | ||
78 | - } | ||
79 | - | ||
80 | - if (total_niov == 1) { | ||
81 | - qemu_iovec_init_buf(qiov, NULL, 0); | ||
82 | - p = &qiov->local_iov; | ||
83 | - } else { | ||
84 | - qiov->niov = qiov->nalloc = total_niov; | ||
85 | - qiov->size = head_len + mid_len + tail_len; | ||
86 | - p = qiov->iov = g_new(struct iovec, qiov->niov); | ||
87 | - } | ||
88 | - | ||
89 | - if (head_len) { | ||
90 | - p->iov_base = head_buf; | ||
91 | - p->iov_len = head_len; | ||
92 | - p++; | ||
93 | - } | ||
94 | - | ||
95 | - assert(!mid_niov == !mid_len); | ||
96 | - if (mid_niov) { | ||
97 | - memcpy(p, mid_iov, mid_niov * sizeof(*p)); | ||
98 | - p[0].iov_base = (uint8_t *)p[0].iov_base + mid_head; | ||
99 | - p[0].iov_len -= mid_head; | ||
100 | - p[mid_niov - 1].iov_len -= mid_tail; | ||
101 | - p += mid_niov; | ||
102 | - } | ||
103 | - | ||
104 | - if (tail_len) { | ||
105 | - p->iov_base = tail_buf; | ||
106 | - p->iov_len = tail_len; | ||
107 | - } | ||
108 | - | ||
109 | - return 0; | ||
110 | -} | ||
111 | - | ||
112 | /* | ||
113 | * Check if the contents of subrange of qiov data is all zeroes. | ||
114 | */ | ||
115 | @@ -XXX,XX +XXX,XX @@ bool qemu_iovec_is_zero(QEMUIOVector *qiov, size_t offset, size_t bytes) | ||
116 | void qemu_iovec_init_slice(QEMUIOVector *qiov, QEMUIOVector *source, | ||
117 | size_t offset, size_t len) | ||
118 | { | ||
119 | - int ret; | ||
120 | + struct iovec *slice_iov; | ||
121 | + int slice_niov; | ||
122 | + size_t slice_head, slice_tail; | ||
123 | |||
124 | assert(source->size >= len); | ||
125 | assert(source->size - len >= offset); | ||
126 | |||
127 | - /* We shrink the request, so we can't overflow neither size_t nor MAX_IOV */ | ||
128 | - ret = qemu_iovec_init_extended(qiov, NULL, 0, source, offset, len, NULL, 0); | ||
129 | - assert(ret == 0); | ||
130 | + slice_iov = qemu_iovec_slice(source, offset, len, | ||
131 | + &slice_head, &slice_tail, &slice_niov); | ||
132 | + if (slice_niov == 1) { | ||
133 | + qemu_iovec_init_buf(qiov, slice_iov[0].iov_base + slice_head, len); | ||
134 | + } else { | ||
135 | + qemu_iovec_init(qiov, slice_niov); | ||
136 | + qemu_iovec_concat_iov(qiov, slice_iov, slice_niov, slice_head, len); | ||
137 | + } | ||
138 | } | 31 | } |
139 | 32 | ||
140 | void qemu_iovec_destroy(QEMUIOVector *qiov) | 33 | /* Add sqes from ctx->submit_list for submission */ |
141 | -- | 34 | -- |
142 | 2.40.1 | 35 | 2.41.0 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: Alexander Ivanov <alexander.ivanov@virtuozzo.com> | ||
2 | 1 | ||
3 | data_end field in BDRVParallelsState is set to the biggest offset present | ||
4 | in BAT. If this offset is outside of the image, any further write will | ||
5 | create the cluster at this offset and/or the image will be truncated to | ||
6 | this offset on close. This is definitely not correct. | ||
7 | |||
8 | Raise an error in parallels_open() if data_end points outside the image | ||
9 | and it is not a check (let the check to repaire the image). Set data_end | ||
10 | to the end of the cluster with the last correct offset. | ||
11 | |||
12 | Signed-off-by: Alexander Ivanov <alexander.ivanov@virtuozzo.com> | ||
13 | Message-Id: <20230424093147.197643-2-alexander.ivanov@virtuozzo.com> | ||
14 | Reviewed-by: Hanna Czenczek <hreitz@redhat.com> | ||
15 | Signed-off-by: Hanna Czenczek <hreitz@redhat.com> | ||
16 | --- | ||
17 | block/parallels.c | 17 +++++++++++++++++ | ||
18 | 1 file changed, 17 insertions(+) | ||
19 | |||
20 | diff --git a/block/parallels.c b/block/parallels.c | ||
21 | index XXXXXXX..XXXXXXX 100644 | ||
22 | --- a/block/parallels.c | ||
23 | +++ b/block/parallels.c | ||
24 | @@ -XXX,XX +XXX,XX @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags, | ||
25 | BDRVParallelsState *s = bs->opaque; | ||
26 | ParallelsHeader ph; | ||
27 | int ret, size, i; | ||
28 | + int64_t file_nb_sectors; | ||
29 | QemuOpts *opts = NULL; | ||
30 | Error *local_err = NULL; | ||
31 | char *buf; | ||
32 | @@ -XXX,XX +XXX,XX @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags, | ||
33 | return ret; | ||
34 | } | ||
35 | |||
36 | + file_nb_sectors = bdrv_nb_sectors(bs->file->bs); | ||
37 | + if (file_nb_sectors < 0) { | ||
38 | + return -EINVAL; | ||
39 | + } | ||
40 | + | ||
41 | ret = bdrv_pread(bs->file, 0, sizeof(ph), &ph, 0); | ||
42 | if (ret < 0) { | ||
43 | goto fail; | ||
44 | @@ -XXX,XX +XXX,XX @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags, | ||
45 | |||
46 | for (i = 0; i < s->bat_size; i++) { | ||
47 | int64_t off = bat2sect(s, i); | ||
48 | + if (off >= file_nb_sectors) { | ||
49 | + if (flags & BDRV_O_CHECK) { | ||
50 | + continue; | ||
51 | + } | ||
52 | + error_setg(errp, "parallels: Offset %" PRIi64 " in BAT[%d] entry " | ||
53 | + "is larger than file size (%" PRIi64 ")", | ||
54 | + off << BDRV_SECTOR_BITS, i, | ||
55 | + file_nb_sectors << BDRV_SECTOR_BITS); | ||
56 | + ret = -EINVAL; | ||
57 | + goto fail; | ||
58 | + } | ||
59 | if (off >= s->data_end) { | ||
60 | s->data_end = off + s->tracks; | ||
61 | } | ||
62 | -- | ||
63 | 2.40.1 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: Alexander Ivanov <alexander.ivanov@virtuozzo.com> | ||
2 | 1 | ||
3 | Don't let high_off be more than the file size even if we don't fix the | ||
4 | image. | ||
5 | |||
6 | Signed-off-by: Alexander Ivanov <alexander.ivanov@virtuozzo.com> | ||
7 | Reviewed-by: Denis V. Lunev <den@openvz.org> | ||
8 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
9 | Message-Id: <20230424093147.197643-3-alexander.ivanov@virtuozzo.com> | ||
10 | Reviewed-by: Hanna Czenczek <hreitz@redhat.com> | ||
11 | Signed-off-by: Hanna Czenczek <hreitz@redhat.com> | ||
12 | --- | ||
13 | block/parallels.c | 4 ++-- | ||
14 | 1 file changed, 2 insertions(+), 2 deletions(-) | ||
15 | |||
16 | diff --git a/block/parallels.c b/block/parallels.c | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/block/parallels.c | ||
19 | +++ b/block/parallels.c | ||
20 | @@ -XXX,XX +XXX,XX @@ parallels_co_check(BlockDriverState *bs, BdrvCheckResult *res, | ||
21 | fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR", i); | ||
22 | res->corruptions++; | ||
23 | if (fix & BDRV_FIX_ERRORS) { | ||
24 | - prev_off = 0; | ||
25 | s->bat_bitmap[i] = 0; | ||
26 | res->corruptions_fixed++; | ||
27 | flush_bat = true; | ||
28 | - continue; | ||
29 | } | ||
30 | + prev_off = 0; | ||
31 | + continue; | ||
32 | } | ||
33 | |||
34 | res->bfi.allocated_clusters++; | ||
35 | -- | ||
36 | 2.40.1 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: Alexander Ivanov <alexander.ivanov@virtuozzo.com> | ||
2 | 1 | ||
3 | Set data_end to the end of the last cluster inside the image. In such a | ||
4 | way we can be sure that corrupted offsets in the BAT can't affect on the | ||
5 | image size. If there are no allocated clusters set image_end_offset by | ||
6 | data_end. | ||
7 | |||
8 | Signed-off-by: Alexander Ivanov <alexander.ivanov@virtuozzo.com> | ||
9 | Reviewed-by: Denis V. Lunev <den@openvz.org> | ||
10 | Message-Id: <20230424093147.197643-4-alexander.ivanov@virtuozzo.com> | ||
11 | Reviewed-by: Hanna Czenczek <hreitz@redhat.com> | ||
12 | Signed-off-by: Hanna Czenczek <hreitz@redhat.com> | ||
13 | --- | ||
14 | block/parallels.c | 8 +++++++- | ||
15 | 1 file changed, 7 insertions(+), 1 deletion(-) | ||
16 | |||
17 | diff --git a/block/parallels.c b/block/parallels.c | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/block/parallels.c | ||
20 | +++ b/block/parallels.c | ||
21 | @@ -XXX,XX +XXX,XX @@ parallels_co_check(BlockDriverState *bs, BdrvCheckResult *res, | ||
22 | } | ||
23 | } | ||
24 | |||
25 | - res->image_end_offset = high_off + s->cluster_size; | ||
26 | + if (high_off == 0) { | ||
27 | + res->image_end_offset = s->data_end << BDRV_SECTOR_BITS; | ||
28 | + } else { | ||
29 | + res->image_end_offset = high_off + s->cluster_size; | ||
30 | + s->data_end = res->image_end_offset >> BDRV_SECTOR_BITS; | ||
31 | + } | ||
32 | + | ||
33 | if (size > res->image_end_offset) { | ||
34 | int64_t count; | ||
35 | count = DIV_ROUND_UP(size - res->image_end_offset, s->cluster_size); | ||
36 | -- | ||
37 | 2.40.1 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: Alexander Ivanov <alexander.ivanov@virtuozzo.com> | ||
2 | 1 | ||
3 | This helper will be reused in next patches during parallels_co_check | ||
4 | rework to simplify its code. | ||
5 | |||
6 | Signed-off-by: Alexander Ivanov <alexander.ivanov@virtuozzo.com> | ||
7 | Reviewed-by: Denis V. Lunev <den@openvz.org> | ||
8 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
9 | Message-Id: <20230424093147.197643-5-alexander.ivanov@virtuozzo.com> | ||
10 | Reviewed-by: Hanna Czenczek <hreitz@redhat.com> | ||
11 | Signed-off-by: Hanna Czenczek <hreitz@redhat.com> | ||
12 | --- | ||
13 | block/parallels.c | 11 ++++++++--- | ||
14 | 1 file changed, 8 insertions(+), 3 deletions(-) | ||
15 | |||
16 | diff --git a/block/parallels.c b/block/parallels.c | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/block/parallels.c | ||
19 | +++ b/block/parallels.c | ||
20 | @@ -XXX,XX +XXX,XX @@ static int64_t block_status(BDRVParallelsState *s, int64_t sector_num, | ||
21 | return start_off; | ||
22 | } | ||
23 | |||
24 | +static void parallels_set_bat_entry(BDRVParallelsState *s, | ||
25 | + uint32_t index, uint32_t offset) | ||
26 | +{ | ||
27 | + s->bat_bitmap[index] = cpu_to_le32(offset); | ||
28 | + bitmap_set(s->bat_dirty_bmap, bat_entry_off(index) / s->bat_dirty_block, 1); | ||
29 | +} | ||
30 | + | ||
31 | static int64_t coroutine_fn GRAPH_RDLOCK | ||
32 | allocate_clusters(BlockDriverState *bs, int64_t sector_num, | ||
33 | int nb_sectors, int *pnum) | ||
34 | @@ -XXX,XX +XXX,XX @@ allocate_clusters(BlockDriverState *bs, int64_t sector_num, | ||
35 | } | ||
36 | |||
37 | for (i = 0; i < to_allocate; i++) { | ||
38 | - s->bat_bitmap[idx + i] = cpu_to_le32(s->data_end / s->off_multiplier); | ||
39 | + parallels_set_bat_entry(s, idx + i, s->data_end / s->off_multiplier); | ||
40 | s->data_end += s->tracks; | ||
41 | - bitmap_set(s->bat_dirty_bmap, | ||
42 | - bat_entry_off(idx + i) / s->bat_dirty_block, 1); | ||
43 | } | ||
44 | |||
45 | return bat2sect(s, idx) + sector_num % s->tracks; | ||
46 | -- | ||
47 | 2.40.1 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: Alexander Ivanov <alexander.ivanov@virtuozzo.com> | ||
2 | 1 | ||
3 | BAT is written in the context of conventional operations over the image | ||
4 | inside bdrv_co_flush() when it calls parallels_co_flush_to_os() callback. | ||
5 | Thus we should not modify BAT array directly, but call | ||
6 | parallels_set_bat_entry() helper and bdrv_co_flush() further on. After | ||
7 | that there is no need to manually write BAT and track its modification. | ||
8 | |||
9 | This makes code more generic and allows to split parallels_set_bat_entry() | ||
10 | for independent pieces. | ||
11 | |||
12 | Signed-off-by: Alexander Ivanov <alexander.ivanov@virtuozzo.com> | ||
13 | Reviewed-by: Denis V. Lunev <den@openvz.org> | ||
14 | Message-Id: <20230424093147.197643-6-alexander.ivanov@virtuozzo.com> | ||
15 | Reviewed-by: Hanna Czenczek <hreitz@redhat.com> | ||
16 | Signed-off-by: Hanna Czenczek <hreitz@redhat.com> | ||
17 | --- | ||
18 | block/parallels.c | 23 ++++++++++------------- | ||
19 | 1 file changed, 10 insertions(+), 13 deletions(-) | ||
20 | |||
21 | diff --git a/block/parallels.c b/block/parallels.c | ||
22 | index XXXXXXX..XXXXXXX 100644 | ||
23 | --- a/block/parallels.c | ||
24 | +++ b/block/parallels.c | ||
25 | @@ -XXX,XX +XXX,XX @@ parallels_co_check(BlockDriverState *bs, BdrvCheckResult *res, | ||
26 | { | ||
27 | BDRVParallelsState *s = bs->opaque; | ||
28 | int64_t size, prev_off, high_off; | ||
29 | - int ret; | ||
30 | + int ret = 0; | ||
31 | uint32_t i; | ||
32 | - bool flush_bat = false; | ||
33 | |||
34 | size = bdrv_getlength(bs->file->bs); | ||
35 | if (size < 0) { | ||
36 | @@ -XXX,XX +XXX,XX @@ parallels_co_check(BlockDriverState *bs, BdrvCheckResult *res, | ||
37 | fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR", i); | ||
38 | res->corruptions++; | ||
39 | if (fix & BDRV_FIX_ERRORS) { | ||
40 | - s->bat_bitmap[i] = 0; | ||
41 | + parallels_set_bat_entry(s, i, 0); | ||
42 | res->corruptions_fixed++; | ||
43 | - flush_bat = true; | ||
44 | } | ||
45 | prev_off = 0; | ||
46 | continue; | ||
47 | @@ -XXX,XX +XXX,XX @@ parallels_co_check(BlockDriverState *bs, BdrvCheckResult *res, | ||
48 | prev_off = off; | ||
49 | } | ||
50 | |||
51 | - ret = 0; | ||
52 | - if (flush_bat) { | ||
53 | - ret = bdrv_co_pwrite_sync(bs->file, 0, s->header_size, s->header, 0); | ||
54 | - if (ret < 0) { | ||
55 | - res->check_errors++; | ||
56 | - goto out; | ||
57 | - } | ||
58 | - } | ||
59 | - | ||
60 | if (high_off == 0) { | ||
61 | res->image_end_offset = s->data_end << BDRV_SECTOR_BITS; | ||
62 | } else { | ||
63 | @@ -XXX,XX +XXX,XX @@ parallels_co_check(BlockDriverState *bs, BdrvCheckResult *res, | ||
64 | |||
65 | out: | ||
66 | qemu_co_mutex_unlock(&s->lock); | ||
67 | + | ||
68 | + if (ret == 0) { | ||
69 | + ret = bdrv_co_flush(bs); | ||
70 | + if (ret < 0) { | ||
71 | + res->check_errors++; | ||
72 | + } | ||
73 | + } | ||
74 | + | ||
75 | return ret; | ||
76 | } | ||
77 | |||
78 | -- | ||
79 | 2.40.1 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: Alexander Ivanov <alexander.ivanov@virtuozzo.com> | ||
2 | 1 | ||
3 | We will add more and more checks so we need a better code structure | ||
4 | in parallels_co_check. Let each check performs in a separate loop | ||
5 | in a separate helper. | ||
6 | |||
7 | Signed-off-by: Alexander Ivanov <alexander.ivanov@virtuozzo.com> | ||
8 | Reviewed-by: Denis V. Lunev <den@openvz.org> | ||
9 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
10 | Message-Id: <20230424093147.197643-7-alexander.ivanov@virtuozzo.com> | ||
11 | Reviewed-by: Hanna Czenczek <hreitz@redhat.com> | ||
12 | Signed-off-by: Hanna Czenczek <hreitz@redhat.com> | ||
13 | --- | ||
14 | block/parallels.c | 31 +++++++++++++++++++++---------- | ||
15 | 1 file changed, 21 insertions(+), 10 deletions(-) | ||
16 | |||
17 | diff --git a/block/parallels.c b/block/parallels.c | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/block/parallels.c | ||
20 | +++ b/block/parallels.c | ||
21 | @@ -XXX,XX +XXX,XX @@ parallels_co_readv(BlockDriverState *bs, int64_t sector_num, int nb_sectors, | ||
22 | return ret; | ||
23 | } | ||
24 | |||
25 | +static void parallels_check_unclean(BlockDriverState *bs, | ||
26 | + BdrvCheckResult *res, | ||
27 | + BdrvCheckMode fix) | ||
28 | +{ | ||
29 | + BDRVParallelsState *s = bs->opaque; | ||
30 | + | ||
31 | + if (!s->header_unclean) { | ||
32 | + return; | ||
33 | + } | ||
34 | + | ||
35 | + fprintf(stderr, "%s image was not closed correctly\n", | ||
36 | + fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR"); | ||
37 | + res->corruptions++; | ||
38 | + if (fix & BDRV_FIX_ERRORS) { | ||
39 | + /* parallels_close will do the job right */ | ||
40 | + res->corruptions_fixed++; | ||
41 | + s->header_unclean = false; | ||
42 | + } | ||
43 | +} | ||
44 | |||
45 | static int coroutine_fn GRAPH_RDLOCK | ||
46 | parallels_co_check(BlockDriverState *bs, BdrvCheckResult *res, | ||
47 | @@ -XXX,XX +XXX,XX @@ parallels_co_check(BlockDriverState *bs, BdrvCheckResult *res, | ||
48 | } | ||
49 | |||
50 | qemu_co_mutex_lock(&s->lock); | ||
51 | - if (s->header_unclean) { | ||
52 | - fprintf(stderr, "%s image was not closed correctly\n", | ||
53 | - fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR"); | ||
54 | - res->corruptions++; | ||
55 | - if (fix & BDRV_FIX_ERRORS) { | ||
56 | - /* parallels_close will do the job right */ | ||
57 | - res->corruptions_fixed++; | ||
58 | - s->header_unclean = false; | ||
59 | - } | ||
60 | - } | ||
61 | + | ||
62 | + parallels_check_unclean(bs, res, fix); | ||
63 | |||
64 | res->bfi.total_clusters = s->bat_size; | ||
65 | res->bfi.compressed_clusters = 0; /* compression is not supported */ | ||
66 | -- | ||
67 | 2.40.1 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: Alexander Ivanov <alexander.ivanov@virtuozzo.com> | ||
2 | 1 | ||
3 | Exclude out-of-image clusters from allocated and fragmented clusters | ||
4 | calculation. | ||
5 | |||
6 | Signed-off-by: Alexander Ivanov <alexander.ivanov@virtuozzo.com> | ||
7 | Message-Id: <20230424093147.197643-9-alexander.ivanov@virtuozzo.com> | ||
8 | Reviewed-by: Hanna Czenczek <hreitz@redhat.com> | ||
9 | Signed-off-by: Hanna Czenczek <hreitz@redhat.com> | ||
10 | --- | ||
11 | block/parallels.c | 6 +++++- | ||
12 | 1 file changed, 5 insertions(+), 1 deletion(-) | ||
13 | |||
14 | diff --git a/block/parallels.c b/block/parallels.c | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/block/parallels.c | ||
17 | +++ b/block/parallels.c | ||
18 | @@ -XXX,XX +XXX,XX @@ parallels_co_check(BlockDriverState *bs, BdrvCheckResult *res, | ||
19 | prev_off = 0; | ||
20 | for (i = 0; i < s->bat_size; i++) { | ||
21 | int64_t off = bat2sect(s, i) << BDRV_SECTOR_BITS; | ||
22 | - if (off == 0) { | ||
23 | + /* | ||
24 | + * If BDRV_FIX_ERRORS is not set, out-of-image BAT entries were not | ||
25 | + * fixed. Skip not allocated and out-of-image BAT entries. | ||
26 | + */ | ||
27 | + if (off == 0 || off + s->cluster_size > res->image_end_offset) { | ||
28 | prev_off = 0; | ||
29 | continue; | ||
30 | } | ||
31 | -- | ||
32 | 2.40.1 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: Alexander Ivanov <alexander.ivanov@virtuozzo.com> | ||
2 | 1 | ||
3 | We will add more and more checks so we need a better code structure | ||
4 | in parallels_co_check. Let each check performs in a separate loop | ||
5 | in a separate helper. | ||
6 | |||
7 | Signed-off-by: Alexander Ivanov <alexander.ivanov@virtuozzo.com> | ||
8 | Message-Id: <20230424093147.197643-10-alexander.ivanov@virtuozzo.com> | ||
9 | Reviewed-by: Hanna Czenczek <hreitz@redhat.com> | ||
10 | Signed-off-by: Hanna Czenczek <hreitz@redhat.com> | ||
11 | --- | ||
12 | block/parallels.c | 74 ++++++++++++++++++++++++++++------------------- | ||
13 | 1 file changed, 45 insertions(+), 29 deletions(-) | ||
14 | |||
15 | diff --git a/block/parallels.c b/block/parallels.c | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/block/parallels.c | ||
18 | +++ b/block/parallels.c | ||
19 | @@ -XXX,XX +XXX,XX @@ parallels_check_outside_image(BlockDriverState *bs, BdrvCheckResult *res, | ||
20 | } | ||
21 | |||
22 | static int coroutine_fn GRAPH_RDLOCK | ||
23 | -parallels_co_check(BlockDriverState *bs, BdrvCheckResult *res, | ||
24 | - BdrvCheckMode fix) | ||
25 | +parallels_check_leak(BlockDriverState *bs, BdrvCheckResult *res, | ||
26 | + BdrvCheckMode fix) | ||
27 | { | ||
28 | BDRVParallelsState *s = bs->opaque; | ||
29 | - int64_t size, prev_off; | ||
30 | + int64_t size; | ||
31 | int ret; | ||
32 | - uint32_t i; | ||
33 | |||
34 | size = bdrv_getlength(bs->file->bs); | ||
35 | if (size < 0) { | ||
36 | @@ -XXX,XX +XXX,XX @@ parallels_co_check(BlockDriverState *bs, BdrvCheckResult *res, | ||
37 | return size; | ||
38 | } | ||
39 | |||
40 | + if (size > res->image_end_offset) { | ||
41 | + int64_t count; | ||
42 | + count = DIV_ROUND_UP(size - res->image_end_offset, s->cluster_size); | ||
43 | + fprintf(stderr, "%s space leaked at the end of the image %" PRId64 "\n", | ||
44 | + fix & BDRV_FIX_LEAKS ? "Repairing" : "ERROR", | ||
45 | + size - res->image_end_offset); | ||
46 | + res->leaks += count; | ||
47 | + if (fix & BDRV_FIX_LEAKS) { | ||
48 | + Error *local_err = NULL; | ||
49 | + | ||
50 | + /* | ||
51 | + * In order to really repair the image, we must shrink it. | ||
52 | + * That means we have to pass exact=true. | ||
53 | + */ | ||
54 | + ret = bdrv_co_truncate(bs->file, res->image_end_offset, true, | ||
55 | + PREALLOC_MODE_OFF, 0, &local_err); | ||
56 | + if (ret < 0) { | ||
57 | + error_report_err(local_err); | ||
58 | + res->check_errors++; | ||
59 | + return ret; | ||
60 | + } | ||
61 | + res->leaks_fixed += count; | ||
62 | + } | ||
63 | + } | ||
64 | + | ||
65 | + return 0; | ||
66 | +} | ||
67 | + | ||
68 | +static int coroutine_fn GRAPH_RDLOCK | ||
69 | +parallels_co_check(BlockDriverState *bs, BdrvCheckResult *res, | ||
70 | + BdrvCheckMode fix) | ||
71 | +{ | ||
72 | + BDRVParallelsState *s = bs->opaque; | ||
73 | + int64_t prev_off; | ||
74 | + int ret; | ||
75 | + uint32_t i; | ||
76 | + | ||
77 | qemu_co_mutex_lock(&s->lock); | ||
78 | |||
79 | parallels_check_unclean(bs, res, fix); | ||
80 | @@ -XXX,XX +XXX,XX @@ parallels_co_check(BlockDriverState *bs, BdrvCheckResult *res, | ||
81 | goto out; | ||
82 | } | ||
83 | |||
84 | + ret = parallels_check_leak(bs, res, fix); | ||
85 | + if (ret < 0) { | ||
86 | + goto out; | ||
87 | + } | ||
88 | + | ||
89 | res->bfi.total_clusters = s->bat_size; | ||
90 | res->bfi.compressed_clusters = 0; /* compression is not supported */ | ||
91 | |||
92 | @@ -XXX,XX +XXX,XX @@ parallels_co_check(BlockDriverState *bs, BdrvCheckResult *res, | ||
93 | prev_off = off; | ||
94 | } | ||
95 | |||
96 | - if (size > res->image_end_offset) { | ||
97 | - int64_t count; | ||
98 | - count = DIV_ROUND_UP(size - res->image_end_offset, s->cluster_size); | ||
99 | - fprintf(stderr, "%s space leaked at the end of the image %" PRId64 "\n", | ||
100 | - fix & BDRV_FIX_LEAKS ? "Repairing" : "ERROR", | ||
101 | - size - res->image_end_offset); | ||
102 | - res->leaks += count; | ||
103 | - if (fix & BDRV_FIX_LEAKS) { | ||
104 | - Error *local_err = NULL; | ||
105 | - | ||
106 | - /* | ||
107 | - * In order to really repair the image, we must shrink it. | ||
108 | - * That means we have to pass exact=true. | ||
109 | - */ | ||
110 | - ret = bdrv_co_truncate(bs->file, res->image_end_offset, true, | ||
111 | - PREALLOC_MODE_OFF, 0, &local_err); | ||
112 | - if (ret < 0) { | ||
113 | - error_report_err(local_err); | ||
114 | - res->check_errors++; | ||
115 | - goto out; | ||
116 | - } | ||
117 | - res->leaks_fixed += count; | ||
118 | - } | ||
119 | - } | ||
120 | - | ||
121 | out: | ||
122 | qemu_co_mutex_unlock(&s->lock); | ||
123 | |||
124 | -- | ||
125 | 2.40.1 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: Alexander Ivanov <alexander.ivanov@virtuozzo.com> | ||
2 | 1 | ||
3 | We will add more and more checks so we need a better code structure | ||
4 | in parallels_co_check. Let each check performs in a separate loop | ||
5 | in a separate helper. | ||
6 | |||
7 | Signed-off-by: Alexander Ivanov <alexander.ivanov@virtuozzo.com> | ||
8 | Reviewed-by: Denis V. Lunev <den@openvz.org> | ||
9 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> | ||
10 | Message-Id: <20230424093147.197643-11-alexander.ivanov@virtuozzo.com> | ||
11 | Reviewed-by: Hanna Czenczek <hreitz@redhat.com> | ||
12 | Signed-off-by: Hanna Czenczek <hreitz@redhat.com> | ||
13 | --- | ||
14 | block/parallels.c | 52 +++++++++++++++++++++++++++-------------------- | ||
15 | 1 file changed, 30 insertions(+), 22 deletions(-) | ||
16 | |||
17 | diff --git a/block/parallels.c b/block/parallels.c | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/block/parallels.c | ||
20 | +++ b/block/parallels.c | ||
21 | @@ -XXX,XX +XXX,XX @@ parallels_check_leak(BlockDriverState *bs, BdrvCheckResult *res, | ||
22 | return 0; | ||
23 | } | ||
24 | |||
25 | -static int coroutine_fn GRAPH_RDLOCK | ||
26 | -parallels_co_check(BlockDriverState *bs, BdrvCheckResult *res, | ||
27 | - BdrvCheckMode fix) | ||
28 | +static void parallels_collect_statistics(BlockDriverState *bs, | ||
29 | + BdrvCheckResult *res, | ||
30 | + BdrvCheckMode fix) | ||
31 | { | ||
32 | BDRVParallelsState *s = bs->opaque; | ||
33 | - int64_t prev_off; | ||
34 | - int ret; | ||
35 | + int64_t off, prev_off; | ||
36 | uint32_t i; | ||
37 | |||
38 | - qemu_co_mutex_lock(&s->lock); | ||
39 | - | ||
40 | - parallels_check_unclean(bs, res, fix); | ||
41 | - | ||
42 | - ret = parallels_check_outside_image(bs, res, fix); | ||
43 | - if (ret < 0) { | ||
44 | - goto out; | ||
45 | - } | ||
46 | - | ||
47 | - ret = parallels_check_leak(bs, res, fix); | ||
48 | - if (ret < 0) { | ||
49 | - goto out; | ||
50 | - } | ||
51 | - | ||
52 | res->bfi.total_clusters = s->bat_size; | ||
53 | res->bfi.compressed_clusters = 0; /* compression is not supported */ | ||
54 | |||
55 | prev_off = 0; | ||
56 | for (i = 0; i < s->bat_size; i++) { | ||
57 | - int64_t off = bat2sect(s, i) << BDRV_SECTOR_BITS; | ||
58 | + off = bat2sect(s, i) << BDRV_SECTOR_BITS; | ||
59 | /* | ||
60 | * If BDRV_FIX_ERRORS is not set, out-of-image BAT entries were not | ||
61 | * fixed. Skip not allocated and out-of-image BAT entries. | ||
62 | @@ -XXX,XX +XXX,XX @@ parallels_co_check(BlockDriverState *bs, BdrvCheckResult *res, | ||
63 | continue; | ||
64 | } | ||
65 | |||
66 | - res->bfi.allocated_clusters++; | ||
67 | - | ||
68 | if (prev_off != 0 && (prev_off + s->cluster_size) != off) { | ||
69 | res->bfi.fragmented_clusters++; | ||
70 | } | ||
71 | prev_off = off; | ||
72 | + res->bfi.allocated_clusters++; | ||
73 | } | ||
74 | +} | ||
75 | + | ||
76 | +static int coroutine_fn GRAPH_RDLOCK | ||
77 | +parallels_co_check(BlockDriverState *bs, BdrvCheckResult *res, | ||
78 | + BdrvCheckMode fix) | ||
79 | +{ | ||
80 | + BDRVParallelsState *s = bs->opaque; | ||
81 | + int ret; | ||
82 | + | ||
83 | + qemu_co_mutex_lock(&s->lock); | ||
84 | + | ||
85 | + parallels_check_unclean(bs, res, fix); | ||
86 | + | ||
87 | + ret = parallels_check_outside_image(bs, res, fix); | ||
88 | + if (ret < 0) { | ||
89 | + goto out; | ||
90 | + } | ||
91 | + | ||
92 | + ret = parallels_check_leak(bs, res, fix); | ||
93 | + if (ret < 0) { | ||
94 | + goto out; | ||
95 | + } | ||
96 | + | ||
97 | + parallels_collect_statistics(bs, res, fix); | ||
98 | |||
99 | out: | ||
100 | qemu_co_mutex_unlock(&s->lock); | ||
101 | -- | ||
102 | 2.40.1 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: Alexander Ivanov <alexander.ivanov@virtuozzo.com> | ||
2 | 1 | ||
3 | Replace the way we use mutex in parallels_co_check() for simpler | ||
4 | and less error-prone code. | ||
5 | |||
6 | Signed-off-by: Alexander Ivanov <alexander.ivanov@virtuozzo.com> | ||
7 | Reviewed-by: Denis V. Lunev <den@openvz.org> | ||
8 | Message-Id: <20230424093147.197643-12-alexander.ivanov@virtuozzo.com> | ||
9 | Reviewed-by: Hanna Czenczek <hreitz@redhat.com> | ||
10 | Signed-off-by: Hanna Czenczek <hreitz@redhat.com> | ||
11 | --- | ||
12 | block/parallels.c | 33 ++++++++++++++------------------- | ||
13 | 1 file changed, 14 insertions(+), 19 deletions(-) | ||
14 | |||
15 | diff --git a/block/parallels.c b/block/parallels.c | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/block/parallels.c | ||
18 | +++ b/block/parallels.c | ||
19 | @@ -XXX,XX +XXX,XX @@ parallels_co_check(BlockDriverState *bs, BdrvCheckResult *res, | ||
20 | BDRVParallelsState *s = bs->opaque; | ||
21 | int ret; | ||
22 | |||
23 | - qemu_co_mutex_lock(&s->lock); | ||
24 | + WITH_QEMU_LOCK_GUARD(&s->lock) { | ||
25 | + parallels_check_unclean(bs, res, fix); | ||
26 | |||
27 | - parallels_check_unclean(bs, res, fix); | ||
28 | + ret = parallels_check_outside_image(bs, res, fix); | ||
29 | + if (ret < 0) { | ||
30 | + return ret; | ||
31 | + } | ||
32 | |||
33 | - ret = parallels_check_outside_image(bs, res, fix); | ||
34 | - if (ret < 0) { | ||
35 | - goto out; | ||
36 | - } | ||
37 | + ret = parallels_check_leak(bs, res, fix); | ||
38 | + if (ret < 0) { | ||
39 | + return ret; | ||
40 | + } | ||
41 | |||
42 | - ret = parallels_check_leak(bs, res, fix); | ||
43 | - if (ret < 0) { | ||
44 | - goto out; | ||
45 | + parallels_collect_statistics(bs, res, fix); | ||
46 | } | ||
47 | |||
48 | - parallels_collect_statistics(bs, res, fix); | ||
49 | - | ||
50 | -out: | ||
51 | - qemu_co_mutex_unlock(&s->lock); | ||
52 | - | ||
53 | - if (ret == 0) { | ||
54 | - ret = bdrv_co_flush(bs); | ||
55 | - if (ret < 0) { | ||
56 | - res->check_errors++; | ||
57 | - } | ||
58 | + ret = bdrv_co_flush(bs); | ||
59 | + if (ret < 0) { | ||
60 | + res->check_errors++; | ||
61 | } | ||
62 | |||
63 | return ret; | ||
64 | -- | ||
65 | 2.40.1 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: Alexander Ivanov <alexander.ivanov@virtuozzo.com> | ||
2 | 1 | ||
3 | All the offsets in the BAT must be lower than the file size. | ||
4 | Fix the check condition for correct check. | ||
5 | |||
6 | Signed-off-by: Alexander Ivanov <alexander.ivanov@virtuozzo.com> | ||
7 | Reviewed-by: Denis V. Lunev <den@openvz.org> | ||
8 | Message-Id: <20230424093147.197643-13-alexander.ivanov@virtuozzo.com> | ||
9 | Reviewed-by: Hanna Czenczek <hreitz@redhat.com> | ||
10 | Signed-off-by: Hanna Czenczek <hreitz@redhat.com> | ||
11 | --- | ||
12 | block/parallels.c | 2 +- | ||
13 | 1 file changed, 1 insertion(+), 1 deletion(-) | ||
14 | |||
15 | diff --git a/block/parallels.c b/block/parallels.c | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/block/parallels.c | ||
18 | +++ b/block/parallels.c | ||
19 | @@ -XXX,XX +XXX,XX @@ parallels_check_outside_image(BlockDriverState *bs, BdrvCheckResult *res, | ||
20 | high_off = 0; | ||
21 | for (i = 0; i < s->bat_size; i++) { | ||
22 | off = bat2sect(s, i) << BDRV_SECTOR_BITS; | ||
23 | - if (off > size) { | ||
24 | + if (off + s->cluster_size > size) { | ||
25 | fprintf(stderr, "%s cluster %u is outside image\n", | ||
26 | fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR", i); | ||
27 | res->corruptions++; | ||
28 | -- | ||
29 | 2.40.1 | diff view generated by jsdifflib |