The following changes since commit fc3dbb90f2eb069801bfb4cfe9cbc83cf9c5f4a9:

  Merge remote-tracking branch 'remotes/jnsnow/tags/bitmaps-pull-request' into staging (2019-02-21 13:09:33 +0000)

are available in the Git repository at:

  git://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 9a9f4b74fa547b68edb38fa414999836770a4735:

  tests/virtio-blk: add test for DISCARD command (2019-02-22 09:42:17 +0000)

----------------------------------------------------------------
Pull request

----------------------------------------------------------------

Stefano Garzarella (10):
      virtio-blk: add acct_failed param to virtio_blk_handle_rw_error()
      virtio-blk: add host_features field in VirtIOBlock
      virtio-blk: add "discard" and "write-zeroes" properties
      virtio-net: make VirtIOFeature usable for other virtio devices
      virtio-blk: set config size depending on the features enabled
      virtio-blk: add DISCARD and WRITE_ZEROES features
      tests/virtio-blk: change assert on data_size in virtio_blk_request()
      tests/virtio-blk: add virtio_blk_fix_dwz_hdr() function
      tests/virtio-blk: add test for WRITE_ZEROES command
      tests/virtio-blk: add test for DISCARD command

Vladimir Sementsov-Ogievskiy (17):
      block: enhance QEMUIOVector structure
      block/io: use qemu_iovec_init_buf
      block/block-backend: use QEMU_IOVEC_INIT_BUF
      block/backup: use qemu_iovec_init_buf
      block/commit: use QEMU_IOVEC_INIT_BUF
      block/stream: use QEMU_IOVEC_INIT_BUF
      block/parallels: use QEMU_IOVEC_INIT_BUF
      block/qcow: use qemu_iovec_init_buf
      block/qcow2: use qemu_iovec_init_buf
      block/qed: use qemu_iovec_init_buf
      block/vmdk: use qemu_iovec_init_buf
      qemu-img: use qemu_iovec_init_buf
      migration/block: use qemu_iovec_init_buf
      tests/test-bdrv-drain: use QEMU_IOVEC_INIT_BUF
      hw/ide: drop iov field from IDEState
      hw/ide: drop iov field from IDEBufferedRequest
      hw/ide: drop iov field from IDEDMA

 include/hw/ide/internal.h | 3 -
 include/hw/virtio/virtio-blk.h | 6 +-
 include/hw/virtio/virtio.h | 15 ++
 include/qemu/iov.h | 64 ++++++++-
 block/backup.c | 5 +-
 block/block-backend.c | 13 +-
 block/commit.c | 7 +-
 block/io.c | 89 +++---------
 block/parallels.c | 13 +-
 block/qcow.c | 21 +--
 block/qcow2.c | 12 +-
 block/qed-table.c | 16 +--
 block/qed.c | 31 ++---
 block/stream.c | 7 +-
 block/vmdk.c | 7 +-
 hw/block/virtio-blk.c | 245 ++++++++++++++++++++++++++++++---
 hw/core/machine.c | 2 +
 hw/ide/atapi.c | 14 +-
 hw/ide/core.c | 19 ++-
 hw/net/virtio-net.c | 31 +----
 hw/virtio/virtio.c | 15 ++
 migration/block.c | 10 +-
 qemu-img.c | 10 +-
 tests/test-bdrv-drain.c | 29 +---
 tests/virtio-blk-test.c | 127 ++++++++++++++++-
 25 files changed, 525 insertions(+), 286 deletions(-)

--
2.20.1


The following changes since commit 9d662a6b22a0838a85c5432385f35db2488a33a5:

  Merge remote-tracking branch 'remotes/legoater/tags/pull-ppc-20220305' into staging (2022-03-05 18:03:15 +0000)

are available in the Git repository at:

  https://gitlab.com/hreitz/qemu.git tags/pull-block-2022-03-07

for you to fetch changes up to 743da0b401cdc3ee94bc519975e339a3cdbe0ad1:

  iotests/image-fleecing: test push backup with fleecing (2022-03-07 09:33:31 +0100)

----------------------------------------------------------------
Block patches for 7.0-rc0:
- New fleecing backup scheme
- iotest fixes
- Fixes for the curl block driver
- Fix for the preallocate block driver
- IDE fix for zero-length TRIM requests

----------------------------------------------------------------

Hanna Reitz (2):
      ide: Increment BB in-flight counter for TRIM BH
      iotests: Write test output to TEST_DIR

Peter Maydell (2):
      block/curl.c: Set error message string if curl_init_state() fails
      block/curl.c: Check error return from curl_easy_setopt()

Thomas Huth (2):
      tests/qemu-iotests/040: Skip TestCommitWithFilters without 'throttle'
      tests/qemu-iotests/testrunner: Quote "case not run" lines in TAP mode

Vladimir Sementsov-Ogievskiy (17):
      block: fix preallocate filter: don't do unaligned preallocate requests
      block/block-copy: move copy_bitmap initialization to block_copy_state_new()
      block/dirty-bitmap: bdrv_merge_dirty_bitmap(): add return value
      block/block-copy: block_copy_state_new(): add bitmap parameter
      block/copy-before-write: add bitmap open parameter
      block/block-copy: add block_copy_reset()
      block: intoduce reqlist
      block/reqlist: reqlist_find_conflict(): use ranges_overlap()
      block/dirty-bitmap: introduce bdrv_dirty_bitmap_status()
      block/reqlist: add reqlist_wait_all()
      block/io: introduce block driver snapshot-access API
      block: introduce snapshot-access block driver
      block: copy-before-write: realize snapshot-access API
      iotests/image-fleecing: add test-case for fleecing format node
      iotests.py: add qemu_io_pipe_and_status()
      iotests/image-fleecing: add test case with bitmap
      iotests/image-fleecing: test push backup with fleecing

 qapi/block-core.json | 14 +-
 include/block/block-common.h | 3 +-
 include/block/block-copy.h | 2 +
 include/block/block_int-common.h | 24 ++
 include/block/block_int-io.h | 9 +
 include/block/dirty-bitmap.h | 4 +-
 include/block/reqlist.h | 75 ++++++
 include/qemu/hbitmap.h | 12 +
 block/block-copy.c | 150 +++++------
 block/copy-before-write.c | 265 +++++++++++++++++++-
 block/curl.c | 92 ++++---
 block/dirty-bitmap.c | 15 +-
 block/io.c | 76 ++++++
 block/monitor/bitmap-qmp-cmds.c | 5 +-
 block/preallocate.c | 15 +-
 block/reqlist.c | 85 +++++++
 block/snapshot-access.c | 132 ++++++++++
 hw/ide/core.c | 7 +
 util/hbitmap.c | 33 +++
 MAINTAINERS | 5 +-
 block/meson.build | 2 +
 tests/qemu-iotests/040 | 1 +
 tests/qemu-iotests/257.out | 224 +++++++++++++++++
 tests/qemu-iotests/common.rc | 6 +-
 tests/qemu-iotests/iotests.py | 8 +-
 tests/qemu-iotests/testenv.py | 5 +-
 tests/qemu-iotests/testrunner.py | 19 +-
 tests/qemu-iotests/tests/image-fleecing | 185 +++++++++---
 tests/qemu-iotests/tests/image-fleecing.out | 221 +++++++++++++++-
 29 files changed, 1499 insertions(+), 195 deletions(-)
 create mode 100644 include/block/reqlist.h
 create mode 100644 block/reqlist.c
 create mode 100644 block/snapshot-access.c

--
2.34.1

1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
When we still have an AIOCB registered for DMA operations, we try to
2
settle the respective operation by draining the BlockBackend associated
3
with the IDE device.
2
4
3
@iov is used only to initialize @qiov. Let's use new
5
However, this assumes that every DMA operation is associated with an
4
qemu_iovec_init_buf() instead, which simplifies the code.
6
increment of the BlockBackend’s in-flight counter (e.g. through some
7
ongoing I/O operation), so that draining the BB until its in-flight
8
counter reaches 0 will settle all DMA operations. That is not the case:
9
For TRIM, the guest can issue a zero-length operation that will not
10
result in any I/O operation forwarded to the BlockBackend, and also not
11
increment the in-flight counter in any other way. In such a case,
12
blk_drain() will be a no-op if no other operations are in flight.
5
13
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
14
It is clear that if blk_drain() is a no-op, the value of
7
Reviewed-by: Eric Blake <eblake@redhat.com>
15
s->bus->dma->aiocb will not change between checking it in the `if`
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
16
condition and asserting that it is NULL after blk_drain().
9
Message-id: 20190218140926.333779-17-vsementsov@virtuozzo.com
17
10
Message-Id: <20190218140926.333779-17-vsementsov@virtuozzo.com>
18
The particular problem is that ide_issue_trim() creates a BH
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
19
(ide_trim_bh_cb()) to settle the TRIM request: iocb->common.cb() is
20
ide_dma_cb(), which will either create a new request, or find the
21
transfer to be done and call ide_set_inactive(), which clears
22
s->bus->dma->aiocb. Therefore, the blk_drain() must wait for
23
ide_trim_bh_cb() to run, which currently it will not always do.
24
25
To fix this issue, we increment the BlockBackend's in-flight counter
26
when the TRIM operation begins (in ide_issue_trim(), when the
27
ide_trim_bh_cb() BH is created) and decrement it when ide_trim_bh_cb()
28
is done.
29
30
Buglink: https://bugzilla.redhat.com/show_bug.cgi?id=2029980
31
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
32
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
33
Message-Id: <20220120142259.120189-1-hreitz@redhat.com>
34
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
35
Reviewed-by: John Snow <jsnow@redhat.com>
36
Tested-by: John Snow <jsnow@redhat.com>
12
---
37
---
13
include/hw/ide/internal.h | 1 -
38
hw/ide/core.c | 7 +++++++
14
hw/ide/core.c | 11 ++++++-----
39
1 file changed, 7 insertions(+)
15
2 files changed, 6 insertions(+), 6 deletions(-)
16
40
17
diff --git a/include/hw/ide/internal.h b/include/hw/ide/internal.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/include/hw/ide/internal.h
20
+++ b/include/hw/ide/internal.h
21
@@ -XXX,XX +XXX,XX @@ extern const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT];
22
23
typedef struct IDEBufferedRequest {
24
QLIST_ENTRY(IDEBufferedRequest) list;
25
- struct iovec iov;
26
QEMUIOVector qiov;
27
QEMUIOVector *original_qiov;
28
BlockCompletionFunc *original_cb;
29
diff --git a/hw/ide/core.c b/hw/ide/core.c
41
diff --git a/hw/ide/core.c b/hw/ide/core.c
30
index XXXXXXX..XXXXXXX 100644
42
index XXXXXXX..XXXXXXX 100644
31
--- a/hw/ide/core.c
43
--- a/hw/ide/core.c
32
+++ b/hw/ide/core.c
44
+++ b/hw/ide/core.c
33
@@ -XXX,XX +XXX,XX @@ static void ide_buffered_readv_cb(void *opaque, int ret)
45
@@ -XXX,XX +XXX,XX @@ static const AIOCBInfo trim_aiocb_info = {
34
IDEBufferedRequest *req = opaque;
46
static void ide_trim_bh_cb(void *opaque)
35
if (!req->orphaned) {
47
{
36
if (!ret) {
48
TrimAIOCB *iocb = opaque;
37
- qemu_iovec_from_buf(req->original_qiov, 0, req->iov.iov_base,
49
+ BlockBackend *blk = iocb->s->blk;
38
+ assert(req->qiov.size == req->original_qiov->size);
50
39
+ qemu_iovec_from_buf(req->original_qiov, 0,
51
iocb->common.cb(iocb->common.opaque, iocb->ret);
40
+ req->qiov.local_iov.iov_base,
52
41
req->original_qiov->size);
53
qemu_bh_delete(iocb->bh);
42
}
54
iocb->bh = NULL;
43
req->original_cb(req->original_opaque, ret);
55
qemu_aio_unref(iocb);
44
}
56
+
45
QLIST_REMOVE(req, list);
57
+ /* Paired with an increment in ide_issue_trim() */
46
- qemu_vfree(req->iov.iov_base);
58
+ blk_dec_in_flight(blk);
47
+ qemu_vfree(qemu_iovec_buf(&req->qiov));
48
g_free(req);
49
}
59
}
50
60
51
@@ -XXX,XX +XXX,XX @@ BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
61
static void ide_issue_trim_cb(void *opaque, int ret)
52
req->original_qiov = iov;
62
@@ -XXX,XX +XXX,XX @@ BlockAIOCB *ide_issue_trim(
53
req->original_cb = cb;
63
IDEState *s = opaque;
54
req->original_opaque = opaque;
64
TrimAIOCB *iocb;
55
- req->iov.iov_base = qemu_blockalign(blk_bs(s->blk), iov->size);
65
56
- req->iov.iov_len = iov->size;
66
+ /* Paired with a decrement in ide_trim_bh_cb() */
57
- qemu_iovec_init_external(&req->qiov, &req->iov, 1);
67
+ blk_inc_in_flight(s->blk);
58
+ qemu_iovec_init_buf(&req->qiov, blk_blockalign(s->blk, iov->size),
68
+
59
+ iov->size);
69
iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque);
60
70
iocb->s = s;
61
aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
71
iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
62
&req->qiov, 0, ide_buffered_readv_cb, req);
63
--
72
--
64
2.20.1
73
2.34.1
65
74
66
75
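The pairing that the "ide: Increment BB in-flight counter for TRIM BH" patch above relies on can be sketched in isolation as follows. This is a minimal, hypothetical example, not the IDE code itself: MyOp, my_op_start() and the completion comment are invented, while the BlockBackend and bottom-half calls are the ones that appear in the diff.

    #include "qemu/osdep.h"
    #include "qemu/main-loop.h"
    #include "sysemu/block-backend.h"

    /* Hypothetical operation whose completion is deferred to a bottom half. */
    typedef struct MyOp {
        BlockBackend *blk;
        QEMUBH *bh;
    } MyOp;

    static void my_op_bh_cb(void *opaque)
    {
        MyOp *op = opaque;

        /* ... settle the operation, e.g. invoke the original callback ... */

        qemu_bh_delete(op->bh);
        /* Paired with the increment in my_op_start(); only now may a
         * blk_drain() on op->blk return. */
        blk_dec_in_flight(op->blk);
        g_free(op);
    }

    static void my_op_start(BlockBackend *blk)
    {
        MyOp *op = g_new0(MyOp, 1);

        op->blk = blk;
        /* Count the deferred completion so blk_drain() waits for the BH. */
        blk_inc_in_flight(blk);
        op->bh = qemu_bh_new(my_op_bh_cb, op);
        qemu_bh_schedule(op->bh);
    }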
1
From: Stefano Garzarella <sgarzare@redhat.com>
1
From: Peter Maydell <peter.maydell@linaro.org>
2
2
3
If the DISCARD feature is enabled, we try this command in the
3
In curl_open(), the 'out' label assumes that the state->errmsg string
4
test_basic(), checking only the status returned by the request.
4
has been set (either by curl_easy_perform() or by manually copying a
5
string into it); however if curl_init_state() fails we will jump to
6
that label without setting the string. Add the missing error string
7
setup.
5
8
6
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
9
(We can't be specific about the cause of failure: the documentation
7
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
10
of curl_easy_init() just says "If this function returns NULL,
8
Message-id: 20190221103314.58500-11-sgarzare@redhat.com
11
something went wrong".)
9
Message-Id: <20190221103314.58500-11-sgarzare@redhat.com>
12
10
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
Message-Id: <20220222152341.850419-2-peter.maydell@linaro.org>
15
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
16
Reviewed-by: Hanna Reitz <hreitz@redhat.com>
17
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
11
---
18
---
12
tests/virtio-blk-test.c | 27 +++++++++++++++++++++++++++
19
block/curl.c | 2 ++
13
1 file changed, 27 insertions(+)
20
1 file changed, 2 insertions(+)
14
21
15
diff --git a/tests/virtio-blk-test.c b/tests/virtio-blk-test.c
22
diff --git a/block/curl.c b/block/curl.c
16
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
17
--- a/tests/virtio-blk-test.c
24
--- a/block/curl.c
18
+++ b/tests/virtio-blk-test.c
25
+++ b/block/curl.c
19
@@ -XXX,XX +XXX,XX @@ static void test_basic(QVirtioDevice *dev, QGuestAllocator *alloc,
26
@@ -XXX,XX +XXX,XX @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
20
guest_free(alloc, req_addr);
27
// Get file size
28
29
if (curl_init_state(s, state) < 0) {
30
+ pstrcpy(state->errmsg, CURL_ERROR_SIZE,
31
+ "curl library initialization failed.");
32
goto out;
21
}
33
}
22
34
23
+ if (features & (1u << VIRTIO_BLK_F_DISCARD)) {
24
+ struct virtio_blk_discard_write_zeroes dwz_hdr;
25
+
26
+ req.type = VIRTIO_BLK_T_DISCARD;
27
+ req.data = (char *) &dwz_hdr;
28
+ dwz_hdr.sector = 0;
29
+ dwz_hdr.num_sectors = 1;
30
+ dwz_hdr.flags = 0;
31
+
32
+ virtio_blk_fix_dwz_hdr(dev, &dwz_hdr);
33
+
34
+ req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr));
35
+
36
+ free_head = qvirtqueue_add(vq, req_addr, 16, false, true);
37
+ qvirtqueue_add(vq, req_addr + 16, sizeof(dwz_hdr), false, true);
38
+ qvirtqueue_add(vq, req_addr + 16 + sizeof(dwz_hdr), 1, true, false);
39
+
40
+ qvirtqueue_kick(dev, vq, free_head);
41
+
42
+ qvirtio_wait_used_elem(dev, vq, free_head, NULL,
43
+ QVIRTIO_BLK_TIMEOUT_US);
44
+ status = readb(req_addr + 16 + sizeof(dwz_hdr));
45
+ g_assert_cmpint(status, ==, 0);
46
+
47
+ guest_free(alloc, req_addr);
48
+ }
49
+
50
if (features & (1u << VIRTIO_F_ANY_LAYOUT)) {
51
/* Write and read with 2 descriptor layout */
52
/* Write request */
53
--
35
--
54
2.20.1
36
2.34.1
55
37
56
38
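For reference, the dwz_hdr payload built by the DISCARD test above follows the discard/write-zeroes segment defined by the VIRTIO specification; a sketch of its layout (field names as used in the test, byte order fixed up by virtio_blk_fix_dwz_hdr()):

    /* One discard/write-zeroes segment. The full request is the usual
     * 16-byte virtio-blk header, one or more of these segments, and a
     * 1-byte status written back by the device -- hence the
     * 16 / sizeof(dwz_hdr) / 1 split of the descriptor chain in the test. */
    struct virtio_blk_discard_write_zeroes {
        uint64_t sector;      /* first sector of the range */
        uint32_t num_sectors; /* length of the range, in 512-byte sectors */
        uint32_t flags;       /* bit 0 = unmap hint (WRITE_ZEROES only) */
    };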
1
From: Stefano Garzarella <sgarzare@redhat.com>
1
From: Peter Maydell <peter.maydell@linaro.org>
2
2
3
Starting from the DISCARD and WRITE_ZEROES features, we use an array of
3
Coverity points out that we aren't checking the return value
4
VirtIOFeature (as virtio-net) to properly set the config size
4
from curl_easy_setopt() for any of the calls to it we make
5
depending on the features enabled.
5
in block/curl.c.
6
6
7
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
7
Some of these options are documented as always succeeding (e.g.
8
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
8
CURLOPT_VERBOSE) but others have documented failure cases (e.g.
9
Message-id: 20190221103314.58500-6-sgarzare@redhat.com
9
CURLOPT_URL). For consistency we check every call, even the ones
10
Message-Id: <20190221103314.58500-6-sgarzare@redhat.com>
10
that theoretically cannot fail.
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
11
12
Fixes: Coverity CID 1459336, 1459482, 1460331
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
Message-Id: <20220222152341.850419-3-peter.maydell@linaro.org>
15
Reviewed-by: Hanna Reitz <hreitz@redhat.com>
16
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
12
---
17
---
13
include/hw/virtio/virtio-blk.h | 1 +
18
block/curl.c | 90 +++++++++++++++++++++++++++++++++-------------------
14
hw/block/virtio-blk.c | 31 +++++++++++++++++++++++++------
19
1 file changed, 57 insertions(+), 33 deletions(-)
15
2 files changed, 26 insertions(+), 6 deletions(-)
16
20
17
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
21
diff --git a/block/curl.c b/block/curl.c
18
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
19
--- a/include/hw/virtio/virtio-blk.h
23
--- a/block/curl.c
20
+++ b/include/hw/virtio/virtio-blk.h
24
+++ b/block/curl.c
21
@@ -XXX,XX +XXX,XX @@ typedef struct VirtIOBlock {
25
@@ -XXX,XX +XXX,XX @@ static int curl_init_state(BDRVCURLState *s, CURLState *state)
22
bool dataplane_started;
26
if (!state->curl) {
23
struct VirtIOBlockDataPlane *dataplane;
27
return -EIO;
24
uint64_t host_features;
28
}
25
+ size_t config_size;
29
- curl_easy_setopt(state->curl, CURLOPT_URL, s->url);
26
} VirtIOBlock;
30
- curl_easy_setopt(state->curl, CURLOPT_SSL_VERIFYPEER,
27
31
- (long) s->sslverify);
28
typedef struct VirtIOBlockReq {
32
- curl_easy_setopt(state->curl, CURLOPT_SSL_VERIFYHOST,
29
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
33
- s->sslverify ? 2L : 0L);
30
index XXXXXXX..XXXXXXX 100644
34
+ if (curl_easy_setopt(state->curl, CURLOPT_URL, s->url) ||
31
--- a/hw/block/virtio-blk.c
35
+ curl_easy_setopt(state->curl, CURLOPT_SSL_VERIFYPEER,
32
+++ b/hw/block/virtio-blk.c
36
+ (long) s->sslverify) ||
33
@@ -XXX,XX +XXX,XX @@
37
+ curl_easy_setopt(state->curl, CURLOPT_SSL_VERIFYHOST,
34
#include "hw/virtio/virtio-bus.h"
38
+ s->sslverify ? 2L : 0L)) {
35
#include "hw/virtio/virtio-access.h"
39
+ goto err;
36
40
+ }
37
-/* We don't support discard yet, hide associated config fields. */
41
if (s->cookie) {
38
+/* Config size before the discard support (hide associated config fields) */
42
- curl_easy_setopt(state->curl, CURLOPT_COOKIE, s->cookie);
39
#define VIRTIO_BLK_CFG_SIZE offsetof(struct virtio_blk_config, \
43
+ if (curl_easy_setopt(state->curl, CURLOPT_COOKIE, s->cookie)) {
40
max_discard_sectors)
44
+ goto err;
41
+/*
45
+ }
42
+ * Starting from the discard feature, we can use this array to properly
46
+ }
43
+ * set the config size depending on the features enabled.
47
+ if (curl_easy_setopt(state->curl, CURLOPT_TIMEOUT, (long)s->timeout) ||
44
+ */
48
+ curl_easy_setopt(state->curl, CURLOPT_WRITEFUNCTION,
45
+static VirtIOFeature feature_sizes[] = {
49
+ (void *)curl_read_cb) ||
46
+ {.flags = 1ULL << VIRTIO_BLK_F_DISCARD,
50
+ curl_easy_setopt(state->curl, CURLOPT_WRITEDATA, (void *)state) ||
47
+ .end = virtio_endof(struct virtio_blk_config, discard_sector_alignment)},
51
+ curl_easy_setopt(state->curl, CURLOPT_PRIVATE, (void *)state) ||
48
+ {.flags = 1ULL << VIRTIO_BLK_F_WRITE_ZEROES,
52
+ curl_easy_setopt(state->curl, CURLOPT_AUTOREFERER, 1) ||
49
+ .end = virtio_endof(struct virtio_blk_config, write_zeroes_may_unmap)},
53
+ curl_easy_setopt(state->curl, CURLOPT_FOLLOWLOCATION, 1) ||
50
+ {}
54
+ curl_easy_setopt(state->curl, CURLOPT_NOSIGNAL, 1) ||
51
+};
55
+ curl_easy_setopt(state->curl, CURLOPT_ERRORBUFFER, state->errmsg) ||
56
+ curl_easy_setopt(state->curl, CURLOPT_FAILONERROR, 1)) {
57
+ goto err;
58
}
59
- curl_easy_setopt(state->curl, CURLOPT_TIMEOUT, (long)s->timeout);
60
- curl_easy_setopt(state->curl, CURLOPT_WRITEFUNCTION,
61
- (void *)curl_read_cb);
62
- curl_easy_setopt(state->curl, CURLOPT_WRITEDATA, (void *)state);
63
- curl_easy_setopt(state->curl, CURLOPT_PRIVATE, (void *)state);
64
- curl_easy_setopt(state->curl, CURLOPT_AUTOREFERER, 1);
65
- curl_easy_setopt(state->curl, CURLOPT_FOLLOWLOCATION, 1);
66
- curl_easy_setopt(state->curl, CURLOPT_NOSIGNAL, 1);
67
- curl_easy_setopt(state->curl, CURLOPT_ERRORBUFFER, state->errmsg);
68
- curl_easy_setopt(state->curl, CURLOPT_FAILONERROR, 1);
69
-
70
if (s->username) {
71
- curl_easy_setopt(state->curl, CURLOPT_USERNAME, s->username);
72
+ if (curl_easy_setopt(state->curl, CURLOPT_USERNAME, s->username)) {
73
+ goto err;
74
+ }
75
}
76
if (s->password) {
77
- curl_easy_setopt(state->curl, CURLOPT_PASSWORD, s->password);
78
+ if (curl_easy_setopt(state->curl, CURLOPT_PASSWORD, s->password)) {
79
+ goto err;
80
+ }
81
}
82
if (s->proxyusername) {
83
- curl_easy_setopt(state->curl,
84
- CURLOPT_PROXYUSERNAME, s->proxyusername);
85
+ if (curl_easy_setopt(state->curl,
86
+ CURLOPT_PROXYUSERNAME, s->proxyusername)) {
87
+ goto err;
88
+ }
89
}
90
if (s->proxypassword) {
91
- curl_easy_setopt(state->curl,
92
- CURLOPT_PROXYPASSWORD, s->proxypassword);
93
+ if (curl_easy_setopt(state->curl,
94
+ CURLOPT_PROXYPASSWORD, s->proxypassword)) {
95
+ goto err;
96
+ }
97
}
98
99
/* Restrict supported protocols to avoid security issues in the more
100
@@ -XXX,XX +XXX,XX @@ static int curl_init_state(BDRVCURLState *s, CURLState *state)
101
* Restricting protocols is only supported from 7.19.4 upwards.
102
*/
103
#if LIBCURL_VERSION_NUM >= 0x071304
104
- curl_easy_setopt(state->curl, CURLOPT_PROTOCOLS, PROTOCOLS);
105
- curl_easy_setopt(state->curl, CURLOPT_REDIR_PROTOCOLS, PROTOCOLS);
106
+ if (curl_easy_setopt(state->curl, CURLOPT_PROTOCOLS, PROTOCOLS) ||
107
+ curl_easy_setopt(state->curl, CURLOPT_REDIR_PROTOCOLS, PROTOCOLS)) {
108
+ goto err;
109
+ }
110
#endif
111
112
#ifdef DEBUG_VERBOSE
113
- curl_easy_setopt(state->curl, CURLOPT_VERBOSE, 1);
114
+ if (curl_easy_setopt(state->curl, CURLOPT_VERBOSE, 1)) {
115
+ goto err;
116
+ }
117
#endif
118
}
119
120
state->s = s;
121
122
return 0;
52
+
123
+
53
+static void virtio_blk_set_config_size(VirtIOBlock *s, uint64_t host_features)
124
+err:
54
+{
125
+ curl_easy_cleanup(state->curl);
55
+ s->config_size = MAX(VIRTIO_BLK_CFG_SIZE,
126
+ state->curl = NULL;
56
+ virtio_feature_get_config_size(feature_sizes, host_features));
127
+ return -EIO;
57
+
58
+ assert(s->config_size <= sizeof(struct virtio_blk_config));
59
+}
60
61
static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
62
VirtIOBlockReq *req)
63
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
64
blkcfg.alignment_offset = 0;
65
blkcfg.wce = blk_enable_write_cache(s->blk);
66
virtio_stw_p(vdev, &blkcfg.num_queues, s->conf.num_queues);
67
- memcpy(config, &blkcfg, VIRTIO_BLK_CFG_SIZE);
68
- QEMU_BUILD_BUG_ON(VIRTIO_BLK_CFG_SIZE > sizeof(blkcfg));
69
+ memcpy(config, &blkcfg, s->config_size);
70
}
128
}
71
129
72
static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
130
/* Called with s->mutex held. */
73
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
131
@@ -XXX,XX +XXX,XX @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
74
VirtIOBlock *s = VIRTIO_BLK(vdev);
75
struct virtio_blk_config blkcfg;
76
77
- memcpy(&blkcfg, config, VIRTIO_BLK_CFG_SIZE);
78
- QEMU_BUILD_BUG_ON(VIRTIO_BLK_CFG_SIZE > sizeof(blkcfg));
79
+ memcpy(&blkcfg, config, s->config_size);
80
81
aio_context_acquire(blk_get_aio_context(s->blk));
82
blk_set_enable_write_cache(s->blk, blkcfg.wce != 0);
83
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
84
return;
85
}
132
}
86
133
87
- virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK, VIRTIO_BLK_CFG_SIZE);
134
s->accept_range = false;
88
+ virtio_blk_set_config_size(s, s->host_features);
135
- curl_easy_setopt(state->curl, CURLOPT_NOBODY, 1);
89
+
136
- curl_easy_setopt(state->curl, CURLOPT_HEADERFUNCTION,
90
+ virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK, s->config_size);
137
- curl_header_cb);
91
138
- curl_easy_setopt(state->curl, CURLOPT_HEADERDATA, s);
92
s->blk = conf->conf.blk;
139
+ if (curl_easy_setopt(state->curl, CURLOPT_NOBODY, 1) ||
93
s->rq = NULL;
140
+ curl_easy_setopt(state->curl, CURLOPT_HEADERFUNCTION, curl_header_cb) ||
141
+ curl_easy_setopt(state->curl, CURLOPT_HEADERDATA, s)) {
142
+ pstrcpy(state->errmsg, CURL_ERROR_SIZE,
143
+ "curl library initialization failed.");
144
+ goto out;
145
+ }
146
if (curl_easy_perform(state->curl))
147
goto out;
148
if (curl_easy_getinfo(state->curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &d)) {
149
@@ -XXX,XX +XXX,XX @@ static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb)
150
151
snprintf(state->range, 127, "%" PRIu64 "-%" PRIu64, start, end);
152
trace_curl_setup_preadv(acb->bytes, start, state->range);
153
- curl_easy_setopt(state->curl, CURLOPT_RANGE, state->range);
154
-
155
- if (curl_multi_add_handle(s->multi, state->curl) != CURLM_OK) {
156
+ if (curl_easy_setopt(state->curl, CURLOPT_RANGE, state->range) ||
157
+ curl_multi_add_handle(s->multi, state->curl) != CURLM_OK) {
158
state->acb[0] = NULL;
159
acb->ret = -EIO;
160
94
--
161
--
95
2.20.1
162
2.34.1
96
97
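Outside of QEMU, the "check every curl_easy_setopt() call" rule from the patch above looks like this; a standalone, minimal libcurl sketch (the URL and the option set are arbitrary, make_handle() is a hypothetical helper):

    #include <curl/curl.h>
    #include <stddef.h>

    /* Returns a configured handle or NULL; errbuf must be at least
     * CURL_ERROR_SIZE bytes and stays owned by the caller. */
    static CURL *make_handle(const char *url, char *errbuf)
    {
        CURL *curl = curl_easy_init();

        if (!curl) {
            return NULL;
        }
        /* CURLE_OK is 0, so chaining with || treats any failure as fatal,
         * the same way the reworked curl_init_state() does. */
        if (curl_easy_setopt(curl, CURLOPT_URL, url) ||
            curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L) ||
            curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1L) ||
            curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, errbuf) ||
            curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1L)) {
            curl_easy_cleanup(curl);   /* any failure invalidates the handle */
            return NULL;
        }
        return curl;
    }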
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
Use new qemu_iovec_init_buf() instead of
3
There is a bug in handling BDRV_REQ_NO_WAIT flag: we still may wait in
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
4
wait_serialising_requests() if the request is unaligned. This is
5
possible for the only user of this flag (preallocate filter) if
6
the underlying file is unaligned to its request_alignment at startup.
5
7
6
While being here, use qemu_try_blockalign0 as well.
8
So, we have to fix preallocate filter to do only aligned preallocate
9
requests.
10
11
Next, we should fix generic block/io.c somehow. Keeping in mind that
12
preallocate is the only user of BDRV_REQ_NO_WAIT and that we have to
13
fix its behavior now, it seems more safe to just assert that we never
14
use BDRV_REQ_NO_WAIT with unaligned requests and add corresponding
15
comment. Let's do so.
7
16
8
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
17
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
9
Reviewed-by: Eric Blake <eblake@redhat.com>
18
Reviewed-by: Denis V. Lunev <den@openvz.org>
10
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
19
Message-Id: <20220215121609.38570-1-vsementsov@virtuozzo.com>
11
Message-id: 20190218140926.333779-3-vsementsov@virtuozzo.com
20
[hreitz: Rebased on block GS/IO split]
12
Message-Id: <20190218140926.333779-3-vsementsov@virtuozzo.com>
21
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
13
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
14
---
22
---
15
block/io.c | 89 ++++++++++++------------------------------------------
23
include/block/block-common.h | 3 ++-
16
1 file changed, 20 insertions(+), 69 deletions(-)
24
block/io.c | 4 ++++
25
block/preallocate.c | 15 ++++++++++++---
26
3 files changed, 18 insertions(+), 4 deletions(-)
17
27
28
diff --git a/include/block/block-common.h b/include/block/block-common.h
29
index XXXXXXX..XXXXXXX 100644
30
--- a/include/block/block-common.h
31
+++ b/include/block/block-common.h
32
@@ -XXX,XX +XXX,XX @@ typedef enum {
33
34
/*
35
* If we need to wait for other requests, just fail immediately. Used
36
- * only together with BDRV_REQ_SERIALISING.
37
+ * only together with BDRV_REQ_SERIALISING. Used only with requests aligned
38
+ * to request_alignment (corresponding assertions are in block/io.c).
39
*/
40
BDRV_REQ_NO_WAIT = 0x400,
41
18
diff --git a/block/io.c b/block/io.c
42
diff --git a/block/io.c b/block/io.c
19
index XXXXXXX..XXXXXXX 100644
43
index XXXXXXX..XXXXXXX 100644
20
--- a/block/io.c
44
--- a/block/io.c
21
+++ b/block/io.c
45
+++ b/block/io.c
22
@@ -XXX,XX +XXX,XX @@ static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
46
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
23
static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf,
47
24
int nb_sectors, bool is_write, BdrvRequestFlags flags)
48
padding = bdrv_init_padding(bs, offset, bytes, &pad);
25
{
49
if (padding) {
26
- QEMUIOVector qiov;
50
+ assert(!(flags & BDRV_REQ_NO_WAIT));
27
- struct iovec iov = {
51
bdrv_make_request_serialising(req, align);
28
- .iov_base = (void *)buf,
52
29
- .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
53
bdrv_padding_rmw_read(child, req, &pad, true);
30
- };
54
@@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
31
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf,
55
* serialize the request to prevent interactions of the
32
+ nb_sectors * BDRV_SECTOR_SIZE);
56
* widened region with other transactions.
33
57
*/
34
if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
58
+ assert(!(flags & BDRV_REQ_NO_WAIT));
35
return -EINVAL;
59
bdrv_make_request_serialising(&req, align);
60
bdrv_padding_rmw_read(child, &req, &pad, false);
36
}
61
}
37
62
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_copy_range_internal(
38
- qemu_iovec_init_external(&qiov, &iov, 1);
63
/* TODO We can support BDRV_REQ_NO_FALLBACK here */
39
return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS,
64
assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
40
&qiov, is_write, flags);
65
assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
41
}
66
+ assert(!(read_flags & BDRV_REQ_NO_WAIT));
42
@@ -XXX,XX +XXX,XX @@ int bdrv_write(BdrvChild *child, int64_t sector_num,
67
+ assert(!(write_flags & BDRV_REQ_NO_WAIT));
43
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
68
44
int bytes, BdrvRequestFlags flags)
69
if (!dst || !dst->bs || !bdrv_is_inserted(dst->bs)) {
45
{
70
return -ENOMEDIUM;
46
- QEMUIOVector qiov;
71
diff --git a/block/preallocate.c b/block/preallocate.c
47
- struct iovec iov = {
72
index XXXXXXX..XXXXXXX 100644
48
- .iov_base = NULL,
73
--- a/block/preallocate.c
49
- .iov_len = bytes,
74
+++ b/block/preallocate.c
50
- };
75
@@ -XXX,XX +XXX,XX @@ static bool coroutine_fn handle_write(BlockDriverState *bs, int64_t offset,
51
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes);
76
int64_t end = offset + bytes;
52
77
int64_t prealloc_start, prealloc_end;
53
- qemu_iovec_init_external(&qiov, &iov, 1);
54
return bdrv_prwv_co(child, offset, &qiov, true,
55
BDRV_REQ_ZERO_WRITE | flags);
56
}
57
@@ -XXX,XX +XXX,XX @@ int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
58
59
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
60
{
61
- QEMUIOVector qiov;
62
- struct iovec iov = {
63
- .iov_base = (void *)buf,
64
- .iov_len = bytes,
65
- };
66
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
67
68
if (bytes < 0) {
69
return -EINVAL;
70
}
71
72
- qemu_iovec_init_external(&qiov, &iov, 1);
73
return bdrv_preadv(child, offset, &qiov);
74
}
75
76
@@ -XXX,XX +XXX,XX @@ int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
77
78
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
79
{
80
- QEMUIOVector qiov;
81
- struct iovec iov = {
82
- .iov_base = (void *) buf,
83
- .iov_len = bytes,
84
- };
85
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
86
87
if (bytes < 0) {
88
return -EINVAL;
89
}
90
91
- qemu_iovec_init_external(&qiov, &iov, 1);
92
return bdrv_pwritev(child, offset, &qiov);
93
}
94
95
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
96
void *bounce_buffer;
97
98
BlockDriver *drv = bs->drv;
99
- struct iovec iov;
100
QEMUIOVector local_qiov;
101
int64_t cluster_offset;
102
int64_t cluster_bytes;
103
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
104
105
if (ret <= 0) {
106
/* Must copy-on-read; use the bounce buffer */
107
- iov.iov_base = bounce_buffer;
108
- iov.iov_len = pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
109
- qemu_iovec_init_external(&local_qiov, &iov, 1);
110
+ pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
111
+ qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);
112
113
ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
114
&local_qiov, 0);
115
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
116
{
117
BlockDriver *drv = bs->drv;
118
QEMUIOVector qiov;
119
- struct iovec iov = {0};
120
+ void *buf = NULL;
121
int ret = 0;
122
bool need_flush = false;
123
int head = 0;
124
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
125
need_flush = true;
126
}
127
num = MIN(num, max_transfer);
128
- iov.iov_len = num;
129
- if (iov.iov_base == NULL) {
130
- iov.iov_base = qemu_try_blockalign(bs, num);
131
- if (iov.iov_base == NULL) {
132
+ if (buf == NULL) {
133
+ buf = qemu_try_blockalign0(bs, num);
134
+ if (buf == NULL) {
135
ret = -ENOMEM;
136
goto fail;
137
}
138
- memset(iov.iov_base, 0, num);
139
}
140
- qemu_iovec_init_external(&qiov, &iov, 1);
141
+ qemu_iovec_init_buf(&qiov, buf, num);
142
143
ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);
144
145
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
146
* all future requests.
147
*/
148
if (num < max_transfer) {
149
- qemu_vfree(iov.iov_base);
150
- iov.iov_base = NULL;
151
+ qemu_vfree(buf);
152
+ buf = NULL;
153
}
154
}
155
156
@@ -XXX,XX +XXX,XX @@ fail:
157
if (ret == 0 && need_flush) {
158
ret = bdrv_co_flush(bs);
159
}
160
- qemu_vfree(iov.iov_base);
161
+ qemu_vfree(buf);
162
return ret;
163
}
164
165
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
166
BlockDriverState *bs = child->bs;
167
uint8_t *buf = NULL;
168
QEMUIOVector local_qiov;
169
- struct iovec iov;
170
uint64_t align = bs->bl.request_alignment;
171
unsigned int head_padding_bytes, tail_padding_bytes;
172
int ret = 0;
173
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
174
assert(flags & BDRV_REQ_ZERO_WRITE);
175
if (head_padding_bytes || tail_padding_bytes) {
176
buf = qemu_blockalign(bs, align);
177
- iov = (struct iovec) {
178
- .iov_base = buf,
179
- .iov_len = align,
180
- };
181
- qemu_iovec_init_external(&local_qiov, &iov, 1);
182
+ qemu_iovec_init_buf(&local_qiov, buf, align);
183
}
184
if (head_padding_bytes) {
185
uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);
186
@@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
187
188
if (offset & (align - 1)) {
189
QEMUIOVector head_qiov;
190
- struct iovec head_iov;
191
192
mark_request_serialising(&req, align);
193
wait_serialising_requests(&req);
194
195
head_buf = qemu_blockalign(bs, align);
196
- head_iov = (struct iovec) {
197
- .iov_base = head_buf,
198
- .iov_len = align,
199
- };
200
- qemu_iovec_init_external(&head_qiov, &head_iov, 1);
201
+ qemu_iovec_init_buf(&head_qiov, head_buf, align);
202
203
bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
204
ret = bdrv_aligned_preadv(child, &req, offset & ~(align - 1), align,
205
@@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
206
207
if ((offset + bytes) & (align - 1)) {
208
QEMUIOVector tail_qiov;
209
- struct iovec tail_iov;
210
size_t tail_bytes;
211
bool waited;
212
213
@@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
214
assert(!waited || !use_local_qiov);
215
216
tail_buf = qemu_blockalign(bs, align);
217
- tail_iov = (struct iovec) {
218
- .iov_base = tail_buf,
219
- .iov_len = align,
220
- };
221
- qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
222
+ qemu_iovec_init_buf(&tail_qiov, tail_buf, align);
223
224
bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
225
ret = bdrv_aligned_preadv(child, &req, (offset + bytes) & ~(align - 1),
226
@@ -XXX,XX +XXX,XX @@ bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
227
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
228
int64_t pos, int size)
229
{
230
- QEMUIOVector qiov;
231
- struct iovec iov = {
232
- .iov_base = (void *) buf,
233
- .iov_len = size,
234
- };
235
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
236
int ret;
78
int ret;
237
79
+ uint32_t file_align = bs->file->bs->bl.request_alignment;
238
- qemu_iovec_init_external(&qiov, &iov, 1);
80
+ uint32_t prealloc_align = MAX(s->opts.prealloc_align, file_align);
239
-
81
+
240
ret = bdrv_writev_vmstate(bs, &qiov, pos);
82
+ assert(QEMU_IS_ALIGNED(prealloc_align, file_align));
241
if (ret < 0) {
83
242
return ret;
84
if (!has_prealloc_perms(bs)) {
243
@@ -XXX,XX +XXX,XX @@ int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
85
/* We don't have state neither should try to recover it */
244
int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
86
@@ -XXX,XX +XXX,XX @@ static bool coroutine_fn handle_write(BlockDriverState *bs, int64_t offset,
245
int64_t pos, int size)
87
246
{
88
/* Now we want new preallocation, as request writes beyond s->file_end. */
247
- QEMUIOVector qiov;
89
248
- struct iovec iov = {
90
- prealloc_start = want_merge_zero ? MIN(offset, s->file_end) : s->file_end;
249
- .iov_base = buf,
91
- prealloc_end = QEMU_ALIGN_UP(end + s->opts.prealloc_size,
250
- .iov_len = size,
92
- s->opts.prealloc_align);
251
- };
93
+ prealloc_start = QEMU_ALIGN_UP(
252
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
94
+ want_merge_zero ? MIN(offset, s->file_end) : s->file_end,
253
int ret;
95
+ file_align);
254
96
+ prealloc_end = QEMU_ALIGN_UP(
255
- qemu_iovec_init_external(&qiov, &iov, 1);
97
+ MAX(prealloc_start, end) + s->opts.prealloc_size,
256
ret = bdrv_readv_vmstate(bs, &qiov, pos);
98
+ prealloc_align);
257
if (ret < 0) {
99
+
258
return ret;
100
+ want_merge_zero = want_merge_zero && (prealloc_start <= offset);
101
102
ret = bdrv_co_pwrite_zeroes(
103
bs->file, prealloc_start, prealloc_end - prealloc_start,
259
--
104
--
260
2.20.1
105
2.34.1
261
262
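To make the new rounding in the preallocate filter's handle_write() above concrete, here is a worked example with made-up numbers (QEMU_ALIGN_UP rounds its first argument up to the next multiple of the second):

    /* Assume file_align = 512, s->file_end = 700, request end = 1100,
     * prealloc_size = 1 MiB and prealloc_align = 512 (illustrative values):
     *
     *   prealloc_start = QEMU_ALIGN_UP(700, 512)                       = 1024
     *   prealloc_end   = QEMU_ALIGN_UP(MAX(1024, 1100) + 1048576, 512) = 1050112
     *
     * Both the offset and the length passed to bdrv_co_pwrite_zeroes() are
     * therefore multiples of the file's request_alignment, which is what the
     * new BDRV_REQ_NO_WAIT assertions in block/io.c rely on. */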
1
From: Stefano Garzarella <sgarzare@redhat.com>
1
From: Thomas Huth <thuth@redhat.com>
2
2
3
If the WRITE_ZEROES feature is enabled, we check this command
3
iotest 040 already has some checks for the availability of the 'throttle'
4
in the test_basic().
4
driver, but some new code has been added in the course of time that
5
depends on 'throttle' but does not check for its availability. Add
6
a check to the TestCommitWithFilters class so that this iotest now
7
also passes again if 'throttle' has not been enabled in the QEMU
8
binaries.
5
9
6
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
10
Signed-off-by: Thomas Huth <thuth@redhat.com>
7
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
11
Message-Id: <20220223123127.3206042-1-thuth@redhat.com>
8
Acked-by: Thomas Huth <thuth@redhat.com>
12
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
9
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
10
Message-id: 20190221103314.58500-10-sgarzare@redhat.com
11
Message-Id: <20190221103314.58500-10-sgarzare@redhat.com>
12
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
13
---
13
---
14
tests/virtio-blk-test.c | 62 +++++++++++++++++++++++++++++++++++++++++
14
tests/qemu-iotests/040 | 1 +
15
1 file changed, 62 insertions(+)
15
1 file changed, 1 insertion(+)
16
16
17
diff --git a/tests/virtio-blk-test.c b/tests/virtio-blk-test.c
17
diff --git a/tests/qemu-iotests/040 b/tests/qemu-iotests/040
18
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100755
19
--- a/tests/virtio-blk-test.c
19
--- a/tests/qemu-iotests/040
20
+++ b/tests/virtio-blk-test.c
20
+++ b/tests/qemu-iotests/040
21
@@ -XXX,XX +XXX,XX @@ static void test_basic(QVirtioDevice *dev, QGuestAllocator *alloc,
21
@@ -XXX,XX +XXX,XX @@ class TestCommitWithFilters(iotests.QMPTestCase):
22
22
pattern_file)
23
guest_free(alloc, req_addr);
23
self.assertFalse('Pattern verification failed' in result)
24
24
25
+ if (features & (1u << VIRTIO_BLK_F_WRITE_ZEROES)) {
25
+ @iotests.skip_if_unsupported(['throttle'])
26
+ struct virtio_blk_discard_write_zeroes dwz_hdr;
26
def setUp(self):
27
+ void *expected;
27
qemu_img('create', '-f', iotests.imgfmt, self.img0, '64M')
28
+
28
qemu_img('create', '-f', iotests.imgfmt, self.img1, '64M')
29
+ /*
30
+ * WRITE_ZEROES request on the same sector of previous test where
31
+ * we wrote "TEST".
32
+ */
33
+ req.type = VIRTIO_BLK_T_WRITE_ZEROES;
34
+ req.data = (char *) &dwz_hdr;
35
+ dwz_hdr.sector = 0;
36
+ dwz_hdr.num_sectors = 1;
37
+ dwz_hdr.flags = 0;
38
+
39
+ virtio_blk_fix_dwz_hdr(dev, &dwz_hdr);
40
+
41
+ req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr));
42
+
43
+ free_head = qvirtqueue_add(vq, req_addr, 16, false, true);
44
+ qvirtqueue_add(vq, req_addr + 16, sizeof(dwz_hdr), false, true);
45
+ qvirtqueue_add(vq, req_addr + 16 + sizeof(dwz_hdr), 1, true, false);
46
+
47
+ qvirtqueue_kick(dev, vq, free_head);
48
+
49
+ qvirtio_wait_used_elem(dev, vq, free_head, NULL,
50
+ QVIRTIO_BLK_TIMEOUT_US);
51
+ status = readb(req_addr + 16 + sizeof(dwz_hdr));
52
+ g_assert_cmpint(status, ==, 0);
53
+
54
+ guest_free(alloc, req_addr);
55
+
56
+ /* Read request to check if the sector contains all zeroes */
57
+ req.type = VIRTIO_BLK_T_IN;
58
+ req.ioprio = 1;
59
+ req.sector = 0;
60
+ req.data = g_malloc0(512);
61
+
62
+ req_addr = virtio_blk_request(alloc, dev, &req, 512);
63
+
64
+ g_free(req.data);
65
+
66
+ free_head = qvirtqueue_add(vq, req_addr, 16, false, true);
67
+ qvirtqueue_add(vq, req_addr + 16, 512, true, true);
68
+ qvirtqueue_add(vq, req_addr + 528, 1, true, false);
69
+
70
+ qvirtqueue_kick(dev, vq, free_head);
71
+
72
+ qvirtio_wait_used_elem(dev, vq, free_head, NULL,
73
+ QVIRTIO_BLK_TIMEOUT_US);
74
+ status = readb(req_addr + 528);
75
+ g_assert_cmpint(status, ==, 0);
76
+
77
+ data = g_malloc(512);
78
+ expected = g_malloc0(512);
79
+ memread(req_addr + 16, data, 512);
80
+ g_assert_cmpmem(data, 512, expected, 512);
81
+ g_free(expected);
82
+ g_free(data);
83
+
84
+ guest_free(alloc, req_addr);
85
+ }
86
+
87
if (features & (1u << VIRTIO_F_ANY_LAYOUT)) {
88
/* Write and read with 2 descriptor layout */
89
/* Write request */
90
--
29
--
91
2.20.1
30
2.34.1
92
93
1
From: Stefano Garzarella <sgarzare@redhat.com>
1
From: Thomas Huth <thuth@redhat.com>
2
2
3
In order to avoid migration issues, we enable DISCARD and
3
In TAP mode, the stdout is reserved for the TAP protocol, so we
4
WRITE_ZEROES features only for machine type >= 4.0
4
have to make sure to mark other lines with a comment '#' character
5
at the beginning to avoid that the TAP parser at the other end
6
gets confused.
5
7
6
As discussed with Michael S. Tsirkin and Stefan Hajnoczi on the
8
To test this condition, run "configure" for example with:
7
list [1], DISCARD operation should not have security implications
8
(eg. page cache attacks), so we can enable it by default.
9
9
10
[1] https://lists.gnu.org/archive/html/qemu-devel/2019-02/msg00504.html
10
--block-drv-rw-whitelist=copy-before-write,qcow2,raw,file,host_device,blkdebug,null-co,copy-on-read
11
11
12
Suggested-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
12
so that iotest 041 will report that some tests are not run due to
13
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
13
the missing "quorum" driver. Without this change, "make check-block"
14
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
14
fails since the meson tap parser gets confused by these messages.
15
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
15
16
Message-id: 20190221103314.58500-4-sgarzare@redhat.com
16
Signed-off-by: Thomas Huth <thuth@redhat.com>
17
Message-Id: <20190221103314.58500-4-sgarzare@redhat.com>
17
Message-Id: <20220223124353.3273898-1-thuth@redhat.com>
18
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
18
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
19
---
19
---
20
hw/block/virtio-blk.c | 4 ++++
20
tests/qemu-iotests/testrunner.py | 5 ++++-
21
hw/core/machine.c | 2 ++
21
1 file changed, 4 insertions(+), 1 deletion(-)
22
2 files changed, 6 insertions(+)
23
22
24
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
23
diff --git a/tests/qemu-iotests/testrunner.py b/tests/qemu-iotests/testrunner.py
25
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
26
--- a/hw/block/virtio-blk.c
25
--- a/tests/qemu-iotests/testrunner.py
27
+++ b/hw/block/virtio-blk.c
26
+++ b/tests/qemu-iotests/testrunner.py
28
@@ -XXX,XX +XXX,XX @@ static Property virtio_blk_properties[] = {
27
@@ -XXX,XX +XXX,XX @@ def run_test(self, test: str,
29
DEFINE_PROP_UINT16("queue-size", VirtIOBlock, conf.queue_size, 128),
28
description=res.description)
30
DEFINE_PROP_LINK("iothread", VirtIOBlock, conf.iothread, TYPE_IOTHREAD,
29
31
IOThread *),
30
if res.casenotrun:
32
+ DEFINE_PROP_BIT64("discard", VirtIOBlock, host_features,
31
- print(res.casenotrun)
33
+ VIRTIO_BLK_F_DISCARD, true),
32
+ if self.tap:
34
+ DEFINE_PROP_BIT64("write-zeroes", VirtIOBlock, host_features,
33
+ print('#' + res.casenotrun.replace('\n', '\n#'))
35
+ VIRTIO_BLK_F_WRITE_ZEROES, true),
34
+ else:
36
DEFINE_PROP_END_OF_LIST(),
35
+ print(res.casenotrun)
37
};
36
38
37
return res
39
diff --git a/hw/core/machine.c b/hw/core/machine.c
40
index XXXXXXX..XXXXXXX 100644
41
--- a/hw/core/machine.c
42
+++ b/hw/core/machine.c
43
@@ -XXX,XX +XXX,XX @@ GlobalProperty hw_compat_3_1[] = {
44
{ "usb-kbd", "serial", "42" },
45
{ "usb-mouse", "serial", "42" },
46
{ "usb-kbd", "serial", "42" },
47
+ { "virtio-blk-device", "discard", "false" },
48
+ { "virtio-blk-device", "write-zeroes", "false" },
49
};
50
const size_t hw_compat_3_1_len = G_N_ELEMENTS(hw_compat_3_1);
51
38
52
--
39
--
53
2.20.1
40
2.34.1
54
55
1
From: Stefano Garzarella <sgarzare@redhat.com>
1
Drop the use of OUTPUT_DIR (test/qemu-iotests under the build
2
directory), and instead write test output files (.out.bad, .notrun, and
3
.casenotrun) to TEST_DIR.
2
4
3
In order to use VirtIOFeature also in other virtio devices, we move
5
With this, the same test can be run concurrently without the separate
4
its declaration and the endof() macro (renamed in virtio_endof())
6
instances interfering, because they will need separate TEST_DIRs anyway.
5
in virtio.h.
7
Running the same test separately is useful when running the iotests with
6
We add virtio_feature_get_config_size() function to iterate the array
8
various format/protocol combinations in parallel, or when you just want
7
of VirtIOFeature and to return the config size depending on the
9
to aggressively exercise a single test (e.g. when it fails only
8
features enabled. (as virtio_net_set_config_size() did)
10
sporadically).
9
11
10
Suggested-by: Michael S. Tsirkin <mst@redhat.com>
12
Putting this output into TEST_DIR means that it will stick around for
11
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
13
inspection after the test run is done (though running the same test in
12
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
14
the same TEST_DIR will overwrite it, just as it used to be); but given
13
Message-id: 20190221103314.58500-5-sgarzare@redhat.com
15
that TEST_DIR is a scratch directory, it should be clear that users can
14
Message-Id: <20190221103314.58500-5-sgarzare@redhat.com>
16
delete all of its content at any point. (And if TEST_DIR is on tmpfs,
15
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
17
it will just disappear on shutdown.) Contrarily, alternative approaches
18
that would put these output files into OUTPUT_DIR with some prefix to
19
differentiate between separate test runs might easily lead to cluttering
20
OUTPUT_DIR.
21
22
(This change means OUTPUT_DIR is no longer written to by the iotests, so
23
we can drop its usage altogether.)
24
25
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
26
Message-Id: <20220221172909.762858-1-hreitz@redhat.com>
27
[hreitz: Simplified `Path(os.path.join(x, y))` to `Path(x, y)`, as
28
suggested by Vladimir; and rebased on 9086c7639822b6
29
("tests/qemu-iotests: Rework the checks and spots using GNU
30
sed")]
31
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
16
---
32
---
17
include/hw/virtio/virtio.h | 15 +++++++++++++++
33
tests/qemu-iotests/common.rc | 6 +++---
18
hw/net/virtio-net.c | 31 +++++++------------------------
34
tests/qemu-iotests/iotests.py | 5 ++---
19
hw/virtio/virtio.c | 15 +++++++++++++++
35
tests/qemu-iotests/testenv.py | 5 +----
20
3 files changed, 37 insertions(+), 24 deletions(-)
36
tests/qemu-iotests/testrunner.py | 14 ++++++++------
37
4 files changed, 14 insertions(+), 16 deletions(-)
21
38
22
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
39
diff --git a/tests/qemu-iotests/common.rc b/tests/qemu-iotests/common.rc
23
index XXXXXXX..XXXXXXX 100644
40
index XXXXXXX..XXXXXXX 100644
24
--- a/include/hw/virtio/virtio.h
41
--- a/tests/qemu-iotests/common.rc
25
+++ b/include/hw/virtio/virtio.h
42
+++ b/tests/qemu-iotests/common.rc
26
@@ -XXX,XX +XXX,XX @@ static inline hwaddr vring_align(hwaddr addr,
43
@@ -XXX,XX +XXX,XX @@
27
return QEMU_ALIGN_UP(addr, align);
44
# bail out, setting up .notrun file
45
_notrun()
46
{
47
- echo "$*" >"$OUTPUT_DIR/$seq.notrun"
48
+ echo "$*" >"$TEST_DIR/$seq.notrun"
49
echo "$seq not run: $*"
50
status=0
51
exit
52
@@ -XXX,XX +XXX,XX @@ _img_info()
53
#
54
_casenotrun()
55
{
56
- echo " [case not run] $*" >>"$OUTPUT_DIR/$seq.casenotrun"
57
+ echo " [case not run] $*" >>"$TEST_DIR/$seq.casenotrun"
28
}
58
}
29
59
30
+/*
60
# just plain bail out
31
+ * Calculate the number of bytes up to and including the given 'field' of
61
#
32
+ * 'container'.
62
_fail()
33
+ */
63
{
34
+#define virtio_endof(container, field) \
64
- echo "$*" | tee -a "$OUTPUT_DIR/$seq.full"
35
+ (offsetof(container, field) + sizeof_field(container, field))
65
+ echo "$*" | tee -a "$TEST_DIR/$seq.full"
66
echo "(see $seq.full for details)"
67
status=1
68
exit 1
69
diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
70
index XXXXXXX..XXXXXXX 100644
71
--- a/tests/qemu-iotests/iotests.py
72
+++ b/tests/qemu-iotests/iotests.py
73
@@ -XXX,XX +XXX,XX @@
74
75
imgfmt = os.environ.get('IMGFMT', 'raw')
76
imgproto = os.environ.get('IMGPROTO', 'file')
77
-output_dir = os.environ.get('OUTPUT_DIR', '.')
78
79
try:
80
test_dir = os.environ['TEST_DIR']
81
@@ -XXX,XX +XXX,XX @@ def notrun(reason):
82
# Each test in qemu-iotests has a number ("seq")
83
seq = os.path.basename(sys.argv[0])
84
85
- with open('%s/%s.notrun' % (output_dir, seq), 'w', encoding='utf-8') \
86
+ with open('%s/%s.notrun' % (test_dir, seq), 'w', encoding='utf-8') \
87
as outfile:
88
outfile.write(reason + '\n')
89
logger.warning("%s not run: %s", seq, reason)
90
@@ -XXX,XX +XXX,XX @@ def case_notrun(reason):
91
# Each test in qemu-iotests has a number ("seq")
92
seq = os.path.basename(sys.argv[0])
93
94
- with open('%s/%s.casenotrun' % (output_dir, seq), 'a', encoding='utf-8') \
95
+ with open('%s/%s.casenotrun' % (test_dir, seq), 'a', encoding='utf-8') \
96
as outfile:
97
outfile.write(' [case not run] ' + reason + '\n')
98
99
diff --git a/tests/qemu-iotests/testenv.py b/tests/qemu-iotests/testenv.py
100
index XXXXXXX..XXXXXXX 100644
101
--- a/tests/qemu-iotests/testenv.py
102
+++ b/tests/qemu-iotests/testenv.py
103
@@ -XXX,XX +XXX,XX @@ class TestEnv(ContextManager['TestEnv']):
104
# pylint: disable=too-many-instance-attributes
105
106
env_variables = ['PYTHONPATH', 'TEST_DIR', 'SOCK_DIR', 'SAMPLE_IMG_DIR',
107
- 'OUTPUT_DIR', 'PYTHON', 'QEMU_PROG', 'QEMU_IMG_PROG',
108
+ 'PYTHON', 'QEMU_PROG', 'QEMU_IMG_PROG',
109
'QEMU_IO_PROG', 'QEMU_NBD_PROG', 'QSD_PROG',
110
'QEMU_OPTIONS', 'QEMU_IMG_OPTIONS',
111
'QEMU_IO_OPTIONS', 'QEMU_IO_OPTIONS_NO_FMT',
112
@@ -XXX,XX +XXX,XX @@ def init_directories(self) -> None:
113
TEST_DIR
114
SOCK_DIR
115
SAMPLE_IMG_DIR
116
- OUTPUT_DIR
117
"""
118
119
# Path where qemu goodies live in this source tree.
120
@@ -XXX,XX +XXX,XX @@ def init_directories(self) -> None:
121
os.path.join(self.source_iotests,
122
'sample_images'))
123
124
- self.output_dir = os.getcwd() # OUTPUT_DIR
125
-
126
def init_binaries(self) -> None:
127
"""Init binary path variables:
128
PYTHON (for bash tests)
129
diff --git a/tests/qemu-iotests/testrunner.py b/tests/qemu-iotests/testrunner.py
130
index XXXXXXX..XXXXXXX 100644
131
--- a/tests/qemu-iotests/testrunner.py
132
+++ b/tests/qemu-iotests/testrunner.py
133
@@ -XXX,XX +XXX,XX @@ def do_run_test(self, test: str, mp: bool) -> TestResult:
134
"""
135
136
f_test = Path(test)
137
- f_bad = Path(f_test.name + '.out.bad')
138
- f_notrun = Path(f_test.name + '.notrun')
139
- f_casenotrun = Path(f_test.name + '.casenotrun')
140
f_reference = Path(self.find_reference(test))
141
142
if not f_test.exists():
143
@@ -XXX,XX +XXX,XX @@ def do_run_test(self, test: str, mp: bool) -> TestResult:
144
description='No qualified output '
145
f'(expected {f_reference})')
146
147
- for p in (f_bad, f_notrun, f_casenotrun):
148
- silent_unlink(p)
149
-
150
args = [str(f_test.resolve())]
151
env = self.env.prepare_subprocess(args)
152
if mp:
153
@@ -XXX,XX +XXX,XX @@ def do_run_test(self, test: str, mp: bool) -> TestResult:
154
env[d] = os.path.join(env[d], f_test.name)
155
Path(env[d]).mkdir(parents=True, exist_ok=True)
156
157
+ test_dir = env['TEST_DIR']
158
+ f_bad = Path(test_dir, f_test.name + '.out.bad')
159
+ f_notrun = Path(test_dir, f_test.name + '.notrun')
160
+ f_casenotrun = Path(test_dir, f_test.name + '.casenotrun')
36
+
161
+
37
+typedef struct VirtIOFeature {
162
+ for p in (f_notrun, f_casenotrun):
38
+ uint64_t flags;
163
+ silent_unlink(p)
39
+ size_t end;
40
+} VirtIOFeature;
41
+
164
+
42
+size_t virtio_feature_get_config_size(VirtIOFeature *features,
165
t0 = time.time()
43
+ uint64_t host_features);
166
with f_bad.open('w', encoding="utf-8") as f:
44
+
167
with subprocess.Popen(args, cwd=str(f_test.parent), env=env,
45
typedef struct VirtQueue VirtQueue;
46
47
#define VIRTQUEUE_MAX_SIZE 1024
48
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
49
index XXXXXXX..XXXXXXX 100644
50
--- a/hw/net/virtio-net.c
51
+++ b/hw/net/virtio-net.c
52
@@ -XXX,XX +XXX,XX @@ static inline __virtio16 *virtio_net_rsc_ext_num_dupacks(
53
54
#endif
55
56
-/*
57
- * Calculate the number of bytes up to and including the given 'field' of
58
- * 'container'.
59
- */
60
-#define endof(container, field) \
61
- (offsetof(container, field) + sizeof_field(container, field))
62
-
63
-typedef struct VirtIOFeature {
64
- uint64_t flags;
65
- size_t end;
66
-} VirtIOFeature;
67
-
68
static VirtIOFeature feature_sizes[] = {
69
{.flags = 1ULL << VIRTIO_NET_F_MAC,
70
- .end = endof(struct virtio_net_config, mac)},
71
+ .end = virtio_endof(struct virtio_net_config, mac)},
72
{.flags = 1ULL << VIRTIO_NET_F_STATUS,
73
- .end = endof(struct virtio_net_config, status)},
74
+ .end = virtio_endof(struct virtio_net_config, status)},
75
{.flags = 1ULL << VIRTIO_NET_F_MQ,
76
- .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
77
+ .end = virtio_endof(struct virtio_net_config, max_virtqueue_pairs)},
78
{.flags = 1ULL << VIRTIO_NET_F_MTU,
79
- .end = endof(struct virtio_net_config, mtu)},
80
+ .end = virtio_endof(struct virtio_net_config, mtu)},
81
{.flags = 1ULL << VIRTIO_NET_F_SPEED_DUPLEX,
82
- .end = endof(struct virtio_net_config, duplex)},
83
+ .end = virtio_endof(struct virtio_net_config, duplex)},
84
{}
85
};
86
87
@@ -XXX,XX +XXX,XX @@ static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
88
89
static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
90
{
91
- int i, config_size = 0;
92
virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
93
94
- for (i = 0; feature_sizes[i].flags != 0; i++) {
95
- if (host_features & feature_sizes[i].flags) {
96
- config_size = MAX(feature_sizes[i].end, config_size);
97
- }
98
- }
99
- n->config_size = config_size;
100
+ n->config_size = virtio_feature_get_config_size(feature_sizes,
101
+ host_features);
102
}
103
104
void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
105
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
106
index XXXXXXX..XXXXXXX 100644
107
--- a/hw/virtio/virtio.c
108
+++ b/hw/virtio/virtio.c
109
@@ -XXX,XX +XXX,XX @@ int virtio_set_features(VirtIODevice *vdev, uint64_t val)
110
return ret;
111
}
112
113
+size_t virtio_feature_get_config_size(VirtIOFeature *feature_sizes,
114
+ uint64_t host_features)
115
+{
116
+ size_t config_size = 0;
117
+ int i;
118
+
119
+ for (i = 0; feature_sizes[i].flags != 0; i++) {
120
+ if (host_features & feature_sizes[i].flags) {
121
+ config_size = MAX(feature_sizes[i].end, config_size);
122
+ }
123
+ }
124
+
125
+ return config_size;
126
+}
127
+
128
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
129
{
130
int i, ret;
131
--
168
--
132
2.20.1
169
2.34.1
133
134
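A quick worked example of what the virtio_feature_get_config_size() helper introduced above computes, using the virtio-net feature_sizes table shown in the diff (the feature combinations are illustrative only):

    /* With only VIRTIO_NET_F_MAC and VIRTIO_NET_F_STATUS negotiated:
     *   config_size = MAX(virtio_endof(struct virtio_net_config, mac),
     *                     virtio_endof(struct virtio_net_config, status))
     * i.e. the visible config space ends right after the 'status' field.
     * Adding VIRTIO_NET_F_MTU extends it up to and including 'mtu'.
     * The result is always the largest 'end' among the enabled features,
     * never the sum of the entries. */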
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
Use new qemu_iovec_init_buf() instead of
3
We are going to complicate bitmap initialization in a further
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
4
commit. Later, the backup job will be able to work without the filter
5
(when the source is immutable), so we'll need the same bitmap initialization in
6
the copy-before-write filter and in the backup job. So it's reasonable to do
7
it in block-copy.
8
9
Note that for now cbw_open() is the only caller of
10
block_copy_state_new().
5
11
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
12
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
13
Reviewed-by: Hanna Reitz <hreitz@redhat.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
14
Message-Id: <20220303194349.2304213-2-vsementsov@virtuozzo.com>
9
Message-id: 20190218140926.333779-11-vsementsov@virtuozzo.com
15
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
10
Message-Id: <20190218140926.333779-11-vsementsov@virtuozzo.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
16
---
13
block/qed-table.c | 16 +++-------------
17
block/block-copy.c | 1 +
14
block/qed.c | 31 +++++++++----------------------
18
block/copy-before-write.c | 4 ----
15
2 files changed, 12 insertions(+), 35 deletions(-)
19
2 files changed, 1 insertion(+), 4 deletions(-)
16
20
17
diff --git a/block/qed-table.c b/block/qed-table.c
21
diff --git a/block/block-copy.c b/block/block-copy.c
18
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
19
--- a/block/qed-table.c
23
--- a/block/block-copy.c
20
+++ b/block/qed-table.c
24
+++ b/block/block-copy.c
21
@@ -XXX,XX +XXX,XX @@
25
@@ -XXX,XX +XXX,XX @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
22
/* Called with table_lock held. */
26
return NULL;
23
static int qed_read_table(BDRVQEDState *s, uint64_t offset, QEDTable *table)
27
}
28
bdrv_disable_dirty_bitmap(copy_bitmap);
29
+ bdrv_set_dirty_bitmap(copy_bitmap, 0, bdrv_dirty_bitmap_size(copy_bitmap));
30
31
/*
32
* If source is in backing chain of target assume that target is going to be
33
diff --git a/block/copy-before-write.c b/block/copy-before-write.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/block/copy-before-write.c
36
+++ b/block/copy-before-write.c
37
@@ -XXX,XX +XXX,XX @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
38
Error **errp)
24
{
39
{
25
- QEMUIOVector qiov;
40
BDRVCopyBeforeWriteState *s = bs->opaque;
26
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(
41
- BdrvDirtyBitmap *copy_bitmap;
27
+ qiov, table->offsets, s->header.cluster_size * s->header.table_size);
42
28
int noffsets;
43
bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
29
int i, ret;
44
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
30
45
@@ -XXX,XX +XXX,XX @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
31
- struct iovec iov = {
46
return -EINVAL;
32
- .iov_base = table->offsets,
47
}
33
- .iov_len = s->header.cluster_size * s->header.table_size,
48
34
- };
49
- copy_bitmap = block_copy_dirty_bitmap(s->bcs);
35
- qemu_iovec_init_external(&qiov, &iov, 1);
50
- bdrv_set_dirty_bitmap(copy_bitmap, 0, bdrv_dirty_bitmap_size(copy_bitmap));
36
-
51
-
37
trace_qed_read_table(s, offset, table);
52
return 0;
38
39
qemu_co_mutex_unlock(&s->table_lock);
40
@@ -XXX,XX +XXX,XX @@ static int qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
41
unsigned int sector_mask = BDRV_SECTOR_SIZE / sizeof(uint64_t) - 1;
42
unsigned int start, end, i;
43
QEDTable *new_table;
44
- struct iovec iov;
45
QEMUIOVector qiov;
46
size_t len_bytes;
47
int ret;
48
@@ -XXX,XX +XXX,XX @@ static int qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
49
len_bytes = (end - start) * sizeof(uint64_t);
50
51
new_table = qemu_blockalign(s->bs, len_bytes);
52
- iov = (struct iovec) {
53
- .iov_base = new_table->offsets,
54
- .iov_len = len_bytes,
55
- };
56
- qemu_iovec_init_external(&qiov, &iov, 1);
57
+ qemu_iovec_init_buf(&qiov, new_table->offsets, len_bytes);
58
59
/* Byteswap table */
60
for (i = start; i < end; i++) {
61
diff --git a/block/qed.c b/block/qed.c
62
index XXXXXXX..XXXXXXX 100644
63
--- a/block/qed.c
64
+++ b/block/qed.c
65
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn qed_write_header(BDRVQEDState *s)
66
int nsectors = DIV_ROUND_UP(sizeof(QEDHeader), BDRV_SECTOR_SIZE);
67
size_t len = nsectors * BDRV_SECTOR_SIZE;
68
uint8_t *buf;
69
- struct iovec iov;
70
QEMUIOVector qiov;
71
int ret;
72
73
assert(s->allocating_acb || s->allocating_write_reqs_plugged);
74
75
buf = qemu_blockalign(s->bs, len);
76
- iov = (struct iovec) {
77
- .iov_base = buf,
78
- .iov_len = len,
79
- };
80
- qemu_iovec_init_external(&qiov, &iov, 1);
81
+ qemu_iovec_init_buf(&qiov, buf, len);
82
83
ret = bdrv_co_preadv(s->bs->file, 0, qiov.size, &qiov, 0);
84
if (ret < 0) {
85
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn qed_copy_from_backing_file(BDRVQEDState *s,
86
{
87
QEMUIOVector qiov;
88
QEMUIOVector *backing_qiov = NULL;
89
- struct iovec iov;
90
int ret;
91
92
/* Skip copy entirely if there is no work to do */
93
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn qed_copy_from_backing_file(BDRVQEDState *s,
94
return 0;
95
}
96
97
- iov = (struct iovec) {
98
- .iov_base = qemu_blockalign(s->bs, len),
99
- .iov_len = len,
100
- };
101
- qemu_iovec_init_external(&qiov, &iov, 1);
102
+ qemu_iovec_init_buf(&qiov, qemu_blockalign(s->bs, len), len);
103
104
ret = qed_read_backing_file(s, pos, &qiov, &backing_qiov);
105
106
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn qed_copy_from_backing_file(BDRVQEDState *s,
107
}
108
ret = 0;
109
out:
110
- qemu_vfree(iov.iov_base);
111
+ qemu_vfree(qemu_iovec_buf(&qiov));
112
return ret;
113
}
53
}
114
54
115
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
116
BdrvRequestFlags flags)
117
{
118
BDRVQEDState *s = bs->opaque;
119
- QEMUIOVector qiov;
120
- struct iovec iov;
121
+
122
+ /*
123
+ * Zero writes start without an I/O buffer. If a buffer becomes necessary
124
+ * then it will be allocated during request processing.
125
+ */
126
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes);
127
128
/* Fall back if the request is not aligned */
129
if (qed_offset_into_cluster(s, offset) ||
130
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
131
return -ENOTSUP;
132
}
133
134
- /* Zero writes start without an I/O buffer. If a buffer becomes necessary
135
- * then it will be allocated during request processing.
136
- */
137
- iov.iov_base = NULL;
138
- iov.iov_len = bytes;
139
-
140
- qemu_iovec_init_external(&qiov, &iov, 1);
141
return qed_co_request(bs, offset >> BDRV_SECTOR_BITS, &qiov,
142
bytes >> BDRV_SECTOR_BITS,
143
QED_AIOCB_WRITE | QED_AIOCB_ZERO);
144
--
55
--
145
2.20.1
56
2.34.1
146
147
diff view generated by jsdifflib
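
The QEMUIOVector conversions repeated throughout this pull request all follow the same pattern; here is a condensed before/after sketch (the buffer is a placeholder, the helpers are the ones added earlier in the series).

    /* Old style: a single-element iovec plus qemu_iovec_init_external(). */
    uint8_t buf[512];
    struct iovec iov = {
        .iov_base = buf,
        .iov_len = sizeof(buf),
    };
    QEMUIOVector qiov_old;
    qemu_iovec_init_external(&qiov_old, &iov, 1);

    /* New style: the single buffer lives inside the QEMUIOVector, so no
     * separately-owned struct iovec is needed. */
    QEMUIOVector qiov_new;
    qemu_iovec_init_buf(&qiov_new, buf, sizeof(buf));

    /* Initializer form, convenient at declaration time. */
    QEMUIOVector qiov_decl = QEMU_IOVEC_INIT_BUF(qiov_decl, buf, sizeof(buf));
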
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
Use new QEMU_IOVEC_INIT_BUF() instead of
3
That simplifies failure handling in the existing code and in further new
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
4
usage of bdrv_merge_dirty_bitmap().
5
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
7
Reviewed-by: Hanna Reitz <hreitz@redhat.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
8
Message-Id: <20220303194349.2304213-3-vsementsov@virtuozzo.com>
9
Message-id: 20190218140926.333779-8-vsementsov@virtuozzo.com
9
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
10
Message-Id: <20190218140926.333779-8-vsementsov@virtuozzo.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
10
---
13
block/parallels.c | 13 +++++--------
11
include/block/dirty-bitmap.h | 2 +-
14
1 file changed, 5 insertions(+), 8 deletions(-)
12
block/dirty-bitmap.c | 9 +++++++--
13
block/monitor/bitmap-qmp-cmds.c | 5 +----
14
3 files changed, 9 insertions(+), 7 deletions(-)
15
15
16
diff --git a/block/parallels.c b/block/parallels.c
16
diff --git a/include/block/dirty-bitmap.h b/include/block/dirty-bitmap.h
17
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
18
--- a/block/parallels.c
18
--- a/include/block/dirty-bitmap.h
19
+++ b/block/parallels.c
19
+++ b/include/block/dirty-bitmap.h
20
@@ -XXX,XX +XXX,XX @@ static int64_t allocate_clusters(BlockDriverState *bs, int64_t sector_num,
20
@@ -XXX,XX +XXX,XX @@ void bdrv_dirty_bitmap_set_persistence(BdrvDirtyBitmap *bitmap,
21
if (bs->backing) {
21
bool persistent);
22
int64_t nb_cow_sectors = to_allocate * s->tracks;
22
void bdrv_dirty_bitmap_set_inconsistent(BdrvDirtyBitmap *bitmap);
23
int64_t nb_cow_bytes = nb_cow_sectors << BDRV_SECTOR_BITS;
23
void bdrv_dirty_bitmap_set_busy(BdrvDirtyBitmap *bitmap, bool busy);
24
- QEMUIOVector qiov;
24
-void bdrv_merge_dirty_bitmap(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src,
25
- struct iovec iov = {
25
+bool bdrv_merge_dirty_bitmap(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src,
26
- .iov_len = nb_cow_bytes,
26
HBitmap **backup, Error **errp);
27
- .iov_base = qemu_blockalign(bs, nb_cow_bytes)
27
void bdrv_dirty_bitmap_skip_store(BdrvDirtyBitmap *bitmap, bool skip);
28
- };
28
bool bdrv_dirty_bitmap_get(BdrvDirtyBitmap *bitmap, int64_t offset);
29
- qemu_iovec_init_external(&qiov, &iov, 1);
29
diff --git a/block/dirty-bitmap.c b/block/dirty-bitmap.c
30
+ QEMUIOVector qiov =
30
index XXXXXXX..XXXXXXX 100644
31
+ QEMU_IOVEC_INIT_BUF(qiov, qemu_blockalign(bs, nb_cow_bytes),
31
--- a/block/dirty-bitmap.c
32
+ nb_cow_bytes);
32
+++ b/block/dirty-bitmap.c
33
33
@@ -XXX,XX +XXX,XX @@ bool bdrv_dirty_bitmap_next_dirty_area(BdrvDirtyBitmap *bitmap,
34
ret = bdrv_co_preadv(bs->backing, idx * s->tracks * BDRV_SECTOR_SIZE,
34
* Ensures permissions on bitmaps are reasonable; use for public API.
35
nb_cow_bytes, &qiov, 0);
35
*
36
if (ret < 0) {
36
* @backup: If provided, make a copy of dest here prior to merge.
37
- qemu_vfree(iov.iov_base);
37
+ *
38
+ qemu_vfree(qemu_iovec_buf(&qiov));
38
+ * Returns true on success, false on failure. In case of failure bitmaps are
39
return ret;
39
+ * untouched.
40
*/
41
-void bdrv_merge_dirty_bitmap(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src,
42
+bool bdrv_merge_dirty_bitmap(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src,
43
HBitmap **backup, Error **errp)
44
{
45
- bool ret;
46
+ bool ret = false;
47
48
bdrv_dirty_bitmaps_lock(dest->bs);
49
if (src->bs != dest->bs) {
50
@@ -XXX,XX +XXX,XX @@ out:
51
if (src->bs != dest->bs) {
52
bdrv_dirty_bitmaps_unlock(src->bs);
53
}
54
+
55
+ return ret;
56
}
57
58
/**
59
diff --git a/block/monitor/bitmap-qmp-cmds.c b/block/monitor/bitmap-qmp-cmds.c
60
index XXXXXXX..XXXXXXX 100644
61
--- a/block/monitor/bitmap-qmp-cmds.c
62
+++ b/block/monitor/bitmap-qmp-cmds.c
63
@@ -XXX,XX +XXX,XX @@ BdrvDirtyBitmap *block_dirty_bitmap_merge(const char *node, const char *target,
64
BlockDriverState *bs;
65
BdrvDirtyBitmap *dst, *src, *anon;
66
BlockDirtyBitmapMergeSourceList *lst;
67
- Error *local_err = NULL;
68
69
GLOBAL_STATE_CODE();
70
71
@@ -XXX,XX +XXX,XX @@ BdrvDirtyBitmap *block_dirty_bitmap_merge(const char *node, const char *target,
72
abort();
40
}
73
}
41
74
42
ret = bdrv_co_pwritev(bs->file, s->data_end * BDRV_SECTOR_SIZE,
75
- bdrv_merge_dirty_bitmap(anon, src, NULL, &local_err);
43
nb_cow_bytes, &qiov, 0);
76
- if (local_err) {
44
- qemu_vfree(iov.iov_base);
77
- error_propagate(errp, local_err);
45
+ qemu_vfree(qemu_iovec_buf(&qiov));
78
+ if (!bdrv_merge_dirty_bitmap(anon, src, NULL, errp)) {
46
if (ret < 0) {
79
dst = NULL;
47
return ret;
80
goto out;
48
}
81
}
49
--
82
--
50
2.20.1
83
2.34.1
51
52
diff view generated by jsdifflib
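
The bdrv_merge_dirty_bitmap() change is easiest to see from a caller's point of view; a minimal sketch, where dst, src and errp are assumed to exist in the caller:

    /* Old pattern: a local Error object only to detect failure. */
    Error *local_err = NULL;
    bdrv_merge_dirty_bitmap(dst, src, NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return NULL;
    }

    /* New pattern: the boolean return reports success directly, and the
     * bitmaps are guaranteed untouched on failure. */
    if (!bdrv_merge_dirty_bitmap(dst, src, NULL, errp)) {
        return NULL;
    }
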
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
@iov is used only to initialize @qiov. Let's use new
3
This will be used in the following commit to bring "incremental" mode
4
qemu_iovec_init_buf() instead, which simplifies the code.
4
to the copy-before-write filter.
5
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
7
Reviewed-by: Hanna Reitz <hreitz@redhat.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
8
Message-Id: <20220303194349.2304213-4-vsementsov@virtuozzo.com>
9
Message-id: 20190218140926.333779-18-vsementsov@virtuozzo.com
9
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
10
Message-Id: <20190218140926.333779-18-vsementsov@virtuozzo.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
10
---
13
include/hw/ide/internal.h | 1 -
11
include/block/block-copy.h | 1 +
14
hw/ide/atapi.c | 5 ++---
12
block/block-copy.c | 14 +++++++++++++-
15
2 files changed, 2 insertions(+), 4 deletions(-)
13
block/copy-before-write.c | 2 +-
14
3 files changed, 15 insertions(+), 2 deletions(-)
16
15
17
diff --git a/include/hw/ide/internal.h b/include/hw/ide/internal.h
16
diff --git a/include/block/block-copy.h b/include/block/block-copy.h
18
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
19
--- a/include/hw/ide/internal.h
18
--- a/include/block/block-copy.h
20
+++ b/include/hw/ide/internal.h
19
+++ b/include/block/block-copy.h
21
@@ -XXX,XX +XXX,XX @@ struct IDEDMAOps {
20
@@ -XXX,XX +XXX,XX @@ typedef struct BlockCopyState BlockCopyState;
22
21
typedef struct BlockCopyCallState BlockCopyCallState;
23
struct IDEDMA {
22
24
const struct IDEDMAOps *ops;
23
BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
25
- struct iovec iov;
24
+ const BdrvDirtyBitmap *bitmap,
26
QEMUIOVector qiov;
25
Error **errp);
27
BlockAIOCB *aiocb;
26
28
};
27
/* Function should be called prior any actual copy request */
29
diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c
28
diff --git a/block/block-copy.c b/block/block-copy.c
30
index XXXXXXX..XXXXXXX 100644
29
index XXXXXXX..XXXXXXX 100644
31
--- a/hw/ide/atapi.c
30
--- a/block/block-copy.c
32
+++ b/hw/ide/atapi.c
31
+++ b/block/block-copy.c
33
@@ -XXX,XX +XXX,XX @@ static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret)
32
@@ -XXX,XX +XXX,XX @@ static int64_t block_copy_calculate_cluster_size(BlockDriverState *target,
34
data_offset = 0;
33
}
34
35
BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
36
+ const BdrvDirtyBitmap *bitmap,
37
Error **errp)
38
{
39
+ ERRP_GUARD();
40
BlockCopyState *s;
41
int64_t cluster_size;
42
BdrvDirtyBitmap *copy_bitmap;
43
@@ -XXX,XX +XXX,XX @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
44
return NULL;
35
}
45
}
36
trace_ide_atapi_cmd_read_dma_cb_aio(s, s->lba, n);
46
bdrv_disable_dirty_bitmap(copy_bitmap);
37
- s->bus->dma->iov.iov_base = (void *)(s->io_buffer + data_offset);
47
- bdrv_set_dirty_bitmap(copy_bitmap, 0, bdrv_dirty_bitmap_size(copy_bitmap));
38
- s->bus->dma->iov.iov_len = n * ATAPI_SECTOR_SIZE;
48
+ if (bitmap) {
39
- qemu_iovec_init_external(&s->bus->dma->qiov, &s->bus->dma->iov, 1);
49
+ if (!bdrv_merge_dirty_bitmap(copy_bitmap, bitmap, NULL, errp)) {
40
+ qemu_iovec_init_buf(&s->bus->dma->qiov, s->io_buffer + data_offset,
50
+ error_prepend(errp, "Failed to merge bitmap '%s' to internal "
41
+ n * ATAPI_SECTOR_SIZE);
51
+ "copy-bitmap: ", bdrv_dirty_bitmap_name(bitmap));
42
52
+ bdrv_release_dirty_bitmap(copy_bitmap);
43
s->bus->dma->aiocb = ide_buffered_readv(s, (int64_t)s->lba << 2,
53
+ return NULL;
44
&s->bus->dma->qiov, n * 4,
54
+ }
55
+ } else {
56
+ bdrv_set_dirty_bitmap(copy_bitmap, 0,
57
+ bdrv_dirty_bitmap_size(copy_bitmap));
58
+ }
59
60
/*
61
* If source is in backing chain of target assume that target is going to be
62
diff --git a/block/copy-before-write.c b/block/copy-before-write.c
63
index XXXXXXX..XXXXXXX 100644
64
--- a/block/copy-before-write.c
65
+++ b/block/copy-before-write.c
66
@@ -XXX,XX +XXX,XX @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
67
((BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK) &
68
bs->file->bs->supported_zero_flags);
69
70
- s->bcs = block_copy_state_new(bs->file, s->target, errp);
71
+ s->bcs = block_copy_state_new(bs->file, s->target, NULL, errp);
72
if (!s->bcs) {
73
error_prepend(errp, "Cannot create block-copy-state: ");
74
return -EINVAL;
45
--
75
--
46
2.20.1
76
2.34.1
47
48
diff view generated by jsdifflib
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
@iov is used only to initialize @qiov. Let's use new
3
This brings "incremental" mode to the copy-before-write filter: the user can
4
qemu_iovec_init_buf() instead, which simplifies the code.
4
specify a bitmap so that the filter copies only "dirty" areas.
5
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
7
Message-Id: <20220303194349.2304213-5-vsementsov@virtuozzo.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
8
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
9
Message-id: 20190218140926.333779-16-vsementsov@virtuozzo.com
10
Message-Id: <20190218140926.333779-16-vsementsov@virtuozzo.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
9
---
13
include/hw/ide/internal.h | 1 -
10
qapi/block-core.json | 10 +++++++-
14
hw/ide/atapi.c | 9 ++++-----
11
block/copy-before-write.c | 51 ++++++++++++++++++++++++++++++++++++++-
15
hw/ide/core.c | 8 ++------
12
2 files changed, 59 insertions(+), 2 deletions(-)
16
3 files changed, 6 insertions(+), 12 deletions(-)
17
13
18
diff --git a/include/hw/ide/internal.h b/include/hw/ide/internal.h
14
diff --git a/qapi/block-core.json b/qapi/block-core.json
19
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
20
--- a/include/hw/ide/internal.h
16
--- a/qapi/block-core.json
21
+++ b/include/hw/ide/internal.h
17
+++ b/qapi/block-core.json
22
@@ -XXX,XX +XXX,XX @@ struct IDEState {
18
@@ -XXX,XX +XXX,XX @@
23
int atapi_dma; /* true if dma is requested for the packet cmd */
19
#
24
BlockAcctCookie acct;
20
# @target: The target for copy-before-write operations.
25
BlockAIOCB *pio_aiocb;
21
#
26
- struct iovec iov;
22
+# @bitmap: If specified, copy-before-write filter will do
27
QEMUIOVector qiov;
23
+# copy-before-write operations only for dirty regions of the
28
QLIST_HEAD(, IDEBufferedRequest) buffered_requests;
24
+# bitmap. Bitmap size must be equal to length of file and
29
/* ATA DMA state */
25
+# target child of the filter. Note also, that bitmap is used
30
diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c
26
+# only to initialize internal bitmap of the process, so further
27
+# modifications (or removing) of specified bitmap doesn't
28
+# influence the filter. (Since 7.0)
29
+#
30
# Since: 6.2
31
##
32
{ 'struct': 'BlockdevOptionsCbw',
33
'base': 'BlockdevOptionsGenericFormat',
34
- 'data': { 'target': 'BlockdevRef' } }
35
+ 'data': { 'target': 'BlockdevRef', '*bitmap': 'BlockDirtyBitmap' } }
36
37
##
38
# @BlockdevOptions:
39
diff --git a/block/copy-before-write.c b/block/copy-before-write.c
31
index XXXXXXX..XXXXXXX 100644
40
index XXXXXXX..XXXXXXX 100644
32
--- a/hw/ide/atapi.c
41
--- a/block/copy-before-write.c
33
+++ b/hw/ide/atapi.c
42
+++ b/block/copy-before-write.c
34
@@ -XXX,XX +XXX,XX @@ static void cd_read_sector_cb(void *opaque, int ret)
43
@@ -XXX,XX +XXX,XX @@
35
44
36
static int cd_read_sector(IDEState *s)
45
#include "block/copy-before-write.h"
46
47
+#include "qapi/qapi-visit-block-core.h"
48
+
49
typedef struct BDRVCopyBeforeWriteState {
50
BlockCopyState *bcs;
51
BdrvChild *target;
52
@@ -XXX,XX +XXX,XX @@ static void cbw_child_perm(BlockDriverState *bs, BdrvChild *c,
53
}
54
}
55
56
+static bool cbw_parse_bitmap_option(QDict *options, BdrvDirtyBitmap **bitmap,
57
+ Error **errp)
58
+{
59
+ QDict *bitmap_qdict = NULL;
60
+ BlockDirtyBitmap *bmp_param = NULL;
61
+ Visitor *v = NULL;
62
+ bool ret = false;
63
+
64
+ *bitmap = NULL;
65
+
66
+ qdict_extract_subqdict(options, &bitmap_qdict, "bitmap.");
67
+ if (!qdict_size(bitmap_qdict)) {
68
+ ret = true;
69
+ goto out;
70
+ }
71
+
72
+ v = qobject_input_visitor_new_flat_confused(bitmap_qdict, errp);
73
+ if (!v) {
74
+ goto out;
75
+ }
76
+
77
+ visit_type_BlockDirtyBitmap(v, NULL, &bmp_param, errp);
78
+ if (!bmp_param) {
79
+ goto out;
80
+ }
81
+
82
+ *bitmap = block_dirty_bitmap_lookup(bmp_param->node, bmp_param->name, NULL,
83
+ errp);
84
+ if (!*bitmap) {
85
+ goto out;
86
+ }
87
+
88
+ ret = true;
89
+
90
+out:
91
+ qapi_free_BlockDirtyBitmap(bmp_param);
92
+ visit_free(v);
93
+ qobject_unref(bitmap_qdict);
94
+
95
+ return ret;
96
+}
97
+
98
static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
99
Error **errp)
37
{
100
{
38
+ void *buf;
101
BDRVCopyBeforeWriteState *s = bs->opaque;
39
+
102
+ BdrvDirtyBitmap *bitmap = NULL;
40
if (s->cd_sector_size != 2048 && s->cd_sector_size != 2352) {
103
41
block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
104
bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
105
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
106
@@ -XXX,XX +XXX,XX @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
42
return -EINVAL;
107
return -EINVAL;
43
}
108
}
44
109
45
- s->iov.iov_base = (s->cd_sector_size == 2352) ?
110
+ if (!cbw_parse_bitmap_option(options, &bitmap, errp)) {
46
- s->io_buffer + 16 : s->io_buffer;
111
+ return -EINVAL;
47
-
112
+ }
48
- s->iov.iov_len = ATAPI_SECTOR_SIZE;
113
+
49
- qemu_iovec_init_external(&s->qiov, &s->iov, 1);
114
bs->total_sectors = bs->file->bs->total_sectors;
50
+ buf = (s->cd_sector_size == 2352) ? s->io_buffer + 16 : s->io_buffer;
115
bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED |
51
+ qemu_iovec_init_buf(&s->qiov, buf, ATAPI_SECTOR_SIZE);
116
(BDRV_REQ_FUA & bs->file->bs->supported_write_flags);
52
117
@@ -XXX,XX +XXX,XX @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
53
trace_cd_read_sector(s->lba);
118
((BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK) &
54
119
bs->file->bs->supported_zero_flags);
55
diff --git a/hw/ide/core.c b/hw/ide/core.c
120
56
index XXXXXXX..XXXXXXX 100644
121
- s->bcs = block_copy_state_new(bs->file, s->target, NULL, errp);
57
--- a/hw/ide/core.c
122
+ s->bcs = block_copy_state_new(bs->file, s->target, bitmap, errp);
58
+++ b/hw/ide/core.c
123
if (!s->bcs) {
59
@@ -XXX,XX +XXX,XX @@ static void ide_sector_read(IDEState *s)
124
error_prepend(errp, "Cannot create block-copy-state: ");
60
return;
125
return -EINVAL;
61
}
62
63
- s->iov.iov_base = s->io_buffer;
64
- s->iov.iov_len = n * BDRV_SECTOR_SIZE;
65
- qemu_iovec_init_external(&s->qiov, &s->iov, 1);
66
+ qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);
67
68
block_acct_start(blk_get_stats(s->blk), &s->acct,
69
n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
70
@@ -XXX,XX +XXX,XX @@ static void ide_sector_write(IDEState *s)
71
return;
72
}
73
74
- s->iov.iov_base = s->io_buffer;
75
- s->iov.iov_len = n * BDRV_SECTOR_SIZE;
76
- qemu_iovec_init_external(&s->qiov, &s->iov, 1);
77
+ qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);
78
79
block_acct_start(blk_get_stats(s->blk), &s->acct,
80
n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
81
--
126
--
82
2.20.1
127
2.34.1
83
84
diff view generated by jsdifflib
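
On the options side, the new member arrives at cbw_open() as a flattened sub-dict with "bitmap.node" and "bitmap.name" keys, which is what the qdict_extract_subqdict(options, ..., "bitmap.") call above picks apart. A hypothetical sketch of such an options dictionary (node and bitmap names are made up):

    /* Hypothetical options QDict for the filter; the dotted "bitmap.*"
     * keys are consumed by cbw_parse_bitmap_option() shown above. */
    QDict *options = qdict_new();
    qdict_put_str(options, "driver", "copy-before-write");
    qdict_put_str(options, "file", "source-node");
    qdict_put_str(options, "target", "target-node");
    qdict_put_str(options, "bitmap.node", "source-node");
    qdict_put_str(options, "bitmap.name", "bitmap0");

If the named bitmap cannot be looked up or merged, cbw_open() fails with -EINVAL, as in the hunk above.
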
1
From: Stefano Garzarella <sgarzare@redhat.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
This function is useful to fix the endianness of struct
3
Split block_copy_reset() out of block_copy_reset_unallocated() to be
4
virtio_blk_discard_write_zeroes headers.
4
used separately later.
5
5
6
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
7
Reviewed-by: Hanna Reitz <hreitz@redhat.com>
8
Message-id: 20190221103314.58500-9-sgarzare@redhat.com
8
Message-Id: <20220303194349.2304213-6-vsementsov@virtuozzo.com>
9
Message-Id: <20190221103314.58500-9-sgarzare@redhat.com>
9
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
10
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
11
---
10
---
12
tests/virtio-blk-test.c | 23 +++++++++++++++++------
11
include/block/block-copy.h | 1 +
13
1 file changed, 17 insertions(+), 6 deletions(-)
12
block/block-copy.c | 21 +++++++++++++--------
13
2 files changed, 14 insertions(+), 8 deletions(-)
14
14
15
diff --git a/tests/virtio-blk-test.c b/tests/virtio-blk-test.c
15
diff --git a/include/block/block-copy.h b/include/block/block-copy.h
16
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
17
--- a/tests/virtio-blk-test.c
17
--- a/include/block/block-copy.h
18
+++ b/tests/virtio-blk-test.c
18
+++ b/include/block/block-copy.h
19
@@ -XXX,XX +XXX,XX @@ typedef struct QVirtioBlkReq {
19
@@ -XXX,XX +XXX,XX @@ void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm);
20
uint8_t status;
20
21
} QVirtioBlkReq;
21
void block_copy_state_free(BlockCopyState *s);
22
22
23
+#ifdef HOST_WORDS_BIGENDIAN
23
+void block_copy_reset(BlockCopyState *s, int64_t offset, int64_t bytes);
24
+const bool host_is_big_endian = true;
24
int64_t block_copy_reset_unallocated(BlockCopyState *s,
25
+#else
25
int64_t offset, int64_t *count);
26
+const bool host_is_big_endian; /* false */
26
27
+#endif
27
diff --git a/block/block-copy.c b/block/block-copy.c
28
+
28
index XXXXXXX..XXXXXXX 100644
29
static char *drive_create(void)
29
--- a/block/block-copy.c
30
{
30
+++ b/block/block-copy.c
31
int fd, ret;
31
@@ -XXX,XX +XXX,XX @@ static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
32
@@ -XXX,XX +XXX,XX @@ static QVirtioPCIDevice *virtio_blk_pci_init(QPCIBus *bus, int slot)
33
34
static inline void virtio_blk_fix_request(QVirtioDevice *d, QVirtioBlkReq *req)
35
{
36
-#ifdef HOST_WORDS_BIGENDIAN
37
- const bool host_is_big_endian = true;
38
-#else
39
- const bool host_is_big_endian = false;
40
-#endif
41
-
42
if (qvirtio_is_big_endian(d) != host_is_big_endian) {
43
req->type = bswap32(req->type);
44
req->ioprio = bswap32(req->ioprio);
45
@@ -XXX,XX +XXX,XX @@ static inline void virtio_blk_fix_request(QVirtioDevice *d, QVirtioBlkReq *req)
46
}
32
}
47
}
33
}
48
34
35
+void block_copy_reset(BlockCopyState *s, int64_t offset, int64_t bytes)
36
+{
37
+ QEMU_LOCK_GUARD(&s->lock);
49
+
38
+
50
+static inline void virtio_blk_fix_dwz_hdr(QVirtioDevice *d,
39
+ bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
51
+ struct virtio_blk_discard_write_zeroes *dwz_hdr)
40
+ if (s->progress) {
52
+{
41
+ progress_set_remaining(s->progress,
53
+ if (qvirtio_is_big_endian(d) != host_is_big_endian) {
42
+ bdrv_get_dirty_count(s->copy_bitmap) +
54
+ dwz_hdr->sector = bswap64(dwz_hdr->sector);
43
+ s->in_flight_bytes);
55
+ dwz_hdr->num_sectors = bswap32(dwz_hdr->num_sectors);
56
+ dwz_hdr->flags = bswap32(dwz_hdr->flags);
57
+ }
44
+ }
58
+}
45
+}
59
+
46
+
60
static uint64_t virtio_blk_request(QGuestAllocator *alloc, QVirtioDevice *d,
47
/*
61
QVirtioBlkReq *req, uint64_t data_size)
48
* Reset bits in copy_bitmap starting at offset if they represent unallocated
62
{
49
* data in the image. May reset subsequent contiguous bits.
50
@@ -XXX,XX +XXX,XX @@ int64_t block_copy_reset_unallocated(BlockCopyState *s,
51
bytes = clusters * s->cluster_size;
52
53
if (!ret) {
54
- qemu_co_mutex_lock(&s->lock);
55
- bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
56
- if (s->progress) {
57
- progress_set_remaining(s->progress,
58
- bdrv_get_dirty_count(s->copy_bitmap) +
59
- s->in_flight_bytes);
60
- }
61
- qemu_co_mutex_unlock(&s->lock);
62
+ block_copy_reset(s, offset, bytes);
63
}
64
65
*count = bytes;
63
--
66
--
64
2.20.1
67
2.34.1
65
66
diff view generated by jsdifflib
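
The new virtio_blk_fix_dwz_hdr() helper in the test byte-swaps the discard/write-zeroes header only when guest and host endianness differ. A hedged sketch of how a test case is expected to use it; the QVirtioDevice pointer and the request submission come from the existing test plumbing and are assumed here:

    /* Sketch: prepare a DISCARD/WRITE_ZEROES header for the device under
     * test; 'dev' is the QVirtioDevice from the test fixture. */
    struct virtio_blk_discard_write_zeroes dwz_hdr = {
        .sector = 0,
        .num_sectors = 1,
        .flags = 0,
    };

    virtio_blk_fix_dwz_hdr(dev, &dwz_hdr); /* swaps fields only if needed */
    /* ...the header is then written into the request buffer as usual... */
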
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
Use new qemu_iovec_init_buf() instead of
3
Split intersecting-requests functionality out of block-copy to be
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
4
reused in the copy-before-write filter.
5
6
Note: while at it, fix a tiny typo in MAINTAINERS.
5
7
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
8
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
9
Reviewed-by: Hanna Reitz <hreitz@redhat.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
10
Message-Id: <20220303194349.2304213-7-vsementsov@virtuozzo.com>
9
Message-id: 20190218140926.333779-9-vsementsov@virtuozzo.com
11
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
10
Message-Id: <20190218140926.333779-9-vsementsov@virtuozzo.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
12
---
13
block/qcow.c | 21 ++++-----------------
13
include/block/reqlist.h | 67 +++++++++++++++++++++++
14
1 file changed, 4 insertions(+), 17 deletions(-)
14
block/block-copy.c | 116 +++++++++++++---------------------------
15
block/reqlist.c | 76 ++++++++++++++++++++++++++
16
MAINTAINERS | 4 +-
17
block/meson.build | 1 +
18
5 files changed, 184 insertions(+), 80 deletions(-)
19
create mode 100644 include/block/reqlist.h
20
create mode 100644 block/reqlist.c
15
21
16
diff --git a/block/qcow.c b/block/qcow.c
22
diff --git a/include/block/reqlist.h b/include/block/reqlist.h
23
new file mode 100644
24
index XXXXXXX..XXXXXXX
25
--- /dev/null
26
+++ b/include/block/reqlist.h
27
@@ -XXX,XX +XXX,XX @@
28
+/*
29
+ * reqlist API
30
+ *
31
+ * Copyright (C) 2013 Proxmox Server Solutions
32
+ * Copyright (c) 2021 Virtuozzo International GmbH.
33
+ *
34
+ * Authors:
35
+ * Dietmar Maurer (dietmar@proxmox.com)
36
+ * Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
37
+ *
38
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
39
+ * See the COPYING file in the top-level directory.
40
+ */
41
+
42
+#ifndef REQLIST_H
43
+#define REQLIST_H
44
+
45
+#include "qemu/coroutine.h"
46
+
47
+/*
48
+ * The API is not thread-safe and shouldn't be. The struct is public to be part
49
+ * of other structures and protected by third-party locks, see
50
+ * block/block-copy.c for example.
51
+ */
52
+
53
+typedef struct BlockReq {
54
+ int64_t offset;
55
+ int64_t bytes;
56
+
57
+ CoQueue wait_queue; /* coroutines blocked on this req */
58
+ QLIST_ENTRY(BlockReq) list;
59
+} BlockReq;
60
+
61
+typedef QLIST_HEAD(, BlockReq) BlockReqList;
62
+
63
+/*
64
+ * Initialize new request and add it to the list. Caller must be sure that
65
+ * there are no conflicting requests in the list.
66
+ */
67
+void reqlist_init_req(BlockReqList *reqs, BlockReq *req, int64_t offset,
68
+ int64_t bytes);
69
+/* Search for request in the list intersecting with @offset/@bytes area. */
70
+BlockReq *reqlist_find_conflict(BlockReqList *reqs, int64_t offset,
71
+ int64_t bytes);
72
+
73
+/*
74
+ * If there are no intersecting requests return false. Otherwise, wait for the
75
+ * first found intersecting request to finish and return true.
76
+ *
77
+ * @lock is passed to qemu_co_queue_wait()
78
+ * False return value proves that lock was released at no point.
79
+ */
80
+bool coroutine_fn reqlist_wait_one(BlockReqList *reqs, int64_t offset,
81
+ int64_t bytes, CoMutex *lock);
82
+
83
+/*
84
+ * Shrink request and wake all waiting coroutines (maybe some of them are not
85
+ * intersecting with shrunk request).
86
+ */
87
+void coroutine_fn reqlist_shrink_req(BlockReq *req, int64_t new_bytes);
88
+
89
+/*
90
+ * Remove request and wake all waiting coroutines. Do not release any memory.
91
+ */
92
+void coroutine_fn reqlist_remove_req(BlockReq *req);
93
+
94
+#endif /* REQLIST_H */
95
diff --git a/block/block-copy.c b/block/block-copy.c
17
index XXXXXXX..XXXXXXX 100644
96
index XXXXXXX..XXXXXXX 100644
18
--- a/block/qcow.c
97
--- a/block/block-copy.c
19
+++ b/block/qcow.c
98
+++ b/block/block-copy.c
20
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow_co_preadv(BlockDriverState *bs, uint64_t offset,
99
@@ -XXX,XX +XXX,XX @@
21
int offset_in_cluster;
100
#include "trace.h"
22
int ret = 0, n;
101
#include "qapi/error.h"
23
uint64_t cluster_offset;
102
#include "block/block-copy.h"
24
- struct iovec hd_iov;
103
+#include "block/reqlist.h"
25
QEMUIOVector hd_qiov;
104
#include "sysemu/block-backend.h"
26
uint8_t *buf;
105
#include "qemu/units.h"
27
void *orig_buf;
106
#include "qemu/coroutine.h"
28
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow_co_preadv(BlockDriverState *bs, uint64_t offset,
107
@@ -XXX,XX +XXX,XX @@ typedef struct BlockCopyTask {
29
if (!cluster_offset) {
108
*/
30
if (bs->backing) {
109
BlockCopyState *s;
31
/* read from the base image */
110
BlockCopyCallState *call_state;
32
- hd_iov.iov_base = (void *)buf;
111
- int64_t offset;
33
- hd_iov.iov_len = n;
112
/*
34
- qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
113
* @method can also be set again in the while loop of
35
+ qemu_iovec_init_buf(&hd_qiov, buf, n);
114
* block_copy_dirty_clusters(), but it is never accessed concurrently
36
qemu_co_mutex_unlock(&s->lock);
115
@@ -XXX,XX +XXX,XX @@ typedef struct BlockCopyTask {
37
/* qcow2 emits this on bs->file instead of bs->backing */
116
BlockCopyMethod method;
38
BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
117
39
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow_co_preadv(BlockDriverState *bs, uint64_t offset,
118
/*
40
ret = -EIO;
119
- * Fields whose state changes throughout the execution
41
break;
120
- * Protected by lock in BlockCopyState.
121
+ * Generally, req is protected by lock in BlockCopyState, Still req.offset
122
+ * is only set on task creation, so may be read concurrently after creation.
123
+ * req.bytes is changed at most once, and need only protecting the case of
124
+ * parallel read while updating @bytes value in block_copy_task_shrink().
125
*/
126
- CoQueue wait_queue; /* coroutines blocked on this task */
127
- /*
128
- * Only protect the case of parallel read while updating @bytes
129
- * value in block_copy_task_shrink().
130
- */
131
- int64_t bytes;
132
- QLIST_ENTRY(BlockCopyTask) list;
133
+ BlockReq req;
134
} BlockCopyTask;
135
136
static int64_t task_end(BlockCopyTask *task)
137
{
138
- return task->offset + task->bytes;
139
+ return task->req.offset + task->req.bytes;
140
}
141
142
typedef struct BlockCopyState {
143
@@ -XXX,XX +XXX,XX @@ typedef struct BlockCopyState {
144
CoMutex lock;
145
int64_t in_flight_bytes;
146
BlockCopyMethod method;
147
- QLIST_HEAD(, BlockCopyTask) tasks; /* All tasks from all block-copy calls */
148
+ BlockReqList reqs;
149
QLIST_HEAD(, BlockCopyCallState) calls;
150
/*
151
* skip_unallocated:
152
@@ -XXX,XX +XXX,XX @@ typedef struct BlockCopyState {
153
RateLimit rate_limit;
154
} BlockCopyState;
155
156
-/* Called with lock held */
157
-static BlockCopyTask *find_conflicting_task(BlockCopyState *s,
158
- int64_t offset, int64_t bytes)
159
-{
160
- BlockCopyTask *t;
161
-
162
- QLIST_FOREACH(t, &s->tasks, list) {
163
- if (offset + bytes > t->offset && offset < t->offset + t->bytes) {
164
- return t;
165
- }
166
- }
167
-
168
- return NULL;
169
-}
170
-
171
-/*
172
- * If there are no intersecting tasks return false. Otherwise, wait for the
173
- * first found intersecting tasks to finish and return true.
174
- *
175
- * Called with lock held. May temporary release the lock.
176
- * Return value of 0 proves that lock was NOT released.
177
- */
178
-static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
179
- int64_t bytes)
180
-{
181
- BlockCopyTask *task = find_conflicting_task(s, offset, bytes);
182
-
183
- if (!task) {
184
- return false;
185
- }
186
-
187
- qemu_co_queue_wait(&task->wait_queue, &s->lock);
188
-
189
- return true;
190
-}
191
-
192
/* Called with lock held */
193
static int64_t block_copy_chunk_size(BlockCopyState *s)
194
{
195
@@ -XXX,XX +XXX,XX @@ block_copy_task_create(BlockCopyState *s, BlockCopyCallState *call_state,
196
bytes = QEMU_ALIGN_UP(bytes, s->cluster_size);
197
198
/* region is dirty, so no existent tasks possible in it */
199
- assert(!find_conflicting_task(s, offset, bytes));
200
+ assert(!reqlist_find_conflict(&s->reqs, offset, bytes));
201
202
bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
203
s->in_flight_bytes += bytes;
204
@@ -XXX,XX +XXX,XX @@ block_copy_task_create(BlockCopyState *s, BlockCopyCallState *call_state,
205
.task.func = block_copy_task_entry,
206
.s = s,
207
.call_state = call_state,
208
- .offset = offset,
209
- .bytes = bytes,
210
.method = s->method,
211
};
212
- qemu_co_queue_init(&task->wait_queue);
213
- QLIST_INSERT_HEAD(&s->tasks, task, list);
214
+ reqlist_init_req(&s->reqs, &task->req, offset, bytes);
215
216
return task;
217
}
218
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn block_copy_task_shrink(BlockCopyTask *task,
219
int64_t new_bytes)
220
{
221
QEMU_LOCK_GUARD(&task->s->lock);
222
- if (new_bytes == task->bytes) {
223
+ if (new_bytes == task->req.bytes) {
224
return;
225
}
226
227
- assert(new_bytes > 0 && new_bytes < task->bytes);
228
+ assert(new_bytes > 0 && new_bytes < task->req.bytes);
229
230
- task->s->in_flight_bytes -= task->bytes - new_bytes;
231
+ task->s->in_flight_bytes -= task->req.bytes - new_bytes;
232
bdrv_set_dirty_bitmap(task->s->copy_bitmap,
233
- task->offset + new_bytes, task->bytes - new_bytes);
234
+ task->req.offset + new_bytes,
235
+ task->req.bytes - new_bytes);
236
237
- task->bytes = new_bytes;
238
- qemu_co_queue_restart_all(&task->wait_queue);
239
+ reqlist_shrink_req(&task->req, new_bytes);
240
}
241
242
static void coroutine_fn block_copy_task_end(BlockCopyTask *task, int ret)
243
{
244
QEMU_LOCK_GUARD(&task->s->lock);
245
- task->s->in_flight_bytes -= task->bytes;
246
+ task->s->in_flight_bytes -= task->req.bytes;
247
if (ret < 0) {
248
- bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->offset, task->bytes);
249
+ bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->req.offset,
250
+ task->req.bytes);
251
}
252
- QLIST_REMOVE(task, list);
253
if (task->s->progress) {
254
progress_set_remaining(task->s->progress,
255
bdrv_get_dirty_count(task->s->copy_bitmap) +
256
task->s->in_flight_bytes);
257
}
258
- qemu_co_queue_restart_all(&task->wait_queue);
259
+ reqlist_remove_req(&task->req);
260
}
261
262
void block_copy_state_free(BlockCopyState *s)
263
@@ -XXX,XX +XXX,XX @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
264
265
ratelimit_init(&s->rate_limit);
266
qemu_co_mutex_init(&s->lock);
267
- QLIST_INIT(&s->tasks);
268
+ QLIST_INIT(&s->reqs);
269
QLIST_INIT(&s->calls);
270
271
return s;
272
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
273
274
aio_task_pool_wait_slot(pool);
275
if (aio_task_pool_status(pool) < 0) {
276
- co_put_to_shres(task->s->mem, task->bytes);
277
+ co_put_to_shres(task->s->mem, task->req.bytes);
278
block_copy_task_end(task, -ECANCELED);
279
g_free(task);
280
return -ECANCELED;
281
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int block_copy_task_entry(AioTask *task)
282
BlockCopyMethod method = t->method;
283
int ret;
284
285
- ret = block_copy_do_copy(s, t->offset, t->bytes, &method, &error_is_read);
286
+ ret = block_copy_do_copy(s, t->req.offset, t->req.bytes, &method,
287
+ &error_is_read);
288
289
WITH_QEMU_LOCK_GUARD(&s->lock) {
290
if (s->method == t->method) {
291
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int block_copy_task_entry(AioTask *task)
292
t->call_state->error_is_read = error_is_read;
42
}
293
}
43
- hd_iov.iov_base = (void *)buf;
294
} else if (s->progress) {
44
- hd_iov.iov_len = n;
295
- progress_work_done(s->progress, t->bytes);
45
- qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
296
+ progress_work_done(s->progress, t->req.bytes);
46
+ qemu_iovec_init_buf(&hd_qiov, buf, n);
297
}
47
qemu_co_mutex_unlock(&s->lock);
298
}
48
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
299
- co_put_to_shres(s->mem, t->bytes);
49
ret = bdrv_co_preadv(bs->file, cluster_offset + offset_in_cluster,
300
+ co_put_to_shres(s->mem, t->req.bytes);
50
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow_co_pwritev(BlockDriverState *bs, uint64_t offset,
301
block_copy_task_end(t, ret);
51
int offset_in_cluster;
302
52
uint64_t cluster_offset;
303
return ret;
53
int ret = 0, n;
304
@@ -XXX,XX +XXX,XX @@ block_copy_dirty_clusters(BlockCopyCallState *call_state)
54
- struct iovec hd_iov;
305
trace_block_copy_skip_range(s, offset, bytes);
55
QEMUIOVector hd_qiov;
306
break;
56
uint8_t *buf;
307
}
57
void *orig_buf;
308
- if (task->offset > offset) {
58
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow_co_pwritev(BlockDriverState *bs, uint64_t offset,
309
- trace_block_copy_skip_range(s, offset, task->offset - offset);
310
+ if (task->req.offset > offset) {
311
+ trace_block_copy_skip_range(s, offset, task->req.offset - offset);
312
}
313
314
found_dirty = true;
315
316
- ret = block_copy_block_status(s, task->offset, task->bytes,
317
+ ret = block_copy_block_status(s, task->req.offset, task->req.bytes,
318
&status_bytes);
319
assert(ret >= 0); /* never fail */
320
- if (status_bytes < task->bytes) {
321
+ if (status_bytes < task->req.bytes) {
322
block_copy_task_shrink(task, status_bytes);
323
}
324
if (qatomic_read(&s->skip_unallocated) &&
325
!(ret & BDRV_BLOCK_ALLOCATED)) {
326
block_copy_task_end(task, 0);
327
- trace_block_copy_skip_range(s, task->offset, task->bytes);
328
+ trace_block_copy_skip_range(s, task->req.offset, task->req.bytes);
329
offset = task_end(task);
330
bytes = end - offset;
331
g_free(task);
332
@@ -XXX,XX +XXX,XX @@ block_copy_dirty_clusters(BlockCopyCallState *call_state)
59
}
333
}
60
}
334
}
61
335
62
- hd_iov.iov_base = (void *)buf;
336
- ratelimit_calculate_delay(&s->rate_limit, task->bytes);
63
- hd_iov.iov_len = n;
337
+ ratelimit_calculate_delay(&s->rate_limit, task->req.bytes);
64
- qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
338
65
+ qemu_iovec_init_buf(&hd_qiov, buf, n);
339
- trace_block_copy_process(s, task->offset);
66
qemu_co_mutex_unlock(&s->lock);
340
+ trace_block_copy_process(s, task->req.offset);
67
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
341
68
ret = bdrv_co_pwritev(bs->file, cluster_offset + offset_in_cluster,
342
- co_get_from_shres(s->mem, task->bytes);
69
@@ -XXX,XX +XXX,XX @@ qcow_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
343
+ co_get_from_shres(s->mem, task->req.bytes);
70
{
344
71
BDRVQcowState *s = bs->opaque;
345
offset = task_end(task);
72
QEMUIOVector hd_qiov;
346
bytes = end - offset;
73
- struct iovec iov;
347
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
74
z_stream strm;
348
* Check that there is no task we still need to
75
int ret, out_len;
349
* wait to complete
76
uint8_t *buf, *out_buf;
350
*/
77
@@ -XXX,XX +XXX,XX @@ qcow_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
351
- ret = block_copy_wait_one(s, call_state->offset,
78
}
352
- call_state->bytes);
79
cluster_offset &= s->cluster_offset_mask;
353
+ ret = reqlist_wait_one(&s->reqs, call_state->offset,
80
354
+ call_state->bytes, &s->lock);
81
- iov = (struct iovec) {
355
if (ret == 0) {
82
- .iov_base = out_buf,
356
/*
83
- .iov_len = out_len,
357
* No pending tasks, but check again the bitmap in this
84
- };
358
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
85
- qemu_iovec_init_external(&hd_qiov, &iov, 1);
359
* between this and the critical section in
86
+ qemu_iovec_init_buf(&hd_qiov, out_buf, out_len);
360
* block_copy_dirty_clusters().
87
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED);
361
*
88
ret = bdrv_co_pwritev(bs->file, cluster_offset, out_len, &hd_qiov, 0);
362
- * block_copy_wait_one return value 0 also means that it
89
if (ret < 0) {
363
+ * reqlist_wait_one return value 0 also means that it
364
* didn't release the lock. So, we are still in the same
365
* critical section, not interrupted by any concurrent
366
* access to state.
367
diff --git a/block/reqlist.c b/block/reqlist.c
368
new file mode 100644
369
index XXXXXXX..XXXXXXX
370
--- /dev/null
371
+++ b/block/reqlist.c
372
@@ -XXX,XX +XXX,XX @@
373
+/*
374
+ * reqlist API
375
+ *
376
+ * Copyright (C) 2013 Proxmox Server Solutions
377
+ * Copyright (c) 2021 Virtuozzo International GmbH.
378
+ *
379
+ * Authors:
380
+ * Dietmar Maurer (dietmar@proxmox.com)
381
+ * Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
382
+ *
383
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
384
+ * See the COPYING file in the top-level directory.
385
+ */
386
+
387
+#include "qemu/osdep.h"
388
+
389
+#include "block/reqlist.h"
390
+
391
+void reqlist_init_req(BlockReqList *reqs, BlockReq *req, int64_t offset,
392
+ int64_t bytes)
393
+{
394
+ assert(!reqlist_find_conflict(reqs, offset, bytes));
395
+
396
+ *req = (BlockReq) {
397
+ .offset = offset,
398
+ .bytes = bytes,
399
+ };
400
+ qemu_co_queue_init(&req->wait_queue);
401
+ QLIST_INSERT_HEAD(reqs, req, list);
402
+}
403
+
404
+BlockReq *reqlist_find_conflict(BlockReqList *reqs, int64_t offset,
405
+ int64_t bytes)
406
+{
407
+ BlockReq *r;
408
+
409
+ QLIST_FOREACH(r, reqs, list) {
410
+ if (offset + bytes > r->offset && offset < r->offset + r->bytes) {
411
+ return r;
412
+ }
413
+ }
414
+
415
+ return NULL;
416
+}
417
+
418
+bool coroutine_fn reqlist_wait_one(BlockReqList *reqs, int64_t offset,
419
+ int64_t bytes, CoMutex *lock)
420
+{
421
+ BlockReq *r = reqlist_find_conflict(reqs, offset, bytes);
422
+
423
+ if (!r) {
424
+ return false;
425
+ }
426
+
427
+ qemu_co_queue_wait(&r->wait_queue, lock);
428
+
429
+ return true;
430
+}
431
+
432
+void coroutine_fn reqlist_shrink_req(BlockReq *req, int64_t new_bytes)
433
+{
434
+ if (new_bytes == req->bytes) {
435
+ return;
436
+ }
437
+
438
+ assert(new_bytes > 0 && new_bytes < req->bytes);
439
+
440
+ req->bytes = new_bytes;
441
+ qemu_co_queue_restart_all(&req->wait_queue);
442
+}
443
+
444
+void coroutine_fn reqlist_remove_req(BlockReq *req)
445
+{
446
+ QLIST_REMOVE(req, list);
447
+ qemu_co_queue_restart_all(&req->wait_queue);
448
+}
449
diff --git a/MAINTAINERS b/MAINTAINERS
450
index XXXXXXX..XXXXXXX 100644
451
--- a/MAINTAINERS
452
+++ b/MAINTAINERS
453
@@ -XXX,XX +XXX,XX @@ F: block/stream.c
454
F: block/mirror.c
455
F: qapi/job.json
456
F: block/block-copy.c
457
-F: include/block/block-copy.c
458
+F: include/block/block-copy.h
459
+F: block/reqlist.c
460
+F: include/block/reqlist.h
461
F: block/copy-before-write.h
462
F: block/copy-before-write.c
463
F: include/block/aio_task.h
464
diff --git a/block/meson.build b/block/meson.build
465
index XXXXXXX..XXXXXXX 100644
466
--- a/block/meson.build
467
+++ b/block/meson.build
468
@@ -XXX,XX +XXX,XX @@ block_ss.add(files(
469
'qcow2.c',
470
'quorum.c',
471
'raw-format.c',
472
+ 'reqlist.c',
473
'snapshot.c',
474
'throttle-groups.c',
475
'throttle.c',
90
--
476
--
91
2.20.1
477
2.34.1
92
93
diff view generated by jsdifflib
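
Taken together, the reqlist API added above is meant to be used in roughly the following way. This is a minimal sketch, not code from the series: the list, lock, offset and size are placeholders, and it must run in a coroutine because reqlist_wait_one() and reqlist_remove_req() are coroutine_fn.

    /* Minimal lifecycle sketch for the reqlist API (placeholder values). */
    BlockReqList reqs = QLIST_HEAD_INITIALIZER(reqs);
    CoMutex lock;
    BlockReq req;
    int64_t offset = 0, bytes = 65536;

    qemu_co_mutex_init(&lock);

    WITH_QEMU_LOCK_GUARD(&lock) {
        /* Wait until no in-flight request overlaps [offset, offset+bytes). */
        while (reqlist_wait_one(&reqs, offset, bytes, &lock)) {
            /* the lock was dropped while waiting, so re-check */
        }
        /* Now it is safe to register our own request. */
        reqlist_init_req(&reqs, &req, offset, bytes);
    }

    /* ... perform the guarded operation ... */

    WITH_QEMU_LOCK_GUARD(&lock) {
        reqlist_remove_req(&req); /* wakes any coroutines waiting on it */
    }
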
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
Use new QEMU_IOVEC_INIT_BUF() instead of
3
Let's reuse the convenient ranges_overlap() helper.
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
5
4
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
5
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
6
Reviewed-by: Hanna Reitz <hreitz@redhat.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
7
Message-Id: <20220303194349.2304213-8-vsementsov@virtuozzo.com>
9
Message-id: 20190218140926.333779-15-vsementsov@virtuozzo.com
8
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
10
Message-Id: <20190218140926.333779-15-vsementsov@virtuozzo.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
9
---
13
tests/test-bdrv-drain.c | 29 ++++-------------------------
10
block/reqlist.c | 3 ++-
14
1 file changed, 4 insertions(+), 25 deletions(-)
11
1 file changed, 2 insertions(+), 1 deletion(-)
15
12
16
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
13
diff --git a/block/reqlist.c b/block/reqlist.c
17
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
18
--- a/tests/test-bdrv-drain.c
15
--- a/block/reqlist.c
19
+++ b/tests/test-bdrv-drain.c
16
+++ b/block/reqlist.c
20
@@ -XXX,XX +XXX,XX @@ static void test_drv_cb_common(enum drain_type drain_type, bool recursive)
17
@@ -XXX,XX +XXX,XX @@
21
BlockAIOCB *acb;
18
*/
22
int aio_ret;
19
23
20
#include "qemu/osdep.h"
24
- QEMUIOVector qiov;
21
+#include "qemu/range.h"
25
- struct iovec iov = {
22
26
- .iov_base = NULL,
23
#include "block/reqlist.h"
27
- .iov_len = 0,
24
28
- };
25
@@ -XXX,XX +XXX,XX @@ BlockReq *reqlist_find_conflict(BlockReqList *reqs, int64_t offset,
29
- qemu_iovec_init_external(&qiov, &iov, 1);
26
BlockReq *r;
30
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);
27
31
28
QLIST_FOREACH(r, reqs, list) {
32
blk = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
29
- if (offset + bytes > r->offset && offset < r->offset + r->bytes) {
33
bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
30
+ if (ranges_overlap(offset, bytes, r->offset, r->bytes)) {
34
@@ -XXX,XX +XXX,XX @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread)
31
return r;
35
AioContext *ctx_a = iothread_get_aio_context(a);
32
}
36
AioContext *ctx_b = iothread_get_aio_context(b);
33
}
37
38
- QEMUIOVector qiov;
39
- struct iovec iov = {
40
- .iov_base = NULL,
41
- .iov_len = 0,
42
- };
43
- qemu_iovec_init_external(&qiov, &iov, 1);
44
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);
45
46
/* bdrv_drain_all() may only be called from the main loop thread */
47
if (drain_type == BDRV_DRAIN_ALL && drain_thread != 0) {
48
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn test_co_delete_by_drain(void *opaque)
49
BlockDriverState *bs = blk_bs(blk);
50
BDRVTestTopState *tts = bs->opaque;
51
void *buffer = g_malloc(65536);
52
- QEMUIOVector qiov;
53
- struct iovec iov = {
54
- .iov_base = buffer,
55
- .iov_len = 65536,
56
- };
57
-
58
- qemu_iovec_init_external(&qiov, &iov, 1);
59
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buffer, 65536);
60
61
/* Pretend some internal write operation from parent to child.
62
* Important: We have to read from the child, not from the parent!
63
@@ -XXX,XX +XXX,XX @@ static void test_detach_indirect(bool by_parent_cb)
64
BdrvChild *child_a, *child_b;
65
BlockAIOCB *acb;
66
67
- QEMUIOVector qiov;
68
- struct iovec iov = {
69
- .iov_base = NULL,
70
- .iov_len = 0,
71
- };
72
- qemu_iovec_init_external(&qiov, &iov, 1);
73
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);
74
75
if (!by_parent_cb) {
76
detach_by_driver_cb_role = child_file;
77
--
34
--
78
2.20.1
35
2.34.1
79
80
diff view generated by jsdifflib
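
The ranges_overlap() change is a straightforward cleanup: the helper from "qemu/range.h" computes the same half-open interval test as the open-coded expression it replaces. A small sketch with placeholder values:

    static void overlap_equivalence_sketch(void)
    {
        int64_t req_offset = 4096, req_bytes = 8192; /* existing request */
        int64_t offset = 0, bytes = 65536;           /* range to check   */

        bool manual = offset + bytes > req_offset &&
                      offset < req_offset + req_bytes;
        bool helper = ranges_overlap(offset, bytes, req_offset, req_bytes);

        assert(manual == helper); /* same test, the helper names the intent */
    }
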
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
Use new QEMU_IOVEC_INIT_BUF() instead of
3
Add a convenient function, similar to bdrv_block_status(), to get
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
4
the status of a dirty bitmap.
5
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
7
Reviewed-by: Hanna Reitz <hreitz@redhat.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
8
Message-Id: <20220303194349.2304213-9-vsementsov@virtuozzo.com>
9
Message-id: 20190218140926.333779-4-vsementsov@virtuozzo.com
9
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
10
Message-Id: <20190218140926.333779-4-vsementsov@virtuozzo.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
10
---
13
block/block-backend.c | 13 ++-----------
11
include/block/dirty-bitmap.h | 2 ++
14
1 file changed, 2 insertions(+), 11 deletions(-)
12
include/qemu/hbitmap.h | 12 ++++++++++++
13
block/dirty-bitmap.c | 6 ++++++
14
util/hbitmap.c | 33 +++++++++++++++++++++++++++++++++
15
4 files changed, 53 insertions(+)
15
16
16
diff --git a/block/block-backend.c b/block/block-backend.c
17
diff --git a/include/block/dirty-bitmap.h b/include/block/dirty-bitmap.h
17
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
18
--- a/block/block-backend.c
19
--- a/include/block/dirty-bitmap.h
19
+++ b/block/block-backend.c
20
+++ b/include/block/dirty-bitmap.h
20
@@ -XXX,XX +XXX,XX @@ static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
21
@@ -XXX,XX +XXX,XX @@ int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, int64_t offset,
21
int64_t bytes, CoroutineEntry co_entry,
22
bool bdrv_dirty_bitmap_next_dirty_area(BdrvDirtyBitmap *bitmap,
22
BdrvRequestFlags flags)
23
int64_t start, int64_t end, int64_t max_dirty_count,
24
int64_t *dirty_start, int64_t *dirty_count);
25
+bool bdrv_dirty_bitmap_status(BdrvDirtyBitmap *bitmap, int64_t offset,
26
+ int64_t bytes, int64_t *count);
27
BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap,
28
Error **errp);
29
30
diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h
31
index XXXXXXX..XXXXXXX 100644
32
--- a/include/qemu/hbitmap.h
33
+++ b/include/qemu/hbitmap.h
34
@@ -XXX,XX +XXX,XX @@ bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t start, int64_t end,
35
int64_t max_dirty_count,
36
int64_t *dirty_start, int64_t *dirty_count);
37
38
+/*
39
+ * bdrv_dirty_bitmap_status:
40
+ * @hb: The HBitmap to operate on
41
+ * @start: The bit to start from
42
+ * @count: Number of bits to proceed
43
+ * @pnum: Out-parameter. How many bits has same value starting from @start
44
+ *
45
+ * Returns true if bitmap is dirty at @start, false otherwise.
46
+ */
47
+bool hbitmap_status(const HBitmap *hb, int64_t start, int64_t count,
48
+ int64_t *pnum);
49
+
50
/**
51
* hbitmap_iter_next:
52
* @hbi: HBitmapIter to operate on.
53
diff --git a/block/dirty-bitmap.c b/block/dirty-bitmap.c
54
index XXXXXXX..XXXXXXX 100644
55
--- a/block/dirty-bitmap.c
56
+++ b/block/dirty-bitmap.c
57
@@ -XXX,XX +XXX,XX @@ bool bdrv_dirty_bitmap_next_dirty_area(BdrvDirtyBitmap *bitmap,
58
dirty_start, dirty_count);
59
}
60
61
+bool bdrv_dirty_bitmap_status(BdrvDirtyBitmap *bitmap, int64_t offset,
62
+ int64_t bytes, int64_t *count)
63
+{
64
+ return hbitmap_status(bitmap->bitmap, offset, bytes, count);
65
+}
66
+
67
/**
68
* bdrv_merge_dirty_bitmap: merge src into dest.
69
* Ensures permissions on bitmaps are reasonable; use for public API.
70
diff --git a/util/hbitmap.c b/util/hbitmap.c
71
index XXXXXXX..XXXXXXX 100644
72
--- a/util/hbitmap.c
73
+++ b/util/hbitmap.c
74
@@ -XXX,XX +XXX,XX @@ bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t start, int64_t end,
75
return true;
76
}
77
78
+bool hbitmap_status(const HBitmap *hb, int64_t start, int64_t count,
79
+ int64_t *pnum)
80
+{
81
+ int64_t next_dirty, next_zero;
82
+
83
+ assert(start >= 0);
84
+ assert(count > 0);
85
+ assert(start + count <= hb->orig_size);
86
+
87
+ next_dirty = hbitmap_next_dirty(hb, start, count);
88
+ if (next_dirty == -1) {
89
+ *pnum = count;
90
+ return false;
91
+ }
92
+
93
+ if (next_dirty > start) {
94
+ *pnum = next_dirty - start;
95
+ return false;
96
+ }
97
+
98
+ assert(next_dirty == start);
99
+
100
+ next_zero = hbitmap_next_zero(hb, start, count);
101
+ if (next_zero == -1) {
102
+ *pnum = count;
103
+ return true;
104
+ }
105
+
106
+ assert(next_zero > start);
107
+ *pnum = next_zero - start;
108
+ return true;
109
+}
110
+
111
bool hbitmap_empty(const HBitmap *hb)
23
{
112
{
24
- QEMUIOVector qiov;
113
return hb->count == 0;
25
- struct iovec iov;
26
- BlkRwCo rwco;
27
-
28
- iov = (struct iovec) {
29
- .iov_base = buf,
30
- .iov_len = bytes,
31
- };
32
- qemu_iovec_init_external(&qiov, &iov, 1);
33
-
34
- rwco = (BlkRwCo) {
35
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
36
+ BlkRwCo rwco = {
37
.blk = blk,
38
.offset = offset,
39
.iobuf = &qiov,
40
--
114
--
41
2.20.1
115
2.34.1
42
43
diff view generated by jsdifflib
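
The new bdrv_dirty_bitmap_status() is designed for block-status-style loops that walk a bitmap in runs of uniform state. A hedged sketch; the bitmap is assumed to come from the caller and its length from bdrv_dirty_bitmap_size():

    /* Sketch: iterate a dirty bitmap run by run. */
    int64_t len = bdrv_dirty_bitmap_size(bitmap);
    int64_t offset = 0;

    while (offset < len) {
        int64_t count;
        bool dirty = bdrv_dirty_bitmap_status(bitmap, offset, len - offset,
                                              &count);

        if (dirty) {
            /* [offset, offset + count) is uniformly dirty */
        } else {
            /* [offset, offset + count) is uniformly clean */
        }
        offset += count;
    }
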
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
Use new qemu_iovec_init_buf() instead of
3
Add a function to wait for all intersecting requests.
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
4
It will be used in a later commit.
5
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
7
Reviewed-by: Nikita Lapshin <nikita.lapshin@virtuozzo.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
8
Reviewed-by: Hanna Reitz <hreitz@redhat.com>
9
Message-id: 20190218140926.333779-14-vsementsov@virtuozzo.com
9
Message-Id: <20220303194349.2304213-10-vsementsov@virtuozzo.com>
10
Message-Id: <20190218140926.333779-14-vsementsov@virtuozzo.com>
10
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
11
---
13
migration/block.c | 10 +++-------
12
include/block/reqlist.h | 8 ++++++++
14
1 file changed, 3 insertions(+), 7 deletions(-)
13
block/reqlist.c | 8 ++++++++
14
2 files changed, 16 insertions(+)
15
15
16
diff --git a/migration/block.c b/migration/block.c
16
diff --git a/include/block/reqlist.h b/include/block/reqlist.h
17
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
18
--- a/migration/block.c
18
--- a/include/block/reqlist.h
19
+++ b/migration/block.c
19
+++ b/include/block/reqlist.h
20
@@ -XXX,XX +XXX,XX @@ typedef struct BlkMigBlock {
20
@@ -XXX,XX +XXX,XX @@ BlockReq *reqlist_find_conflict(BlockReqList *reqs, int64_t offset,
21
BlkMigDevState *bmds;
21
bool coroutine_fn reqlist_wait_one(BlockReqList *reqs, int64_t offset,
22
int64_t sector;
22
int64_t bytes, CoMutex *lock);
23
int nr_sectors;
23
24
- struct iovec iov;
24
+/*
25
QEMUIOVector qiov;
25
+ * Wait for all intersecting requests. It just calls reqlist_wait_one() in a
26
BlockAIOCB *aiocb;
26
+ * loop, caller is responsible to stop producing new requests in this region
27
27
+ * in parallel, otherwise reqlist_wait_all() may never return.
28
@@ -XXX,XX +XXX,XX @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
28
+ */
29
blk->sector = cur_sector;
29
+void coroutine_fn reqlist_wait_all(BlockReqList *reqs, int64_t offset,
30
blk->nr_sectors = nr_sectors;
30
+ int64_t bytes, CoMutex *lock);
31
31
+
32
- blk->iov.iov_base = blk->buf;
32
/*
33
- blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
33
* Shrink request and wake all waiting coroutines (maybe some of them are not
34
- qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);
34
* intersecting with shrunk request).
35
+ qemu_iovec_init_buf(&blk->qiov, blk->buf, nr_sectors * BDRV_SECTOR_SIZE);
35
diff --git a/block/reqlist.c b/block/reqlist.c
36
36
index XXXXXXX..XXXXXXX 100644
37
blk_mig_lock();
37
--- a/block/reqlist.c
38
block_mig_state.submitted++;
38
+++ b/block/reqlist.c
39
@@ -XXX,XX +XXX,XX @@ static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
39
@@ -XXX,XX +XXX,XX @@ bool coroutine_fn reqlist_wait_one(BlockReqList *reqs, int64_t offset,
40
blk->nr_sectors = nr_sectors;
40
return true;
41
41
}
42
if (is_async) {
42
43
- blk->iov.iov_base = blk->buf;
43
+void coroutine_fn reqlist_wait_all(BlockReqList *reqs, int64_t offset,
44
- blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
44
+ int64_t bytes, CoMutex *lock)
45
- qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);
45
+{
46
+ qemu_iovec_init_buf(&blk->qiov, blk->buf,
46
+ while (reqlist_wait_one(reqs, offset, bytes, lock)) {
47
+ nr_sectors * BDRV_SECTOR_SIZE);
47
+ /* continue */
48
48
+ }
49
blk->aiocb = blk_aio_preadv(bmds->blk,
49
+}
50
sector * BDRV_SECTOR_SIZE,
50
+
51
void coroutine_fn reqlist_shrink_req(BlockReq *req, int64_t new_bytes)
52
{
53
if (new_bytes == req->bytes) {
51
--
54
--
52
2.20.1
55
2.34.1
53
54
diff view generated by jsdifflib
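
A minimal usage sketch for the new helper (BDRVExampleState and example_quiesce_region() are made up for illustration; the real user added later in this series is block/copy-before-write.c): a writer holds the CoMutex protecting the list and waits until no listed request intersects the range it is about to modify.

typedef struct BDRVExampleState {
    CoMutex lock;                   /* protects frozen_read_reqs */
    BlockReqList frozen_read_reqs;  /* in-flight reads that must not be disturbed */
} BDRVExampleState;

static void coroutine_fn example_quiesce_region(BDRVExampleState *s,
                                                int64_t offset, int64_t bytes)
{
    WITH_QEMU_LOCK_GUARD(&s->lock) {
        /* reqlist_wait_one() sleeps on a conflicting request's wait queue,
         * dropping @lock meanwhile; reqlist_wait_all() repeats that until
         * no conflict remains. */
        reqlist_wait_all(&s->frozen_read_reqs, offset, bytes, &s->lock);
    }
}
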
1
From: Stefano Garzarella <sgarzare@redhat.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
This patch adds support for the DISCARD and WRITE_ZEROES commands,
3
Add new block driver handlers and corresponding generic wrappers.
4
which were introduced in the virtio-blk protocol to achieve
4
It will be used to allow the copy-before-write filter to provide
5
better performance when using an SSD backend.
5
a rich fleecing interface in a further commit.
6
6
7
We support only one segment per request since multiple segments
7
In the future this approach may be used to allow reading qcow2 internal
8
are not widely used and there are no userspace APIs that allow
8
snapshots, for example to export them through NBD.
9
applications to submit multiple segments in a single call.
10
9
11
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
10
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
12
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
11
Reviewed-by: Hanna Reitz <hreitz@redhat.com>
13
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
12
Message-Id: <20220303194349.2304213-11-vsementsov@virtuozzo.com>
14
Message-id: 20190221103314.58500-7-sgarzare@redhat.com
13
[hreitz: Rebased on block GS/IO split]
15
Message-Id: <20190221103314.58500-7-sgarzare@redhat.com>
14
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
16
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
17
---
15
---
18
include/hw/virtio/virtio-blk.h | 2 +
16
include/block/block_int-common.h | 18 ++++++++
19
hw/block/virtio-blk.c | 184 +++++++++++++++++++++++++++++++++
17
include/block/block_int-io.h | 9 ++++
20
2 files changed, 186 insertions(+)
18
block/io.c | 72 ++++++++++++++++++++++++++++++++
19
3 files changed, 99 insertions(+)
21
20
22
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
21
diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h
23
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
24
--- a/include/hw/virtio/virtio-blk.h
23
--- a/include/block/block_int-common.h
25
+++ b/include/hw/virtio/virtio-blk.h
24
+++ b/include/block/block_int-common.h
26
@@ -XXX,XX +XXX,XX @@ struct VirtIOBlkConf
25
@@ -XXX,XX +XXX,XX @@ struct BlockDriver {
27
uint32_t request_merging;
26
bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum,
28
uint16_t num_queues;
27
int64_t *map, BlockDriverState **file);
29
uint16_t queue_size;
28
30
+ uint32_t max_discard_sectors;
29
+ /*
31
+ uint32_t max_write_zeroes_sectors;
30
+ * Snapshot-access API.
32
};
31
+ *
33
32
+ * Block-driver may provide snapshot-access API: special functions to access
34
struct VirtIOBlockDataPlane;
33
+ * some internal "snapshot". The functions are similar with normal
35
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
34
+ * read/block_status/discard handler, but don't have any specific handling
35
+ * in generic block-layer: no serializing, no alignment, no tracked
36
+ * requests. So, block-driver that realizes these APIs is fully responsible
37
+ * for synchronization between snapshot-access API and normal IO requests.
38
+ */
39
+ int coroutine_fn (*bdrv_co_preadv_snapshot)(BlockDriverState *bs,
40
+ int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset);
41
+ int coroutine_fn (*bdrv_co_snapshot_block_status)(BlockDriverState *bs,
42
+ bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum,
43
+ int64_t *map, BlockDriverState **file);
44
+ int coroutine_fn (*bdrv_co_pdiscard_snapshot)(BlockDriverState *bs,
45
+ int64_t offset, int64_t bytes);
46
+
47
/*
48
* Invalidate any cached meta-data.
49
*/
50
diff --git a/include/block/block_int-io.h b/include/block/block_int-io.h
36
index XXXXXXX..XXXXXXX 100644
51
index XXXXXXX..XXXXXXX 100644
37
--- a/hw/block/virtio-blk.c
52
--- a/include/block/block_int-io.h
38
+++ b/hw/block/virtio-blk.c
53
+++ b/include/block/block_int-io.h
39
@@ -XXX,XX +XXX,XX @@ out:
54
@@ -XXX,XX +XXX,XX @@
40
aio_context_release(blk_get_aio_context(s->conf.conf.blk));
55
* the I/O API.
56
*/
57
58
+int coroutine_fn bdrv_co_preadv_snapshot(BdrvChild *child,
59
+ int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset);
60
+int coroutine_fn bdrv_co_snapshot_block_status(BlockDriverState *bs,
61
+ bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum,
62
+ int64_t *map, BlockDriverState **file);
63
+int coroutine_fn bdrv_co_pdiscard_snapshot(BlockDriverState *bs,
64
+ int64_t offset, int64_t bytes);
65
+
66
+
67
int coroutine_fn bdrv_co_preadv(BdrvChild *child,
68
int64_t offset, int64_t bytes, QEMUIOVector *qiov,
69
BdrvRequestFlags flags);
70
diff --git a/block/io.c b/block/io.c
71
index XXXXXXX..XXXXXXX 100644
72
--- a/block/io.c
73
+++ b/block/io.c
74
@@ -XXX,XX +XXX,XX @@ void bdrv_cancel_in_flight(BlockDriverState *bs)
75
bs->drv->bdrv_cancel_in_flight(bs);
76
}
41
}
77
}
42
78
+
43
+static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret)
79
+int coroutine_fn
80
+bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes,
81
+ QEMUIOVector *qiov, size_t qiov_offset)
44
+{
82
+{
45
+ VirtIOBlockReq *req = opaque;
83
+ BlockDriverState *bs = child->bs;
46
+ VirtIOBlock *s = req->dev;
84
+ BlockDriver *drv = bs->drv;
47
+ bool is_write_zeroes = (virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type) &
85
+ int ret;
48
+ ~VIRTIO_BLK_T_BARRIER) == VIRTIO_BLK_T_WRITE_ZEROES;
86
+ IO_CODE();
49
+
87
+
50
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
88
+ if (!drv) {
51
+ if (ret) {
89
+ return -ENOMEDIUM;
52
+ if (virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) {
53
+ goto out;
54
+ }
55
+ }
90
+ }
56
+
91
+
57
+ virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
92
+ if (!drv->bdrv_co_preadv_snapshot) {
58
+ if (is_write_zeroes) {
93
+ return -ENOTSUP;
59
+ block_acct_done(blk_get_stats(s->blk), &req->acct);
60
+ }
94
+ }
61
+ virtio_blk_free_request(req);
62
+
95
+
63
+out:
96
+ bdrv_inc_in_flight(bs);
64
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
97
+ ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset);
98
+ bdrv_dec_in_flight(bs);
99
+
100
+ return ret;
65
+}
101
+}
66
+
102
+
67
#ifdef __linux__
103
+int coroutine_fn
68
104
+bdrv_co_snapshot_block_status(BlockDriverState *bs,
69
typedef struct {
105
+ bool want_zero, int64_t offset, int64_t bytes,
70
@@ -XXX,XX +XXX,XX @@ static bool virtio_blk_sect_range_ok(VirtIOBlock *dev,
106
+ int64_t *pnum, int64_t *map,
71
return true;
107
+ BlockDriverState **file)
72
}
73
74
+static uint8_t virtio_blk_handle_discard_write_zeroes(VirtIOBlockReq *req,
75
+ struct virtio_blk_discard_write_zeroes *dwz_hdr, bool is_write_zeroes)
76
+{
108
+{
77
+ VirtIOBlock *s = req->dev;
109
+ BlockDriver *drv = bs->drv;
78
+ VirtIODevice *vdev = VIRTIO_DEVICE(s);
110
+ int ret;
79
+ uint64_t sector;
111
+ IO_CODE();
80
+ uint32_t num_sectors, flags, max_sectors;
81
+ uint8_t err_status;
82
+ int bytes;
83
+
112
+
84
+ sector = virtio_ldq_p(vdev, &dwz_hdr->sector);
113
+ if (!drv) {
85
+ num_sectors = virtio_ldl_p(vdev, &dwz_hdr->num_sectors);
114
+ return -ENOMEDIUM;
86
+ flags = virtio_ldl_p(vdev, &dwz_hdr->flags);
87
+ max_sectors = is_write_zeroes ? s->conf.max_write_zeroes_sectors :
88
+ s->conf.max_discard_sectors;
89
+
90
+ /*
91
+ * max_sectors is at most BDRV_REQUEST_MAX_SECTORS, this check
92
+ * makes sure that "num_sectors << BDRV_SECTOR_BITS" fits in
93
+ * the integer variable.
94
+ */
95
+ if (unlikely(num_sectors > max_sectors)) {
96
+ err_status = VIRTIO_BLK_S_IOERR;
97
+ goto err;
98
+ }
115
+ }
99
+
116
+
100
+ bytes = num_sectors << BDRV_SECTOR_BITS;
117
+ if (!drv->bdrv_co_snapshot_block_status) {
101
+
118
+ return -ENOTSUP;
102
+ if (unlikely(!virtio_blk_sect_range_ok(s, sector, bytes))) {
103
+ err_status = VIRTIO_BLK_S_IOERR;
104
+ goto err;
105
+ }
119
+ }
106
+
120
+
107
+ /*
121
+ bdrv_inc_in_flight(bs);
108
+ * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for discard
122
+ ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes,
109
+ * and write zeroes commands if any unknown flag is set.
123
+ pnum, map, file);
110
+ */
124
+ bdrv_dec_in_flight(bs);
111
+ if (unlikely(flags & ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
125
+
112
+ err_status = VIRTIO_BLK_S_UNSUPP;
126
+ return ret;
113
+ goto err;
127
+}
128
+
129
+int coroutine_fn
130
+bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes)
131
+{
132
+ BlockDriver *drv = bs->drv;
133
+ int ret;
134
+ IO_CODE();
135
+
136
+ if (!drv) {
137
+ return -ENOMEDIUM;
114
+ }
138
+ }
115
+
139
+
116
+ if (is_write_zeroes) { /* VIRTIO_BLK_T_WRITE_ZEROES */
140
+ if (!drv->bdrv_co_pdiscard_snapshot) {
117
+ int blk_aio_flags = 0;
141
+ return -ENOTSUP;
118
+
119
+ if (flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) {
120
+ blk_aio_flags |= BDRV_REQ_MAY_UNMAP;
121
+ }
122
+
123
+ block_acct_start(blk_get_stats(s->blk), &req->acct, bytes,
124
+ BLOCK_ACCT_WRITE);
125
+
126
+ blk_aio_pwrite_zeroes(s->blk, sector << BDRV_SECTOR_BITS,
127
+ bytes, blk_aio_flags,
128
+ virtio_blk_discard_write_zeroes_complete, req);
129
+ } else { /* VIRTIO_BLK_T_DISCARD */
130
+ /*
131
+ * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for
132
+ * discard commands if the unmap flag is set.
133
+ */
134
+ if (unlikely(flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
135
+ err_status = VIRTIO_BLK_S_UNSUPP;
136
+ goto err;
137
+ }
138
+
139
+ blk_aio_pdiscard(s->blk, sector << BDRV_SECTOR_BITS, bytes,
140
+ virtio_blk_discard_write_zeroes_complete, req);
141
+ }
142
+ }
142
+
143
+
143
+ return VIRTIO_BLK_S_OK;
144
+ bdrv_inc_in_flight(bs);
145
+ ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes);
146
+ bdrv_dec_in_flight(bs);
144
+
147
+
145
+err:
148
+ return ret;
146
+ if (is_write_zeroes) {
147
+ block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
148
+ }
149
+ return err_status;
150
+}
149
+}
151
+
152
static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
153
{
154
uint32_t type;
155
@@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
156
virtio_blk_free_request(req);
157
break;
158
}
159
+ /*
160
+ * VIRTIO_BLK_T_DISCARD and VIRTIO_BLK_T_WRITE_ZEROES are defined with
161
+ * VIRTIO_BLK_T_OUT flag set. We masked this flag in the switch statement,
162
+ * so we must mask it for these requests, then we will check if it is set.
163
+ */
164
+ case VIRTIO_BLK_T_DISCARD & ~VIRTIO_BLK_T_OUT:
165
+ case VIRTIO_BLK_T_WRITE_ZEROES & ~VIRTIO_BLK_T_OUT:
166
+ {
167
+ struct virtio_blk_discard_write_zeroes dwz_hdr;
168
+ size_t out_len = iov_size(out_iov, out_num);
169
+ bool is_write_zeroes = (type & ~VIRTIO_BLK_T_BARRIER) ==
170
+ VIRTIO_BLK_T_WRITE_ZEROES;
171
+ uint8_t err_status;
172
+
173
+ /*
174
+ * Unsupported if VIRTIO_BLK_T_OUT is not set or the request contains
175
+ * more than one segment.
176
+ */
177
+ if (unlikely(!(type & VIRTIO_BLK_T_OUT) ||
178
+ out_len > sizeof(dwz_hdr))) {
179
+ virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
180
+ virtio_blk_free_request(req);
181
+ return 0;
182
+ }
183
+
184
+ if (unlikely(iov_to_buf(out_iov, out_num, 0, &dwz_hdr,
185
+ sizeof(dwz_hdr)) != sizeof(dwz_hdr))) {
186
+ virtio_error(vdev, "virtio-blk discard/write_zeroes header"
187
+ " too short");
188
+ return -1;
189
+ }
190
+
191
+ err_status = virtio_blk_handle_discard_write_zeroes(req, &dwz_hdr,
192
+ is_write_zeroes);
193
+ if (err_status != VIRTIO_BLK_S_OK) {
194
+ virtio_blk_req_complete(req, err_status);
195
+ virtio_blk_free_request(req);
196
+ }
197
+
198
+ break;
199
+ }
200
default:
201
virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
202
virtio_blk_free_request(req);
203
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
204
blkcfg.alignment_offset = 0;
205
blkcfg.wce = blk_enable_write_cache(s->blk);
206
virtio_stw_p(vdev, &blkcfg.num_queues, s->conf.num_queues);
207
+ if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD)) {
208
+ virtio_stl_p(vdev, &blkcfg.max_discard_sectors,
209
+ s->conf.max_discard_sectors);
210
+ virtio_stl_p(vdev, &blkcfg.discard_sector_alignment,
211
+ blk_size >> BDRV_SECTOR_BITS);
212
+ /*
213
+ * We support only one segment per request since multiple segments
214
+ * are not widely used and there are no userspace APIs that allow
215
+ * applications to submit multiple segments in a single call.
216
+ */
217
+ virtio_stl_p(vdev, &blkcfg.max_discard_seg, 1);
218
+ }
219
+ if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES)) {
220
+ virtio_stl_p(vdev, &blkcfg.max_write_zeroes_sectors,
221
+ s->conf.max_write_zeroes_sectors);
222
+ blkcfg.write_zeroes_may_unmap = 1;
223
+ virtio_stl_p(vdev, &blkcfg.max_write_zeroes_seg, 1);
224
+ }
225
memcpy(config, &blkcfg, s->config_size);
226
}
227
228
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
229
return;
230
}
231
232
+ if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD) &&
233
+ (!conf->max_discard_sectors ||
234
+ conf->max_discard_sectors > BDRV_REQUEST_MAX_SECTORS)) {
235
+ error_setg(errp, "invalid max-discard-sectors property (%" PRIu32 ")"
236
+ ", must be between 1 and %d",
237
+ conf->max_discard_sectors, (int)BDRV_REQUEST_MAX_SECTORS);
238
+ return;
239
+ }
240
+
241
+ if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES) &&
242
+ (!conf->max_write_zeroes_sectors ||
243
+ conf->max_write_zeroes_sectors > BDRV_REQUEST_MAX_SECTORS)) {
244
+ error_setg(errp, "invalid max-write-zeroes-sectors property (%" PRIu32
245
+ "), must be between 1 and %d",
246
+ conf->max_write_zeroes_sectors,
247
+ (int)BDRV_REQUEST_MAX_SECTORS);
248
+ return;
249
+ }
250
+
251
virtio_blk_set_config_size(s, s->host_features);
252
253
virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK, s->config_size);
254
@@ -XXX,XX +XXX,XX @@ static Property virtio_blk_properties[] = {
255
VIRTIO_BLK_F_DISCARD, true),
256
DEFINE_PROP_BIT64("write-zeroes", VirtIOBlock, host_features,
257
VIRTIO_BLK_F_WRITE_ZEROES, true),
258
+ DEFINE_PROP_UINT32("max-discard-sectors", VirtIOBlock,
259
+ conf.max_discard_sectors, BDRV_REQUEST_MAX_SECTORS),
260
+ DEFINE_PROP_UINT32("max-write-zeroes-sectors", VirtIOBlock,
261
+ conf.max_write_zeroes_sectors, BDRV_REQUEST_MAX_SECTORS),
262
DEFINE_PROP_END_OF_LIST(),
263
};
264
265
--
150
--
266
2.20.1
151
2.34.1
267
268
diff view generated by jsdifflib
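
For reference, the single segment such a request carries is just the small header that virtio_blk_handle_discard_write_zeroes() parses; the sketch below mirrors the virtio_ldq_p()/virtio_ldl_p() accesses in that handler (the canonical definition lives in the standard virtio-blk headers, so take the layout here as illustrative only):

struct virtio_blk_discard_write_zeroes {
    uint64_t sector;      /* first sector to discard / write zeroes */
    uint32_t num_sectors; /* length in 512-byte sectors; must not exceed
                           * max-discard-sectors / max-write-zeroes-sectors */
    uint32_t flags;       /* only VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP is known;
                           * any other bit yields VIRTIO_BLK_S_UNSUPP */
};
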
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
Use new qemu_iovec_init_buf() instead of
3
The new block driver simply utilizes the snapshot-access API of the underlying
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
4
block node.
5
6
In further patches we want to use it like this:
7
8
[guest] [NBD export]
9
| |
10
| root | root
11
v file v
12
[copy-before-write]<------[snapshot-access]
13
| |
14
| file | target
15
v v
16
[active-disk] [temp.img]
17
18
This way, the NBD client will be able to read the snapshotted state of the active
19
disk while the active disk continues to be written by the guest. This is
20
known as "fleecing", and it currently uses another scheme based on a qcow2
21
temporary image whose backing file is the active disk. The new scheme comes
22
with benefits - see the next commit.
23
24
The other possible application is exporting internal snapshots of
25
qcow2, like this:
26
27
[guest] [NBD export]
28
| |
29
| root | root
30
v file v
31
[qcow2]<---------[snapshot-access]
32
33
For this, we'll need to implement snapshot-access API handlers in
34
the qcow2 driver, and improve the snapshot-access block driver (and API) to
35
make it possible to select a snapshot by name. Another thing to improve
36
is the size of the snapshot. For now, for simplicity, we just use the size of bs->file,
37
which is OK for backup, but for qcow2 snapshot export we'll need to
38
improve the snapshot-access API to get the size of the snapshot.
5
39
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
40
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
41
Message-Id: <20220303194349.2304213-12-vsementsov@virtuozzo.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
42
[hreitz: Rebased on block GS/IO split]
9
Message-id: 20190218140926.333779-13-vsementsov@virtuozzo.com
43
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
10
Message-Id: <20190218140926.333779-13-vsementsov@virtuozzo.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
44
---
13
qemu-img.c | 10 ++--------
45
qapi/block-core.json | 4 +-
14
1 file changed, 2 insertions(+), 8 deletions(-)
46
include/block/block_int-common.h | 6 ++
15
47
block/snapshot-access.c | 132 +++++++++++++++++++++++++++++++
16
diff --git a/qemu-img.c b/qemu-img.c
48
MAINTAINERS | 1 +
17
index XXXXXXX..XXXXXXX 100644
49
block/meson.build | 1 +
18
--- a/qemu-img.c
50
5 files changed, 143 insertions(+), 1 deletion(-)
19
+++ b/qemu-img.c
51
create mode 100644 block/snapshot-access.c
20
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn convert_co_read(ImgConvertState *s, int64_t sector_num,
52
21
{
53
diff --git a/qapi/block-core.json b/qapi/block-core.json
22
int n, ret;
54
index XXXXXXX..XXXXXXX 100644
23
QEMUIOVector qiov;
55
--- a/qapi/block-core.json
24
- struct iovec iov;
56
+++ b/qapi/block-core.json
25
57
@@ -XXX,XX +XXX,XX @@
26
assert(nb_sectors <= s->buf_sectors);
58
# @blkreplay: Since 4.2
27
while (nb_sectors > 0) {
59
# @compress: Since 5.0
28
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn convert_co_read(ImgConvertState *s, int64_t sector_num,
60
# @copy-before-write: Since 6.2
29
bs_sectors = s->src_sectors[src_cur];
61
+# @snapshot-access: Since 7.0
30
62
#
31
n = MIN(nb_sectors, bs_sectors - (sector_num - src_cur_offset));
63
# Since: 2.9
32
- iov.iov_base = buf;
64
##
33
- iov.iov_len = n << BDRV_SECTOR_BITS;
65
{ 'enum': 'BlockdevDriver',
34
- qemu_iovec_init_external(&qiov, &iov, 1);
66
'data': [ 'blkdebug', 'blklogwrites', 'blkreplay', 'blkverify', 'bochs',
35
+ qemu_iovec_init_buf(&qiov, buf, n << BDRV_SECTOR_BITS);
67
'cloop', 'compress', 'copy-before-write', 'copy-on-read', 'dmg',
36
68
- 'file', 'ftp', 'ftps', 'gluster',
37
ret = blk_co_preadv(
69
+ 'file', 'snapshot-access', 'ftp', 'ftps', 'gluster',
38
blk, (sector_num - src_cur_offset) << BDRV_SECTOR_BITS,
70
{'name': 'host_cdrom', 'if': 'HAVE_HOST_BLOCK_DEVICE' },
39
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn convert_co_write(ImgConvertState *s, int64_t sector_num,
71
{'name': 'host_device', 'if': 'HAVE_HOST_BLOCK_DEVICE' },
40
{
72
'http', 'https', 'iscsi',
41
int ret;
73
@@ -XXX,XX +XXX,XX @@
42
QEMUIOVector qiov;
74
'rbd': 'BlockdevOptionsRbd',
43
- struct iovec iov;
75
'replication': { 'type': 'BlockdevOptionsReplication',
44
76
'if': 'CONFIG_REPLICATION' },
45
while (nb_sectors > 0) {
77
+ 'snapshot-access': 'BlockdevOptionsGenericFormat',
46
int n = nb_sectors;
78
'ssh': 'BlockdevOptionsSsh',
47
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn convert_co_write(ImgConvertState *s, int64_t sector_num,
79
'throttle': 'BlockdevOptionsThrottle',
48
(s->compressed &&
80
'vdi': 'BlockdevOptionsGenericFormat',
49
!buffer_is_zero(buf, n * BDRV_SECTOR_SIZE)))
81
diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h
50
{
82
index XXXXXXX..XXXXXXX 100644
51
- iov.iov_base = buf;
83
--- a/include/block/block_int-common.h
52
- iov.iov_len = n << BDRV_SECTOR_BITS;
84
+++ b/include/block/block_int-common.h
53
- qemu_iovec_init_external(&qiov, &iov, 1);
85
@@ -XXX,XX +XXX,XX @@ struct BlockDriver {
54
+ qemu_iovec_init_buf(&qiov, buf, n << BDRV_SECTOR_BITS);
86
* in generic block-layer: no serializing, no alignment, no tracked
55
87
* requests. So, block-driver that realizes these APIs is fully responsible
56
ret = blk_co_pwritev(s->target, sector_num << BDRV_SECTOR_BITS,
88
* for synchronization between snapshot-access API and normal IO requests.
57
n << BDRV_SECTOR_BITS, &qiov, flags);
89
+ *
90
+ * TODO: To be able to support qcow2's internal snapshots, this API will
91
+ * need to be extended to:
92
+ * - be able to select a specific snapshot
93
+ * - receive the snapshot's actual length (which may differ from bs's
94
+ * length)
95
*/
96
int coroutine_fn (*bdrv_co_preadv_snapshot)(BlockDriverState *bs,
97
int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset);
98
diff --git a/block/snapshot-access.c b/block/snapshot-access.c
99
new file mode 100644
100
index XXXXXXX..XXXXXXX
101
--- /dev/null
102
+++ b/block/snapshot-access.c
103
@@ -XXX,XX +XXX,XX @@
104
+/*
105
+ * snapshot_access block driver
106
+ *
107
+ * Copyright (c) 2022 Virtuozzo International GmbH.
108
+ *
109
+ * Author:
110
+ * Sementsov-Ogievskiy Vladimir <vsementsov@virtuozzo.com>
111
+ *
112
+ * This program is free software; you can redistribute it and/or modify
113
+ * it under the terms of the GNU General Public License as published by
114
+ * the Free Software Foundation; either version 2 of the License, or
115
+ * (at your option) any later version.
116
+ *
117
+ * This program is distributed in the hope that it will be useful,
118
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
119
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
120
+ * GNU General Public License for more details.
121
+ *
122
+ * You should have received a copy of the GNU General Public License
123
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
124
+ */
125
+
126
+#include "qemu/osdep.h"
127
+
128
+#include "sysemu/block-backend.h"
129
+#include "qemu/cutils.h"
130
+#include "block/block_int.h"
131
+
132
+static coroutine_fn int
133
+snapshot_access_co_preadv_part(BlockDriverState *bs,
134
+ int64_t offset, int64_t bytes,
135
+ QEMUIOVector *qiov, size_t qiov_offset,
136
+ BdrvRequestFlags flags)
137
+{
138
+ if (flags) {
139
+ return -ENOTSUP;
140
+ }
141
+
142
+ return bdrv_co_preadv_snapshot(bs->file, offset, bytes, qiov, qiov_offset);
143
+}
144
+
145
+static int coroutine_fn
146
+snapshot_access_co_block_status(BlockDriverState *bs,
147
+ bool want_zero, int64_t offset,
148
+ int64_t bytes, int64_t *pnum,
149
+ int64_t *map, BlockDriverState **file)
150
+{
151
+ return bdrv_co_snapshot_block_status(bs->file->bs, want_zero, offset,
152
+ bytes, pnum, map, file);
153
+}
154
+
155
+static int coroutine_fn snapshot_access_co_pdiscard(BlockDriverState *bs,
156
+ int64_t offset, int64_t bytes)
157
+{
158
+ return bdrv_co_pdiscard_snapshot(bs->file->bs, offset, bytes);
159
+}
160
+
161
+static int coroutine_fn
162
+snapshot_access_co_pwrite_zeroes(BlockDriverState *bs,
163
+ int64_t offset, int64_t bytes,
164
+ BdrvRequestFlags flags)
165
+{
166
+ return -ENOTSUP;
167
+}
168
+
169
+static coroutine_fn int
170
+snapshot_access_co_pwritev_part(BlockDriverState *bs,
171
+ int64_t offset, int64_t bytes,
172
+ QEMUIOVector *qiov, size_t qiov_offset,
173
+ BdrvRequestFlags flags)
174
+{
175
+ return -ENOTSUP;
176
+}
177
+
178
+
179
+static void snapshot_access_refresh_filename(BlockDriverState *bs)
180
+{
181
+ pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
182
+ bs->file->bs->filename);
183
+}
184
+
185
+static int snapshot_access_open(BlockDriverState *bs, QDict *options, int flags,
186
+ Error **errp)
187
+{
188
+ bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
189
+ BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY,
190
+ false, errp);
191
+ if (!bs->file) {
192
+ return -EINVAL;
193
+ }
194
+
195
+ bs->total_sectors = bs->file->bs->total_sectors;
196
+
197
+ return 0;
198
+}
199
+
200
+static void snapshot_access_child_perm(BlockDriverState *bs, BdrvChild *c,
201
+ BdrvChildRole role,
202
+ BlockReopenQueue *reopen_queue,
203
+ uint64_t perm, uint64_t shared,
204
+ uint64_t *nperm, uint64_t *nshared)
205
+{
206
+ /*
207
+ * Currently, we don't need any permissions. If bs->file provides
208
+ * snapshot-access API, we can use it.
209
+ */
210
+ *nperm = 0;
211
+ *nshared = BLK_PERM_ALL;
212
+}
213
+
214
+BlockDriver bdrv_snapshot_access_drv = {
215
+ .format_name = "snapshot-access",
216
+
217
+ .bdrv_open = snapshot_access_open,
218
+
219
+ .bdrv_co_preadv_part = snapshot_access_co_preadv_part,
220
+ .bdrv_co_pwritev_part = snapshot_access_co_pwritev_part,
221
+ .bdrv_co_pwrite_zeroes = snapshot_access_co_pwrite_zeroes,
222
+ .bdrv_co_pdiscard = snapshot_access_co_pdiscard,
223
+ .bdrv_co_block_status = snapshot_access_co_block_status,
224
+
225
+ .bdrv_refresh_filename = snapshot_access_refresh_filename,
226
+
227
+ .bdrv_child_perm = snapshot_access_child_perm,
228
+};
229
+
230
+static void snapshot_access_init(void)
231
+{
232
+ bdrv_register(&bdrv_snapshot_access_drv);
233
+}
234
+
235
+block_init(snapshot_access_init);
236
diff --git a/MAINTAINERS b/MAINTAINERS
237
index XXXXXXX..XXXXXXX 100644
238
--- a/MAINTAINERS
239
+++ b/MAINTAINERS
240
@@ -XXX,XX +XXX,XX @@ F: block/reqlist.c
241
F: include/block/reqlist.h
242
F: block/copy-before-write.h
243
F: block/copy-before-write.c
244
+F: block/snapshot-access.c
245
F: include/block/aio_task.h
246
F: block/aio_task.c
247
F: util/qemu-co-shared-resource.c
248
diff --git a/block/meson.build b/block/meson.build
249
index XXXXXXX..XXXXXXX 100644
250
--- a/block/meson.build
251
+++ b/block/meson.build
252
@@ -XXX,XX +XXX,XX @@ block_ss.add(files(
253
'raw-format.c',
254
'reqlist.c',
255
'snapshot.c',
256
+ 'snapshot-access.c',
257
'throttle-groups.c',
258
'throttle.c',
259
'vhdx-endian.c',
58
--
260
--
59
2.20.1
261
2.34.1
60
61
diff view generated by jsdifflib
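
To recap how the pieces connect (a sketch only; the example_* callbacks are placeholders, and in this series the handlers are actually implemented by the copy-before-write filter in a later patch): an underlying node becomes usable behind snapshot-access by filling in the three optional BlockDriver handlers introduced earlier.

static BlockDriver bdrv_example_snapshot_provider = {
    .format_name                   = "example-provider",
    /* regular open/read/write handlers omitted ... */

    /* snapshot-access forwards the requests it receives to these: */
    .bdrv_co_preadv_snapshot       = example_co_preadv_snapshot,
    .bdrv_co_snapshot_block_status = example_co_snapshot_block_status,
    .bdrv_co_pdiscard_snapshot     = example_co_pdiscard_snapshot,
};
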
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
Add a possibility of embedded iovec, for cases when we need only one
3
Current scheme of image fleecing looks like this:
4
local iov.
4
5
[guest] [NBD export]
6
| |
7
|root | root
8
v v
9
[copy-before-write] -----> [temp.qcow2]
10
| target |
11
|file |backing
12
v |
13
[active disk] <-------------+
14
15
- On guest writes, the copy-before-write filter copies old data from the active
16
disk to temp.qcow2. The fleecing client (NBD export) then reads
17
changed regions from the temp.qcow2 image and unchanged regions from the active disk
18
through the backing link.
19
20
This patch makes a new image fleecing scheme possible:
21
22
[guest] [NBD export]
23
| |
24
| root | root
25
v file v
26
[copy-before-write]<------[snapshot-access]
27
| |
28
| file | target
29
v v
30
[active-disk] [temp.img]
31
32
- copy-before-write does CBW operations and also provides
33
snapshot-access API. The API may be accessed through
34
snapshot-access driver.
35
36
Benefits of new scheme:
37
38
1. Access control: if the remote client tries to read data that is not covered
39
by the original dirty bitmap used at copy-before-write open, the client gets
40
-EACCES.
41
42
2. Discard support: if the remote client issues a DISCARD, this, in addition to
43
discarding the data in temp.img, informs the block-copy process not to copy
44
these clusters. The next read from a discarded area will return -EACCES.
45
This is significant: when the fleecing user reads data that was
46
not yet copied to temp.img, we can avoid copying it on further guest
47
write.
48
49
3. Synchronisation between client reads and block-copy writes is more
50
efficient. In the old scheme we just rely on the BDRV_REQ_SERIALISING flag
51
used for writes to temp.qcow2. New scheme is less blocking:
52
- fleecing reads are never blocked: if data region is untouched or
53
in-flight, we just read from active-disk, otherwise we read from
54
temp.img
55
- writes to temp.img are not blocked by fleecing reads
56
- still, guest writes are of course blocked by in-flight fleecing
57
reads, that currently read from active-disk - it's the minimum
58
necessary blocking
59
60
4. Temporary image may be of any format, as we don't rely on backing
61
feature.
62
63
5. Permission relations are simplified. With the old scheme we have to share
64
write permission on target child of copy-before-write, otherwise
65
backing link conflicts with copy-before-write file child write
66
permissions. With new scheme we don't have backing link, and
67
copy-before-write node may have unshared access to temporary node.
68
(Not realized in this commit, will be in future).
69
70
6. Having control over fleecing reads, we'll be able to implement
71
alternative behavior on failed copy-before-write operations.
72
Currently we just fail the guest request (that's the historical behavior
73
of backup). But in some scenarios it's a bad behavior: better
74
is to mark the backup as failed but not fail the guest request.
75
With new scheme we can simply unset some bits in a bitmap on CBW
76
failure, and further fleecing reads will return -EACCES, or something like
77
this. (Not implemented in this commit, will be in future)
78
Additional application for this is implementing timeout for CBW
79
operations.
80
81
Iotest 257 output is updated, as two more bitmaps now live in
82
copy-before-write filter.
5
83
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
84
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
85
Message-Id: <20220303194349.2304213-13-vsementsov@virtuozzo.com>
8
Message-id: 20190218140926.333779-2-vsementsov@virtuozzo.com
86
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
9
Message-Id: <20190218140926.333779-2-vsementsov@virtuozzo.com>
10
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
11
---
87
---
12
include/qemu/iov.h | 64 ++++++++++++++++++++++++++++++++++++++++++++--
88
block/copy-before-write.c | 212 ++++++++++++++++++++++++++++++++++-
13
1 file changed, 62 insertions(+), 2 deletions(-)
89
tests/qemu-iotests/257.out | 224 +++++++++++++++++++++++++++++++++++++
14
90
2 files changed, 435 insertions(+), 1 deletion(-)
15
diff --git a/include/qemu/iov.h b/include/qemu/iov.h
91
92
diff --git a/block/copy-before-write.c b/block/copy-before-write.c
16
index XXXXXXX..XXXXXXX 100644
93
index XXXXXXX..XXXXXXX 100644
17
--- a/include/qemu/iov.h
94
--- a/block/copy-before-write.c
18
+++ b/include/qemu/iov.h
95
+++ b/block/copy-before-write.c
19
@@ -XXX,XX +XXX,XX @@ size_t iov_discard_back(struct iovec *iov, unsigned int *iov_cnt,
96
@@ -XXX,XX +XXX,XX @@
20
typedef struct QEMUIOVector {
97
#include "block/block-copy.h"
21
struct iovec *iov;
98
22
int niov;
99
#include "block/copy-before-write.h"
23
- int nalloc;
100
+#include "block/reqlist.h"
24
- size_t size;
101
102
#include "qapi/qapi-visit-block-core.h"
103
104
typedef struct BDRVCopyBeforeWriteState {
105
BlockCopyState *bcs;
106
BdrvChild *target;
25
+
107
+
26
+ /*
108
+ /*
27
+ * For external @iov (qemu_iovec_init_external()) or allocated @iov
109
+ * @lock: protects access to @access_bitmap, @done_bitmap and
28
+ * (qemu_iovec_init()), @size is the cumulative size of iovecs and
110
+ * @frozen_read_reqs
29
+ * @local_iov is invalid and unused.
30
+ *
31
+ * For embedded @iov (QEMU_IOVEC_INIT_BUF() or qemu_iovec_init_buf()),
32
+ * @iov is equal to &@local_iov, and @size is valid, as it has same
33
+ * offset and type as @local_iov.iov_len, which is guaranteed by
34
+ * static assertion below.
35
+ *
36
+ * @nalloc is always valid and is -1 both for embedded and external
37
+ * cases. It is included in the union only to ensure the padding prior
38
+ * to the @size field will not result in a 0-length array.
39
+ */
111
+ */
40
+ union {
112
+ CoMutex lock;
41
+ struct {
113
+
42
+ int nalloc;
114
+ /*
43
+ struct iovec local_iov;
115
+ * @access_bitmap: represents areas allowed for reading by fleecing user.
44
+ };
116
+ * Reading from non-dirty areas leads to -EACCES.
45
+ struct {
117
+ */
46
+ char __pad[sizeof(int) + offsetof(struct iovec, iov_len)];
118
+ BdrvDirtyBitmap *access_bitmap;
47
+ size_t size;
119
+
48
+ };
120
+ /*
49
+ };
121
+ * @done_bitmap: represents areas that were successfully copied to @target by
50
} QEMUIOVector;
122
+ * copy-before-write operations.
51
123
+ */
52
+QEMU_BUILD_BUG_ON(offsetof(QEMUIOVector, size) !=
124
+ BdrvDirtyBitmap *done_bitmap;
53
+ offsetof(QEMUIOVector, local_iov.iov_len));
125
+
54
+
126
+ /*
55
+#define QEMU_IOVEC_INIT_BUF(self, buf, len) \
127
+ * @frozen_read_reqs: current read requests for fleecing user in bs->file
56
+{ \
128
+ * node. These areas must not be rewritten by guest.
57
+ .iov = &(self).local_iov, \
129
+ */
58
+ .niov = 1, \
130
+ BlockReqList frozen_read_reqs;
59
+ .nalloc = -1, \
131
} BDRVCopyBeforeWriteState;
60
+ .local_iov = { \
132
61
+ .iov_base = (void *)(buf), /* cast away const */ \
133
static coroutine_fn int cbw_co_preadv(
62
+ .iov_len = (len), \
134
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int cbw_co_preadv(
63
+ }, \
135
return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
136
}
137
138
+/*
139
+ * Do copy-before-write operation.
140
+ *
141
+ * On failure guest request must be failed too.
142
+ *
143
+ * On success, we also wait for all in-flight fleecing read requests in source
144
+ * node, and it's guaranteed that after cbw_do_copy_before_write() successful
145
+ * return there are no such requests and they will never appear.
146
+ */
147
static coroutine_fn int cbw_do_copy_before_write(BlockDriverState *bs,
148
uint64_t offset, uint64_t bytes, BdrvRequestFlags flags)
149
{
150
BDRVCopyBeforeWriteState *s = bs->opaque;
151
+ int ret;
152
uint64_t off, end;
153
int64_t cluster_size = block_copy_cluster_size(s->bcs);
154
155
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int cbw_do_copy_before_write(BlockDriverState *bs,
156
off = QEMU_ALIGN_DOWN(offset, cluster_size);
157
end = QEMU_ALIGN_UP(offset + bytes, cluster_size);
158
159
- return block_copy(s->bcs, off, end - off, true);
160
+ ret = block_copy(s->bcs, off, end - off, true);
161
+ if (ret < 0) {
162
+ return ret;
163
+ }
164
+
165
+ WITH_QEMU_LOCK_GUARD(&s->lock) {
166
+ bdrv_set_dirty_bitmap(s->done_bitmap, off, end - off);
167
+ reqlist_wait_all(&s->frozen_read_reqs, off, end - off, &s->lock);
168
+ }
169
+
170
+ return 0;
171
}
172
173
static int coroutine_fn cbw_co_pdiscard(BlockDriverState *bs,
174
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn cbw_co_flush(BlockDriverState *bs)
175
return bdrv_co_flush(bs->file->bs);
176
}
177
178
+/*
179
+ * If @offset is not accessible, return NULL.
180
+ *
181
+ * Otherwise, set @pnum to the number of bytes accessible from @file (@file is set
182
+ * to bs->file or to s->target). Return newly allocated BlockReq object that
183
+ * should then be passed to cbw_snapshot_read_unlock().
184
+ *
185
+ * It's guaranteed that guest writes will not interfere with the region until
186
+ * cbw_snapshot_read_unlock() is called.
187
+ */
188
+static BlockReq *cbw_snapshot_read_lock(BlockDriverState *bs,
189
+ int64_t offset, int64_t bytes,
190
+ int64_t *pnum, BdrvChild **file)
191
+{
192
+ BDRVCopyBeforeWriteState *s = bs->opaque;
193
+ BlockReq *req = g_new(BlockReq, 1);
194
+ bool done;
195
+
196
+ QEMU_LOCK_GUARD(&s->lock);
197
+
198
+ if (bdrv_dirty_bitmap_next_zero(s->access_bitmap, offset, bytes) != -1) {
199
+ g_free(req);
200
+ return NULL;
201
+ }
202
+
203
+ done = bdrv_dirty_bitmap_status(s->done_bitmap, offset, bytes, pnum);
204
+ if (done) {
205
+ /*
206
+ * Special invalid BlockReq, that is handled in
207
+ * cbw_snapshot_read_unlock(). We don't need to lock something to read
208
+ * from s->target.
209
+ */
210
+ *req = (BlockReq) {.offset = -1, .bytes = -1};
211
+ *file = s->target;
212
+ } else {
213
+ reqlist_init_req(&s->frozen_read_reqs, req, offset, bytes);
214
+ *file = bs->file;
215
+ }
216
+
217
+ return req;
64
+}
218
+}
65
+
219
+
66
+/*
220
+static void cbw_snapshot_read_unlock(BlockDriverState *bs, BlockReq *req)
67
+ * qemu_iovec_init_buf
68
+ *
69
+ * Initialize embedded QEMUIOVector.
70
+ *
71
+ * Note: "const" is used over @buf pointer to make it simple to pass
72
+ * const pointers, appearing in read functions. Then this "const" is
73
+ * cast away by QEMU_IOVEC_INIT_BUF().
74
+ */
75
+static inline void qemu_iovec_init_buf(QEMUIOVector *qiov,
76
+ const void *buf, size_t len)
77
+{
221
+{
78
+ *qiov = (QEMUIOVector) QEMU_IOVEC_INIT_BUF(*qiov, buf, len);
222
+ BDRVCopyBeforeWriteState *s = bs->opaque;
223
+
224
+ if (req->offset == -1 && req->bytes == -1) {
225
+ g_free(req);
226
+ return;
227
+ }
228
+
229
+ QEMU_LOCK_GUARD(&s->lock);
230
+
231
+ reqlist_remove_req(req);
232
+ g_free(req);
79
+}
233
+}
80
+
234
+
81
+static inline void *qemu_iovec_buf(QEMUIOVector *qiov)
235
+static coroutine_fn int
236
+cbw_co_preadv_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes,
237
+ QEMUIOVector *qiov, size_t qiov_offset)
82
+{
238
+{
83
+ /* Only supports embedded iov */
239
+ BlockReq *req;
84
+ assert(qiov->nalloc == -1 && qiov->iov == &qiov->local_iov);
240
+ BdrvChild *file;
85
+
241
+ int ret;
86
+ return qiov->local_iov.iov_base;
242
+
243
+ /* TODO: upgrade to async loop using AioTask */
244
+ while (bytes) {
245
+ int64_t cur_bytes;
246
+
247
+ req = cbw_snapshot_read_lock(bs, offset, bytes, &cur_bytes, &file);
248
+ if (!req) {
249
+ return -EACCES;
250
+ }
251
+
252
+ ret = bdrv_co_preadv_part(file, offset, cur_bytes,
253
+ qiov, qiov_offset, 0);
254
+ cbw_snapshot_read_unlock(bs, req);
255
+ if (ret < 0) {
256
+ return ret;
257
+ }
258
+
259
+ bytes -= cur_bytes;
260
+ offset += cur_bytes;
261
+ qiov_offset += cur_bytes;
262
+ }
263
+
264
+ return 0;
87
+}
265
+}
88
+
266
+
89
void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint);
267
+static int coroutine_fn
90
void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov);
268
+cbw_co_snapshot_block_status(BlockDriverState *bs,
91
void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len);
269
+ bool want_zero, int64_t offset, int64_t bytes,
270
+ int64_t *pnum, int64_t *map,
271
+ BlockDriverState **file)
272
+{
273
+ BDRVCopyBeforeWriteState *s = bs->opaque;
274
+ BlockReq *req;
275
+ int ret;
276
+ int64_t cur_bytes;
277
+ BdrvChild *child;
278
+
279
+ req = cbw_snapshot_read_lock(bs, offset, bytes, &cur_bytes, &child);
280
+ if (!req) {
281
+ return -EACCES;
282
+ }
283
+
284
+ ret = bdrv_block_status(child->bs, offset, cur_bytes, pnum, map, file);
285
+ if (child == s->target) {
286
+ /*
287
+ * We refer to s->target only for areas that we've written to it.
288
+ * And we can not report unallocated blocks in s->target: this will
289
+ * break generic block-status-above logic, that will go to
290
+ * copy-before-write filtered child in this case.
291
+ */
292
+ assert(ret & BDRV_BLOCK_ALLOCATED);
293
+ }
294
+
295
+ cbw_snapshot_read_unlock(bs, req);
296
+
297
+ return ret;
298
+}
299
+
300
+static int coroutine_fn cbw_co_pdiscard_snapshot(BlockDriverState *bs,
301
+ int64_t offset, int64_t bytes)
302
+{
303
+ BDRVCopyBeforeWriteState *s = bs->opaque;
304
+
305
+ WITH_QEMU_LOCK_GUARD(&s->lock) {
306
+ bdrv_reset_dirty_bitmap(s->access_bitmap, offset, bytes);
307
+ }
308
+
309
+ block_copy_reset(s->bcs, offset, bytes);
310
+
311
+ return bdrv_co_pdiscard(s->target, offset, bytes);
312
+}
313
+
314
static void cbw_refresh_filename(BlockDriverState *bs)
315
{
316
pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
317
@@ -XXX,XX +XXX,XX @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
318
{
319
BDRVCopyBeforeWriteState *s = bs->opaque;
320
BdrvDirtyBitmap *bitmap = NULL;
321
+ int64_t cluster_size;
322
323
bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
324
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
325
@@ -XXX,XX +XXX,XX @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
326
return -EINVAL;
327
}
328
329
+ cluster_size = block_copy_cluster_size(s->bcs);
330
+
331
+ s->done_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp);
332
+ if (!s->done_bitmap) {
333
+ return -EINVAL;
334
+ }
335
+ bdrv_disable_dirty_bitmap(s->done_bitmap);
336
+
337
+ /* s->access_bitmap starts equal to bcs bitmap */
338
+ s->access_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp);
339
+ if (!s->access_bitmap) {
340
+ return -EINVAL;
341
+ }
342
+ bdrv_disable_dirty_bitmap(s->access_bitmap);
343
+ bdrv_dirty_bitmap_merge_internal(s->access_bitmap,
344
+ block_copy_dirty_bitmap(s->bcs), NULL,
345
+ true);
346
+
347
+ qemu_co_mutex_init(&s->lock);
348
+ QLIST_INIT(&s->frozen_read_reqs);
349
+
350
return 0;
351
}
352
353
@@ -XXX,XX +XXX,XX @@ static void cbw_close(BlockDriverState *bs)
354
{
355
BDRVCopyBeforeWriteState *s = bs->opaque;
356
357
+ bdrv_release_dirty_bitmap(s->access_bitmap);
358
+ bdrv_release_dirty_bitmap(s->done_bitmap);
359
+
360
block_copy_state_free(s->bcs);
361
s->bcs = NULL;
362
}
363
@@ -XXX,XX +XXX,XX @@ BlockDriver bdrv_cbw_filter = {
364
.bdrv_co_pdiscard = cbw_co_pdiscard,
365
.bdrv_co_flush = cbw_co_flush,
366
367
+ .bdrv_co_preadv_snapshot = cbw_co_preadv_snapshot,
368
+ .bdrv_co_pdiscard_snapshot = cbw_co_pdiscard_snapshot,
369
+ .bdrv_co_snapshot_block_status = cbw_co_snapshot_block_status,
370
+
371
.bdrv_refresh_filename = cbw_refresh_filename,
372
373
.bdrv_child_perm = cbw_child_perm,
374
diff --git a/tests/qemu-iotests/257.out b/tests/qemu-iotests/257.out
375
index XXXXXXX..XXXXXXX 100644
376
--- a/tests/qemu-iotests/257.out
377
+++ b/tests/qemu-iotests/257.out
378
@@ -XXX,XX +XXX,XX @@ write -P0x67 0x3fe0000 0x20000
379
{"return": ""}
380
{
381
"bitmaps": {
382
+ "backup-top": [
383
+ {
384
+ "busy": false,
385
+ "count": 67108864,
386
+ "granularity": 65536,
387
+ "persistent": false,
388
+ "recording": false
389
+ },
390
+ {
391
+ "busy": false,
392
+ "count": 458752,
393
+ "granularity": 65536,
394
+ "persistent": false,
395
+ "recording": false
396
+ }
397
+ ],
398
"drive0": [
399
{
400
"busy": false,
401
@@ -XXX,XX +XXX,XX @@ write -P0x67 0x3fe0000 0x20000
402
{"return": ""}
403
{
404
"bitmaps": {
405
+ "backup-top": [
406
+ {
407
+ "busy": false,
408
+ "count": 67108864,
409
+ "granularity": 65536,
410
+ "persistent": false,
411
+ "recording": false
412
+ },
413
+ {
414
+ "busy": false,
415
+ "count": 458752,
416
+ "granularity": 65536,
417
+ "persistent": false,
418
+ "recording": false
419
+ }
420
+ ],
421
"drive0": [
422
{
423
"busy": false,
424
@@ -XXX,XX +XXX,XX @@ write -P0x67 0x3fe0000 0x20000
425
{"return": ""}
426
{
427
"bitmaps": {
428
+ "backup-top": [
429
+ {
430
+ "busy": false,
431
+ "count": 67108864,
432
+ "granularity": 65536,
433
+ "persistent": false,
434
+ "recording": false
435
+ },
436
+ {
437
+ "busy": false,
438
+ "count": 458752,
439
+ "granularity": 65536,
440
+ "persistent": false,
441
+ "recording": false
442
+ }
443
+ ],
444
"drive0": [
445
{
446
"busy": false,
447
@@ -XXX,XX +XXX,XX @@ write -P0x67 0x3fe0000 0x20000
448
{"return": ""}
449
{
450
"bitmaps": {
451
+ "backup-top": [
452
+ {
453
+ "busy": false,
454
+ "count": 67108864,
455
+ "granularity": 65536,
456
+ "persistent": false,
457
+ "recording": false
458
+ },
459
+ {
460
+ "busy": false,
461
+ "count": 458752,
462
+ "granularity": 65536,
463
+ "persistent": false,
464
+ "recording": false
465
+ }
466
+ ],
467
"drive0": [
468
{
469
"busy": false,
470
@@ -XXX,XX +XXX,XX @@ write -P0x67 0x3fe0000 0x20000
471
{"return": ""}
472
{
473
"bitmaps": {
474
+ "backup-top": [
475
+ {
476
+ "busy": false,
477
+ "count": 67108864,
478
+ "granularity": 65536,
479
+ "persistent": false,
480
+ "recording": false
481
+ },
482
+ {
483
+ "busy": false,
484
+ "count": 458752,
485
+ "granularity": 65536,
486
+ "persistent": false,
487
+ "recording": false
488
+ }
489
+ ],
490
"drive0": [
491
{
492
"busy": false,
493
@@ -XXX,XX +XXX,XX @@ write -P0x67 0x3fe0000 0x20000
494
{"return": ""}
495
{
496
"bitmaps": {
497
+ "backup-top": [
498
+ {
499
+ "busy": false,
500
+ "count": 67108864,
501
+ "granularity": 65536,
502
+ "persistent": false,
503
+ "recording": false
504
+ },
505
+ {
506
+ "busy": false,
507
+ "count": 458752,
508
+ "granularity": 65536,
509
+ "persistent": false,
510
+ "recording": false
511
+ }
512
+ ],
513
"drive0": [
514
{
515
"busy": false,
516
@@ -XXX,XX +XXX,XX @@ write -P0x67 0x3fe0000 0x20000
517
{"return": ""}
518
{
519
"bitmaps": {
520
+ "backup-top": [
521
+ {
522
+ "busy": false,
523
+ "count": 67108864,
524
+ "granularity": 65536,
525
+ "persistent": false,
526
+ "recording": false
527
+ },
528
+ {
529
+ "busy": false,
530
+ "count": 458752,
531
+ "granularity": 65536,
532
+ "persistent": false,
533
+ "recording": false
534
+ }
535
+ ],
536
"drive0": [
537
{
538
"busy": false,
539
@@ -XXX,XX +XXX,XX @@ write -P0x67 0x3fe0000 0x20000
540
{"return": ""}
541
{
542
"bitmaps": {
543
+ "backup-top": [
544
+ {
545
+ "busy": false,
546
+ "count": 67108864,
547
+ "granularity": 65536,
548
+ "persistent": false,
549
+ "recording": false
550
+ },
551
+ {
552
+ "busy": false,
553
+ "count": 458752,
554
+ "granularity": 65536,
555
+ "persistent": false,
556
+ "recording": false
557
+ }
558
+ ],
559
"drive0": [
560
{
561
"busy": false,
562
@@ -XXX,XX +XXX,XX @@ write -P0x67 0x3fe0000 0x20000
563
{"return": ""}
564
{
565
"bitmaps": {
566
+ "backup-top": [
567
+ {
568
+ "busy": false,
569
+ "count": 67108864,
570
+ "granularity": 65536,
571
+ "persistent": false,
572
+ "recording": false
573
+ },
574
+ {
575
+ "busy": false,
576
+ "count": 458752,
577
+ "granularity": 65536,
578
+ "persistent": false,
579
+ "recording": false
580
+ }
581
+ ],
582
"drive0": [
583
{
584
"busy": false,
585
@@ -XXX,XX +XXX,XX @@ write -P0x67 0x3fe0000 0x20000
586
{"return": ""}
587
{
588
"bitmaps": {
589
+ "backup-top": [
590
+ {
591
+ "busy": false,
592
+ "count": 67108864,
593
+ "granularity": 65536,
594
+ "persistent": false,
595
+ "recording": false
596
+ },
597
+ {
598
+ "busy": false,
599
+ "count": 458752,
600
+ "granularity": 65536,
601
+ "persistent": false,
602
+ "recording": false
603
+ }
604
+ ],
605
"drive0": [
606
{
607
"busy": false,
608
@@ -XXX,XX +XXX,XX @@ write -P0x67 0x3fe0000 0x20000
609
{"return": ""}
610
{
611
"bitmaps": {
612
+ "backup-top": [
613
+ {
614
+ "busy": false,
615
+ "count": 67108864,
616
+ "granularity": 65536,
617
+ "persistent": false,
618
+ "recording": false
619
+ },
620
+ {
621
+ "busy": false,
622
+ "count": 458752,
623
+ "granularity": 65536,
624
+ "persistent": false,
625
+ "recording": false
626
+ }
627
+ ],
628
"drive0": [
629
{
630
"busy": false,
631
@@ -XXX,XX +XXX,XX @@ write -P0x67 0x3fe0000 0x20000
632
{"return": ""}
633
{
634
"bitmaps": {
635
+ "backup-top": [
636
+ {
637
+ "busy": false,
638
+ "count": 67108864,
639
+ "granularity": 65536,
640
+ "persistent": false,
641
+ "recording": false
642
+ },
643
+ {
644
+ "busy": false,
645
+ "count": 458752,
646
+ "granularity": 65536,
647
+ "persistent": false,
648
+ "recording": false
649
+ }
650
+ ],
651
"drive0": [
652
{
653
"busy": false,
654
@@ -XXX,XX +XXX,XX @@ write -P0x67 0x3fe0000 0x20000
655
{"return": ""}
656
{
657
"bitmaps": {
658
+ "backup-top": [
659
+ {
660
+ "busy": false,
661
+ "count": 67108864,
662
+ "granularity": 65536,
663
+ "persistent": false,
664
+ "recording": false
665
+ },
666
+ {
667
+ "busy": false,
668
+ "count": 458752,
669
+ "granularity": 65536,
670
+ "persistent": false,
671
+ "recording": false
672
+ }
673
+ ],
674
"drive0": [
675
{
676
"busy": false,
677
@@ -XXX,XX +XXX,XX @@ write -P0x67 0x3fe0000 0x20000
678
{"return": ""}
679
{
680
"bitmaps": {
681
+ "backup-top": [
682
+ {
683
+ "busy": false,
684
+ "count": 67108864,
685
+ "granularity": 65536,
686
+ "persistent": false,
687
+ "recording": false
688
+ },
689
+ {
690
+ "busy": false,
691
+ "count": 458752,
692
+ "granularity": 65536,
693
+ "persistent": false,
694
+ "recording": false
695
+ }
696
+ ],
697
"drive0": [
698
{
699
"busy": false,
92
--
700
--
93
2.20.1
701
2.34.1
94
95
diff view generated by jsdifflib
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
1
3
Use new qemu_iovec_init_buf() instead of
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
9
Message-id: 20190218140926.333779-5-vsementsov@virtuozzo.com
10
Message-Id: <20190218140926.333779-5-vsementsov@virtuozzo.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
13
block/backup.c | 5 +----
14
1 file changed, 1 insertion(+), 4 deletions(-)
15
16
diff --git a/block/backup.c b/block/backup.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/block/backup.c
19
+++ b/block/backup.c
20
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
21
void **bounce_buffer)
22
{
23
int ret;
24
- struct iovec iov;
25
QEMUIOVector qiov;
26
BlockBackend *blk = job->common.blk;
27
int nbytes;
28
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
29
if (!*bounce_buffer) {
30
*bounce_buffer = blk_blockalign(blk, job->cluster_size);
31
}
32
- iov.iov_base = *bounce_buffer;
33
- iov.iov_len = nbytes;
34
- qemu_iovec_init_external(&qiov, &iov, 1);
35
+ qemu_iovec_init_buf(&qiov, *bounce_buffer, nbytes);
36
37
ret = blk_co_preadv(blk, start, qiov.size, &qiov, read_flags);
38
if (ret < 0) {
39
--
40
2.20.1
41
42
diff view generated by jsdifflib
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
Use new qemu_iovec_init_buf() instead of
3
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
4
Reviewed-by: Hanna Reitz <hreitz@redhat.com>
5
Message-Id: <20220303194349.2304213-14-vsementsov@virtuozzo.com>
6
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
7
---
8
tests/qemu-iotests/tests/image-fleecing | 64 +++++++++++++-----
9
tests/qemu-iotests/tests/image-fleecing.out | 74 ++++++++++++++++++++-
10
2 files changed, 119 insertions(+), 19 deletions(-)
5
11
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
12
diff --git a/tests/qemu-iotests/tests/image-fleecing b/tests/qemu-iotests/tests/image-fleecing
7
Reviewed-by: Eric Blake <eblake@redhat.com>
13
index XXXXXXX..XXXXXXX 100755
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
14
--- a/tests/qemu-iotests/tests/image-fleecing
9
Message-id: 20190218140926.333779-10-vsementsov@virtuozzo.com
15
+++ b/tests/qemu-iotests/tests/image-fleecing
10
Message-Id: <20190218140926.333779-10-vsementsov@virtuozzo.com>
16
@@ -XXX,XX +XXX,XX @@ remainder = [('0xd5', '0x108000', '32k'), # Right-end of partial-left [1]
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
17
('0xdc', '32M', '32k'), # Left-end of partial-right [2]
12
---
18
('0xcd', '0x3ff0000', '64k')] # patterns[3]
13
block/qcow2.c | 12 ++----------
19
14
1 file changed, 2 insertions(+), 10 deletions(-)
20
-def do_test(use_cbw, base_img_path, fleece_img_path, nbd_sock_path, vm):
15
21
+def do_test(use_cbw, use_snapshot_access_filter, base_img_path,
16
diff --git a/block/qcow2.c b/block/qcow2.c
22
+ fleece_img_path, nbd_sock_path, vm):
23
log('--- Setting up images ---')
24
log('')
25
26
assert qemu_img('create', '-f', iotests.imgfmt, base_img_path, '64M') == 0
27
- assert qemu_img('create', '-f', 'qcow2', fleece_img_path, '64M') == 0
28
+ if use_snapshot_access_filter:
29
+ assert use_cbw
30
+ assert qemu_img('create', '-f', 'raw', fleece_img_path, '64M') == 0
31
+ else:
32
+ assert qemu_img('create', '-f', 'qcow2', fleece_img_path, '64M') == 0
33
34
for p in patterns:
35
qemu_io('-f', iotests.imgfmt,
36
@@ -XXX,XX +XXX,XX @@ def do_test(use_cbw, base_img_path, fleece_img_path, nbd_sock_path, vm):
37
log('')
38
39
40
- # create tmp_node backed by src_node
41
- log(vm.qmp('blockdev-add', {
42
- 'driver': 'qcow2',
43
- 'node-name': tmp_node,
44
- 'file': {
45
+ if use_snapshot_access_filter:
46
+ log(vm.qmp('blockdev-add', {
47
+ 'node-name': tmp_node,
48
'driver': 'file',
49
'filename': fleece_img_path,
50
- },
51
- 'backing': src_node,
52
- }))
53
+ }))
54
+ else:
55
+ # create tmp_node backed by src_node
56
+ log(vm.qmp('blockdev-add', {
57
+ 'driver': 'qcow2',
58
+ 'node-name': tmp_node,
59
+ 'file': {
60
+ 'driver': 'file',
61
+ 'filename': fleece_img_path,
62
+ },
63
+ 'backing': src_node,
64
+ }))
65
66
# Establish CBW from source to fleecing node
67
if use_cbw:
68
@@ -XXX,XX +XXX,XX @@ def do_test(use_cbw, base_img_path, fleece_img_path, nbd_sock_path, vm):
69
}))
70
71
log(vm.qmp('qom-set', path=qom_path, property='drive', value='fl-cbw'))
72
+
73
+ if use_snapshot_access_filter:
74
+ log(vm.qmp('blockdev-add', {
75
+ 'driver': 'snapshot-access',
76
+ 'node-name': 'fl-access',
77
+ 'file': 'fl-cbw',
78
+ }))
79
else:
80
log(vm.qmp('blockdev-backup',
81
job_id='fleecing',
82
@@ -XXX,XX +XXX,XX @@ def do_test(use_cbw, base_img_path, fleece_img_path, nbd_sock_path, vm):
83
target=tmp_node,
84
sync='none'))
85
86
+ export_node = 'fl-access' if use_snapshot_access_filter else tmp_node
87
+
88
log('')
89
log('--- Setting up NBD Export ---')
90
log('')
91
92
- nbd_uri = 'nbd+unix:///%s?socket=%s' % (tmp_node, nbd_sock_path)
93
+ nbd_uri = 'nbd+unix:///%s?socket=%s' % (export_node, nbd_sock_path)
94
log(vm.qmp('nbd-server-start',
95
{'addr': {'type': 'unix',
96
'data': {'path': nbd_sock_path}}}))
97
98
- log(vm.qmp('nbd-server-add', device=tmp_node))
99
+ log(vm.qmp('nbd-server-add', device=export_node))
100
101
log('')
102
log('--- Sanity Check ---')
103
@@ -XXX,XX +XXX,XX @@ def do_test(use_cbw, base_img_path, fleece_img_path, nbd_sock_path, vm):
104
log('--- Cleanup ---')
105
log('')
106
107
+ log(vm.qmp('nbd-server-stop'))
108
+
109
if use_cbw:
110
+ if use_snapshot_access_filter:
111
+ log(vm.qmp('blockdev-del', node_name='fl-access'))
112
log(vm.qmp('qom-set', path=qom_path, property='drive', value=src_node))
113
log(vm.qmp('blockdev-del', node_name='fl-cbw'))
114
else:
115
@@ -XXX,XX +XXX,XX @@ def do_test(use_cbw, base_img_path, fleece_img_path, nbd_sock_path, vm):
116
assert e is not None
117
log(e, filters=[iotests.filter_qmp_event])
118
119
- log(vm.qmp('nbd-server-stop'))
120
log(vm.qmp('blockdev-del', node_name=tmp_node))
121
vm.shutdown()
122
123
@@ -XXX,XX +XXX,XX @@ def do_test(use_cbw, base_img_path, fleece_img_path, nbd_sock_path, vm):
124
log('Done')
125
126
127
-def test(use_cbw):
128
+def test(use_cbw, use_snapshot_access_filter):
129
with iotests.FilePath('base.img') as base_img_path, \
130
iotests.FilePath('fleece.img') as fleece_img_path, \
131
iotests.FilePath('nbd.sock',
132
base_dir=iotests.sock_dir) as nbd_sock_path, \
133
iotests.VM() as vm:
134
- do_test(use_cbw, base_img_path, fleece_img_path, nbd_sock_path, vm)
135
+ do_test(use_cbw, use_snapshot_access_filter, base_img_path,
136
+ fleece_img_path, nbd_sock_path, vm)
137
138
139
log('=== Test backup(sync=none) based fleecing ===\n')
140
-test(False)
141
+test(False, False)
142
+
143
+log('=== Test cbw-filter based fleecing ===\n')
144
+test(True, False)
145
146
-log('=== Test filter based fleecing ===\n')
147
-test(True)
148
+log('=== Test fleecing-format based fleecing ===\n')
149
+test(True, True)
150
diff --git a/tests/qemu-iotests/tests/image-fleecing.out b/tests/qemu-iotests/tests/image-fleecing.out
17
index XXXXXXX..XXXXXXX 100644
151
index XXXXXXX..XXXXXXX 100644
18
--- a/block/qcow2.c
152
--- a/tests/qemu-iotests/tests/image-fleecing.out
19
+++ b/block/qcow2.c
153
+++ b/tests/qemu-iotests/tests/image-fleecing.out
20
@@ -XXX,XX +XXX,XX @@ qcow2_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
154
@@ -XXX,XX +XXX,XX @@ read -P0 0x3fe0000 64k
21
{
155
22
BDRVQcow2State *s = bs->opaque;
156
--- Cleanup ---
23
QEMUIOVector hd_qiov;
157
24
- struct iovec iov;
158
+{"return": {}}
25
int ret;
159
{"return": {}}
26
size_t out_len;
160
{"data": {"device": "fleecing", "len": 67108864, "offset": 393216, "speed": 0, "type": "backup"}, "event": "BLOCK_JOB_CANCELLED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
27
uint8_t *buf, *out_buf;
161
{"return": {}}
28
@@ -XXX,XX +XXX,XX @@ qcow2_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
162
+
29
goto fail;
163
+--- Confirming writes ---
30
}
164
+
31
165
+read -P0xab 0 64k
32
- iov = (struct iovec) {
166
+read -P0xad 0x00f8000 64k
33
- .iov_base = out_buf,
167
+read -P0x1d 0x2008000 64k
34
- .iov_len = out_len,
168
+read -P0xea 0x3fe0000 64k
35
- };
169
+read -P0xd5 0x108000 32k
36
- qemu_iovec_init_external(&hd_qiov, &iov, 1);
170
+read -P0xdc 32M 32k
37
+ qemu_iovec_init_buf(&hd_qiov, out_buf, out_len);
171
+read -P0xcd 0x3ff0000 64k
38
172
+
39
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED);
173
+Done
40
ret = bdrv_co_pwritev(bs->file, cluster_offset, out_len, &hd_qiov, 0);
174
+=== Test cbw-filter based fleecing ===
41
@@ -XXX,XX +XXX,XX @@ qcow2_co_preadv_compressed(BlockDriverState *bs,
175
+
42
int ret = 0, csize, nb_csectors;
176
+--- Setting up images ---
43
uint64_t coffset;
177
+
44
uint8_t *buf, *out_buf;
178
+Done
45
- struct iovec iov;
179
+
46
QEMUIOVector local_qiov;
180
+--- Launching VM ---
47
int offset_in_cluster = offset_into_cluster(s, offset);
181
+
48
182
+Done
49
@@ -XXX,XX +XXX,XX @@ qcow2_co_preadv_compressed(BlockDriverState *bs,
183
+
50
if (!buf) {
184
+--- Setting up Fleecing Graph ---
51
return -ENOMEM;
185
+
52
}
186
+{"return": {}}
53
- iov.iov_base = buf;
187
+{"return": {}}
54
- iov.iov_len = csize;
188
+{"return": {}}
55
- qemu_iovec_init_external(&local_qiov, &iov, 1);
189
+
56
+ qemu_iovec_init_buf(&local_qiov, buf, csize);
190
+--- Setting up NBD Export ---
57
191
+
58
out_buf = qemu_blockalign(bs, s->cluster_size);
192
+{"return": {}}
193
+{"return": {}}
194
+
195
+--- Sanity Check ---
196
+
197
+read -P0x5d 0 64k
198
+read -P0xd5 1M 64k
199
+read -P0xdc 32M 64k
200
+read -P0xcd 0x3ff0000 64k
201
+read -P0 0x00f8000 32k
202
+read -P0 0x2010000 32k
203
+read -P0 0x3fe0000 64k
204
+
205
+--- Testing COW ---
206
+
207
+write -P0xab 0 64k
208
+{"return": ""}
209
+write -P0xad 0x00f8000 64k
210
+{"return": ""}
211
+write -P0x1d 0x2008000 64k
212
+{"return": ""}
213
+write -P0xea 0x3fe0000 64k
214
+{"return": ""}
215
+
216
+--- Verifying Data ---
217
+
218
+read -P0x5d 0 64k
219
+read -P0xd5 1M 64k
220
+read -P0xdc 32M 64k
221
+read -P0xcd 0x3ff0000 64k
222
+read -P0 0x00f8000 32k
223
+read -P0 0x2010000 32k
224
+read -P0 0x3fe0000 64k
225
+
226
+--- Cleanup ---
227
+
228
+{"return": {}}
229
+{"return": {}}
230
+{"return": {}}
231
{"return": {}}
232
233
--- Confirming writes ---
234
@@ -XXX,XX +XXX,XX @@ read -P0xdc 32M 32k
235
read -P0xcd 0x3ff0000 64k
236
237
Done
238
-=== Test filter based fleecing ===
239
+=== Test fleecing-format based fleecing ===
240
241
--- Setting up images ---
242
243
@@ -XXX,XX +XXX,XX @@ Done
244
{"return": {}}
245
{"return": {}}
246
{"return": {}}
247
+{"return": {}}
248
249
--- Setting up NBD Export ---
250
251
@@ -XXX,XX +XXX,XX @@ read -P0 0x3fe0000 64k
252
{"return": {}}
253
{"return": {}}
254
{"return": {}}
255
+{"return": {}}
256
257
--- Confirming writes ---
59
258
60
--
259
--
61
2.20.1
260
2.34.1
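To make the new fleecing graph easier to follow, here is a minimal standalone sketch (not taken from the test itself) of the QMP commands that the snapshot-access based scheme above issues; the node names 'src-node' and 'tmp-node', the image file name and the socket path are placeholders:

import json

SRC = 'src-node'   # placeholder for the guest-visible source node
TMP = 'tmp-node'   # placeholder for the raw temporary (fleecing) node

commands = [
    # Raw temporary node on top of the fleecing image
    ('blockdev-add', {'node-name': TMP, 'driver': 'file',
                      'filename': 'fleece.img'}),
    # copy-before-write filter: old data is copied to TMP before guest writes
    # (the test then re-points the guest device at 'fl-cbw' via qom-set)
    ('blockdev-add', {'driver': 'copy-before-write', 'node-name': 'fl-cbw',
                      'file': SRC, 'target': TMP}),
    # snapshot-access node: the stable point-in-time view for the client
    ('blockdev-add', {'driver': 'snapshot-access', 'node-name': 'fl-access',
                      'file': 'fl-cbw'}),
    # Export the snapshot over NBD for a pull backup
    ('nbd-server-start', {'addr': {'type': 'unix',
                                   'data': {'path': '/tmp/nbd.sock'}}}),
    ('nbd-server-add', {'device': 'fl-access'}),
]

for name, args in commands:
    print(json.dumps({'execute': name, 'arguments': args}))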
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
Use new qemu_iovec_init_buf() instead of
3
Add a helper that returns both status and output, to be used in the
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
4
following commit.
5
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
7
Message-Id: <20220303194349.2304213-15-vsementsov@virtuozzo.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
8
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
9
Message-id: 20190218140926.333779-12-vsementsov@virtuozzo.com
10
Message-Id: <20190218140926.333779-12-vsementsov@virtuozzo.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
9
---
13
block/vmdk.c | 7 +------
10
tests/qemu-iotests/iotests.py | 3 +++
14
1 file changed, 1 insertion(+), 6 deletions(-)
11
1 file changed, 3 insertions(+)
15
12
16
diff --git a/block/vmdk.c b/block/vmdk.c
13
diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
17
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
18
--- a/block/vmdk.c
15
--- a/tests/qemu-iotests/iotests.py
19
+++ b/block/vmdk.c
16
+++ b/tests/qemu-iotests/iotests.py
20
@@ -XXX,XX +XXX,XX @@ static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset,
17
@@ -XXX,XX +XXX,XX @@ def qemu_io(*args):
21
VmdkGrainMarker *data = NULL;
18
'''Run qemu-io and return the stdout data'''
22
uLongf buf_len;
19
return qemu_tool_pipe_and_status('qemu-io', qemu_io_wrap_args(args))[0]
23
QEMUIOVector local_qiov;
20
24
- struct iovec iov;
21
+def qemu_io_pipe_and_status(*args):
25
int64_t write_offset;
22
+ return qemu_tool_pipe_and_status('qemu-io', qemu_io_wrap_args(args))
26
int64_t write_end_sector;
23
+
27
24
def qemu_io_log(*args):
28
@@ -XXX,XX +XXX,XX @@ static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset,
25
result = qemu_io(*args)
29
data->size = cpu_to_le32(buf_len);
26
log(result, filters=[filter_testfiles, filter_qemu_io])
30
31
n_bytes = buf_len + sizeof(VmdkGrainMarker);
32
- iov = (struct iovec) {
33
- .iov_base = data,
34
- .iov_len = n_bytes,
35
- };
36
- qemu_iovec_init_external(&local_qiov, &iov, 1);
37
+ qemu_iovec_init_buf(&local_qiov, data, n_bytes);
38
39
BLKDBG_EVENT(extent->file, BLKDBG_WRITE_COMPRESSED);
40
} else {
41
--
27
--
42
2.20.1
28
2.34.1
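The new qemu_io_pipe_and_status() wrapper simply forwards to qemu_tool_pipe_and_status(), whose (output, status) pair the existing qemu_io() wrapper already unpacks. As a rough standalone approximation (assuming qemu-io is on PATH; the image path is a placeholder), the helper behaves like this:

import subprocess

def pipe_and_status(tool, args):
    """Run a tool and return (combined stdout+stderr, exit status)."""
    res = subprocess.run([tool] + list(args),
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT,
                         universal_newlines=True)
    return res.stdout, res.returncode

# Mirrors how the test uses it: log the output only if the command failed.
out, ret = pipe_and_status('qemu-io', ['-r', '-c', 'read 0 64k', 'test.img'])
if ret != 0:
    print(out)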
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
Use new QEMU_IOVEC_INIT_BUF() instead of
3
Note that reading zero areas (not dirty in the bitmap) fails; that is
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
4
correct.
5
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
7
Message-Id: <20220303194349.2304213-16-vsementsov@virtuozzo.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
8
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
9
Message-id: 20190218140926.333779-6-vsementsov@virtuozzo.com
10
Message-Id: <20190218140926.333779-6-vsementsov@virtuozzo.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
9
---
13
block/commit.c | 7 +------
10
tests/qemu-iotests/tests/image-fleecing | 38 +++++++---
14
1 file changed, 1 insertion(+), 6 deletions(-)
11
tests/qemu-iotests/tests/image-fleecing.out | 84 +++++++++++++++++++++
12
2 files changed, 113 insertions(+), 9 deletions(-)
15
13
16
diff --git a/block/commit.c b/block/commit.c
14
diff --git a/tests/qemu-iotests/tests/image-fleecing b/tests/qemu-iotests/tests/image-fleecing
15
index XXXXXXX..XXXXXXX 100755
16
--- a/tests/qemu-iotests/tests/image-fleecing
17
+++ b/tests/qemu-iotests/tests/image-fleecing
18
@@ -XXX,XX +XXX,XX @@
19
# Creator/Owner: John Snow <jsnow@redhat.com>
20
21
import iotests
22
-from iotests import log, qemu_img, qemu_io, qemu_io_silent
23
+from iotests import log, qemu_img, qemu_io, qemu_io_silent, \
24
+ qemu_io_pipe_and_status
25
26
iotests.script_initialize(
27
- supported_fmts=['qcow2', 'qcow', 'qed', 'vmdk', 'vhdx', 'raw'],
28
+ supported_fmts=['qcow2'],
29
supported_platforms=['linux'],
30
required_fmts=['copy-before-write'],
31
+ unsupported_imgopts=['compat']
32
)
33
34
patterns = [('0x5d', '0', '64k'),
35
@@ -XXX,XX +XXX,XX @@ remainder = [('0xd5', '0x108000', '32k'), # Right-end of partial-left [1]
36
('0xcd', '0x3ff0000', '64k')] # patterns[3]
37
38
def do_test(use_cbw, use_snapshot_access_filter, base_img_path,
39
- fleece_img_path, nbd_sock_path, vm):
40
+ fleece_img_path, nbd_sock_path, vm,
41
+ bitmap=False):
42
log('--- Setting up images ---')
43
log('')
44
45
assert qemu_img('create', '-f', iotests.imgfmt, base_img_path, '64M') == 0
46
+ if bitmap:
47
+ assert qemu_img('bitmap', '--add', base_img_path, 'bitmap0') == 0
48
+
49
if use_snapshot_access_filter:
50
assert use_cbw
51
assert qemu_img('create', '-f', 'raw', fleece_img_path, '64M') == 0
52
@@ -XXX,XX +XXX,XX @@ def do_test(use_cbw, use_snapshot_access_filter, base_img_path,
53
54
# Establish CBW from source to fleecing node
55
if use_cbw:
56
- log(vm.qmp('blockdev-add', {
57
+ fl_cbw = {
58
'driver': 'copy-before-write',
59
'node-name': 'fl-cbw',
60
'file': src_node,
61
'target': tmp_node
62
- }))
63
+ }
64
+
65
+ if bitmap:
66
+ fl_cbw['bitmap'] = {'node': src_node, 'name': 'bitmap0'}
67
+
68
+ log(vm.qmp('blockdev-add', fl_cbw))
69
70
log(vm.qmp('qom-set', path=qom_path, property='drive', value='fl-cbw'))
71
72
@@ -XXX,XX +XXX,XX @@ def do_test(use_cbw, use_snapshot_access_filter, base_img_path,
73
for p in patterns + zeroes:
74
cmd = 'read -P%s %s %s' % p
75
log(cmd)
76
- assert qemu_io_silent('-r', '-f', 'raw', '-c', cmd, nbd_uri) == 0
77
+ out, ret = qemu_io_pipe_and_status('-r', '-f', 'raw', '-c', cmd,
78
+ nbd_uri)
79
+ if ret != 0:
80
+ print(out)
81
82
log('')
83
log('--- Testing COW ---')
84
@@ -XXX,XX +XXX,XX @@ def do_test(use_cbw, use_snapshot_access_filter, base_img_path,
85
for p in patterns + zeroes:
86
cmd = 'read -P%s %s %s' % p
87
log(cmd)
88
- assert qemu_io_silent('-r', '-f', 'raw', '-c', cmd, nbd_uri) == 0
89
+ out, ret = qemu_io_pipe_and_status('-r', '-f', 'raw', '-c', cmd,
90
+ nbd_uri)
91
+ if ret != 0:
92
+ print(out)
93
94
log('')
95
log('--- Cleanup ---')
96
@@ -XXX,XX +XXX,XX @@ def do_test(use_cbw, use_snapshot_access_filter, base_img_path,
97
log('Done')
98
99
100
-def test(use_cbw, use_snapshot_access_filter):
101
+def test(use_cbw, use_snapshot_access_filter, bitmap=False):
102
with iotests.FilePath('base.img') as base_img_path, \
103
iotests.FilePath('fleece.img') as fleece_img_path, \
104
iotests.FilePath('nbd.sock',
105
base_dir=iotests.sock_dir) as nbd_sock_path, \
106
iotests.VM() as vm:
107
do_test(use_cbw, use_snapshot_access_filter, base_img_path,
108
- fleece_img_path, nbd_sock_path, vm)
109
+ fleece_img_path, nbd_sock_path, vm, bitmap=bitmap)
110
111
112
log('=== Test backup(sync=none) based fleecing ===\n')
113
@@ -XXX,XX +XXX,XX @@ test(True, False)
114
115
log('=== Test fleecing-format based fleecing ===\n')
116
test(True, True)
117
+
118
+log('=== Test fleecing-format based fleecing with bitmap ===\n')
119
+test(True, True, bitmap=True)
120
diff --git a/tests/qemu-iotests/tests/image-fleecing.out b/tests/qemu-iotests/tests/image-fleecing.out
17
index XXXXXXX..XXXXXXX 100644
121
index XXXXXXX..XXXXXXX 100644
18
--- a/block/commit.c
122
--- a/tests/qemu-iotests/tests/image-fleecing.out
19
+++ b/block/commit.c
123
+++ b/tests/qemu-iotests/tests/image-fleecing.out
20
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn commit_populate(BlockBackend *bs, BlockBackend *base,
124
@@ -XXX,XX +XXX,XX @@ read -P0 0x00f8000 32k
21
void *buf)
125
read -P0 0x2010000 32k
22
{
126
read -P0 0x3fe0000 64k
23
int ret = 0;
127
24
- QEMUIOVector qiov;
128
+--- Cleanup ---
25
- struct iovec iov = {
129
+
26
- .iov_base = buf,
130
+{"return": {}}
27
- .iov_len = bytes,
131
+{"return": {}}
28
- };
132
+{"return": {}}
29
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
133
+{"return": {}}
30
134
+{"return": {}}
31
assert(bytes < SIZE_MAX);
135
+
32
- qemu_iovec_init_external(&qiov, &iov, 1);
136
+--- Confirming writes ---
33
137
+
34
ret = blk_co_preadv(bs, offset, qiov.size, &qiov, 0);
138
+read -P0xab 0 64k
35
if (ret < 0) {
139
+read -P0xad 0x00f8000 64k
140
+read -P0x1d 0x2008000 64k
141
+read -P0xea 0x3fe0000 64k
142
+read -P0xd5 0x108000 32k
143
+read -P0xdc 32M 32k
144
+read -P0xcd 0x3ff0000 64k
145
+
146
+Done
147
+=== Test fleecing-format based fleecing with bitmap ===
148
+
149
+--- Setting up images ---
150
+
151
+Done
152
+
153
+--- Launching VM ---
154
+
155
+Done
156
+
157
+--- Setting up Fleecing Graph ---
158
+
159
+{"return": {}}
160
+{"return": {}}
161
+{"return": {}}
162
+{"return": {}}
163
+
164
+--- Setting up NBD Export ---
165
+
166
+{"return": {}}
167
+{"return": {}}
168
+
169
+--- Sanity Check ---
170
+
171
+read -P0x5d 0 64k
172
+read -P0xd5 1M 64k
173
+read -P0xdc 32M 64k
174
+read -P0xcd 0x3ff0000 64k
175
+read -P0 0x00f8000 32k
176
+read failed: Invalid argument
177
+
178
+read -P0 0x2010000 32k
179
+read failed: Invalid argument
180
+
181
+read -P0 0x3fe0000 64k
182
+read failed: Invalid argument
183
+
184
+
185
+--- Testing COW ---
186
+
187
+write -P0xab 0 64k
188
+{"return": ""}
189
+write -P0xad 0x00f8000 64k
190
+{"return": ""}
191
+write -P0x1d 0x2008000 64k
192
+{"return": ""}
193
+write -P0xea 0x3fe0000 64k
194
+{"return": ""}
195
+
196
+--- Verifying Data ---
197
+
198
+read -P0x5d 0 64k
199
+read -P0xd5 1M 64k
200
+read -P0xdc 32M 64k
201
+read -P0xcd 0x3ff0000 64k
202
+read -P0 0x00f8000 32k
203
+read failed: Invalid argument
204
+
205
+read -P0 0x2010000 32k
206
+read failed: Invalid argument
207
+
208
+read -P0 0x3fe0000 64k
209
+read failed: Invalid argument
210
+
211
+
212
--- Cleanup ---
213
214
{"return": {}}
36
--
215
--
37
2.20.1
216
2.34.1
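For reference, a small sketch (placeholder node names, not the test's own code) of how the copy-before-write node's blockdev-add arguments change when a bitmap is attached:

import json

def cbw_node_args(src, target, bitmap_name=None):
    """Build blockdev-add arguments for a copy-before-write filter node."""
    args = {
        'driver': 'copy-before-write',
        'node-name': 'fl-cbw',
        'file': src,
        'target': target,
    }
    if bitmap_name is not None:
        # Limit the snapshot to areas marked dirty in the bitmap; reads of
        # clean areas through the export then fail, as the output above shows.
        args['bitmap'] = {'node': src, 'name': bitmap_name}
    return args

print(json.dumps({'execute': 'blockdev-add',
                  'arguments': cbw_node_args('src-node', 'tmp-node',
                                             'bitmap0')}))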
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
Use new QEMU_IOVEC_INIT_BUF() instead of
3
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
4
Message-Id: <20220303194349.2304213-17-vsementsov@virtuozzo.com>
5
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
6
---
7
tests/qemu-iotests/tests/image-fleecing | 125 +++++++++++++++-----
8
tests/qemu-iotests/tests/image-fleecing.out | 63 ++++++++++
9
2 files changed, 156 insertions(+), 32 deletions(-)
5
10
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
11
diff --git a/tests/qemu-iotests/tests/image-fleecing b/tests/qemu-iotests/tests/image-fleecing
7
Reviewed-by: Eric Blake <eblake@redhat.com>
12
index XXXXXXX..XXXXXXX 100755
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
13
--- a/tests/qemu-iotests/tests/image-fleecing
9
Message-id: 20190218140926.333779-7-vsementsov@virtuozzo.com
14
+++ b/tests/qemu-iotests/tests/image-fleecing
10
Message-Id: <20190218140926.333779-7-vsementsov@virtuozzo.com>
15
@@ -XXX,XX +XXX,XX @@ remainder = [('0xd5', '0x108000', '32k'), # Right-end of partial-left [1]
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
16
('0xdc', '32M', '32k'), # Left-end of partial-right [2]
12
---
17
('0xcd', '0x3ff0000', '64k')] # patterns[3]
13
block/stream.c | 7 +------
18
14
1 file changed, 1 insertion(+), 6 deletions(-)
19
-def do_test(use_cbw, use_snapshot_access_filter, base_img_path,
15
20
- fleece_img_path, nbd_sock_path, vm,
16
diff --git a/block/stream.c b/block/stream.c
21
+def do_test(vm, use_cbw, use_snapshot_access_filter, base_img_path,
22
+ fleece_img_path, nbd_sock_path=None,
23
+ target_img_path=None,
24
bitmap=False):
25
+ push_backup = target_img_path is not None
26
+ assert (nbd_sock_path is not None) != push_backup
27
+ if push_backup:
28
+ assert use_cbw
29
+
30
log('--- Setting up images ---')
31
log('')
32
33
@@ -XXX,XX +XXX,XX @@ def do_test(use_cbw, use_snapshot_access_filter, base_img_path,
34
else:
35
assert qemu_img('create', '-f', 'qcow2', fleece_img_path, '64M') == 0
36
37
+ if push_backup:
38
+ assert qemu_img('create', '-f', 'qcow2', target_img_path, '64M') == 0
39
+
40
for p in patterns:
41
qemu_io('-f', iotests.imgfmt,
42
'-c', 'write -P%s %s %s' % p, base_img_path)
43
@@ -XXX,XX +XXX,XX @@ def do_test(use_cbw, use_snapshot_access_filter, base_img_path,
44
45
export_node = 'fl-access' if use_snapshot_access_filter else tmp_node
46
47
- log('')
48
- log('--- Setting up NBD Export ---')
49
- log('')
50
+ if push_backup:
51
+ log('')
52
+ log('--- Starting actual backup ---')
53
+ log('')
54
55
- nbd_uri = 'nbd+unix:///%s?socket=%s' % (export_node, nbd_sock_path)
56
- log(vm.qmp('nbd-server-start',
57
- {'addr': {'type': 'unix',
58
- 'data': {'path': nbd_sock_path}}}))
59
+ log(vm.qmp('blockdev-add', **{
60
+ 'driver': iotests.imgfmt,
61
+ 'node-name': 'target',
62
+ 'file': {
63
+ 'driver': 'file',
64
+ 'filename': target_img_path
65
+ }
66
+ }))
67
+ log(vm.qmp('blockdev-backup', device=export_node,
68
+ sync='full', target='target',
69
+ job_id='push-backup', speed=1))
70
+ else:
71
+ log('')
72
+ log('--- Setting up NBD Export ---')
73
+ log('')
74
75
- log(vm.qmp('nbd-server-add', device=export_node))
76
+ nbd_uri = 'nbd+unix:///%s?socket=%s' % (export_node, nbd_sock_path)
77
+ log(vm.qmp('nbd-server-start',
78
+ {'addr': { 'type': 'unix',
79
+ 'data': { 'path': nbd_sock_path } } }))
80
81
- log('')
82
- log('--- Sanity Check ---')
83
- log('')
84
+ log(vm.qmp('nbd-server-add', device=export_node))
85
86
- for p in patterns + zeroes:
87
- cmd = 'read -P%s %s %s' % p
88
- log(cmd)
89
- out, ret = qemu_io_pipe_and_status('-r', '-f', 'raw', '-c', cmd,
90
- nbd_uri)
91
- if ret != 0:
92
- print(out)
93
+ log('')
94
+ log('--- Sanity Check ---')
95
+ log('')
96
+
97
+ for p in patterns + zeroes:
98
+ cmd = 'read -P%s %s %s' % p
99
+ log(cmd)
100
+ out, ret = qemu_io_pipe_and_status('-r', '-f', 'raw', '-c', cmd,
101
+ nbd_uri)
102
+ if ret != 0:
103
+ print(out)
104
105
log('')
106
log('--- Testing COW ---')
107
@@ -XXX,XX +XXX,XX @@ def do_test(use_cbw, use_snapshot_access_filter, base_img_path,
108
log(cmd)
109
log(vm.hmp_qemu_io(qom_path, cmd, qdev=True))
110
111
+ if push_backup:
112
+ # Check that previous operations were done during backup, not after
113
+ # If backup is already finished, it's possible that it was finished
114
+ # even before hmp qemu_io write, and we didn't actually test
115
+ # copy-before-write operation. This should not happen, as we use
116
+ # speed=1. But worth checking.
117
+ result = vm.qmp('query-block-jobs')
118
+ assert len(result['return']) == 1
119
+
120
+ result = vm.qmp('block-job-set-speed', device='push-backup', speed=0)
121
+ assert result == {'return': {}}
122
+
123
+ log(vm.event_wait(name='BLOCK_JOB_COMPLETED',
124
+ match={'data': {'device': 'push-backup'}}),
125
+ filters=[iotests.filter_qmp_event])
126
+ log(vm.qmp('blockdev-del', node_name='target'))
127
+
128
log('')
129
log('--- Verifying Data ---')
130
log('')
131
@@ -XXX,XX +XXX,XX @@ def do_test(use_cbw, use_snapshot_access_filter, base_img_path,
132
for p in patterns + zeroes:
133
cmd = 'read -P%s %s %s' % p
134
log(cmd)
135
- out, ret = qemu_io_pipe_and_status('-r', '-f', 'raw', '-c', cmd,
136
- nbd_uri)
137
+ args = ['-r', '-c', cmd]
138
+ if push_backup:
139
+ args += [target_img_path]
140
+ else:
141
+ args += ['-f', 'raw', nbd_uri]
142
+ out, ret = qemu_io_pipe_and_status(*args)
143
if ret != 0:
144
print(out)
145
146
@@ -XXX,XX +XXX,XX @@ def do_test(use_cbw, use_snapshot_access_filter, base_img_path,
147
log('--- Cleanup ---')
148
log('')
149
150
- log(vm.qmp('nbd-server-stop'))
151
+ if not push_backup:
152
+ log(vm.qmp('nbd-server-stop'))
153
154
if use_cbw:
155
if use_snapshot_access_filter:
156
@@ -XXX,XX +XXX,XX @@ def do_test(use_cbw, use_snapshot_access_filter, base_img_path,
157
log('Done')
158
159
160
-def test(use_cbw, use_snapshot_access_filter, bitmap=False):
161
+def test(use_cbw, use_snapshot_access_filter,
162
+ nbd_sock_path=None, target_img_path=None, bitmap=False):
163
with iotests.FilePath('base.img') as base_img_path, \
164
iotests.FilePath('fleece.img') as fleece_img_path, \
165
- iotests.FilePath('nbd.sock',
166
- base_dir=iotests.sock_dir) as nbd_sock_path, \
167
iotests.VM() as vm:
168
- do_test(use_cbw, use_snapshot_access_filter, base_img_path,
169
- fleece_img_path, nbd_sock_path, vm, bitmap=bitmap)
170
+ do_test(vm, use_cbw, use_snapshot_access_filter, base_img_path,
171
+ fleece_img_path, nbd_sock_path, target_img_path,
172
+ bitmap=bitmap)
173
+
174
+def test_pull(use_cbw, use_snapshot_access_filter, bitmap=False):
175
+ with iotests.FilePath('nbd.sock',
176
+ base_dir=iotests.sock_dir) as nbd_sock_path:
177
+ test(use_cbw, use_snapshot_access_filter, nbd_sock_path, None,
178
+ bitmap=bitmap)
179
+
180
+def test_push():
181
+ with iotests.FilePath('target.img') as target_img_path:
182
+ test(True, True, None, target_img_path)
183
184
185
log('=== Test backup(sync=none) based fleecing ===\n')
186
-test(False, False)
187
+test_pull(False, False)
188
189
log('=== Test cbw-filter based fleecing ===\n')
190
-test(True, False)
191
+test_pull(True, False)
192
193
log('=== Test fleecing-format based fleecing ===\n')
194
-test(True, True)
195
+test_pull(True, True)
196
197
log('=== Test fleecing-format based fleecing with bitmap ===\n')
198
-test(True, True, bitmap=True)
199
+test_pull(True, True, bitmap=True)
200
+
201
+log('=== Test push backup with fleecing ===\n')
202
+test_push()
203
diff --git a/tests/qemu-iotests/tests/image-fleecing.out b/tests/qemu-iotests/tests/image-fleecing.out
17
index XXXXXXX..XXXXXXX 100644
204
index XXXXXXX..XXXXXXX 100644
18
--- a/block/stream.c
205
--- a/tests/qemu-iotests/tests/image-fleecing.out
19
+++ b/block/stream.c
206
+++ b/tests/qemu-iotests/tests/image-fleecing.out
20
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn stream_populate(BlockBackend *blk,
207
@@ -XXX,XX +XXX,XX @@ read -P0xdc 32M 32k
21
int64_t offset, uint64_t bytes,
208
read -P0xcd 0x3ff0000 64k
22
void *buf)
209
23
{
210
Done
24
- struct iovec iov = {
211
+=== Test push backup with fleecing ===
25
- .iov_base = buf,
212
+
26
- .iov_len = bytes,
213
+--- Setting up images ---
27
- };
214
+
28
- QEMUIOVector qiov;
215
+Done
29
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
216
+
30
217
+--- Launching VM ---
31
assert(bytes < SIZE_MAX);
218
+
32
- qemu_iovec_init_external(&qiov, &iov, 1);
219
+Done
33
220
+
34
/* Copy-on-read the unallocated clusters */
221
+--- Setting up Fleecing Graph ---
35
return blk_co_preadv(blk, offset, qiov.size, &qiov, BDRV_REQ_COPY_ON_READ);
222
+
223
+{"return": {}}
224
+{"return": {}}
225
+{"return": {}}
226
+{"return": {}}
227
+
228
+--- Starting actual backup ---
229
+
230
+{"return": {}}
231
+{"return": {}}
232
+
233
+--- Testing COW ---
234
+
235
+write -P0xab 0 64k
236
+{"return": ""}
237
+write -P0xad 0x00f8000 64k
238
+{"return": ""}
239
+write -P0x1d 0x2008000 64k
240
+{"return": ""}
241
+write -P0xea 0x3fe0000 64k
242
+{"return": ""}
243
+{"data": {"device": "push-backup", "len": 67108864, "offset": 67108864, "speed": 0, "type": "backup"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
244
+{"return": {}}
245
+
246
+--- Verifying Data ---
247
+
248
+read -P0x5d 0 64k
249
+read -P0xd5 1M 64k
250
+read -P0xdc 32M 64k
251
+read -P0xcd 0x3ff0000 64k
252
+read -P0 0x00f8000 32k
253
+read -P0 0x2010000 32k
254
+read -P0 0x3fe0000 64k
255
+
256
+--- Cleanup ---
257
+
258
+{"return": {}}
259
+{"return": {}}
260
+{"return": {}}
261
+{"return": {}}
262
+
263
+--- Confirming writes ---
264
+
265
+read -P0xab 0 64k
266
+read -P0xad 0x00f8000 64k
267
+read -P0x1d 0x2008000 64k
268
+read -P0xea 0x3fe0000 64k
269
+read -P0xd5 0x108000 32k
270
+read -P0xdc 32M 32k
271
+read -P0xcd 0x3ff0000 64k
272
+
273
+Done
36
--
274
--
37
2.20.1
275
2.34.1
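A condensed sketch of the push-backup sequence added above, written out as raw QMP commands (the target file name is a placeholder; as in the test, the job is throttled with speed=1 so that guest writes still exercise the copy-before-write path, and the limit is lifted later with block-job-set-speed before waiting for BLOCK_JOB_COMPLETED):

import json

TARGET_IMG = 'target.img'  # placeholder; the test uses a temporary file

push_backup_cmds = [
    # qcow2 target node that the backup job writes into
    ('blockdev-add', {'driver': 'qcow2', 'node-name': 'target',
                      'file': {'driver': 'file', 'filename': TARGET_IMG}}),
    # Full backup of the snapshot-access node, throttled to keep it running
    ('blockdev-backup', {'device': 'fl-access', 'target': 'target',
                         'sync': 'full', 'job-id': 'push-backup',
                         'speed': 1}),
    # ... guest writes happen here, exercising copy-before-write ...
    # Remove the throttle, then wait for BLOCK_JOB_COMPLETED
    ('block-job-set-speed', {'device': 'push-backup', 'speed': 0}),
    ('blockdev-del', {'node-name': 'target'}),
]

for name, args in push_backup_cmds:
    print(json.dumps({'execute': name, 'arguments': args}))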
Deleted patch
1
From: Stefano Garzarella <sgarzare@redhat.com>
2
1
3
We add an acct_failed parameter in order to use virtio_blk_handle_rw_error()
also when it is not required to call block_acct_failed() (e.g. when a discard
operation fails).
6
7
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
9
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
10
Message-id: 20190221103314.58500-2-sgarzare@redhat.com
11
Message-Id: <20190221103314.58500-2-sgarzare@redhat.com>
12
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
13
---
14
hw/block/virtio-blk.c | 10 ++++++----
15
1 file changed, 6 insertions(+), 4 deletions(-)
16
17
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/hw/block/virtio-blk.c
20
+++ b/hw/block/virtio-blk.c
21
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
22
}
23
24
static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
25
- bool is_read)
26
+ bool is_read, bool acct_failed)
27
{
28
VirtIOBlock *s = req->dev;
29
BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
30
@@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
31
s->rq = req;
32
} else if (action == BLOCK_ERROR_ACTION_REPORT) {
33
virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
34
- block_acct_failed(blk_get_stats(s->blk), &req->acct);
35
+ if (acct_failed) {
36
+ block_acct_failed(blk_get_stats(s->blk), &req->acct);
37
+ }
38
virtio_blk_free_request(req);
39
}
40
41
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_rw_complete(void *opaque, int ret)
42
* the memory until the request is completed (which will
43
* happen on the other side of the migration).
44
*/
45
- if (virtio_blk_handle_rw_error(req, -ret, is_read)) {
46
+ if (virtio_blk_handle_rw_error(req, -ret, is_read, true)) {
47
continue;
48
}
49
}
50
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_flush_complete(void *opaque, int ret)
51
52
aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
53
if (ret) {
54
- if (virtio_blk_handle_rw_error(req, -ret, 0)) {
55
+ if (virtio_blk_handle_rw_error(req, -ret, 0, true)) {
56
goto out;
57
}
58
}
59
--
60
2.20.1
Deleted patch
1
From: Stefano Garzarella <sgarzare@redhat.com>
2
1
3
Since configurable features for virtio-blk are growing, this patch
adds a host_features field to struct VirtIOBlock (as in virtio-net).
In this way, we can avoid adding new fields for new properties, and
we can directly set VIRTIO_BLK_F* flags in host_features.

We update the "config-wce" and "scsi" property definitions to use the
new host_features field without changing the behaviour.
10
11
Suggested-by: Michael S. Tsirkin <mst@redhat.com>
12
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
13
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
14
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
15
Message-id: 20190221103314.58500-3-sgarzare@redhat.com
16
Message-Id: <20190221103314.58500-3-sgarzare@redhat.com>
17
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
18
---
19
include/hw/virtio/virtio-blk.h | 3 +--
20
hw/block/virtio-blk.c | 16 +++++++++-------
21
2 files changed, 10 insertions(+), 9 deletions(-)
22
23
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
24
index XXXXXXX..XXXXXXX 100644
25
--- a/include/hw/virtio/virtio-blk.h
26
+++ b/include/hw/virtio/virtio-blk.h
27
@@ -XXX,XX +XXX,XX @@ struct VirtIOBlkConf
28
BlockConf conf;
29
IOThread *iothread;
30
char *serial;
31
- uint32_t scsi;
32
- uint32_t config_wce;
33
uint32_t request_merging;
34
uint16_t num_queues;
35
uint16_t queue_size;
36
@@ -XXX,XX +XXX,XX @@ typedef struct VirtIOBlock {
37
bool dataplane_disabled;
38
bool dataplane_started;
39
struct VirtIOBlockDataPlane *dataplane;
40
+ uint64_t host_features;
41
} VirtIOBlock;
42
43
typedef struct VirtIOBlockReq {
44
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
45
index XXXXXXX..XXXXXXX 100644
46
--- a/hw/block/virtio-blk.c
47
+++ b/hw/block/virtio-blk.c
48
@@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_scsi_req(VirtIOBlockReq *req)
49
*/
50
scsi = (void *)elem->in_sg[elem->in_num - 2].iov_base;
51
52
- if (!blk->conf.scsi) {
53
+ if (!virtio_has_feature(blk->host_features, VIRTIO_BLK_F_SCSI)) {
54
status = VIRTIO_BLK_S_UNSUPP;
55
goto fail;
56
}
57
@@ -XXX,XX +XXX,XX @@ static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features,
58
{
59
VirtIOBlock *s = VIRTIO_BLK(vdev);
60
61
+ /* Firstly sync all virtio-blk possible supported features */
62
+ features |= s->host_features;
63
+
64
virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX);
65
virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY);
66
virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY);
67
virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE);
68
if (virtio_has_feature(features, VIRTIO_F_VERSION_1)) {
69
- if (s->conf.scsi) {
70
+ if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_SCSI)) {
71
error_setg(errp, "Please set scsi=off for virtio-blk devices in order to use virtio 1.0");
72
return 0;
73
}
74
@@ -XXX,XX +XXX,XX @@ static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features,
75
virtio_add_feature(&features, VIRTIO_BLK_F_SCSI);
76
}
77
78
- if (s->conf.config_wce) {
79
- virtio_add_feature(&features, VIRTIO_BLK_F_CONFIG_WCE);
80
- }
81
if (blk_enable_write_cache(s->blk)) {
82
virtio_add_feature(&features, VIRTIO_BLK_F_WCE);
83
}
84
@@ -XXX,XX +XXX,XX @@ static Property virtio_blk_properties[] = {
85
DEFINE_BLOCK_ERROR_PROPERTIES(VirtIOBlock, conf.conf),
86
DEFINE_BLOCK_CHS_PROPERTIES(VirtIOBlock, conf.conf),
87
DEFINE_PROP_STRING("serial", VirtIOBlock, conf.serial),
88
- DEFINE_PROP_BIT("config-wce", VirtIOBlock, conf.config_wce, 0, true),
89
+ DEFINE_PROP_BIT64("config-wce", VirtIOBlock, host_features,
90
+ VIRTIO_BLK_F_CONFIG_WCE, true),
91
#ifdef __linux__
92
- DEFINE_PROP_BIT("scsi", VirtIOBlock, conf.scsi, 0, false),
93
+ DEFINE_PROP_BIT64("scsi", VirtIOBlock, host_features,
94
+ VIRTIO_BLK_F_SCSI, false),
95
#endif
96
DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0,
97
true),
98
--
99
2.20.1
Deleted patch
1
From: Stefano Garzarella <sgarzare@redhat.com>
2
1
3
The size of data in the virtio_blk_request must be a multiple
4
of 512 bytes for IN and OUT requests, or a multiple of the size
5
of struct virtio_blk_discard_write_zeroes for DISCARD and
6
WRITE_ZEROES requests.
7
8
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
9
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
10
Reviewed-by: Thomas Huth <thuth@redhat.com>
11
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
12
Message-id: 20190221103314.58500-8-sgarzare@redhat.com
13
Message-Id: <20190221103314.58500-8-sgarzare@redhat.com>
14
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
15
---
16
tests/virtio-blk-test.c | 15 ++++++++++++++-
17
1 file changed, 14 insertions(+), 1 deletion(-)
18
19
diff --git a/tests/virtio-blk-test.c b/tests/virtio-blk-test.c
20
index XXXXXXX..XXXXXXX 100644
21
--- a/tests/virtio-blk-test.c
22
+++ b/tests/virtio-blk-test.c
23
@@ -XXX,XX +XXX,XX @@ static uint64_t virtio_blk_request(QGuestAllocator *alloc, QVirtioDevice *d,
24
uint64_t addr;
25
uint8_t status = 0xFF;
26
27
- g_assert_cmpuint(data_size % 512, ==, 0);
28
+ switch (req->type) {
29
+ case VIRTIO_BLK_T_IN:
30
+ case VIRTIO_BLK_T_OUT:
31
+ g_assert_cmpuint(data_size % 512, ==, 0);
32
+ break;
33
+ case VIRTIO_BLK_T_DISCARD:
34
+ case VIRTIO_BLK_T_WRITE_ZEROES:
35
+ g_assert_cmpuint(data_size %
36
+ sizeof(struct virtio_blk_discard_write_zeroes), ==, 0);
37
+ break;
38
+ default:
39
+ g_assert_cmpuint(data_size, ==, 0);
40
+ }
41
+
42
addr = guest_alloc(alloc, sizeof(*req) + data_size);
43
44
virtio_blk_fix_request(d, req);
45
--
46
2.20.1
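The size rule that this assertion encodes is easy to restate outside of C; a tiny sketch (assuming the 16-byte size of struct virtio_blk_discard_write_zeroes from the virtio specification):

DWZ_SEG_SIZE = 16  # sizeof(struct virtio_blk_discard_write_zeroes): u64 + u32 + u32

def data_size_is_valid(req_type, data_size):
    """Per-request-type payload size check mirrored from the test."""
    if req_type in ('IN', 'OUT'):
        return data_size % 512 == 0
    if req_type in ('DISCARD', 'WRITE_ZEROES'):
        return data_size % DWZ_SEG_SIZE == 0
    return data_size == 0          # other request types carry no data payload

assert data_size_is_valid('IN', 4096)
assert data_size_is_valid('DISCARD', DWZ_SEG_SIZE)
assert not data_size_is_valid('OUT', 100)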