1 | The following changes since commit fc3dbb90f2eb069801bfb4cfe9cbc83cf9c5f4a9: | 1 | The following changes since commit 928173659d6e5dc368284f73f90ea1d129e1f57d: |
---|---|---|---|
2 | 2 | ||
3 | Merge remote-tracking branch 'remotes/jnsnow/tags/bitmaps-pull-request' into staging (2019-02-21 13:09:33 +0000) | 3 | Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20200130' into staging (2020-01-30 16:19:04 +0000) |
4 | 4 | ||
5 | are available in the Git repository at: | 5 | are available in the Git repository at: |
6 | 6 | ||
7 | git://github.com/stefanha/qemu.git tags/block-pull-request | 7 | https://github.com/stefanha/qemu.git tags/block-pull-request |
8 | 8 | ||
9 | for you to fetch changes up to 9a9f4b74fa547b68edb38fa414999836770a4735: | 9 | for you to fetch changes up to 8dff69b9415b4287e900358744b732195e1ab2e2: |
10 | 10 | ||
11 | tests/virtio-blk: add test for DISCARD command (2019-02-22 09:42:17 +0000) | 11 | tests/qemu-iotests: use AIOMODE with various tests (2020-01-30 21:01:40 +0000) |
12 | 12 | ||
13 | ---------------------------------------------------------------- | 13 | ---------------------------------------------------------------- |
14 | Pull request | 14 | Pull request |
15 | 15 | ||
16 | ---------------------------------------------------------------- | 16 | ---------------------------------------------------------------- |
17 | 17 | ||
18 | Stefano Garzarella (10): | 18 | Aarushi Mehta (15): |
19 | virtio-blk: add acct_failed param to virtio_blk_handle_rw_error() | 19 | configure: permit use of io_uring |
20 | virtio-blk: add host_features field in VirtIOBlock | 20 | qapi/block-core: add option for io_uring |
21 | virtio-blk: add "discard" and "write-zeroes" properties | 21 | block/block: add BDRV flag for io_uring |
22 | virtio-net: make VirtIOFeature usable for other virtio devices | 22 | block/io_uring: implements interfaces for io_uring |
23 | virtio-blk: set config size depending on the features enabled | 23 | stubs: add stubs for io_uring interface |
24 | virtio-blk: add DISCARD and WRITE_ZEROES features | 24 | util/async: add aio interfaces for io_uring |
25 | tests/virtio-blk: change assert on data_size in virtio_blk_request() | 25 | blockdev: adds bdrv_parse_aio to use io_uring |
26 | tests/virtio-blk: add virtio_blk_fix_dwz_hdr() function | 26 | block/file-posix.c: extend to use io_uring |
27 | tests/virtio-blk: add test for WRITE_ZEROES command | 27 | block: add trace events for io_uring |
28 | tests/virtio-blk: add test for DISCARD command | 28 | block/io_uring: adds userspace completion polling |
29 | qemu-io: adds option to use aio engine | ||
30 | qemu-img: adds option to use aio engine for benchmarking | ||
31 | qemu-nbd: adds option for aio engines | ||
32 | tests/qemu-iotests: enable testing with aio options | ||
33 | tests/qemu-iotests: use AIOMODE with various tests | ||
29 | 34 | ||
30 | Vladimir Sementsov-Ogievskiy (17): | 35 | Paolo Bonzini (3): |
31 | block: enhance QEMUIOVector structure | 36 | block: eliminate BDRV_REQ_NO_SERIALISING |
32 | block/io: use qemu_iovec_init_buf | 37 | block/io: wait for serialising requests when a request becomes |
33 | block/block-backend: use QEMU_IOVEC_INIT_BUF | 38 | serialising |
34 | block/backup: use qemu_iovec_init_buf | 39 | block/io: take bs->reqs_lock in bdrv_mark_request_serialising |
35 | block/commit: use QEMU_IOVEC_INIT_BUF | ||
36 | block/stream: use QEMU_IOVEC_INIT_BUF | ||
37 | block/parallels: use QEMU_IOVEC_INIT_BUF | ||
38 | block/qcow: use qemu_iovec_init_buf | ||
39 | block/qcow2: use qemu_iovec_init_buf | ||
40 | block/qed: use qemu_iovec_init_buf | ||
41 | block/vmdk: use qemu_iovec_init_buf | ||
42 | qemu-img: use qemu_iovec_init_buf | ||
43 | migration/block: use qemu_iovec_init_buf | ||
44 | tests/test-bdrv-drain: use QEMU_IOVEC_INIT_BUF | ||
45 | hw/ide: drop iov field from IDEState | ||
46 | hw/ide: drop iov field from IDEBufferedRequest | ||
47 | hw/ide: drop iov field from IDEDMA | ||
48 | 40 | ||
49 | include/hw/ide/internal.h | 3 - | 41 | MAINTAINERS | 9 + |
50 | include/hw/virtio/virtio-blk.h | 6 +- | 42 | block.c | 22 ++ |
51 | include/hw/virtio/virtio.h | 15 ++ | 43 | block/Makefile.objs | 3 + |
52 | include/qemu/iov.h | 64 ++++++++- | 44 | block/file-posix.c | 99 ++++++-- |
53 | block/backup.c | 5 +- | 45 | block/io.c | 162 +++++++------ |
54 | block/block-backend.c | 13 +- | 46 | block/io_uring.c | 433 ++++++++++++++++++++++++++++++++++ |
55 | block/commit.c | 7 +- | 47 | block/trace-events | 12 + |
56 | block/io.c | 89 +++--------- | 48 | blockdev.c | 12 +- |
57 | block/parallels.c | 13 +- | 49 | configure | 27 +++ |
58 | block/qcow.c | 21 +-- | 50 | docs/interop/qemu-nbd.rst | 4 +- |
59 | block/qcow2.c | 12 +- | 51 | include/block/aio.h | 16 +- |
60 | block/qed-table.c | 16 +-- | 52 | include/block/block.h | 14 +- |
61 | block/qed.c | 31 ++--- | 53 | include/block/block_int.h | 3 +- |
62 | block/stream.c | 7 +- | 54 | include/block/raw-aio.h | 12 + |
63 | block/vmdk.c | 7 +- | 55 | qapi/block-core.json | 4 +- |
64 | hw/block/virtio-blk.c | 245 ++++++++++++++++++++++++++++++--- | 56 | qemu-img-cmds.hx | 4 +- |
65 | hw/core/machine.c | 2 + | 57 | qemu-img.c | 11 +- |
66 | hw/ide/atapi.c | 14 +- | 58 | qemu-img.texi | 5 +- |
67 | hw/ide/core.c | 19 ++- | 59 | qemu-io.c | 25 +- |
68 | hw/net/virtio-net.c | 31 +---- | 60 | qemu-nbd.c | 12 +- |
69 | hw/virtio/virtio.c | 15 ++ | 61 | stubs/Makefile.objs | 1 + |
70 | migration/block.c | 10 +- | 62 | stubs/io_uring.c | 32 +++ |
71 | qemu-img.c | 10 +- | 63 | tests/qemu-iotests/028 | 2 +- |
72 | tests/test-bdrv-drain.c | 29 +--- | 64 | tests/qemu-iotests/058 | 2 +- |
73 | tests/virtio-blk-test.c | 127 ++++++++++++++++- | 65 | tests/qemu-iotests/089 | 4 +- |
74 | 25 files changed, 525 insertions(+), 286 deletions(-) | 66 | tests/qemu-iotests/091 | 4 +- |
67 | tests/qemu-iotests/109 | 2 +- | ||
68 | tests/qemu-iotests/147 | 5 +- | ||
69 | tests/qemu-iotests/181 | 8 +- | ||
70 | tests/qemu-iotests/183 | 4 +- | ||
71 | tests/qemu-iotests/185 | 10 +- | ||
72 | tests/qemu-iotests/200 | 2 +- | ||
73 | tests/qemu-iotests/201 | 8 +- | ||
74 | tests/qemu-iotests/check | 15 +- | ||
75 | tests/qemu-iotests/common.rc | 14 ++ | ||
76 | tests/qemu-iotests/iotests.py | 12 +- | ||
77 | util/async.c | 36 +++ | ||
78 | 37 files changed, 878 insertions(+), 172 deletions(-) | ||
79 | create mode 100644 block/io_uring.c | ||
80 | create mode 100644 stubs/io_uring.c | ||
75 | 81 | ||
76 | -- | 82 | -- |
77 | 2.20.1 | 83 | 2.24.1 |
78 | 84 | ||
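The headline addition in the new series (right-hand column) is an io_uring AIO engine for the block layer, probed via liburing in configure and exposed through the QAPI BlockdevAioOptions enum. For readers who have not used the kernel interface before, here is a minimal, self-contained liburing sketch -- illustrative only, and independent of QEMU's actual block/io_uring.c implementation -- showing the submit/complete cycle the new engine is built around:

/*
 * Minimal liburing example (not QEMU code): submit one vectored read
 * and reap its completion. Build with -luring; error handling is
 * deliberately abbreviated.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <liburing.h>

int main(int argc, char **argv)
{
    struct io_uring ring;
    struct io_uring_sqe *sqe;
    struct io_uring_cqe *cqe;
    char buf[4096];
    struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
    int fd;

    if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0) {
        return 1;
    }
    if (io_uring_queue_init(8, &ring, 0) < 0) {     /* 8-entry submission queue */
        return 1;
    }

    sqe = io_uring_get_sqe(&ring);                  /* grab a free SQE */
    io_uring_prep_readv(sqe, fd, &iov, 1, 0);       /* read 4 KiB at offset 0 */
    io_uring_submit(&ring);                         /* hand the SQE to the kernel */

    io_uring_wait_cqe(&ring, &cqe);                 /* block until it completes */
    printf("read returned %d\n", cqe->res);
    io_uring_cqe_seen(&ring, cqe);                  /* retire the completion entry */

    io_uring_queue_exit(&ring);
    return 0;
}

On the QEMU side this surfaces as a third BlockdevAioOptions value next to "threads" and "native" (aio=io_uring on file-backed blockdevs), and the qemu-io, qemu-img and qemu-nbd patches listed above add matching options so the tools can exercise the same engine.
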
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Use new qemu_iovec_init_buf() instead of | 3 | It is unused since commit 00e30f0 ("block/backup: use backup-top instead |
4 | qemu_iovec_init_external( ... , 1), which simplifies the code. | 4 | of write notifiers", 2019-10-01); drop it to simplify the code. |
5 | 5 | ||
6 | While we are here, use qemu_try_blockalign0() as well. | 6 | While at it, drop redundant assertions on flags. |
7 | 7 | ||
8 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
9 | Reviewed-by: Eric Blake <eblake@redhat.com> | 9 | Message-id: 1578495356-46219-2-git-send-email-pbonzini@redhat.com |
10 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | 10 | Message-Id: <1578495356-46219-2-git-send-email-pbonzini@redhat.com> |
11 | Message-id: 20190218140926.333779-3-vsementsov@virtuozzo.com | ||
12 | Message-Id: <20190218140926.333779-3-vsementsov@virtuozzo.com> | ||
13 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
14 | --- | 12 | --- |
15 | block/io.c | 89 ++++++++++++------------------------------------------ | 13 | block/io.c | 18 ++++-------------- |
16 | 1 file changed, 20 insertions(+), 69 deletions(-) | 14 | include/block/block.h | 12 ------------ |
15 | 2 files changed, 4 insertions(+), 26 deletions(-) | ||
17 | 16 | ||
18 | diff --git a/block/io.c b/block/io.c | 17 | diff --git a/block/io.c b/block/io.c |
19 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
20 | --- a/block/io.c | 19 | --- a/block/io.c |
21 | +++ b/block/io.c | 20 | +++ b/block/io.c |
22 | @@ -XXX,XX +XXX,XX @@ static int bdrv_prwv_co(BdrvChild *child, int64_t offset, | 21 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child, |
23 | static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf, | 22 | * potential fallback support, if we ever implement any read flags |
24 | int nb_sectors, bool is_write, BdrvRequestFlags flags) | 23 | * to pass through to drivers. For now, there aren't any |
25 | { | 24 | * passthrough flags. */ |
26 | - QEMUIOVector qiov; | 25 | - assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ | |
27 | - struct iovec iov = { | 26 | - BDRV_REQ_PREFETCH))); |
28 | - .iov_base = (void *)buf, | 27 | + assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH))); |
29 | - .iov_len = nb_sectors * BDRV_SECTOR_SIZE, | 28 | |
30 | - }; | 29 | /* Handle Copy on Read and associated serialisation */ |
31 | + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, | 30 | if (flags & BDRV_REQ_COPY_ON_READ) { |
32 | + nb_sectors * BDRV_SECTOR_SIZE); | 31 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child, |
33 | 32 | bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs)); | |
34 | if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) { | ||
35 | return -EINVAL; | ||
36 | } | 33 | } |
37 | 34 | ||
38 | - qemu_iovec_init_external(&qiov, &iov, 1); | 35 | - /* BDRV_REQ_SERIALISING is only for write operation */ |
39 | return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS, | 36 | - assert(!(flags & BDRV_REQ_SERIALISING)); |
40 | &qiov, is_write, flags); | 37 | - |
41 | } | 38 | - if (!(flags & BDRV_REQ_NO_SERIALISING)) { |
42 | @@ -XXX,XX +XXX,XX @@ int bdrv_write(BdrvChild *child, int64_t sector_num, | 39 | - bdrv_wait_serialising_requests(req); |
43 | int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset, | 40 | - } |
44 | int bytes, BdrvRequestFlags flags) | 41 | + bdrv_wait_serialising_requests(req); |
45 | { | 42 | |
46 | - QEMUIOVector qiov; | 43 | if (flags & BDRV_REQ_COPY_ON_READ) { |
47 | - struct iovec iov = { | 44 | int64_t pnum; |
48 | - .iov_base = NULL, | 45 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_preadv_part(BdrvChild *child, |
49 | - .iov_len = bytes, | 46 | bdrv_inc_in_flight(bs); |
50 | - }; | 47 | |
51 | + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes); | 48 | /* Don't do copy-on-read if we read data before write operation */ |
52 | 49 | - if (atomic_read(&bs->copy_on_read) && !(flags & BDRV_REQ_NO_SERIALISING)) { | |
53 | - qemu_iovec_init_external(&qiov, &iov, 1); | 50 | + if (atomic_read(&bs->copy_on_read)) { |
54 | return bdrv_prwv_co(child, offset, &qiov, true, | 51 | flags |= BDRV_REQ_COPY_ON_READ; |
55 | BDRV_REQ_ZERO_WRITE | flags); | ||
56 | } | ||
57 | @@ -XXX,XX +XXX,XX @@ int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov) | ||
58 | |||
59 | int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes) | ||
60 | { | ||
61 | - QEMUIOVector qiov; | ||
62 | - struct iovec iov = { | ||
63 | - .iov_base = (void *)buf, | ||
64 | - .iov_len = bytes, | ||
65 | - }; | ||
66 | + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); | ||
67 | |||
68 | if (bytes < 0) { | ||
69 | return -EINVAL; | ||
70 | } | 52 | } |
71 | 53 | ||
72 | - qemu_iovec_init_external(&qiov, &iov, 1); | 54 | @@ -XXX,XX +XXX,XX @@ bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes, |
73 | return bdrv_preadv(child, offset, &qiov); | 55 | return -EPERM; |
74 | } | ||
75 | |||
76 | @@ -XXX,XX +XXX,XX @@ int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov) | ||
77 | |||
78 | int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes) | ||
79 | { | ||
80 | - QEMUIOVector qiov; | ||
81 | - struct iovec iov = { | ||
82 | - .iov_base = (void *) buf, | ||
83 | - .iov_len = bytes, | ||
84 | - }; | ||
85 | + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); | ||
86 | |||
87 | if (bytes < 0) { | ||
88 | return -EINVAL; | ||
89 | } | 56 | } |
90 | 57 | ||
91 | - qemu_iovec_init_external(&qiov, &iov, 1); | 58 | - /* BDRV_REQ_NO_SERIALISING is only for read operation */ |
92 | return bdrv_pwritev(child, offset, &qiov); | 59 | - assert(!(flags & BDRV_REQ_NO_SERIALISING)); |
93 | } | 60 | assert(!(bs->open_flags & BDRV_O_INACTIVE)); |
94 | 61 | assert((bs->open_flags & BDRV_O_NO_IO) == 0); | |
95 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child, | 62 | assert(!(flags & ~BDRV_REQ_MASK)); |
96 | void *bounce_buffer; | 63 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_copy_range_internal( |
97 | 64 | ||
98 | BlockDriver *drv = bs->drv; | 65 | /* BDRV_REQ_SERIALISING is only for write operation */ |
99 | - struct iovec iov; | 66 | assert(!(read_flags & BDRV_REQ_SERIALISING)); |
100 | QEMUIOVector local_qiov; | 67 | - if (!(read_flags & BDRV_REQ_NO_SERIALISING)) { |
101 | int64_t cluster_offset; | 68 | - bdrv_wait_serialising_requests(&req); |
102 | int64_t cluster_bytes; | 69 | - } |
103 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child, | 70 | + bdrv_wait_serialising_requests(&req); |
104 | 71 | ||
105 | if (ret <= 0) { | 72 | ret = src->bs->drv->bdrv_co_copy_range_from(src->bs, |
106 | /* Must copy-on-read; use the bounce buffer */ | 73 | src, src_offset, |
107 | - iov.iov_base = bounce_buffer; | 74 | diff --git a/include/block/block.h b/include/block/block.h |
108 | - iov.iov_len = pnum = MIN(pnum, MAX_BOUNCE_BUFFER); | 75 | index XXXXXXX..XXXXXXX 100644 |
109 | - qemu_iovec_init_external(&local_qiov, &iov, 1); | 76 | --- a/include/block/block.h |
110 | + pnum = MIN(pnum, MAX_BOUNCE_BUFFER); | 77 | +++ b/include/block/block.h |
111 | + qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum); | 78 | @@ -XXX,XX +XXX,XX @@ typedef enum { |
112 | 79 | */ | |
113 | ret = bdrv_driver_preadv(bs, cluster_offset, pnum, | 80 | BDRV_REQ_MAY_UNMAP = 0x4, |
114 | &local_qiov, 0); | 81 | |
115 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, | 82 | - /* |
116 | { | 83 | - * The BDRV_REQ_NO_SERIALISING flag is only valid for reads and means that |
117 | BlockDriver *drv = bs->drv; | 84 | - * we don't want wait_serialising_requests() during the read operation. |
118 | QEMUIOVector qiov; | 85 | - * |
119 | - struct iovec iov = {0}; | 86 | - * This flag is used for backup copy-on-write operations, when we need to |
120 | + void *buf = NULL; | 87 | - * read old data before write (write notifier triggered). It is okay since |
121 | int ret = 0; | 88 | - * we already waited for other serializing requests in the initiating write |
122 | bool need_flush = false; | 89 | - * (see bdrv_aligned_pwritev), and it is necessary if the initiating write |
123 | int head = 0; | 90 | - * is already serializing (without the flag, the read would deadlock |
124 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, | 91 | - * waiting for the serialising write to complete). |
125 | need_flush = true; | 92 | - */ |
126 | } | 93 | - BDRV_REQ_NO_SERIALISING = 0x8, |
127 | num = MIN(num, max_transfer); | 94 | BDRV_REQ_FUA = 0x10, |
128 | - iov.iov_len = num; | 95 | BDRV_REQ_WRITE_COMPRESSED = 0x20, |
129 | - if (iov.iov_base == NULL) { | 96 | |
130 | - iov.iov_base = qemu_try_blockalign(bs, num); | ||
131 | - if (iov.iov_base == NULL) { | ||
132 | + if (buf == NULL) { | ||
133 | + buf = qemu_try_blockalign0(bs, num); | ||
134 | + if (buf == NULL) { | ||
135 | ret = -ENOMEM; | ||
136 | goto fail; | ||
137 | } | ||
138 | - memset(iov.iov_base, 0, num); | ||
139 | } | ||
140 | - qemu_iovec_init_external(&qiov, &iov, 1); | ||
141 | + qemu_iovec_init_buf(&qiov, buf, num); | ||
142 | |||
143 | ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags); | ||
144 | |||
145 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, | ||
146 | * all future requests. | ||
147 | */ | ||
148 | if (num < max_transfer) { | ||
149 | - qemu_vfree(iov.iov_base); | ||
150 | - iov.iov_base = NULL; | ||
151 | + qemu_vfree(buf); | ||
152 | + buf = NULL; | ||
153 | } | ||
154 | } | ||
155 | |||
156 | @@ -XXX,XX +XXX,XX @@ fail: | ||
157 | if (ret == 0 && need_flush) { | ||
158 | ret = bdrv_co_flush(bs); | ||
159 | } | ||
160 | - qemu_vfree(iov.iov_base); | ||
161 | + qemu_vfree(buf); | ||
162 | return ret; | ||
163 | } | ||
164 | |||
165 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child, | ||
166 | BlockDriverState *bs = child->bs; | ||
167 | uint8_t *buf = NULL; | ||
168 | QEMUIOVector local_qiov; | ||
169 | - struct iovec iov; | ||
170 | uint64_t align = bs->bl.request_alignment; | ||
171 | unsigned int head_padding_bytes, tail_padding_bytes; | ||
172 | int ret = 0; | ||
173 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child, | ||
174 | assert(flags & BDRV_REQ_ZERO_WRITE); | ||
175 | if (head_padding_bytes || tail_padding_bytes) { | ||
176 | buf = qemu_blockalign(bs, align); | ||
177 | - iov = (struct iovec) { | ||
178 | - .iov_base = buf, | ||
179 | - .iov_len = align, | ||
180 | - }; | ||
181 | - qemu_iovec_init_external(&local_qiov, &iov, 1); | ||
182 | + qemu_iovec_init_buf(&local_qiov, buf, align); | ||
183 | } | ||
184 | if (head_padding_bytes) { | ||
185 | uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes); | ||
186 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_pwritev(BdrvChild *child, | ||
187 | |||
188 | if (offset & (align - 1)) { | ||
189 | QEMUIOVector head_qiov; | ||
190 | - struct iovec head_iov; | ||
191 | |||
192 | mark_request_serialising(&req, align); | ||
193 | wait_serialising_requests(&req); | ||
194 | |||
195 | head_buf = qemu_blockalign(bs, align); | ||
196 | - head_iov = (struct iovec) { | ||
197 | - .iov_base = head_buf, | ||
198 | - .iov_len = align, | ||
199 | - }; | ||
200 | - qemu_iovec_init_external(&head_qiov, &head_iov, 1); | ||
201 | + qemu_iovec_init_buf(&head_qiov, head_buf, align); | ||
202 | |||
203 | bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); | ||
204 | ret = bdrv_aligned_preadv(child, &req, offset & ~(align - 1), align, | ||
205 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_pwritev(BdrvChild *child, | ||
206 | |||
207 | if ((offset + bytes) & (align - 1)) { | ||
208 | QEMUIOVector tail_qiov; | ||
209 | - struct iovec tail_iov; | ||
210 | size_t tail_bytes; | ||
211 | bool waited; | ||
212 | |||
213 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_pwritev(BdrvChild *child, | ||
214 | assert(!waited || !use_local_qiov); | ||
215 | |||
216 | tail_buf = qemu_blockalign(bs, align); | ||
217 | - tail_iov = (struct iovec) { | ||
218 | - .iov_base = tail_buf, | ||
219 | - .iov_len = align, | ||
220 | - }; | ||
221 | - qemu_iovec_init_external(&tail_qiov, &tail_iov, 1); | ||
222 | + qemu_iovec_init_buf(&tail_qiov, tail_buf, align); | ||
223 | |||
224 | bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); | ||
225 | ret = bdrv_aligned_preadv(child, &req, (offset + bytes) & ~(align - 1), | ||
226 | @@ -XXX,XX +XXX,XX @@ bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos, | ||
227 | int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, | ||
228 | int64_t pos, int size) | ||
229 | { | ||
230 | - QEMUIOVector qiov; | ||
231 | - struct iovec iov = { | ||
232 | - .iov_base = (void *) buf, | ||
233 | - .iov_len = size, | ||
234 | - }; | ||
235 | + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); | ||
236 | int ret; | ||
237 | |||
238 | - qemu_iovec_init_external(&qiov, &iov, 1); | ||
239 | - | ||
240 | ret = bdrv_writev_vmstate(bs, &qiov, pos); | ||
241 | if (ret < 0) { | ||
242 | return ret; | ||
243 | @@ -XXX,XX +XXX,XX @@ int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) | ||
244 | int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, | ||
245 | int64_t pos, int size) | ||
246 | { | ||
247 | - QEMUIOVector qiov; | ||
248 | - struct iovec iov = { | ||
249 | - .iov_base = buf, | ||
250 | - .iov_len = size, | ||
251 | - }; | ||
252 | + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size); | ||
253 | int ret; | ||
254 | |||
255 | - qemu_iovec_init_external(&qiov, &iov, 1); | ||
256 | ret = bdrv_readv_vmstate(bs, &qiov, pos); | ||
257 | if (ret < 0) { | ||
258 | return ret; | ||
259 | -- | 97 | -- |
260 | 2.20.1 | 98 | 2.24.1 |
261 | 99 | ||
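Seen in isolation, the refactoring that the left-hand series applies throughout block/io.c is purely mechanical: QEMU_IOVEC_INIT_BUF and qemu_iovec_init_buf() wrap a single caller-provided buffer in a QEMUIOVector, replacing the open-coded one-element struct iovec plus qemu_iovec_init_external() pair. A schematic before/after, using the QEMU-internal types (not a standalone program):

/* Before: declare an iovec, point it at the buffer, then wrap it. */
QEMUIOVector qiov;
struct iovec iov = {
    .iov_base = (void *)buf,
    .iov_len  = bytes,
};
qemu_iovec_init_external(&qiov, &iov, 1);

/* After: a single initializer at declaration time ... */
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

/* ... or the function form for a QEMUIOVector that already exists. */
qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);

Dropping the intermediate struct iovec is also what lets the later patches in that series remove the iov fields from IDEState, IDEBufferedRequest and IDEDMA.
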
1 | From: Stefano Garzarella <sgarzare@redhat.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Starting from the DISCARD and WRITE_ZEROES features, we use an array of | 3 | Marking without waiting would not result in actual serialising behavior. |
4 | VirtIOFeature (as virtio-net does) to properly set the config size | 4 | Thus, make a call to bdrv_mark_request_serialising sufficient for |
5 | depending on the features enabled. | 5 | serialisation to happen. |
6 | 6 | ||
7 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | 7 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
8 | Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> | 8 | Message-id: 1578495356-46219-3-git-send-email-pbonzini@redhat.com |
9 | Message-id: 20190221103314.58500-6-sgarzare@redhat.com | 9 | Message-Id: <1578495356-46219-3-git-send-email-pbonzini@redhat.com> |
10 | Message-Id: <20190221103314.58500-6-sgarzare@redhat.com> | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 10 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
12 | --- | 11 | --- |
13 | include/hw/virtio/virtio-blk.h | 1 + | 12 | block/file-posix.c | 1 - |
14 | hw/block/virtio-blk.c | 31 +++++++++++++++++++++++++------ | 13 | block/io.c | 40 +++++++++++++++++---------------------- |
15 | 2 files changed, 26 insertions(+), 6 deletions(-) | 14 | include/block/block_int.h | 3 +-- |
15 | 3 files changed, 18 insertions(+), 26 deletions(-) | ||
16 | 16 | ||
17 | diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h | 17 | diff --git a/block/file-posix.c b/block/file-posix.c |
18 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/include/hw/virtio/virtio-blk.h | 19 | --- a/block/file-posix.c |
20 | +++ b/include/hw/virtio/virtio-blk.h | 20 | +++ b/block/file-posix.c |
21 | @@ -XXX,XX +XXX,XX @@ typedef struct VirtIOBlock { | 21 | @@ -XXX,XX +XXX,XX @@ raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes, |
22 | bool dataplane_started; | 22 | req->overlap_bytes = req->bytes; |
23 | struct VirtIOBlockDataPlane *dataplane; | 23 | |
24 | uint64_t host_features; | 24 | bdrv_mark_request_serialising(req, bs->bl.request_alignment); |
25 | + size_t config_size; | 25 | - bdrv_wait_serialising_requests(req); |
26 | } VirtIOBlock; | 26 | } |
27 | 27 | #endif | |
28 | typedef struct VirtIOBlockReq { | 28 | |
29 | diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c | 29 | diff --git a/block/io.c b/block/io.c |
30 | index XXXXXXX..XXXXXXX 100644 | 30 | index XXXXXXX..XXXXXXX 100644 |
31 | --- a/hw/block/virtio-blk.c | 31 | --- a/block/io.c |
32 | +++ b/hw/block/virtio-blk.c | 32 | +++ b/block/io.c |
33 | @@ -XXX,XX +XXX,XX @@ | 33 | @@ -XXX,XX +XXX,XX @@ |
34 | #include "hw/virtio/virtio-bus.h" | 34 | #define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS) |
35 | #include "hw/virtio/virtio-access.h" | 35 | |
36 | 36 | static void bdrv_parent_cb_resize(BlockDriverState *bs); | |
37 | -/* We don't support discard yet, hide associated config fields. */ | 37 | +static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self); |
38 | +/* Config size before the discard support (hide associated config fields) */ | 38 | static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, |
39 | #define VIRTIO_BLK_CFG_SIZE offsetof(struct virtio_blk_config, \ | 39 | int64_t offset, int bytes, BdrvRequestFlags flags); |
40 | max_discard_sectors) | 40 | |
41 | +/* | 41 | @@ -XXX,XX +XXX,XX @@ static void tracked_request_begin(BdrvTrackedRequest *req, |
42 | + * Starting from the discard feature, we can use this array to properly | 42 | qemu_co_mutex_unlock(&bs->reqs_lock); |
43 | + * set the config size depending on the features enabled. | ||
44 | + */ | ||
45 | +static VirtIOFeature feature_sizes[] = { | ||
46 | + {.flags = 1ULL << VIRTIO_BLK_F_DISCARD, | ||
47 | + .end = virtio_endof(struct virtio_blk_config, discard_sector_alignment)}, | ||
48 | + {.flags = 1ULL << VIRTIO_BLK_F_WRITE_ZEROES, | ||
49 | + .end = virtio_endof(struct virtio_blk_config, write_zeroes_may_unmap)}, | ||
50 | + {} | ||
51 | +}; | ||
52 | + | ||
53 | +static void virtio_blk_set_config_size(VirtIOBlock *s, uint64_t host_features) | ||
54 | +{ | ||
55 | + s->config_size = MAX(VIRTIO_BLK_CFG_SIZE, | ||
56 | + virtio_feature_get_config_size(feature_sizes, host_features)); | ||
57 | + | ||
58 | + assert(s->config_size <= sizeof(struct virtio_blk_config)); | ||
59 | +} | ||
60 | |||
61 | static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq, | ||
62 | VirtIOBlockReq *req) | ||
63 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config) | ||
64 | blkcfg.alignment_offset = 0; | ||
65 | blkcfg.wce = blk_enable_write_cache(s->blk); | ||
66 | virtio_stw_p(vdev, &blkcfg.num_queues, s->conf.num_queues); | ||
67 | - memcpy(config, &blkcfg, VIRTIO_BLK_CFG_SIZE); | ||
68 | - QEMU_BUILD_BUG_ON(VIRTIO_BLK_CFG_SIZE > sizeof(blkcfg)); | ||
69 | + memcpy(config, &blkcfg, s->config_size); | ||
70 | } | 43 | } |
71 | 44 | ||
72 | static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config) | 45 | -void bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) |
73 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config) | 46 | +bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) |
74 | VirtIOBlock *s = VIRTIO_BLK(vdev); | 47 | { |
75 | struct virtio_blk_config blkcfg; | 48 | int64_t overlap_offset = req->offset & ~(align - 1); |
76 | 49 | uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align) | |
77 | - memcpy(&blkcfg, config, VIRTIO_BLK_CFG_SIZE); | 50 | @@ -XXX,XX +XXX,XX @@ void bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) |
78 | - QEMU_BUILD_BUG_ON(VIRTIO_BLK_CFG_SIZE > sizeof(blkcfg)); | 51 | |
79 | + memcpy(&blkcfg, config, s->config_size); | 52 | req->overlap_offset = MIN(req->overlap_offset, overlap_offset); |
80 | 53 | req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes); | |
81 | aio_context_acquire(blk_get_aio_context(s->blk)); | 54 | -} |
82 | blk_set_enable_write_cache(s->blk, blkcfg.wce != 0); | 55 | - |
83 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp) | 56 | -static bool is_request_serialising_and_aligned(BdrvTrackedRequest *req) |
84 | return; | 57 | -{ |
58 | - /* | ||
59 | - * If the request is serialising, overlap_offset and overlap_bytes are set, | ||
60 | - * so we can check if the request is aligned. Otherwise, don't care and | ||
61 | - * return false. | ||
62 | - */ | ||
63 | - | ||
64 | - return req->serialising && (req->offset == req->overlap_offset) && | ||
65 | - (req->bytes == req->overlap_bytes); | ||
66 | + return bdrv_wait_serialising_requests(req); | ||
67 | } | ||
68 | |||
69 | /** | ||
70 | @@ -XXX,XX +XXX,XX @@ void bdrv_dec_in_flight(BlockDriverState *bs) | ||
71 | bdrv_wakeup(bs); | ||
72 | } | ||
73 | |||
74 | -bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self) | ||
75 | +static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self) | ||
76 | { | ||
77 | BlockDriverState *bs = self->bs; | ||
78 | BdrvTrackedRequest *req; | ||
79 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child, | ||
80 | * it ensures that the CoR read and write operations are atomic and | ||
81 | * guest writes cannot interleave between them. */ | ||
82 | bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs)); | ||
83 | + } else { | ||
84 | + bdrv_wait_serialising_requests(req); | ||
85 | } | 85 | } |
86 | 86 | ||
87 | - virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK, VIRTIO_BLK_CFG_SIZE); | 87 | - bdrv_wait_serialising_requests(req); |
88 | + virtio_blk_set_config_size(s, s->host_features); | 88 | - |
89 | + | 89 | if (flags & BDRV_REQ_COPY_ON_READ) { |
90 | + virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK, s->config_size); | 90 | int64_t pnum; |
91 | 91 | ||
92 | s->blk = conf->conf.blk; | 92 | @@ -XXX,XX +XXX,XX @@ bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes, |
93 | s->rq = NULL; | 93 | assert(!(flags & ~BDRV_REQ_MASK)); |
94 | |||
95 | if (flags & BDRV_REQ_SERIALISING) { | ||
96 | - bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs)); | ||
97 | + waited = bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs)); | ||
98 | + /* | ||
99 | + * For a misaligned request we should have already waited earlier, | ||
100 | + * because we come after bdrv_padding_rmw_read which must be called | ||
101 | + * with the request already marked as serialising. | ||
102 | + */ | ||
103 | + assert(!waited || | ||
104 | + (req->offset == req->overlap_offset && | ||
105 | + req->bytes == req->overlap_bytes)); | ||
106 | + } else { | ||
107 | + bdrv_wait_serialising_requests(req); | ||
108 | } | ||
109 | |||
110 | - waited = bdrv_wait_serialising_requests(req); | ||
111 | - | ||
112 | - assert(!waited || !req->serialising || | ||
113 | - is_request_serialising_and_aligned(req)); | ||
114 | assert(req->overlap_offset <= offset); | ||
115 | assert(offset + bytes <= req->overlap_offset + req->overlap_bytes); | ||
116 | assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE); | ||
117 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child, | ||
118 | padding = bdrv_init_padding(bs, offset, bytes, &pad); | ||
119 | if (padding) { | ||
120 | bdrv_mark_request_serialising(req, align); | ||
121 | - bdrv_wait_serialising_requests(req); | ||
122 | |||
123 | bdrv_padding_rmw_read(child, req, &pad, true); | ||
124 | |||
125 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child, | ||
126 | |||
127 | if (bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad)) { | ||
128 | bdrv_mark_request_serialising(&req, align); | ||
129 | - bdrv_wait_serialising_requests(&req); | ||
130 | bdrv_padding_rmw_read(child, &req, &pad, false); | ||
131 | } | ||
132 | |||
133 | diff --git a/include/block/block_int.h b/include/block/block_int.h | ||
134 | index XXXXXXX..XXXXXXX 100644 | ||
135 | --- a/include/block/block_int.h | ||
136 | +++ b/include/block/block_int.h | ||
137 | @@ -XXX,XX +XXX,XX @@ extern unsigned int bdrv_drain_all_count; | ||
138 | void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent); | ||
139 | void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent); | ||
140 | |||
141 | -bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self); | ||
142 | -void bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align); | ||
143 | +bool coroutine_fn bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align); | ||
144 | BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs); | ||
145 | |||
146 | int get_tmp_filename(char *filename, int size); | ||
94 | -- | 147 | -- |
95 | 2.20.1 | 148 | 2.24.1 |
96 | 149 | ||
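The behavioural change in the right-hand patch is easy to miss among the deleted lines: bdrv_mark_request_serialising() now waits for overlapping requests itself and returns whether it had to wait, so callers such as the raw_do_pwrite_zeroes() path in block/file-posix.c drop their separate bdrv_wait_serialising_requests() call. A condensed sketch of the calling convention, using the QEMU-internal API shown in the diff (illustrative only):

/* Before this patch: marking and waiting were two separate steps. */
bdrv_mark_request_serialising(req, align);
bdrv_wait_serialising_requests(req);

/* After: one call both marks and waits; the return value reports
 * whether any overlapping request was actually waited for. */
bool waited = bdrv_mark_request_serialising(req, align);

The bdrv_co_write_req_prepare() hunk above uses that return value to assert that a misaligned serialising write has already performed its read-modify-write padding before reaching this point.
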
1 | From: Stefano Garzarella <sgarzare@redhat.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | This patch adds support for the DISCARD and WRITE_ZEROES commands, | 3 | bdrv_mark_request_serialising writes the overlap_offset and |
4 | which were introduced in the virtio-blk protocol to provide | 4 | overlap_bytes fields of BdrvTrackedRequest. Take bs->reqs_lock |
5 | better performance when using an SSD backend. | 5 | for the whole duration of it, and not just when waiting for |
6 | serialising requests, so that tracked_request_overlaps does not | ||
7 | look at a half-updated request. | ||
6 | 8 | ||
7 | We support only one segment per request since multiple segments | 9 | The new code does not unlock/relock around retries. This is unnecessary |
8 | are not widely used and there are no userspace APIs that allow | 10 | because a retry is always preceded by a CoQueue wait, which already |
9 | applications to submit multiple segments in a single call. | 11 | releases and reacquires bs->reqs_lock. |
10 | 12 | ||
11 | Reviewed-by: Michael S. Tsirkin <mst@redhat.com> | 13 | Reported-by: Peter Lieven <pl@kamp.de> |
12 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | 14 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
13 | Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> | 15 | Message-id: 1578495356-46219-4-git-send-email-pbonzini@redhat.com |
14 | Message-id: 20190221103314.58500-7-sgarzare@redhat.com | 16 | Message-Id: <1578495356-46219-4-git-send-email-pbonzini@redhat.com> |
15 | Message-Id: <20190221103314.58500-7-sgarzare@redhat.com> | ||
16 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 17 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
17 | --- | 18 | --- |
18 | include/hw/virtio/virtio-blk.h | 2 + | 19 | block/io.c | 112 ++++++++++++++++++++++++++++++----------------------- |
19 | hw/block/virtio-blk.c | 184 +++++++++++++++++++++++++++++++++ | 20 | 1 file changed, 63 insertions(+), 49 deletions(-) |
20 | 2 files changed, 186 insertions(+) | ||
21 | 21 | ||
22 | diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h | 22 | diff --git a/block/io.c b/block/io.c |
23 | index XXXXXXX..XXXXXXX 100644 | 23 | index XXXXXXX..XXXXXXX 100644 |
24 | --- a/include/hw/virtio/virtio-blk.h | 24 | --- a/block/io.c |
25 | +++ b/include/hw/virtio/virtio-blk.h | 25 | +++ b/block/io.c |
26 | @@ -XXX,XX +XXX,XX @@ struct VirtIOBlkConf | 26 | @@ -XXX,XX +XXX,XX @@ |
27 | uint32_t request_merging; | 27 | #define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS) |
28 | uint16_t num_queues; | 28 | |
29 | uint16_t queue_size; | 29 | static void bdrv_parent_cb_resize(BlockDriverState *bs); |
30 | + uint32_t max_discard_sectors; | 30 | -static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self); |
31 | + uint32_t max_write_zeroes_sectors; | 31 | static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, |
32 | }; | 32 | int64_t offset, int bytes, BdrvRequestFlags flags); |
33 | 33 | ||
34 | struct VirtIOBlockDataPlane; | 34 | @@ -XXX,XX +XXX,XX @@ static void tracked_request_begin(BdrvTrackedRequest *req, |
35 | diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c | 35 | qemu_co_mutex_unlock(&bs->reqs_lock); |
36 | index XXXXXXX..XXXXXXX 100644 | ||
37 | --- a/hw/block/virtio-blk.c | ||
38 | +++ b/hw/block/virtio-blk.c | ||
39 | @@ -XXX,XX +XXX,XX @@ out: | ||
40 | aio_context_release(blk_get_aio_context(s->conf.conf.blk)); | ||
41 | } | 36 | } |
42 | 37 | ||
43 | +static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret) | 38 | +static bool tracked_request_overlaps(BdrvTrackedRequest *req, |
39 | + int64_t offset, uint64_t bytes) | ||
44 | +{ | 40 | +{ |
45 | + VirtIOBlockReq *req = opaque; | 41 | + /* aaaa bbbb */ |
46 | + VirtIOBlock *s = req->dev; | 42 | + if (offset >= req->overlap_offset + req->overlap_bytes) { |
47 | + bool is_write_zeroes = (virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type) & | 43 | + return false; |
48 | + ~VIRTIO_BLK_T_BARRIER) == VIRTIO_BLK_T_WRITE_ZEROES; | ||
49 | + | ||
50 | + aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); | ||
51 | + if (ret) { | ||
52 | + if (virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) { | ||
53 | + goto out; | ||
54 | + } | ||
55 | + } | 44 | + } |
56 | + | 45 | + /* bbbb aaaa */ |
57 | + virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); | 46 | + if (req->overlap_offset >= offset + bytes) { |
58 | + if (is_write_zeroes) { | 47 | + return false; |
59 | + block_acct_done(blk_get_stats(s->blk), &req->acct); | ||
60 | + } | 48 | + } |
61 | + virtio_blk_free_request(req); | 49 | + return true; |
62 | + | ||
63 | +out: | ||
64 | + aio_context_release(blk_get_aio_context(s->conf.conf.blk)); | ||
65 | +} | 50 | +} |
66 | + | 51 | + |
67 | #ifdef __linux__ | 52 | +static bool coroutine_fn |
68 | 53 | +bdrv_wait_serialising_requests_locked(BlockDriverState *bs, | |
69 | typedef struct { | 54 | + BdrvTrackedRequest *self) |
70 | @@ -XXX,XX +XXX,XX @@ static bool virtio_blk_sect_range_ok(VirtIOBlock *dev, | ||
71 | return true; | ||
72 | } | ||
73 | |||
74 | +static uint8_t virtio_blk_handle_discard_write_zeroes(VirtIOBlockReq *req, | ||
75 | + struct virtio_blk_discard_write_zeroes *dwz_hdr, bool is_write_zeroes) | ||
76 | +{ | 55 | +{ |
77 | + VirtIOBlock *s = req->dev; | 56 | + BdrvTrackedRequest *req; |
78 | + VirtIODevice *vdev = VIRTIO_DEVICE(s); | 57 | + bool retry; |
79 | + uint64_t sector; | 58 | + bool waited = false; |
80 | + uint32_t num_sectors, flags, max_sectors; | ||
81 | + uint8_t err_status; | ||
82 | + int bytes; | ||
83 | + | 59 | + |
84 | + sector = virtio_ldq_p(vdev, &dwz_hdr->sector); | 60 | + do { |
85 | + num_sectors = virtio_ldl_p(vdev, &dwz_hdr->num_sectors); | 61 | + retry = false; |
86 | + flags = virtio_ldl_p(vdev, &dwz_hdr->flags); | 62 | + QLIST_FOREACH(req, &bs->tracked_requests, list) { |
87 | + max_sectors = is_write_zeroes ? s->conf.max_write_zeroes_sectors : | 63 | + if (req == self || (!req->serialising && !self->serialising)) { |
88 | + s->conf.max_discard_sectors; | 64 | + continue; |
65 | + } | ||
66 | + if (tracked_request_overlaps(req, self->overlap_offset, | ||
67 | + self->overlap_bytes)) | ||
68 | + { | ||
69 | + /* Hitting this means there was a reentrant request, for | ||
70 | + * example, a block driver issuing nested requests. This must | ||
71 | + * never happen since it means deadlock. | ||
72 | + */ | ||
73 | + assert(qemu_coroutine_self() != req->co); | ||
89 | + | 74 | + |
90 | + /* | 75 | + /* If the request is already (indirectly) waiting for us, or |
91 | + * max_sectors is at most BDRV_REQUEST_MAX_SECTORS, this check | 76 | + * will wait for us as soon as it wakes up, then just go on |
92 | + * make us sure that "num_sectors << BDRV_SECTOR_BITS" can fit in | 77 | + * (instead of producing a deadlock in the former case). */ |
93 | + * the integer variable. | 78 | + if (!req->waiting_for) { |
94 | + */ | 79 | + self->waiting_for = req; |
95 | + if (unlikely(num_sectors > max_sectors)) { | 80 | + qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock); |
96 | + err_status = VIRTIO_BLK_S_IOERR; | 81 | + self->waiting_for = NULL; |
97 | + goto err; | 82 | + retry = true; |
98 | + } | 83 | + waited = true; |
99 | + | 84 | + break; |
100 | + bytes = num_sectors << BDRV_SECTOR_BITS; | 85 | + } |
101 | + | 86 | + } |
102 | + if (unlikely(!virtio_blk_sect_range_ok(s, sector, bytes))) { | ||
103 | + err_status = VIRTIO_BLK_S_IOERR; | ||
104 | + goto err; | ||
105 | + } | ||
106 | + | ||
107 | + /* | ||
108 | + * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for discard | ||
109 | + * and write zeroes commands if any unknown flag is set. | ||
110 | + */ | ||
111 | + if (unlikely(flags & ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) { | ||
112 | + err_status = VIRTIO_BLK_S_UNSUPP; | ||
113 | + goto err; | ||
114 | + } | ||
115 | + | ||
116 | + if (is_write_zeroes) { /* VIRTIO_BLK_T_WRITE_ZEROES */ | ||
117 | + int blk_aio_flags = 0; | ||
118 | + | ||
119 | + if (flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) { | ||
120 | + blk_aio_flags |= BDRV_REQ_MAY_UNMAP; | ||
121 | + } | 87 | + } |
122 | + | 88 | + } while (retry); |
123 | + block_acct_start(blk_get_stats(s->blk), &req->acct, bytes, | 89 | + return waited; |
124 | + BLOCK_ACCT_WRITE); | ||
125 | + | ||
126 | + blk_aio_pwrite_zeroes(s->blk, sector << BDRV_SECTOR_BITS, | ||
127 | + bytes, blk_aio_flags, | ||
128 | + virtio_blk_discard_write_zeroes_complete, req); | ||
129 | + } else { /* VIRTIO_BLK_T_DISCARD */ | ||
130 | + /* | ||
131 | + * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for | ||
132 | + * discard commands if the unmap flag is set. | ||
133 | + */ | ||
134 | + if (unlikely(flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) { | ||
135 | + err_status = VIRTIO_BLK_S_UNSUPP; | ||
136 | + goto err; | ||
137 | + } | ||
138 | + | ||
139 | + blk_aio_pdiscard(s->blk, sector << BDRV_SECTOR_BITS, bytes, | ||
140 | + virtio_blk_discard_write_zeroes_complete, req); | ||
141 | + } | ||
142 | + | ||
143 | + return VIRTIO_BLK_S_OK; | ||
144 | + | ||
145 | +err: | ||
146 | + if (is_write_zeroes) { | ||
147 | + block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE); | ||
148 | + } | ||
149 | + return err_status; | ||
150 | +} | 90 | +} |
151 | + | 91 | + |
152 | static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) | 92 | bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) |
153 | { | 93 | { |
154 | uint32_t type; | 94 | + BlockDriverState *bs = req->bs; |
155 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) | 95 | int64_t overlap_offset = req->offset & ~(align - 1); |
156 | virtio_blk_free_request(req); | 96 | uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align) |
157 | break; | 97 | - overlap_offset; |
98 | + bool waited; | ||
99 | |||
100 | + qemu_co_mutex_lock(&bs->reqs_lock); | ||
101 | if (!req->serialising) { | ||
102 | atomic_inc(&req->bs->serialising_in_flight); | ||
103 | req->serialising = true; | ||
104 | @@ -XXX,XX +XXX,XX @@ bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) | ||
105 | |||
106 | req->overlap_offset = MIN(req->overlap_offset, overlap_offset); | ||
107 | req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes); | ||
108 | - return bdrv_wait_serialising_requests(req); | ||
109 | + waited = bdrv_wait_serialising_requests_locked(bs, req); | ||
110 | + qemu_co_mutex_unlock(&bs->reqs_lock); | ||
111 | + return waited; | ||
112 | } | ||
113 | |||
114 | /** | ||
115 | @@ -XXX,XX +XXX,XX @@ static int bdrv_get_cluster_size(BlockDriverState *bs) | ||
158 | } | 116 | } |
159 | + /* | ||
160 | + * VIRTIO_BLK_T_DISCARD and VIRTIO_BLK_T_WRITE_ZEROES are defined with | ||
161 | + * VIRTIO_BLK_T_OUT flag set. We masked this flag in the switch statement, | ||
162 | + * so we must mask it for these requests, then we will check if it is set. | ||
163 | + */ | ||
164 | + case VIRTIO_BLK_T_DISCARD & ~VIRTIO_BLK_T_OUT: | ||
165 | + case VIRTIO_BLK_T_WRITE_ZEROES & ~VIRTIO_BLK_T_OUT: | ||
166 | + { | ||
167 | + struct virtio_blk_discard_write_zeroes dwz_hdr; | ||
168 | + size_t out_len = iov_size(out_iov, out_num); | ||
169 | + bool is_write_zeroes = (type & ~VIRTIO_BLK_T_BARRIER) == | ||
170 | + VIRTIO_BLK_T_WRITE_ZEROES; | ||
171 | + uint8_t err_status; | ||
172 | + | ||
173 | + /* | ||
174 | + * Unsupported if VIRTIO_BLK_T_OUT is not set or the request contains | ||
175 | + * more than one segment. | ||
176 | + */ | ||
177 | + if (unlikely(!(type & VIRTIO_BLK_T_OUT) || | ||
178 | + out_len > sizeof(dwz_hdr))) { | ||
179 | + virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP); | ||
180 | + virtio_blk_free_request(req); | ||
181 | + return 0; | ||
182 | + } | ||
183 | + | ||
184 | + if (unlikely(iov_to_buf(out_iov, out_num, 0, &dwz_hdr, | ||
185 | + sizeof(dwz_hdr)) != sizeof(dwz_hdr))) { | ||
186 | + virtio_error(vdev, "virtio-blk discard/write_zeroes header" | ||
187 | + " too short"); | ||
188 | + return -1; | ||
189 | + } | ||
190 | + | ||
191 | + err_status = virtio_blk_handle_discard_write_zeroes(req, &dwz_hdr, | ||
192 | + is_write_zeroes); | ||
193 | + if (err_status != VIRTIO_BLK_S_OK) { | ||
194 | + virtio_blk_req_complete(req, err_status); | ||
195 | + virtio_blk_free_request(req); | ||
196 | + } | ||
197 | + | ||
198 | + break; | ||
199 | + } | ||
200 | default: | ||
201 | virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP); | ||
202 | virtio_blk_free_request(req); | ||
203 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config) | ||
204 | blkcfg.alignment_offset = 0; | ||
205 | blkcfg.wce = blk_enable_write_cache(s->blk); | ||
206 | virtio_stw_p(vdev, &blkcfg.num_queues, s->conf.num_queues); | ||
207 | + if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD)) { | ||
208 | + virtio_stl_p(vdev, &blkcfg.max_discard_sectors, | ||
209 | + s->conf.max_discard_sectors); | ||
210 | + virtio_stl_p(vdev, &blkcfg.discard_sector_alignment, | ||
211 | + blk_size >> BDRV_SECTOR_BITS); | ||
212 | + /* | ||
213 | + * We support only one segment per request since multiple segments | ||
214 | + * are not widely used and there are no userspace APIs that allow | ||
215 | + * applications to submit multiple segments in a single call. | ||
216 | + */ | ||
217 | + virtio_stl_p(vdev, &blkcfg.max_discard_seg, 1); | ||
218 | + } | ||
219 | + if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES)) { | ||
220 | + virtio_stl_p(vdev, &blkcfg.max_write_zeroes_sectors, | ||
221 | + s->conf.max_write_zeroes_sectors); | ||
222 | + blkcfg.write_zeroes_may_unmap = 1; | ||
223 | + virtio_stl_p(vdev, &blkcfg.max_write_zeroes_seg, 1); | ||
224 | + } | ||
225 | memcpy(config, &blkcfg, s->config_size); | ||
226 | } | 117 | } |
227 | 118 | ||
228 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp) | 119 | -static bool tracked_request_overlaps(BdrvTrackedRequest *req, |
229 | return; | 120 | - int64_t offset, uint64_t bytes) |
121 | -{ | ||
122 | - /* aaaa bbbb */ | ||
123 | - if (offset >= req->overlap_offset + req->overlap_bytes) { | ||
124 | - return false; | ||
125 | - } | ||
126 | - /* bbbb aaaa */ | ||
127 | - if (req->overlap_offset >= offset + bytes) { | ||
128 | - return false; | ||
129 | - } | ||
130 | - return true; | ||
131 | -} | ||
132 | - | ||
133 | void bdrv_inc_in_flight(BlockDriverState *bs) | ||
134 | { | ||
135 | atomic_inc(&bs->in_flight); | ||
136 | @@ -XXX,XX +XXX,XX @@ void bdrv_dec_in_flight(BlockDriverState *bs) | ||
137 | static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self) | ||
138 | { | ||
139 | BlockDriverState *bs = self->bs; | ||
140 | - BdrvTrackedRequest *req; | ||
141 | - bool retry; | ||
142 | bool waited = false; | ||
143 | |||
144 | if (!atomic_read(&bs->serialising_in_flight)) { | ||
145 | return false; | ||
230 | } | 146 | } |
231 | 147 | ||
232 | + if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD) && | 148 | - do { |
233 | + (!conf->max_discard_sectors || | 149 | - retry = false; |
234 | + conf->max_discard_sectors > BDRV_REQUEST_MAX_SECTORS)) { | 150 | - qemu_co_mutex_lock(&bs->reqs_lock); |
235 | + error_setg(errp, "invalid max-discard-sectors property (%" PRIu32 ")" | 151 | - QLIST_FOREACH(req, &bs->tracked_requests, list) { |
236 | + ", must be between 1 and %d", | 152 | - if (req == self || (!req->serialising && !self->serialising)) { |
237 | + conf->max_discard_sectors, (int)BDRV_REQUEST_MAX_SECTORS); | 153 | - continue; |
238 | + return; | 154 | - } |
239 | + } | 155 | - if (tracked_request_overlaps(req, self->overlap_offset, |
240 | + | 156 | - self->overlap_bytes)) |
241 | + if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES) && | 157 | - { |
242 | + (!conf->max_write_zeroes_sectors || | 158 | - /* Hitting this means there was a reentrant request, for |
243 | + conf->max_write_zeroes_sectors > BDRV_REQUEST_MAX_SECTORS)) { | 159 | - * example, a block driver issuing nested requests. This must |
244 | + error_setg(errp, "invalid max-write-zeroes-sectors property (%" PRIu32 | 160 | - * never happen since it means deadlock. |
245 | + "), must be between 1 and %d", | 161 | - */ |
246 | + conf->max_write_zeroes_sectors, | 162 | - assert(qemu_coroutine_self() != req->co); |
247 | + (int)BDRV_REQUEST_MAX_SECTORS); | 163 | - |
248 | + return; | 164 | - /* If the request is already (indirectly) waiting for us, or |
249 | + } | 165 | - * will wait for us as soon as it wakes up, then just go on |
250 | + | 166 | - * (instead of producing a deadlock in the former case). */ |
251 | virtio_blk_set_config_size(s, s->host_features); | 167 | - if (!req->waiting_for) { |
252 | 168 | - self->waiting_for = req; | |
253 | virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK, s->config_size); | 169 | - qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock); |
254 | @@ -XXX,XX +XXX,XX @@ static Property virtio_blk_properties[] = { | 170 | - self->waiting_for = NULL; |
255 | VIRTIO_BLK_F_DISCARD, true), | 171 | - retry = true; |
256 | DEFINE_PROP_BIT64("write-zeroes", VirtIOBlock, host_features, | 172 | - waited = true; |
257 | VIRTIO_BLK_F_WRITE_ZEROES, true), | 173 | - break; |
258 | + DEFINE_PROP_UINT32("max-discard-sectors", VirtIOBlock, | 174 | - } |
259 | + conf.max_discard_sectors, BDRV_REQUEST_MAX_SECTORS), | 175 | - } |
260 | + DEFINE_PROP_UINT32("max-write-zeroes-sectors", VirtIOBlock, | 176 | - } |
261 | + conf.max_write_zeroes_sectors, BDRV_REQUEST_MAX_SECTORS), | 177 | - qemu_co_mutex_unlock(&bs->reqs_lock); |
262 | DEFINE_PROP_END_OF_LIST(), | 178 | - } while (retry); |
263 | }; | 179 | + qemu_co_mutex_lock(&bs->reqs_lock); |
264 | 180 | + waited = bdrv_wait_serialising_requests_locked(bs, self); | |
181 | + qemu_co_mutex_unlock(&bs->reqs_lock); | ||
182 | |||
183 | return waited; | ||
184 | } | ||
265 | -- | 185 | -- |
266 | 2.20.1 | 186 | 2.24.1 |
267 | 187 | ||
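The locking argument in the right-hand patch rests on the interval test that tracked_request_overlaps() performs on each request's [overlap_offset, overlap_offset + overlap_bytes) range: holding bs->reqs_lock for the whole of bdrv_mark_request_serialising() guarantees no other coroutine evaluates that test against half-updated fields. The predicate itself is the usual half-open range overlap check; here it is as a small standalone illustration (plain C, outside QEMU) with a couple of sanity asserts:

/* Standalone restatement of the overlap predicate used by the tracked
 * request machinery. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool ranges_overlap(uint64_t a_off, uint64_t a_bytes,
                           uint64_t b_off, uint64_t b_bytes)
{
    /* aaaa bbbb : b starts at or after a ends */
    if (b_off >= a_off + a_bytes) {
        return false;
    }
    /* bbbb aaaa : a starts at or after b ends */
    if (a_off >= b_off + b_bytes) {
        return false;
    }
    return true;
}

int main(void)
{
    assert(ranges_overlap(0, 4096, 2048, 4096));   /* partial overlap */
    assert(!ranges_overlap(0, 4096, 4096, 4096));  /* adjacent ranges do not overlap */
    assert(ranges_overlap(512, 512, 0, 4096));     /* containment counts as overlap */
    return 0;
}
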
1 | From: Stefano Garzarella <sgarzare@redhat.com> | 1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> |
---|---|---|---|
2 | 2 | ||
3 | If the DISCARD feature is enabled, we try this command in | 3 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> |
4 | test_basic(), checking only the status returned by the request. | 4 | Reviewed-by: Maxim Levitsky <maximlevitsky@gmail.com> |
5 | 5 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> | |
6 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | 6 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
7 | Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> | 7 | Message-id: 20200120141858.587874-2-stefanha@redhat.com |
8 | Message-id: 20190221103314.58500-11-sgarzare@redhat.com | 8 | Message-Id: <20200120141858.587874-2-stefanha@redhat.com> |
9 | Message-Id: <20190221103314.58500-11-sgarzare@redhat.com> | ||
10 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 9 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
11 | --- | 10 | --- |
12 | tests/virtio-blk-test.c | 27 +++++++++++++++++++++++++++ | 11 | configure | 27 +++++++++++++++++++++++++++ |
13 | 1 file changed, 27 insertions(+) | 12 | 1 file changed, 27 insertions(+) |
14 | 13 | ||
15 | diff --git a/tests/virtio-blk-test.c b/tests/virtio-blk-test.c | 14 | diff --git a/configure b/configure |
16 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100755 |
17 | --- a/tests/virtio-blk-test.c | 16 | --- a/configure |
18 | +++ b/tests/virtio-blk-test.c | 17 | +++ b/configure |
19 | @@ -XXX,XX +XXX,XX @@ static void test_basic(QVirtioDevice *dev, QGuestAllocator *alloc, | 18 | @@ -XXX,XX +XXX,XX @@ xen="" |
20 | guest_free(alloc, req_addr); | 19 | xen_ctrl_version="" |
21 | } | 20 | xen_pci_passthrough="" |
22 | 21 | linux_aio="" | |
23 | + if (features & (1u << VIRTIO_BLK_F_DISCARD)) { | 22 | +linux_io_uring="" |
24 | + struct virtio_blk_discard_write_zeroes dwz_hdr; | 23 | cap_ng="" |
24 | attr="" | ||
25 | libattr="" | ||
26 | @@ -XXX,XX +XXX,XX @@ for opt do | ||
27 | ;; | ||
28 | --enable-linux-aio) linux_aio="yes" | ||
29 | ;; | ||
30 | + --disable-linux-io-uring) linux_io_uring="no" | ||
31 | + ;; | ||
32 | + --enable-linux-io-uring) linux_io_uring="yes" | ||
33 | + ;; | ||
34 | --disable-attr) attr="no" | ||
35 | ;; | ||
36 | --enable-attr) attr="yes" | ||
37 | @@ -XXX,XX +XXX,XX @@ disabled with --disable-FEATURE, default is enabled if available: | ||
38 | vde support for vde network | ||
39 | netmap support for netmap network | ||
40 | linux-aio Linux AIO support | ||
41 | + linux-io-uring Linux io_uring support | ||
42 | cap-ng libcap-ng support | ||
43 | attr attr and xattr support | ||
44 | vhost-net vhost-net kernel acceleration support | ||
45 | @@ -XXX,XX +XXX,XX @@ EOF | ||
46 | linux_aio=no | ||
47 | fi | ||
48 | fi | ||
49 | +########################################## | ||
50 | +# linux-io-uring probe | ||
25 | + | 51 | + |
26 | + req.type = VIRTIO_BLK_T_DISCARD; | 52 | +if test "$linux_io_uring" != "no" ; then |
27 | + req.data = (char *) &dwz_hdr; | 53 | + if $pkg_config liburing; then |
28 | + dwz_hdr.sector = 0; | 54 | + linux_io_uring_cflags=$($pkg_config --cflags liburing) |
29 | + dwz_hdr.num_sectors = 1; | 55 | + linux_io_uring_libs=$($pkg_config --libs liburing) |
30 | + dwz_hdr.flags = 0; | 56 | + linux_io_uring=yes |
31 | + | 57 | + else |
32 | + virtio_blk_fix_dwz_hdr(dev, &dwz_hdr); | 58 | + if test "$linux_io_uring" = "yes" ; then |
33 | + | 59 | + feature_not_found "linux io_uring" "Install liburing devel" |
34 | + req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr)); | 60 | + fi |
35 | + | 61 | + linux_io_uring=no |
36 | + free_head = qvirtqueue_add(vq, req_addr, 16, false, true); | 62 | + fi |
37 | + qvirtqueue_add(vq, req_addr + 16, sizeof(dwz_hdr), false, true); | 63 | +fi |
38 | + qvirtqueue_add(vq, req_addr + 16 + sizeof(dwz_hdr), 1, true, false); | 64 | |
39 | + | 65 | ########################################## |
40 | + qvirtqueue_kick(dev, vq, free_head); | 66 | # TPM emulation is only on POSIX |
41 | + | 67 | @@ -XXX,XX +XXX,XX @@ echo "PIE $pie" |
42 | + qvirtio_wait_used_elem(dev, vq, free_head, NULL, | 68 | echo "vde support $vde" |
43 | + QVIRTIO_BLK_TIMEOUT_US); | 69 | echo "netmap support $netmap" |
44 | + status = readb(req_addr + 16 + sizeof(dwz_hdr)); | 70 | echo "Linux AIO support $linux_aio" |
45 | + g_assert_cmpint(status, ==, 0); | 71 | +echo "Linux io_uring support $linux_io_uring" |
46 | + | 72 | echo "ATTR/XATTR support $attr" |
47 | + guest_free(alloc, req_addr); | 73 | echo "Install blobs $blobs" |
48 | + } | 74 | echo "KVM support $kvm" |
49 | + | 75 | @@ -XXX,XX +XXX,XX @@ fi |
50 | if (features & (1u << VIRTIO_F_ANY_LAYOUT)) { | 76 | if test "$linux_aio" = "yes" ; then |
51 | /* Write and read with 2 descriptor layout */ | 77 | echo "CONFIG_LINUX_AIO=y" >> $config_host_mak |
52 | /* Write request */ | 78 | fi |
79 | +if test "$linux_io_uring" = "yes" ; then | ||
80 | + echo "CONFIG_LINUX_IO_URING=y" >> $config_host_mak | ||
81 | + echo "LINUX_IO_URING_CFLAGS=$linux_io_uring_cflags" >> $config_host_mak | ||
82 | + echo "LINUX_IO_URING_LIBS=$linux_io_uring_libs" >> $config_host_mak | ||
83 | +fi | ||
84 | if test "$attr" = "yes" ; then | ||
85 | echo "CONFIG_ATTR=y" >> $config_host_mak | ||
86 | fi | ||
53 | -- | 87 | -- |
54 | 2.20.1 | 88 | 2.24.1 |
55 | 89 | ||
56 | 90 | diff view generated by jsdifflib |
1 | From: Stefano Garzarella <sgarzare@redhat.com> | 1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> |
---|---|---|---|
2 | 2 | ||
3 | In order to avoid migration issues, we enable DISCARD and | 3 | Since io_uring is the actual name of the Linux API, we use it as enum |
4 | WRITE_ZEROES features only for machine type >= 4.0 | 4 | value even though the QAPI schema conventions would prefer io-uring. |
5 | 5 | ||
6 | As discussed with Michael S. Tsirkin and Stefan Hajnoczi on the | 6 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> |
7 | list [1], DISCARD operation should not have security implications | 7 | Acked-by: Markus Armbruster <armbru@redhat.com> |
8 | (eg. page cache attacks), so we can enable it by default. | 8 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> |
9 | 9 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | |
10 | [1] https://lists.gnu.org/archive/html/qemu-devel/2019-02/msg00504.html | 10 | Message-id: 20200120141858.587874-3-stefanha@redhat.com |
11 | 11 | Message-Id: <20200120141858.587874-3-stefanha@redhat.com> | |
12 | Suggested-by: Dr. David Alan Gilbert <dgilbert@redhat.com> | ||
13 | Reviewed-by: Michael S. Tsirkin <mst@redhat.com> | ||
14 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
15 | Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> | ||
16 | Message-id: 20190221103314.58500-4-sgarzare@redhat.com | ||
17 | Message-Id: <20190221103314.58500-4-sgarzare@redhat.com> | ||
18 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 12 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
19 | --- | 13 | --- |
20 | hw/block/virtio-blk.c | 4 ++++ | 14 | qapi/block-core.json | 4 +++- |
21 | hw/core/machine.c | 2 ++ | 15 | 1 file changed, 3 insertions(+), 1 deletion(-) |
22 | 2 files changed, 6 insertions(+) | ||
23 | 16 | ||
24 | diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c | 17 | diff --git a/qapi/block-core.json b/qapi/block-core.json |
25 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
26 | --- a/hw/block/virtio-blk.c | 19 | --- a/qapi/block-core.json |
27 | +++ b/hw/block/virtio-blk.c | 20 | +++ b/qapi/block-core.json |
28 | @@ -XXX,XX +XXX,XX @@ static Property virtio_blk_properties[] = { | 21 | @@ -XXX,XX +XXX,XX @@ |
29 | DEFINE_PROP_UINT16("queue-size", VirtIOBlock, conf.queue_size, 128), | 22 | # |
30 | DEFINE_PROP_LINK("iothread", VirtIOBlock, conf.iothread, TYPE_IOTHREAD, | 23 | # @threads: Use qemu's thread pool |
31 | IOThread *), | 24 | # @native: Use native AIO backend (only Linux and Windows) |
32 | + DEFINE_PROP_BIT64("discard", VirtIOBlock, host_features, | 25 | +# @io_uring: Use linux io_uring (since 5.0) |
33 | + VIRTIO_BLK_F_DISCARD, true), | 26 | # |
34 | + DEFINE_PROP_BIT64("write-zeroes", VirtIOBlock, host_features, | 27 | # Since: 2.9 |
35 | + VIRTIO_BLK_F_WRITE_ZEROES, true), | 28 | ## |
36 | DEFINE_PROP_END_OF_LIST(), | 29 | { 'enum': 'BlockdevAioOptions', |
37 | }; | 30 | - 'data': [ 'threads', 'native' ] } |
38 | 31 | + 'data': [ 'threads', 'native', | |
39 | diff --git a/hw/core/machine.c b/hw/core/machine.c | 32 | + { 'name': 'io_uring', 'if': 'defined(CONFIG_LINUX_IO_URING)' } ] } |
40 | index XXXXXXX..XXXXXXX 100644 | 33 | |
41 | --- a/hw/core/machine.c | 34 | ## |
42 | +++ b/hw/core/machine.c | 35 | # @BlockdevCacheOptions: |
43 | @@ -XXX,XX +XXX,XX @@ GlobalProperty hw_compat_3_1[] = { | ||
44 | { "usb-kbd", "serial", "42" }, | ||
45 | { "usb-mouse", "serial", "42" }, | ||
46 | { "usb-kbd", "serial", "42" }, | ||
47 | + { "virtio-blk-device", "discard", "false" }, | ||
48 | + { "virtio-blk-device", "write-zeroes", "false" }, | ||
49 | }; | ||
50 | const size_t hw_compat_3_1_len = G_N_ELEMENTS(hw_compat_3_1); | ||
51 | |||
52 | -- | 36 | -- |
53 | 2.20.1 | 37 | 2.24.1 |
54 | 38 | ||
55 | 39 | diff view generated by jsdifflib |
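For readers following the left-hand series: the "discard" and "write-zeroes" properties set bits in the device's host_features, and the hw_compat_3_1 entries simply flip their defaults back to "false" on pre-4.0 machine types. A minimal illustrative sketch of how such a bit usually reaches the guest during feature negotiation (the function name example_get_features and its exact signature are assumptions, not code from the patch):

    static uint64_t example_get_features(VirtIODevice *vdev, uint64_t features)
    {
        VirtIOBlock *s = VIRTIO_BLK(vdev);

        /* Bits set by DEFINE_PROP_BIT64 ("discard", "write-zeroes"), and
         * possibly cleared again by compat properties on older machine
         * types, are merged into the feature set offered to the guest. */
        features |= s->host_features;
        return features;
    }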
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> |
---|---|---|---|
2 | 2 | ||
3 | Use new qemu_iovec_init_buf() instead of | 3 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> |
4 | qemu_iovec_init_external( ... , 1), which simplifies the code. | 4 | Reviewed-by: Maxim Levitsky <maximlevitsky@gmail.com> |
5 | 5 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> | |
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 6 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
7 | Reviewed-by: Eric Blake <eblake@redhat.com> | 7 | Message-id: 20200120141858.587874-4-stefanha@redhat.com |
8 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | 8 | Message-Id: <20200120141858.587874-4-stefanha@redhat.com> |
9 | Message-id: 20190218140926.333779-10-vsementsov@virtuozzo.com | ||
10 | Message-Id: <20190218140926.333779-10-vsementsov@virtuozzo.com> | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 9 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
12 | --- | 10 | --- |
13 | block/qcow2.c | 12 ++---------- | 11 | include/block/block.h | 1 + |
14 | 1 file changed, 2 insertions(+), 10 deletions(-) | 12 | 1 file changed, 1 insertion(+) |
15 | 13 | ||
16 | diff --git a/block/qcow2.c b/block/qcow2.c | 14 | diff --git a/include/block/block.h b/include/block/block.h |
17 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/block/qcow2.c | 16 | --- a/include/block/block.h |
19 | +++ b/block/qcow2.c | 17 | +++ b/include/block/block.h |
20 | @@ -XXX,XX +XXX,XX @@ qcow2_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset, | 18 | @@ -XXX,XX +XXX,XX @@ typedef struct HDGeometry { |
21 | { | 19 | ignoring the format layer */ |
22 | BDRVQcow2State *s = bs->opaque; | 20 | #define BDRV_O_NO_IO 0x10000 /* don't initialize for I/O */ |
23 | QEMUIOVector hd_qiov; | 21 | #define BDRV_O_AUTO_RDONLY 0x20000 /* degrade to read-only if opening read-write fails */ |
24 | - struct iovec iov; | 22 | +#define BDRV_O_IO_URING 0x40000 /* use io_uring instead of the thread pool */ |
25 | int ret; | 23 | |
26 | size_t out_len; | 24 | #define BDRV_O_CACHE_MASK (BDRV_O_NOCACHE | BDRV_O_NO_FLUSH) |
27 | uint8_t *buf, *out_buf; | ||
28 | @@ -XXX,XX +XXX,XX @@ qcow2_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset, | ||
29 | goto fail; | ||
30 | } | ||
31 | |||
32 | - iov = (struct iovec) { | ||
33 | - .iov_base = out_buf, | ||
34 | - .iov_len = out_len, | ||
35 | - }; | ||
36 | - qemu_iovec_init_external(&hd_qiov, &iov, 1); | ||
37 | + qemu_iovec_init_buf(&hd_qiov, out_buf, out_len); | ||
38 | |||
39 | BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED); | ||
40 | ret = bdrv_co_pwritev(bs->file, cluster_offset, out_len, &hd_qiov, 0); | ||
41 | @@ -XXX,XX +XXX,XX @@ qcow2_co_preadv_compressed(BlockDriverState *bs, | ||
42 | int ret = 0, csize, nb_csectors; | ||
43 | uint64_t coffset; | ||
44 | uint8_t *buf, *out_buf; | ||
45 | - struct iovec iov; | ||
46 | QEMUIOVector local_qiov; | ||
47 | int offset_in_cluster = offset_into_cluster(s, offset); | ||
48 | |||
49 | @@ -XXX,XX +XXX,XX @@ qcow2_co_preadv_compressed(BlockDriverState *bs, | ||
50 | if (!buf) { | ||
51 | return -ENOMEM; | ||
52 | } | ||
53 | - iov.iov_base = buf; | ||
54 | - iov.iov_len = csize; | ||
55 | - qemu_iovec_init_external(&local_qiov, &iov, 1); | ||
56 | + qemu_iovec_init_buf(&local_qiov, buf, csize); | ||
57 | |||
58 | out_buf = qemu_blockalign(bs, s->cluster_size); | ||
59 | 25 | ||
60 | -- | 26 | -- |
61 | 2.20.1 | 27 | 2.24.1 |
62 | 28 | ||
63 | 29 | diff view generated by jsdifflib |
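The new BDRV_O_IO_URING bit sits alongside BDRV_O_NATIVE_AIO as a request-level flag selecting the aio engine. A rough, hypothetical sketch of how an "aio=..." option string could be mapped onto these flags (the helper name example_parse_aio is made up for illustration and is not part of the patch):

    static int example_parse_aio(const char *mode, int *flags)
    {
        if (!strcmp(mode, "threads")) {
            /* thread pool is the default; no extra flag needed */
        } else if (!strcmp(mode, "native")) {
            *flags |= BDRV_O_NATIVE_AIO;   /* native AIO (Linux/Windows only) */
        } else if (!strcmp(mode, "io_uring")) {
            *flags |= BDRV_O_IO_URING;     /* flag introduced above */
        } else {
            return -EINVAL;
        }
        return 0;
    }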
1 | From: Stefano Garzarella <sgarzare@redhat.com> | 1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> |
---|---|---|---|
2 | 2 | ||
3 | If the WRITE_ZEROES feature is enabled, we check this command | 3 | Abort when an sqe fails to be set, since sqes cannot be returned to the |
4 | in test_basic(). | 4 | ring. Add a slow path for short reads on older kernels. |
5 | 5 | ||
6 | Reviewed-by: Michael S. Tsirkin <mst@redhat.com> | 6 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> |
7 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | 7 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> |
8 | Acked-by: Thomas Huth <thuth@redhat.com> | 8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
9 | Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> | 9 | Message-id: 20200120141858.587874-5-stefanha@redhat.com |
10 | Message-id: 20190221103314.58500-10-sgarzare@redhat.com | 10 | Message-Id: <20200120141858.587874-5-stefanha@redhat.com> |
11 | Message-Id: <20190221103314.58500-10-sgarzare@redhat.com> | ||
12 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
13 | --- | 12 | --- |
14 | tests/virtio-blk-test.c | 62 +++++++++++++++++++++++++++++++++++++++++ | 13 | MAINTAINERS | 8 + |
15 | 1 file changed, 62 insertions(+) | 14 | block/Makefile.objs | 3 + |
15 | block/io_uring.c | 401 ++++++++++++++++++++++++++++++++++++++++ | ||
16 | include/block/aio.h | 16 +- | ||
17 | include/block/raw-aio.h | 12 ++ | ||
18 | 5 files changed, 439 insertions(+), 1 deletion(-) | ||
19 | create mode 100644 block/io_uring.c | ||
16 | 20 | ||
17 | diff --git a/tests/virtio-blk-test.c b/tests/virtio-blk-test.c | 21 | diff --git a/MAINTAINERS b/MAINTAINERS |
18 | index XXXXXXX..XXXXXXX 100644 | 22 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/tests/virtio-blk-test.c | 23 | --- a/MAINTAINERS |
20 | +++ b/tests/virtio-blk-test.c | 24 | +++ b/MAINTAINERS |
21 | @@ -XXX,XX +XXX,XX @@ static void test_basic(QVirtioDevice *dev, QGuestAllocator *alloc, | 25 | @@ -XXX,XX +XXX,XX @@ F: block/file-posix.c |
22 | 26 | F: block/file-win32.c | |
23 | guest_free(alloc, req_addr); | 27 | F: block/win32-aio.c |
24 | 28 | ||
25 | + if (features & (1u << VIRTIO_BLK_F_WRITE_ZEROES)) { | 29 | +Linux io_uring |
26 | + struct virtio_blk_discard_write_zeroes dwz_hdr; | 30 | +M: Aarushi Mehta <mehta.aaru20@gmail.com> |
27 | + void *expected; | 31 | +M: Julia Suvorova <jusual@redhat.com> |
32 | +M: Stefan Hajnoczi <stefanha@redhat.com> | ||
33 | +L: qemu-block@nongnu.org | ||
34 | +S: Maintained | ||
35 | +F: block/io_uring.c | ||
36 | + | ||
37 | qcow2 | ||
38 | M: Kevin Wolf <kwolf@redhat.com> | ||
39 | M: Max Reitz <mreitz@redhat.com> | ||
40 | diff --git a/block/Makefile.objs b/block/Makefile.objs | ||
41 | index XXXXXXX..XXXXXXX 100644 | ||
42 | --- a/block/Makefile.objs | ||
43 | +++ b/block/Makefile.objs | ||
44 | @@ -XXX,XX +XXX,XX @@ block-obj-y += block-backend.o snapshot.o qapi.o | ||
45 | block-obj-$(CONFIG_WIN32) += file-win32.o win32-aio.o | ||
46 | block-obj-$(CONFIG_POSIX) += file-posix.o | ||
47 | block-obj-$(CONFIG_LINUX_AIO) += linux-aio.o | ||
48 | +block-obj-$(CONFIG_LINUX_IO_URING) += io_uring.o | ||
49 | block-obj-y += null.o mirror.o commit.o io.o create.o | ||
50 | block-obj-y += throttle-groups.o | ||
51 | block-obj-$(CONFIG_LINUX) += nvme.o | ||
52 | @@ -XXX,XX +XXX,XX @@ block-obj-$(if $(CONFIG_LZFSE),m,n) += dmg-lzfse.o | ||
53 | dmg-lzfse.o-libs := $(LZFSE_LIBS) | ||
54 | qcow.o-libs := -lz | ||
55 | linux-aio.o-libs := -laio | ||
56 | +io_uring.o-cflags := $(LINUX_IO_URING_CFLAGS) | ||
57 | +io_uring.o-libs := $(LINUX_IO_URING_LIBS) | ||
58 | parallels.o-cflags := $(LIBXML2_CFLAGS) | ||
59 | parallels.o-libs := $(LIBXML2_LIBS) | ||
60 | diff --git a/block/io_uring.c b/block/io_uring.c | ||
61 | new file mode 100644 | ||
62 | index XXXXXXX..XXXXXXX | ||
63 | --- /dev/null | ||
64 | +++ b/block/io_uring.c | ||
65 | @@ -XXX,XX +XXX,XX @@ | ||
66 | +/* | ||
67 | + * Linux io_uring support. | ||
68 | + * | ||
69 | + * Copyright (C) 2009 IBM, Corp. | ||
70 | + * Copyright (C) 2009 Red Hat, Inc. | ||
71 | + * Copyright (C) 2019 Aarushi Mehta | ||
72 | + * | ||
73 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. | ||
74 | + * See the COPYING file in the top-level directory. | ||
75 | + */ | ||
76 | +#include "qemu/osdep.h" | ||
77 | +#include <liburing.h> | ||
78 | +#include "qemu-common.h" | ||
79 | +#include "block/aio.h" | ||
80 | +#include "qemu/queue.h" | ||
81 | +#include "block/block.h" | ||
82 | +#include "block/raw-aio.h" | ||
83 | +#include "qemu/coroutine.h" | ||
84 | +#include "qapi/error.h" | ||
85 | + | ||
86 | +/* io_uring ring size */ | ||
87 | +#define MAX_ENTRIES 128 | ||
88 | + | ||
89 | +typedef struct LuringAIOCB { | ||
90 | + Coroutine *co; | ||
91 | + struct io_uring_sqe sqeq; | ||
92 | + ssize_t ret; | ||
93 | + QEMUIOVector *qiov; | ||
94 | + bool is_read; | ||
95 | + QSIMPLEQ_ENTRY(LuringAIOCB) next; | ||
96 | + | ||
97 | + /* | ||
98 | + * Buffered reads may require resubmission, see | ||
99 | + * luring_resubmit_short_read(). | ||
100 | + */ | ||
101 | + int total_read; | ||
102 | + QEMUIOVector resubmit_qiov; | ||
103 | +} LuringAIOCB; | ||
104 | + | ||
105 | +typedef struct LuringQueue { | ||
106 | + int plugged; | ||
107 | + unsigned int in_queue; | ||
108 | + unsigned int in_flight; | ||
109 | + bool blocked; | ||
110 | + QSIMPLEQ_HEAD(, LuringAIOCB) submit_queue; | ||
111 | +} LuringQueue; | ||
112 | + | ||
113 | +typedef struct LuringState { | ||
114 | + AioContext *aio_context; | ||
115 | + | ||
116 | + struct io_uring ring; | ||
117 | + | ||
118 | + /* I/O queue for batched submission. Protected by AioContext lock. */ |
119 | + LuringQueue io_q; | ||
120 | + | ||
121 | + /* I/O completion processing. Only runs in I/O thread. */ | ||
122 | + QEMUBH *completion_bh; | ||
123 | +} LuringState; | ||
124 | + | ||
125 | +/** | ||
126 | + * luring_resubmit: | ||
127 | + * | ||
128 | + * Resubmit a request by appending it to submit_queue. The caller must ensure | ||
129 | + * that ioq_submit() is called later so that submit_queue requests are started. | ||
130 | + */ | ||
131 | +static void luring_resubmit(LuringState *s, LuringAIOCB *luringcb) | ||
132 | +{ | ||
133 | + QSIMPLEQ_INSERT_TAIL(&s->io_q.submit_queue, luringcb, next); | ||
134 | + s->io_q.in_queue++; | ||
135 | +} | ||
136 | + | ||
137 | +/** | ||
138 | + * luring_resubmit_short_read: | ||
139 | + * | ||
140 | + * Before Linux commit 9d93a3f5a0c ("io_uring: punt short reads to async | ||
141 | + * context") a buffered I/O request with the start of the file range in the | ||
142 | + * page cache could result in a short read. Applications need to resubmit the | ||
143 | + * remaining read request. | ||
144 | + * | ||
145 | + * This is a slow path but recent kernels never take it. | ||
146 | + */ | ||
147 | +static void luring_resubmit_short_read(LuringState *s, LuringAIOCB *luringcb, | ||
148 | + int nread) | ||
149 | +{ | ||
150 | + QEMUIOVector *resubmit_qiov; | ||
151 | + size_t remaining; | ||
152 | + | ||
153 | + /* Update read position */ | ||
154 | + luringcb->total_read = nread; | ||
155 | + remaining = luringcb->qiov->size - luringcb->total_read; | ||
156 | + | ||
157 | + /* Shorten qiov */ | ||
158 | + resubmit_qiov = &luringcb->resubmit_qiov; | ||
159 | + if (resubmit_qiov->iov == NULL) { | ||
160 | + qemu_iovec_init(resubmit_qiov, luringcb->qiov->niov); | ||
161 | + } else { | ||
162 | + qemu_iovec_reset(resubmit_qiov); | ||
163 | + } | ||
164 | + qemu_iovec_concat(resubmit_qiov, luringcb->qiov, luringcb->total_read, | ||
165 | + remaining); | ||
166 | + | ||
167 | + /* Update sqe */ | ||
168 | + luringcb->sqeq.off = nread; | ||
169 | + luringcb->sqeq.addr = (__u64)(uintptr_t)luringcb->resubmit_qiov.iov; | ||
170 | + luringcb->sqeq.len = luringcb->resubmit_qiov.niov; | ||
171 | + | ||
172 | + luring_resubmit(s, luringcb); | ||
173 | +} | ||
174 | + | ||
175 | +/** | ||
176 | + * luring_process_completions: | ||
177 | + * @s: AIO state | ||
178 | + * | ||
179 | + * Fetches completed I/O requests, consumes cqes and invokes their callbacks. |
180 | + * The function is somewhat tricky because it supports nested event loops, for | ||
181 | + * example when a request callback invokes aio_poll(). | ||
182 | + * | ||
183 | + * The function schedules BH completion so it can be called again in a nested |
184 | + * event loop. When there are no events left to complete, the BH is |
185 | + * canceled. | ||
186 | + * | ||
187 | + */ | ||
188 | +static void luring_process_completions(LuringState *s) | ||
189 | +{ | ||
190 | + struct io_uring_cqe *cqes; | ||
191 | + int total_bytes; | ||
192 | + /* | ||
193 | + * Request completion callbacks can run the nested event loop. | ||
194 | + * Schedule ourselves so the nested event loop will "see" remaining | ||
195 | + * completed requests and process them. Without this, completion | ||
196 | + * callbacks that wait for other requests using a nested event loop | ||
197 | + * would hang forever. | ||
198 | + * | ||
199 | + * This workaround is needed because io_uring uses poll_wait, which | ||
200 | + * is woken up when new events are added to the uring, thus polling on | ||
201 | + * the same uring fd will block unless more events are received. | ||
202 | + * | ||
203 | + * Other leaf block drivers (drivers that access the data themselves) | ||
204 | + * are networking based, so they poll sockets for data and run the | ||
205 | + * correct coroutine. | ||
206 | + */ | ||
207 | + qemu_bh_schedule(s->completion_bh); | ||
208 | + | ||
209 | + while (io_uring_peek_cqe(&s->ring, &cqes) == 0) { | ||
210 | + LuringAIOCB *luringcb; | ||
211 | + int ret; | ||
212 | + | ||
213 | + if (!cqes) { | ||
214 | + break; | ||
215 | + } | ||
216 | + | ||
217 | + luringcb = io_uring_cqe_get_data(cqes); | ||
218 | + ret = cqes->res; | ||
219 | + io_uring_cqe_seen(&s->ring, cqes); | ||
220 | + cqes = NULL; | ||
221 | + | ||
222 | + /* Change counters one-by-one because we can be nested. */ | ||
223 | + s->io_q.in_flight--; | ||
224 | + | ||
225 | + /* total_read is non-zero only for resubmitted read requests */ | ||
226 | + total_bytes = ret + luringcb->total_read; | ||
227 | + | ||
228 | + if (ret < 0) { | ||
229 | + if (ret == -EINTR) { | ||
230 | + luring_resubmit(s, luringcb); | ||
231 | + continue; | ||
232 | + } | ||
233 | + } else if (!luringcb->qiov) { | ||
234 | + goto end; | ||
235 | + } else if (total_bytes == luringcb->qiov->size) { | ||
236 | + ret = 0; | ||
237 | + /* Only read/write */ | ||
238 | + } else { | ||
239 | + /* Short Read/Write */ | ||
240 | + if (luringcb->is_read) { | ||
241 | + if (ret > 0) { | ||
242 | + luring_resubmit_short_read(s, luringcb, ret); | ||
243 | + continue; | ||
244 | + } else { | ||
245 | + /* Pad with zeroes */ | ||
246 | + qemu_iovec_memset(luringcb->qiov, total_bytes, 0, | ||
247 | + luringcb->qiov->size - total_bytes); | ||
248 | + ret = 0; | ||
249 | + } | ||
250 | + } else { | ||
251 | + ret = -ENOSPC; |
252 | + } | ||
253 | + } | ||
254 | +end: | ||
255 | + luringcb->ret = ret; | ||
256 | + qemu_iovec_destroy(&luringcb->resubmit_qiov); | ||
28 | + | 257 | + |
29 | + /* | 258 | + /* |
30 | + * WRITE_ZEROES request on the same sector of previous test where | 259 | + * If the coroutine is already entered it must be in ioq_submit() |
31 | + * we wrote "TEST". | 260 | + * and will notice luringcb->ret has been filled in when it |
261 | + * eventually runs later. Coroutines cannot be entered recursively | ||
262 | + * so avoid doing that! | ||
32 | + */ | 263 | + */ |
33 | + req.type = VIRTIO_BLK_T_WRITE_ZEROES; | 264 | + if (!qemu_coroutine_entered(luringcb->co)) { |
34 | + req.data = (char *) &dwz_hdr; | 265 | + aio_co_wake(luringcb->co); |
35 | + dwz_hdr.sector = 0; | 266 | + } |
36 | + dwz_hdr.num_sectors = 1; | 267 | + } |
37 | + dwz_hdr.flags = 0; | 268 | + qemu_bh_cancel(s->completion_bh); |
38 | + | 269 | +} |
39 | + virtio_blk_fix_dwz_hdr(dev, &dwz_hdr); | 270 | + |
40 | + | 271 | +static int ioq_submit(LuringState *s) |
41 | + req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr)); | 272 | +{ |
42 | + | 273 | + int ret = 0; |
43 | + free_head = qvirtqueue_add(vq, req_addr, 16, false, true); | 274 | + LuringAIOCB *luringcb, *luringcb_next; |
44 | + qvirtqueue_add(vq, req_addr + 16, sizeof(dwz_hdr), false, true); | 275 | + |
45 | + qvirtqueue_add(vq, req_addr + 16 + sizeof(dwz_hdr), 1, true, false); | 276 | + while (s->io_q.in_queue > 0) { |
46 | + | 277 | + /* |
47 | + qvirtqueue_kick(dev, vq, free_head); | 278 | + * Try to fetch sqes from the ring for requests waiting in |
48 | + | 279 | + * the overflow queue |
49 | + qvirtio_wait_used_elem(dev, vq, free_head, NULL, | 280 | + */ |
50 | + QVIRTIO_BLK_TIMEOUT_US); | 281 | + QSIMPLEQ_FOREACH_SAFE(luringcb, &s->io_q.submit_queue, next, |
51 | + status = readb(req_addr + 16 + sizeof(dwz_hdr)); | 282 | + luringcb_next) { |
52 | + g_assert_cmpint(status, ==, 0); | 283 | + struct io_uring_sqe *sqes = io_uring_get_sqe(&s->ring); |
53 | + | 284 | + if (!sqes) { |
54 | + guest_free(alloc, req_addr); | 285 | + break; |
55 | + | 286 | + } |
56 | + /* Read request to check if the sector contains all zeroes */ | 287 | + /* Prep sqe for submission */ |
57 | + req.type = VIRTIO_BLK_T_IN; | 288 | + *sqes = luringcb->sqeq; |
58 | + req.ioprio = 1; | 289 | + QSIMPLEQ_REMOVE_HEAD(&s->io_q.submit_queue, next); |
59 | + req.sector = 0; | 290 | + } |
60 | + req.data = g_malloc0(512); | 291 | + ret = io_uring_submit(&s->ring); |
61 | + | 292 | + /* Prevent infinite loop if submission is refused */ |
62 | + req_addr = virtio_blk_request(alloc, dev, &req, 512); | 293 | + if (ret <= 0) { |
63 | + | 294 | + if (ret == -EAGAIN) { |
64 | + g_free(req.data); | 295 | + continue; |
65 | + | 296 | + } |
66 | + free_head = qvirtqueue_add(vq, req_addr, 16, false, true); | 297 | + break; |
67 | + qvirtqueue_add(vq, req_addr + 16, 512, true, true); | 298 | + } |
68 | + qvirtqueue_add(vq, req_addr + 528, 1, true, false); | 299 | + s->io_q.in_flight += ret; |
69 | + | 300 | + s->io_q.in_queue -= ret; |
70 | + qvirtqueue_kick(dev, vq, free_head); | 301 | + } |
71 | + | 302 | + s->io_q.blocked = (s->io_q.in_queue > 0); |
72 | + qvirtio_wait_used_elem(dev, vq, free_head, NULL, | 303 | + |
73 | + QVIRTIO_BLK_TIMEOUT_US); | 304 | + if (s->io_q.in_flight) { |
74 | + status = readb(req_addr + 528); | 305 | + /* |
75 | + g_assert_cmpint(status, ==, 0); | 306 | + * We can try to complete something just right away if there are |
76 | + | 307 | + * still requests in-flight. |
77 | + data = g_malloc(512); | 308 | + */ |
78 | + expected = g_malloc0(512); | 309 | + luring_process_completions(s); |
79 | + memread(req_addr + 16, data, 512); | 310 | + } |
80 | + g_assert_cmpmem(data, 512, expected, 512); | 311 | + return ret; |
81 | + g_free(expected); | 312 | +} |
82 | + g_free(data); | 313 | + |
83 | + | 314 | +static void luring_process_completions_and_submit(LuringState *s) |
84 | + guest_free(alloc, req_addr); | 315 | +{ |
85 | + } | 316 | + aio_context_acquire(s->aio_context); |
86 | + | 317 | + luring_process_completions(s); |
87 | if (features & (1u << VIRTIO_F_ANY_LAYOUT)) { | 318 | + |
88 | /* Write and read with 2 descriptor layout */ | 319 | + if (!s->io_q.plugged && s->io_q.in_queue > 0) { |
89 | /* Write request */ | 320 | + ioq_submit(s); |
321 | + } | ||
322 | + aio_context_release(s->aio_context); | ||
323 | +} | ||
324 | + | ||
325 | +static void qemu_luring_completion_bh(void *opaque) | ||
326 | +{ | ||
327 | + LuringState *s = opaque; | ||
328 | + luring_process_completions_and_submit(s); | ||
329 | +} | ||
330 | + | ||
331 | +static void qemu_luring_completion_cb(void *opaque) | ||
332 | +{ | ||
333 | + LuringState *s = opaque; | ||
334 | + luring_process_completions_and_submit(s); | ||
335 | +} | ||
336 | + | ||
337 | +static void ioq_init(LuringQueue *io_q) | ||
338 | +{ | ||
339 | + QSIMPLEQ_INIT(&io_q->submit_queue); | ||
340 | + io_q->plugged = 0; | ||
341 | + io_q->in_queue = 0; | ||
342 | + io_q->in_flight = 0; | ||
343 | + io_q->blocked = false; | ||
344 | +} | ||
345 | + | ||
346 | +void luring_io_plug(BlockDriverState *bs, LuringState *s) | ||
347 | +{ | ||
348 | + s->io_q.plugged++; | ||
349 | +} | ||
350 | + | ||
351 | +void luring_io_unplug(BlockDriverState *bs, LuringState *s) | ||
352 | +{ | ||
353 | + assert(s->io_q.plugged); | ||
354 | + if (--s->io_q.plugged == 0 && | ||
355 | + !s->io_q.blocked && s->io_q.in_queue > 0) { | ||
356 | + ioq_submit(s); | ||
357 | + } | ||
358 | +} | ||
359 | + | ||
360 | +/** | ||
361 | + * luring_do_submit: | ||
362 | + * @fd: file descriptor for I/O | ||
363 | + * @luringcb: AIO control block | ||
364 | + * @s: AIO state | ||
365 | + * @offset: offset for request | ||
366 | + * @type: type of request | ||
367 | + * | ||
368 | + * Fetches sqes from ring, adds to pending queue and preps them | ||
369 | + * | ||
370 | + */ | ||
371 | +static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s, | ||
372 | + uint64_t offset, int type) | ||
373 | +{ | ||
374 | + struct io_uring_sqe *sqes = &luringcb->sqeq; | ||
375 | + | ||
376 | + switch (type) { | ||
377 | + case QEMU_AIO_WRITE: | ||
378 | + io_uring_prep_writev(sqes, fd, luringcb->qiov->iov, | ||
379 | + luringcb->qiov->niov, offset); | ||
380 | + break; | ||
381 | + case QEMU_AIO_READ: | ||
382 | + io_uring_prep_readv(sqes, fd, luringcb->qiov->iov, | ||
383 | + luringcb->qiov->niov, offset); | ||
384 | + break; | ||
385 | + case QEMU_AIO_FLUSH: | ||
386 | + io_uring_prep_fsync(sqes, fd, IORING_FSYNC_DATASYNC); | ||
387 | + break; | ||
388 | + default: | ||
389 | + fprintf(stderr, "%s: invalid AIO request type, aborting 0x%x.\n", | ||
390 | + __func__, type); | ||
391 | + abort(); | ||
392 | + } | ||
393 | + io_uring_sqe_set_data(sqes, luringcb); | ||
394 | + | ||
395 | + QSIMPLEQ_INSERT_TAIL(&s->io_q.submit_queue, luringcb, next); | ||
396 | + s->io_q.in_queue++; | ||
397 | + | ||
398 | + if (!s->io_q.blocked && | ||
399 | + (!s->io_q.plugged || | ||
400 | + s->io_q.in_flight + s->io_q.in_queue >= MAX_ENTRIES)) { | ||
401 | + return ioq_submit(s); | ||
402 | + } | ||
403 | + return 0; | ||
404 | +} | ||
405 | + | ||
406 | +int coroutine_fn luring_co_submit(BlockDriverState *bs, LuringState *s, int fd, | ||
407 | + uint64_t offset, QEMUIOVector *qiov, int type) | ||
408 | +{ | ||
409 | + int ret; | ||
410 | + LuringAIOCB luringcb = { | ||
411 | + .co = qemu_coroutine_self(), | ||
412 | + .ret = -EINPROGRESS, | ||
413 | + .qiov = qiov, | ||
414 | + .is_read = (type == QEMU_AIO_READ), | ||
415 | + }; | ||
416 | + | ||
417 | + ret = luring_do_submit(fd, &luringcb, s, offset, type); | ||
418 | + if (ret < 0) { | ||
419 | + return ret; | ||
420 | + } | ||
421 | + | ||
422 | + if (luringcb.ret == -EINPROGRESS) { | ||
423 | + qemu_coroutine_yield(); | ||
424 | + } | ||
425 | + return luringcb.ret; | ||
426 | +} | ||
427 | + | ||
428 | +void luring_detach_aio_context(LuringState *s, AioContext *old_context) | ||
429 | +{ | ||
430 | + aio_set_fd_handler(old_context, s->ring.ring_fd, false, NULL, NULL, NULL, | ||
431 | + s); | ||
432 | + qemu_bh_delete(s->completion_bh); | ||
433 | + s->aio_context = NULL; | ||
434 | +} | ||
435 | + | ||
436 | +void luring_attach_aio_context(LuringState *s, AioContext *new_context) | ||
437 | +{ | ||
438 | + s->aio_context = new_context; | ||
439 | + s->completion_bh = aio_bh_new(new_context, qemu_luring_completion_bh, s); | ||
440 | + aio_set_fd_handler(s->aio_context, s->ring.ring_fd, false, | ||
441 | + qemu_luring_completion_cb, NULL, NULL, s); | ||
442 | +} | ||
443 | + | ||
444 | +LuringState *luring_init(Error **errp) | ||
445 | +{ | ||
446 | + int rc; | ||
447 | + LuringState *s = g_new0(LuringState, 1); | ||
448 | + struct io_uring *ring = &s->ring; | ||
449 | + | ||
450 | + rc = io_uring_queue_init(MAX_ENTRIES, ring, 0); | ||
451 | + if (rc < 0) { | ||
452 | + error_setg_errno(errp, errno, "failed to init linux io_uring ring"); | ||
453 | + g_free(s); | ||
454 | + return NULL; | ||
455 | + } | ||
456 | + | ||
457 | + ioq_init(&s->io_q); | ||
458 | + return s; | ||
459 | + | ||
460 | +} | ||
461 | + | ||
462 | +void luring_cleanup(LuringState *s) | ||
463 | +{ | ||
464 | + io_uring_queue_exit(&s->ring); | ||
465 | + g_free(s); | ||
466 | +} | ||
467 | diff --git a/include/block/aio.h b/include/block/aio.h | ||
468 | index XXXXXXX..XXXXXXX 100644 | ||
469 | --- a/include/block/aio.h | ||
470 | +++ b/include/block/aio.h | ||
471 | @@ -XXX,XX +XXX,XX @@ typedef void IOHandler(void *opaque); | ||
472 | struct Coroutine; | ||
473 | struct ThreadPool; | ||
474 | struct LinuxAioState; | ||
475 | +struct LuringState; | ||
476 | |||
477 | struct AioContext { | ||
478 | GSource source; | ||
479 | @@ -XXX,XX +XXX,XX @@ struct AioContext { | ||
480 | struct ThreadPool *thread_pool; | ||
481 | |||
482 | #ifdef CONFIG_LINUX_AIO | ||
483 | - /* State for native Linux AIO. Uses aio_context_acquire/release for | ||
484 | + /* | ||
485 | + * State for native Linux AIO. Uses aio_context_acquire/release for | ||
486 | * locking. | ||
487 | */ | ||
488 | struct LinuxAioState *linux_aio; | ||
489 | #endif | ||
490 | +#ifdef CONFIG_LINUX_IO_URING | ||
491 | + /* | ||
492 | + * State for Linux io_uring. Uses aio_context_acquire/release for | ||
493 | + * locking. | ||
494 | + */ | ||
495 | + struct LuringState *linux_io_uring; | ||
496 | +#endif | ||
497 | |||
498 | /* TimerLists for calling timers - one per clock type. Has its own | ||
499 | * locking. | ||
500 | @@ -XXX,XX +XXX,XX @@ struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp); | ||
501 | /* Return the LinuxAioState bound to this AioContext */ | ||
502 | struct LinuxAioState *aio_get_linux_aio(AioContext *ctx); | ||
503 | |||
504 | +/* Setup the LuringState bound to this AioContext */ | ||
505 | +struct LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp); | ||
506 | + | ||
507 | +/* Return the LuringState bound to this AioContext */ | ||
508 | +struct LuringState *aio_get_linux_io_uring(AioContext *ctx); | ||
509 | /** | ||
510 | * aio_timer_new_with_attrs: | ||
511 | * @ctx: the aio context | ||
512 | diff --git a/include/block/raw-aio.h b/include/block/raw-aio.h | ||
513 | index XXXXXXX..XXXXXXX 100644 | ||
514 | --- a/include/block/raw-aio.h | ||
515 | +++ b/include/block/raw-aio.h | ||
516 | @@ -XXX,XX +XXX,XX @@ void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context); | ||
517 | void laio_io_plug(BlockDriverState *bs, LinuxAioState *s); | ||
518 | void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s); | ||
519 | #endif | ||
520 | +/* io_uring.c - Linux io_uring implementation */ | ||
521 | +#ifdef CONFIG_LINUX_IO_URING | ||
522 | +typedef struct LuringState LuringState; | ||
523 | +LuringState *luring_init(Error **errp); | ||
524 | +void luring_cleanup(LuringState *s); | ||
525 | +int coroutine_fn luring_co_submit(BlockDriverState *bs, LuringState *s, int fd, | ||
526 | + uint64_t offset, QEMUIOVector *qiov, int type); | ||
527 | +void luring_detach_aio_context(LuringState *s, AioContext *old_context); | ||
528 | +void luring_attach_aio_context(LuringState *s, AioContext *new_context); | ||
529 | +void luring_io_plug(BlockDriverState *bs, LuringState *s); | ||
530 | +void luring_io_unplug(BlockDriverState *bs, LuringState *s); | ||
531 | +#endif | ||
532 | |||
533 | #ifdef _WIN32 | ||
534 | typedef struct QEMUWin32AIOState QEMUWin32AIOState; | ||
90 | -- | 535 | -- |
91 | 2.20.1 | 536 | 2.24.1 |
92 | 537 | ||
93 | 538 | diff view generated by jsdifflib |
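To make the coroutine API above concrete, here is a minimal, hypothetical caller sketch; the function name example_co_preadv_io_uring and its argument list are assumptions for illustration and are not taken from the series:

    static int coroutine_fn example_co_preadv_io_uring(BlockDriverState *bs,
                                                       int fd, uint64_t offset,
                                                       QEMUIOVector *qiov)
    {
        /* The LuringState is bound to the AioContext via
         * luring_attach_aio_context() and fetched per request. */
        LuringState *s = aio_get_linux_io_uring(bdrv_get_aio_context(bs));

        /* Queues an sqe for the read, submits the ring when appropriate and
         * yields until luring_process_completions() has seen the cqe. */
        return luring_co_submit(bs, s, fd, offset, qiov, QEMU_AIO_READ);
    }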
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> |
---|---|---|---|
2 | 2 | ||
3 | Add the possibility of an embedded iovec, for cases when we need only | 3 | Follow linux-aio.o and stub out the block/io_uring.o APIs that will be |
4 | one local iov. | 4 | missing when a binary is linked with obj-util-y but without |
5 | block-util-y (e.g. vhost-user-gpu). | ||
5 | 6 | ||
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 7 | For example, the stubs are necessary so that a binary using util/async.o |
7 | Reviewed-by: Eric Blake <eblake@redhat.com> | 8 | from obj-util-y for qemu_bh_new() links successfully. In this case |
8 | Message-id: 20190218140926.333779-2-vsementsov@virtuozzo.com | 9 | block/io_uring.o from block-util-y isn't needed and we can avoid |
9 | Message-Id: <20190218140926.333779-2-vsementsov@virtuozzo.com> | 10 | dragging in the block layer by linking the stubs instead. The stub |
11 | functions never get called. | ||
12 | |||
13 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> | ||
14 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> | ||
15 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
16 | Message-id: 20200120141858.587874-6-stefanha@redhat.com | ||
17 | Message-Id: <20200120141858.587874-6-stefanha@redhat.com> | ||
10 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 18 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
11 | --- | 19 | --- |
12 | include/qemu/iov.h | 64 ++++++++++++++++++++++++++++++++++++++++++++-- | 20 | MAINTAINERS | 1 + |
13 | 1 file changed, 62 insertions(+), 2 deletions(-) | 21 | stubs/Makefile.objs | 1 + |
22 | stubs/io_uring.c | 32 ++++++++++++++++++++++++++++++++ | ||
23 | 3 files changed, 34 insertions(+) | ||
24 | create mode 100644 stubs/io_uring.c | ||
14 | 25 | ||
15 | diff --git a/include/qemu/iov.h b/include/qemu/iov.h | 26 | diff --git a/MAINTAINERS b/MAINTAINERS |
16 | index XXXXXXX..XXXXXXX 100644 | 27 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/include/qemu/iov.h | 28 | --- a/MAINTAINERS |
18 | +++ b/include/qemu/iov.h | 29 | +++ b/MAINTAINERS |
19 | @@ -XXX,XX +XXX,XX @@ size_t iov_discard_back(struct iovec *iov, unsigned int *iov_cnt, | 30 | @@ -XXX,XX +XXX,XX @@ M: Stefan Hajnoczi <stefanha@redhat.com> |
20 | typedef struct QEMUIOVector { | 31 | L: qemu-block@nongnu.org |
21 | struct iovec *iov; | 32 | S: Maintained |
22 | int niov; | 33 | F: block/io_uring.c |
23 | - int nalloc; | 34 | +F: stubs/io_uring.c |
24 | - size_t size; | 35 | |
36 | qcow2 | ||
37 | M: Kevin Wolf <kwolf@redhat.com> | ||
38 | diff --git a/stubs/Makefile.objs b/stubs/Makefile.objs | ||
39 | index XXXXXXX..XXXXXXX 100644 | ||
40 | --- a/stubs/Makefile.objs | ||
41 | +++ b/stubs/Makefile.objs | ||
42 | @@ -XXX,XX +XXX,XX @@ stub-obj-y += iothread.o | ||
43 | stub-obj-y += iothread-lock.o | ||
44 | stub-obj-y += is-daemonized.o | ||
45 | stub-obj-$(CONFIG_LINUX_AIO) += linux-aio.o | ||
46 | +stub-obj-$(CONFIG_LINUX_IO_URING) += io_uring.o | ||
47 | stub-obj-y += machine-init-done.o | ||
48 | stub-obj-y += migr-blocker.o | ||
49 | stub-obj-y += change-state-handler.o | ||
50 | diff --git a/stubs/io_uring.c b/stubs/io_uring.c | ||
51 | new file mode 100644 | ||
52 | index XXXXXXX..XXXXXXX | ||
53 | --- /dev/null | ||
54 | +++ b/stubs/io_uring.c | ||
55 | @@ -XXX,XX +XXX,XX @@ | ||
56 | +/* | ||
57 | + * Linux io_uring support. | ||
58 | + * | ||
59 | + * Copyright (C) 2009 IBM, Corp. | ||
60 | + * Copyright (C) 2009 Red Hat, Inc. | ||
61 | + * | ||
62 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. | ||
63 | + * See the COPYING file in the top-level directory. | ||
64 | + */ | ||
65 | +#include "qemu/osdep.h" | ||
66 | +#include "block/aio.h" | ||
67 | +#include "block/raw-aio.h" | ||
25 | + | 68 | + |
26 | + /* | 69 | +void luring_detach_aio_context(LuringState *s, AioContext *old_context) |
27 | + * For external @iov (qemu_iovec_init_external()) or allocated @iov | 70 | +{ |
28 | + * (qemu_iovec_init()), @size is the cumulative size of iovecs and | 71 | + abort(); |
29 | + * @local_iov is invalid and unused. | ||
30 | + * | ||
31 | + * For embedded @iov (QEMU_IOVEC_INIT_BUF() or qemu_iovec_init_buf()), | ||
32 | + * @iov is equal to &@local_iov, and @size is valid, as it has same | ||
33 | + * offset and type as @local_iov.iov_len, which is guaranteed by | ||
34 | + * static assertion below. | ||
35 | + * | ||
36 | + * @nalloc is always valid and is -1 both for embedded and external | ||
37 | + * cases. It is included in the union only to ensure the padding prior | ||
38 | + * to the @size field will not result in a 0-length array. | ||
39 | + */ | ||
40 | + union { | ||
41 | + struct { | ||
42 | + int nalloc; | ||
43 | + struct iovec local_iov; | ||
44 | + }; | ||
45 | + struct { | ||
46 | + char __pad[sizeof(int) + offsetof(struct iovec, iov_len)]; | ||
47 | + size_t size; | ||
48 | + }; | ||
49 | + }; | ||
50 | } QEMUIOVector; | ||
51 | |||
52 | +QEMU_BUILD_BUG_ON(offsetof(QEMUIOVector, size) != | ||
53 | + offsetof(QEMUIOVector, local_iov.iov_len)); | ||
54 | + | ||
55 | +#define QEMU_IOVEC_INIT_BUF(self, buf, len) \ | ||
56 | +{ \ | ||
57 | + .iov = &(self).local_iov, \ | ||
58 | + .niov = 1, \ | ||
59 | + .nalloc = -1, \ | ||
60 | + .local_iov = { \ | ||
61 | + .iov_base = (void *)(buf), /* cast away const */ \ | ||
62 | + .iov_len = (len), \ | ||
63 | + }, \ | ||
64 | +} | 72 | +} |
65 | + | 73 | + |
66 | +/* | 74 | +void luring_attach_aio_context(LuringState *s, AioContext *new_context) |
67 | + * qemu_iovec_init_buf | ||
68 | + * | ||
69 | + * Initialize embedded QEMUIOVector. | ||
70 | + * | ||
71 | + * Note: "const" is used over @buf pointer to make it simple to pass | ||
72 | + * const pointers, appearing in read functions. Then this "const" is | ||
73 | + * cast away by QEMU_IOVEC_INIT_BUF(). | ||
74 | + */ | ||
75 | +static inline void qemu_iovec_init_buf(QEMUIOVector *qiov, | ||
76 | + const void *buf, size_t len) | ||
77 | +{ | 75 | +{ |
78 | + *qiov = (QEMUIOVector) QEMU_IOVEC_INIT_BUF(*qiov, buf, len); | 76 | + abort(); |
79 | +} | 77 | +} |
80 | + | 78 | + |
81 | +static inline void *qemu_iovec_buf(QEMUIOVector *qiov) | 79 | +LuringState *luring_init(Error **errp) |
82 | +{ | 80 | +{ |
83 | + /* Only supports embedded iov */ | 81 | + abort(); |
84 | + assert(qiov->nalloc == -1 && qiov->iov == &qiov->local_iov); | ||
85 | + | ||
86 | + return qiov->local_iov.iov_base; | ||
87 | +} | 82 | +} |
88 | + | 83 | + |
89 | void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint); | 84 | +void luring_cleanup(LuringState *s) |
90 | void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov); | 85 | +{ |
91 | void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len); | 86 | + abort(); |
87 | +} | ||
92 | -- | 88 | -- |
93 | 2.20.1 | 89 | 2.24.1 |
94 | 90 | ||
95 | 91 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
2 | 1 | ||
3 | Use new QEMU_IOVEC_INIT_BUF() instead of | ||
4 | qemu_iovec_init_external( ... , 1), which simplifies the code. | ||
5 | |||
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
7 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
8 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | Message-id: 20190218140926.333779-4-vsementsov@virtuozzo.com | ||
10 | Message-Id: <20190218140926.333779-4-vsementsov@virtuozzo.com> | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | --- | ||
13 | block/block-backend.c | 13 ++----------- | ||
14 | 1 file changed, 2 insertions(+), 11 deletions(-) | ||
15 | |||
16 | diff --git a/block/block-backend.c b/block/block-backend.c | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/block/block-backend.c | ||
19 | +++ b/block/block-backend.c | ||
20 | @@ -XXX,XX +XXX,XX @@ static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf, | ||
21 | int64_t bytes, CoroutineEntry co_entry, | ||
22 | BdrvRequestFlags flags) | ||
23 | { | ||
24 | - QEMUIOVector qiov; | ||
25 | - struct iovec iov; | ||
26 | - BlkRwCo rwco; | ||
27 | - | ||
28 | - iov = (struct iovec) { | ||
29 | - .iov_base = buf, | ||
30 | - .iov_len = bytes, | ||
31 | - }; | ||
32 | - qemu_iovec_init_external(&qiov, &iov, 1); | ||
33 | - | ||
34 | - rwco = (BlkRwCo) { | ||
35 | + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); | ||
36 | + BlkRwCo rwco = { | ||
37 | .blk = blk, | ||
38 | .offset = offset, | ||
39 | .iobuf = &qiov, | ||
40 | -- | ||
41 | 2.20.1 | ||
42 | |||
43 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
2 | 1 | ||
3 | Use new qemu_iovec_init_buf() instead of | ||
4 | qemu_iovec_init_external( ... , 1), which simplifies the code. | ||
5 | |||
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
7 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
8 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | Message-id: 20190218140926.333779-5-vsementsov@virtuozzo.com | ||
10 | Message-Id: <20190218140926.333779-5-vsementsov@virtuozzo.com> | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | --- | ||
13 | block/backup.c | 5 +---- | ||
14 | 1 file changed, 1 insertion(+), 4 deletions(-) | ||
15 | |||
16 | diff --git a/block/backup.c b/block/backup.c | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/block/backup.c | ||
19 | +++ b/block/backup.c | ||
20 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job, | ||
21 | void **bounce_buffer) | ||
22 | { | ||
23 | int ret; | ||
24 | - struct iovec iov; | ||
25 | QEMUIOVector qiov; | ||
26 | BlockBackend *blk = job->common.blk; | ||
27 | int nbytes; | ||
28 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job, | ||
29 | if (!*bounce_buffer) { | ||
30 | *bounce_buffer = blk_blockalign(blk, job->cluster_size); | ||
31 | } | ||
32 | - iov.iov_base = *bounce_buffer; | ||
33 | - iov.iov_len = nbytes; | ||
34 | - qemu_iovec_init_external(&qiov, &iov, 1); | ||
35 | + qemu_iovec_init_buf(&qiov, *bounce_buffer, nbytes); | ||
36 | |||
37 | ret = blk_co_preadv(blk, start, qiov.size, &qiov, read_flags); | ||
38 | if (ret < 0) { | ||
39 | -- | ||
40 | 2.20.1 | ||
41 | |||
42 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
2 | 1 | ||
3 | Use new QEMU_IOVEC_INIT_BUF() instead of | ||
4 | qemu_iovec_init_external( ... , 1), which simplifies the code. | ||
5 | |||
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
7 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
8 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | Message-id: 20190218140926.333779-6-vsementsov@virtuozzo.com | ||
10 | Message-Id: <20190218140926.333779-6-vsementsov@virtuozzo.com> | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | --- | ||
13 | block/commit.c | 7 +------ | ||
14 | 1 file changed, 1 insertion(+), 6 deletions(-) | ||
15 | |||
16 | diff --git a/block/commit.c b/block/commit.c | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/block/commit.c | ||
19 | +++ b/block/commit.c | ||
20 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn commit_populate(BlockBackend *bs, BlockBackend *base, | ||
21 | void *buf) | ||
22 | { | ||
23 | int ret = 0; | ||
24 | - QEMUIOVector qiov; | ||
25 | - struct iovec iov = { | ||
26 | - .iov_base = buf, | ||
27 | - .iov_len = bytes, | ||
28 | - }; | ||
29 | + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); | ||
30 | |||
31 | assert(bytes < SIZE_MAX); | ||
32 | - qemu_iovec_init_external(&qiov, &iov, 1); | ||
33 | |||
34 | ret = blk_co_preadv(bs, offset, qiov.size, &qiov, 0); | ||
35 | if (ret < 0) { | ||
36 | -- | ||
37 | 2.20.1 | ||
38 | |||
39 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
2 | 1 | ||
3 | Use new QEMU_IOVEC_INIT_BUF() instead of | ||
4 | qemu_iovec_init_external( ... , 1), which simplifies the code. | ||
5 | |||
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
7 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
8 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | Message-id: 20190218140926.333779-7-vsementsov@virtuozzo.com | ||
10 | Message-Id: <20190218140926.333779-7-vsementsov@virtuozzo.com> | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | --- | ||
13 | block/stream.c | 7 +------ | ||
14 | 1 file changed, 1 insertion(+), 6 deletions(-) | ||
15 | |||
16 | diff --git a/block/stream.c b/block/stream.c | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/block/stream.c | ||
19 | +++ b/block/stream.c | ||
20 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn stream_populate(BlockBackend *blk, | ||
21 | int64_t offset, uint64_t bytes, | ||
22 | void *buf) | ||
23 | { | ||
24 | - struct iovec iov = { | ||
25 | - .iov_base = buf, | ||
26 | - .iov_len = bytes, | ||
27 | - }; | ||
28 | - QEMUIOVector qiov; | ||
29 | + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); | ||
30 | |||
31 | assert(bytes < SIZE_MAX); | ||
32 | - qemu_iovec_init_external(&qiov, &iov, 1); | ||
33 | |||
34 | /* Copy-on-read the unallocated clusters */ | ||
35 | return blk_co_preadv(blk, offset, qiov.size, &qiov, BDRV_REQ_COPY_ON_READ); | ||
36 | -- | ||
37 | 2.20.1 | ||
38 | |||
39 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
2 | 1 | ||
3 | Use new QEMU_IOVEC_INIT_BUF() instead of | ||
4 | qemu_iovec_init_external( ... , 1), which simplifies the code. | ||
5 | |||
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
7 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
8 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | Message-id: 20190218140926.333779-8-vsementsov@virtuozzo.com | ||
10 | Message-Id: <20190218140926.333779-8-vsementsov@virtuozzo.com> | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | --- | ||
13 | block/parallels.c | 13 +++++-------- | ||
14 | 1 file changed, 5 insertions(+), 8 deletions(-) | ||
15 | |||
16 | diff --git a/block/parallels.c b/block/parallels.c | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/block/parallels.c | ||
19 | +++ b/block/parallels.c | ||
20 | @@ -XXX,XX +XXX,XX @@ static int64_t allocate_clusters(BlockDriverState *bs, int64_t sector_num, | ||
21 | if (bs->backing) { | ||
22 | int64_t nb_cow_sectors = to_allocate * s->tracks; | ||
23 | int64_t nb_cow_bytes = nb_cow_sectors << BDRV_SECTOR_BITS; | ||
24 | - QEMUIOVector qiov; | ||
25 | - struct iovec iov = { | ||
26 | - .iov_len = nb_cow_bytes, | ||
27 | - .iov_base = qemu_blockalign(bs, nb_cow_bytes) | ||
28 | - }; | ||
29 | - qemu_iovec_init_external(&qiov, &iov, 1); | ||
30 | + QEMUIOVector qiov = | ||
31 | + QEMU_IOVEC_INIT_BUF(qiov, qemu_blockalign(bs, nb_cow_bytes), | ||
32 | + nb_cow_bytes); | ||
33 | |||
34 | ret = bdrv_co_preadv(bs->backing, idx * s->tracks * BDRV_SECTOR_SIZE, | ||
35 | nb_cow_bytes, &qiov, 0); | ||
36 | if (ret < 0) { | ||
37 | - qemu_vfree(iov.iov_base); | ||
38 | + qemu_vfree(qemu_iovec_buf(&qiov)); | ||
39 | return ret; | ||
40 | } | ||
41 | |||
42 | ret = bdrv_co_pwritev(bs->file, s->data_end * BDRV_SECTOR_SIZE, | ||
43 | nb_cow_bytes, &qiov, 0); | ||
44 | - qemu_vfree(iov.iov_base); | ||
45 | + qemu_vfree(qemu_iovec_buf(&qiov)); | ||
46 | if (ret < 0) { | ||
47 | return ret; | ||
48 | } | ||
49 | -- | ||
50 | 2.20.1 | ||
51 | |||
52 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
2 | 1 | ||
3 | Use new qemu_iovec_init_buf() instead of | ||
4 | qemu_iovec_init_external( ... , 1), which simplifies the code. | ||
5 | |||
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
7 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
8 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | Message-id: 20190218140926.333779-9-vsementsov@virtuozzo.com | ||
10 | Message-Id: <20190218140926.333779-9-vsementsov@virtuozzo.com> | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | --- | ||
13 | block/qcow.c | 21 ++++----------------- | ||
14 | 1 file changed, 4 insertions(+), 17 deletions(-) | ||
15 | |||
16 | diff --git a/block/qcow.c b/block/qcow.c | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/block/qcow.c | ||
19 | +++ b/block/qcow.c | ||
20 | @@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow_co_preadv(BlockDriverState *bs, uint64_t offset, | ||
21 | int offset_in_cluster; | ||
22 | int ret = 0, n; | ||
23 | uint64_t cluster_offset; | ||
24 | - struct iovec hd_iov; | ||
25 | QEMUIOVector hd_qiov; | ||
26 | uint8_t *buf; | ||
27 | void *orig_buf; | ||
28 | @@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow_co_preadv(BlockDriverState *bs, uint64_t offset, | ||
29 | if (!cluster_offset) { | ||
30 | if (bs->backing) { | ||
31 | /* read from the base image */ | ||
32 | - hd_iov.iov_base = (void *)buf; | ||
33 | - hd_iov.iov_len = n; | ||
34 | - qemu_iovec_init_external(&hd_qiov, &hd_iov, 1); | ||
35 | + qemu_iovec_init_buf(&hd_qiov, buf, n); | ||
36 | qemu_co_mutex_unlock(&s->lock); | ||
37 | /* qcow2 emits this on bs->file instead of bs->backing */ | ||
38 | BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); | ||
39 | @@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow_co_preadv(BlockDriverState *bs, uint64_t offset, | ||
40 | ret = -EIO; | ||
41 | break; | ||
42 | } | ||
43 | - hd_iov.iov_base = (void *)buf; | ||
44 | - hd_iov.iov_len = n; | ||
45 | - qemu_iovec_init_external(&hd_qiov, &hd_iov, 1); | ||
46 | + qemu_iovec_init_buf(&hd_qiov, buf, n); | ||
47 | qemu_co_mutex_unlock(&s->lock); | ||
48 | BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); | ||
49 | ret = bdrv_co_preadv(bs->file, cluster_offset + offset_in_cluster, | ||
50 | @@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow_co_pwritev(BlockDriverState *bs, uint64_t offset, | ||
51 | int offset_in_cluster; | ||
52 | uint64_t cluster_offset; | ||
53 | int ret = 0, n; | ||
54 | - struct iovec hd_iov; | ||
55 | QEMUIOVector hd_qiov; | ||
56 | uint8_t *buf; | ||
57 | void *orig_buf; | ||
58 | @@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow_co_pwritev(BlockDriverState *bs, uint64_t offset, | ||
59 | } | ||
60 | } | ||
61 | |||
62 | - hd_iov.iov_base = (void *)buf; | ||
63 | - hd_iov.iov_len = n; | ||
64 | - qemu_iovec_init_external(&hd_qiov, &hd_iov, 1); | ||
65 | + qemu_iovec_init_buf(&hd_qiov, buf, n); | ||
66 | qemu_co_mutex_unlock(&s->lock); | ||
67 | BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); | ||
68 | ret = bdrv_co_pwritev(bs->file, cluster_offset + offset_in_cluster, | ||
69 | @@ -XXX,XX +XXX,XX @@ qcow_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset, | ||
70 | { | ||
71 | BDRVQcowState *s = bs->opaque; | ||
72 | QEMUIOVector hd_qiov; | ||
73 | - struct iovec iov; | ||
74 | z_stream strm; | ||
75 | int ret, out_len; | ||
76 | uint8_t *buf, *out_buf; | ||
77 | @@ -XXX,XX +XXX,XX @@ qcow_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset, | ||
78 | } | ||
79 | cluster_offset &= s->cluster_offset_mask; | ||
80 | |||
81 | - iov = (struct iovec) { | ||
82 | - .iov_base = out_buf, | ||
83 | - .iov_len = out_len, | ||
84 | - }; | ||
85 | - qemu_iovec_init_external(&hd_qiov, &iov, 1); | ||
86 | + qemu_iovec_init_buf(&hd_qiov, out_buf, out_len); | ||
87 | BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED); | ||
88 | ret = bdrv_co_pwritev(bs->file, cluster_offset, out_len, &hd_qiov, 0); | ||
89 | if (ret < 0) { | ||
90 | -- | ||
91 | 2.20.1 | ||
92 | |||
93 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
2 | 1 | ||
3 | Use new qemu_iovec_init_buf() instead of | ||
4 | qemu_iovec_init_external( ... , 1), which simplifies the code. | ||
5 | |||
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
7 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
8 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | Message-id: 20190218140926.333779-11-vsementsov@virtuozzo.com | ||
10 | Message-Id: <20190218140926.333779-11-vsementsov@virtuozzo.com> | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | --- | ||
13 | block/qed-table.c | 16 +++------------- | ||
14 | block/qed.c | 31 +++++++++---------------------- | ||
15 | 2 files changed, 12 insertions(+), 35 deletions(-) | ||
16 | |||
17 | diff --git a/block/qed-table.c b/block/qed-table.c | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/block/qed-table.c | ||
20 | +++ b/block/qed-table.c | ||
21 | @@ -XXX,XX +XXX,XX @@ | ||
22 | /* Called with table_lock held. */ | ||
23 | static int qed_read_table(BDRVQEDState *s, uint64_t offset, QEDTable *table) | ||
24 | { | ||
25 | - QEMUIOVector qiov; | ||
26 | + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF( | ||
27 | + qiov, table->offsets, s->header.cluster_size * s->header.table_size); | ||
28 | int noffsets; | ||
29 | int i, ret; | ||
30 | |||
31 | - struct iovec iov = { | ||
32 | - .iov_base = table->offsets, | ||
33 | - .iov_len = s->header.cluster_size * s->header.table_size, | ||
34 | - }; | ||
35 | - qemu_iovec_init_external(&qiov, &iov, 1); | ||
36 | - | ||
37 | trace_qed_read_table(s, offset, table); | ||
38 | |||
39 | qemu_co_mutex_unlock(&s->table_lock); | ||
40 | @@ -XXX,XX +XXX,XX @@ static int qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table, | ||
41 | unsigned int sector_mask = BDRV_SECTOR_SIZE / sizeof(uint64_t) - 1; | ||
42 | unsigned int start, end, i; | ||
43 | QEDTable *new_table; | ||
44 | - struct iovec iov; | ||
45 | QEMUIOVector qiov; | ||
46 | size_t len_bytes; | ||
47 | int ret; | ||
48 | @@ -XXX,XX +XXX,XX @@ static int qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table, | ||
49 | len_bytes = (end - start) * sizeof(uint64_t); | ||
50 | |||
51 | new_table = qemu_blockalign(s->bs, len_bytes); | ||
52 | - iov = (struct iovec) { | ||
53 | - .iov_base = new_table->offsets, | ||
54 | - .iov_len = len_bytes, | ||
55 | - }; | ||
56 | - qemu_iovec_init_external(&qiov, &iov, 1); | ||
57 | + qemu_iovec_init_buf(&qiov, new_table->offsets, len_bytes); | ||
58 | |||
59 | /* Byteswap table */ | ||
60 | for (i = start; i < end; i++) { | ||
61 | diff --git a/block/qed.c b/block/qed.c | ||
62 | index XXXXXXX..XXXXXXX 100644 | ||
63 | --- a/block/qed.c | ||
64 | +++ b/block/qed.c | ||
65 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn qed_write_header(BDRVQEDState *s) | ||
66 | int nsectors = DIV_ROUND_UP(sizeof(QEDHeader), BDRV_SECTOR_SIZE); | ||
67 | size_t len = nsectors * BDRV_SECTOR_SIZE; | ||
68 | uint8_t *buf; | ||
69 | - struct iovec iov; | ||
70 | QEMUIOVector qiov; | ||
71 | int ret; | ||
72 | |||
73 | assert(s->allocating_acb || s->allocating_write_reqs_plugged); | ||
74 | |||
75 | buf = qemu_blockalign(s->bs, len); | ||
76 | - iov = (struct iovec) { | ||
77 | - .iov_base = buf, | ||
78 | - .iov_len = len, | ||
79 | - }; | ||
80 | - qemu_iovec_init_external(&qiov, &iov, 1); | ||
81 | + qemu_iovec_init_buf(&qiov, buf, len); | ||
82 | |||
83 | ret = bdrv_co_preadv(s->bs->file, 0, qiov.size, &qiov, 0); | ||
84 | if (ret < 0) { | ||
85 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn qed_copy_from_backing_file(BDRVQEDState *s, | ||
86 | { | ||
87 | QEMUIOVector qiov; | ||
88 | QEMUIOVector *backing_qiov = NULL; | ||
89 | - struct iovec iov; | ||
90 | int ret; | ||
91 | |||
92 | /* Skip copy entirely if there is no work to do */ | ||
93 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn qed_copy_from_backing_file(BDRVQEDState *s, | ||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | - iov = (struct iovec) { | ||
98 | - .iov_base = qemu_blockalign(s->bs, len), | ||
99 | - .iov_len = len, | ||
100 | - }; | ||
101 | - qemu_iovec_init_external(&qiov, &iov, 1); | ||
102 | + qemu_iovec_init_buf(&qiov, qemu_blockalign(s->bs, len), len); | ||
103 | |||
104 | ret = qed_read_backing_file(s, pos, &qiov, &backing_qiov); | ||
105 | |||
106 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn qed_copy_from_backing_file(BDRVQEDState *s, | ||
107 | } | ||
108 | ret = 0; | ||
109 | out: | ||
110 | - qemu_vfree(iov.iov_base); | ||
111 | + qemu_vfree(qemu_iovec_buf(&qiov)); | ||
112 | return ret; | ||
113 | } | ||
114 | |||
115 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs, | ||
116 | BdrvRequestFlags flags) | ||
117 | { | ||
118 | BDRVQEDState *s = bs->opaque; | ||
119 | - QEMUIOVector qiov; | ||
120 | - struct iovec iov; | ||
121 | + | ||
122 | + /* | ||
123 | + * Zero writes start without an I/O buffer. If a buffer becomes necessary | ||
124 | + * then it will be allocated during request processing. | ||
125 | + */ | ||
126 | + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes); | ||
127 | |||
128 | /* Fall back if the request is not aligned */ | ||
129 | if (qed_offset_into_cluster(s, offset) || | ||
130 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs, | ||
131 | return -ENOTSUP; | ||
132 | } | ||
133 | |||
134 | - /* Zero writes start without an I/O buffer. If a buffer becomes necessary | ||
135 | - * then it will be allocated during request processing. | ||
136 | - */ | ||
137 | - iov.iov_base = NULL; | ||
138 | - iov.iov_len = bytes; | ||
139 | - | ||
140 | - qemu_iovec_init_external(&qiov, &iov, 1); | ||
141 | return qed_co_request(bs, offset >> BDRV_SECTOR_BITS, &qiov, | ||
142 | bytes >> BDRV_SECTOR_BITS, | ||
143 | QED_AIOCB_WRITE | QED_AIOCB_ZERO); | ||
144 | -- | ||
145 | 2.20.1 | ||
146 | |||
147 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
2 | 1 | ||
3 | Use new qemu_iovec_init_buf() instead of | ||
4 | qemu_iovec_init_external( ... , 1), which simplifies the code. | ||
5 | |||
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
7 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
8 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | Message-id: 20190218140926.333779-12-vsementsov@virtuozzo.com | ||
10 | Message-Id: <20190218140926.333779-12-vsementsov@virtuozzo.com> | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | --- | ||
13 | block/vmdk.c | 7 +------ | ||
14 | 1 file changed, 1 insertion(+), 6 deletions(-) | ||
15 | |||
16 | diff --git a/block/vmdk.c b/block/vmdk.c | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/block/vmdk.c | ||
19 | +++ b/block/vmdk.c | ||
20 | @@ -XXX,XX +XXX,XX @@ static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset, | ||
21 | VmdkGrainMarker *data = NULL; | ||
22 | uLongf buf_len; | ||
23 | QEMUIOVector local_qiov; | ||
24 | - struct iovec iov; | ||
25 | int64_t write_offset; | ||
26 | int64_t write_end_sector; | ||
27 | |||
28 | @@ -XXX,XX +XXX,XX @@ static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset, | ||
29 | data->size = cpu_to_le32(buf_len); | ||
30 | |||
31 | n_bytes = buf_len + sizeof(VmdkGrainMarker); | ||
32 | - iov = (struct iovec) { | ||
33 | - .iov_base = data, | ||
34 | - .iov_len = n_bytes, | ||
35 | - }; | ||
36 | - qemu_iovec_init_external(&local_qiov, &iov, 1); | ||
37 | + qemu_iovec_init_buf(&local_qiov, data, n_bytes); | ||
38 | |||
39 | BLKDBG_EVENT(extent->file, BLKDBG_WRITE_COMPRESSED); | ||
40 | } else { | ||
41 | -- | ||
42 | 2.20.1 | ||
43 | |||
44 | diff view generated by jsdifflib |
1 | From: Stefano Garzarella <sgarzare@redhat.com> | 1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> |
---|---|---|---|
2 | 2 | ||
3 | This function is useful to fix the endianness of struct | 3 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> |
4 | virtio_blk_discard_write_zeroes headers. | 4 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> |
5 | 5 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | |
6 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | 6 | Message-id: 20200120141858.587874-7-stefanha@redhat.com |
7 | Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> | 7 | Message-Id: <20200120141858.587874-7-stefanha@redhat.com> |
8 | Message-id: 20190221103314.58500-9-sgarzare@redhat.com | ||
9 | Message-Id: <20190221103314.58500-9-sgarzare@redhat.com> | ||
10 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
11 | --- | 9 | --- |
12 | tests/virtio-blk-test.c | 23 +++++++++++++++++------ | 10 | util/async.c | 36 ++++++++++++++++++++++++++++++++++++ |
13 | 1 file changed, 17 insertions(+), 6 deletions(-) | 11 | 1 file changed, 36 insertions(+) |
14 | 12 | ||
15 | diff --git a/tests/virtio-blk-test.c b/tests/virtio-blk-test.c | 13 | diff --git a/util/async.c b/util/async.c |
16 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/tests/virtio-blk-test.c | 15 | --- a/util/async.c |
18 | +++ b/tests/virtio-blk-test.c | 16 | +++ b/util/async.c |
19 | @@ -XXX,XX +XXX,XX @@ typedef struct QVirtioBlkReq { | 17 | @@ -XXX,XX +XXX,XX @@ aio_ctx_finalize(GSource *source) |
20 | uint8_t status; | 18 | } |
21 | } QVirtioBlkReq; | 19 | #endif |
22 | 20 | ||
23 | +#ifdef HOST_WORDS_BIGENDIAN | 21 | +#ifdef CONFIG_LINUX_IO_URING |
24 | +const bool host_is_big_endian = true; | 22 | + if (ctx->linux_io_uring) { |
25 | +#else | 23 | + luring_detach_aio_context(ctx->linux_io_uring, ctx); |
26 | +const bool host_is_big_endian; /* false */ | 24 | + luring_cleanup(ctx->linux_io_uring); |
25 | + ctx->linux_io_uring = NULL; | ||
26 | + } | ||
27 | +#endif | 27 | +#endif |
28 | + | 28 | + |
29 | static char *drive_create(void) | 29 | assert(QSLIST_EMPTY(&ctx->scheduled_coroutines)); |
30 | { | 30 | qemu_bh_delete(ctx->co_schedule_bh); |
31 | int fd, ret; | 31 | |
32 | @@ -XXX,XX +XXX,XX @@ static QVirtioPCIDevice *virtio_blk_pci_init(QPCIBus *bus, int slot) | 32 | @@ -XXX,XX +XXX,XX @@ LinuxAioState *aio_get_linux_aio(AioContext *ctx) |
33 | |||
34 | static inline void virtio_blk_fix_request(QVirtioDevice *d, QVirtioBlkReq *req) | ||
35 | { | ||
36 | -#ifdef HOST_WORDS_BIGENDIAN | ||
37 | - const bool host_is_big_endian = true; | ||
38 | -#else | ||
39 | - const bool host_is_big_endian = false; | ||
40 | -#endif | ||
41 | - | ||
42 | if (qvirtio_is_big_endian(d) != host_is_big_endian) { | ||
43 | req->type = bswap32(req->type); | ||
44 | req->ioprio = bswap32(req->ioprio); | ||
45 | @@ -XXX,XX +XXX,XX @@ static inline void virtio_blk_fix_request(QVirtioDevice *d, QVirtioBlkReq *req) | ||
46 | } | ||
47 | } | 33 | } |
48 | 34 | #endif | |
35 | |||
36 | +#ifdef CONFIG_LINUX_IO_URING | ||
37 | +LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp) | ||
38 | +{ | ||
39 | + if (ctx->linux_io_uring) { | ||
40 | + return ctx->linux_io_uring; | ||
41 | + } | ||
49 | + | 42 | + |
50 | +static inline void virtio_blk_fix_dwz_hdr(QVirtioDevice *d, | 43 | + ctx->linux_io_uring = luring_init(errp); |
51 | + struct virtio_blk_discard_write_zeroes *dwz_hdr) | 44 | + if (!ctx->linux_io_uring) { |
52 | +{ | 45 | + return NULL; |
53 | + if (qvirtio_is_big_endian(d) != host_is_big_endian) { | ||
54 | + dwz_hdr->sector = bswap64(dwz_hdr->sector); | ||
55 | + dwz_hdr->num_sectors = bswap32(dwz_hdr->num_sectors); | ||
56 | + dwz_hdr->flags = bswap32(dwz_hdr->flags); | ||
57 | + } | 46 | + } |
47 | + | ||
48 | + luring_attach_aio_context(ctx->linux_io_uring, ctx); | ||
49 | + return ctx->linux_io_uring; | ||
58 | +} | 50 | +} |
59 | + | 51 | + |
60 | static uint64_t virtio_blk_request(QGuestAllocator *alloc, QVirtioDevice *d, | 52 | +LuringState *aio_get_linux_io_uring(AioContext *ctx) |
61 | QVirtioBlkReq *req, uint64_t data_size) | 53 | +{ |
54 | + assert(ctx->linux_io_uring); | ||
55 | + return ctx->linux_io_uring; | ||
56 | +} | ||
57 | +#endif | ||
58 | + | ||
59 | void aio_notify(AioContext *ctx) | ||
62 | { | 60 | { |
61 | /* Write e.g. bh->scheduled before reading ctx->notify_me. Pairs | ||
62 | @@ -XXX,XX +XXX,XX @@ AioContext *aio_context_new(Error **errp) | ||
63 | #ifdef CONFIG_LINUX_AIO | ||
64 | ctx->linux_aio = NULL; | ||
65 | #endif | ||
66 | + | ||
67 | +#ifdef CONFIG_LINUX_IO_URING | ||
68 | + ctx->linux_io_uring = NULL; | ||
69 | +#endif | ||
70 | + | ||
71 | ctx->thread_pool = NULL; | ||
72 | qemu_rec_mutex_init(&ctx->lock); | ||
73 | timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx); | ||
63 | -- | 74 | -- |
64 | 2.20.1 | 75 | 2.24.1 |
65 | 76 | ||
66 | 77 | diff view generated by jsdifflib |
1 | From: Stefano Garzarella <sgarzare@redhat.com> | 1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> |
---|---|---|---|
2 | 2 | ||
3 | We add acct_failed param in order to use virtio_blk_handle_rw_error() | 3 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> |
4 | also when is not required to call block_acct_failed(). (eg. a discard | 4 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> |
5 | operation is failed) | 5 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
6 | 6 | Message-id: 20200120141858.587874-8-stefanha@redhat.com | |
7 | Reviewed-by: Michael S. Tsirkin <mst@redhat.com> | 7 | Message-Id: <20200120141858.587874-8-stefanha@redhat.com> |
8 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> | ||
10 | Message-id: 20190221103314.58500-2-sgarzare@redhat.com | ||
11 | Message-Id: <20190221103314.58500-2-sgarzare@redhat.com> | ||
12 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
13 | --- | 9 | --- |
14 | hw/block/virtio-blk.c | 10 ++++++---- | 10 | block.c | 22 ++++++++++++++++++++++ |
15 | 1 file changed, 6 insertions(+), 4 deletions(-) | 11 | blockdev.c | 12 ++++-------- |
12 | include/block/block.h | 1 + | ||
13 | 3 files changed, 27 insertions(+), 8 deletions(-) | ||
16 | 14 | ||
17 | diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c | 15 | diff --git a/block.c b/block.c |
18 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/hw/block/virtio-blk.c | 17 | --- a/block.c |
20 | +++ b/hw/block/virtio-blk.c | 18 | +++ b/block.c |
21 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status) | 19 | @@ -XXX,XX +XXX,XX @@ static BlockdevDetectZeroesOptions bdrv_parse_detect_zeroes(QemuOpts *opts, |
20 | return detect_zeroes; | ||
22 | } | 21 | } |
23 | 22 | ||
24 | static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error, | 23 | +/** |
25 | - bool is_read) | 24 | + * Set open flags for aio engine |
26 | + bool is_read, bool acct_failed) | 25 | + * |
27 | { | 26 | + * Return 0 on success, -1 if the engine specified is invalid |
28 | VirtIOBlock *s = req->dev; | 27 | + */ |
29 | BlockErrorAction action = blk_get_error_action(s->blk, is_read, error); | 28 | +int bdrv_parse_aio(const char *mode, int *flags) |
30 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error, | 29 | +{ |
31 | s->rq = req; | 30 | + if (!strcmp(mode, "threads")) { |
32 | } else if (action == BLOCK_ERROR_ACTION_REPORT) { | 31 | + /* do nothing, default */ |
33 | virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR); | 32 | + } else if (!strcmp(mode, "native")) { |
34 | - block_acct_failed(blk_get_stats(s->blk), &req->acct); | 33 | + *flags |= BDRV_O_NATIVE_AIO; |
35 | + if (acct_failed) { | 34 | +#ifdef CONFIG_LINUX_IO_URING |
36 | + block_acct_failed(blk_get_stats(s->blk), &req->acct); | 35 | + } else if (!strcmp(mode, "io_uring")) { |
37 | + } | 36 | + *flags |= BDRV_O_IO_URING; |
38 | virtio_blk_free_request(req); | 37 | +#endif |
39 | } | 38 | + } else { |
40 | 39 | + return -1; | |
41 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_rw_complete(void *opaque, int ret) | 40 | + } |
42 | * the memory until the request is completed (which will | 41 | + |
43 | * happen on the other side of the migration). | 42 | + return 0; |
44 | */ | 43 | +} |
45 | - if (virtio_blk_handle_rw_error(req, -ret, is_read)) { | 44 | + |
46 | + if (virtio_blk_handle_rw_error(req, -ret, is_read, true)) { | 45 | /** |
47 | continue; | 46 | * Set open flags for a given discard mode |
47 | * | ||
48 | diff --git a/blockdev.c b/blockdev.c | ||
49 | index XXXXXXX..XXXXXXX 100644 | ||
50 | --- a/blockdev.c | ||
51 | +++ b/blockdev.c | ||
52 | @@ -XXX,XX +XXX,XX @@ static void extract_common_blockdev_options(QemuOpts *opts, int *bdrv_flags, | ||
53 | } | ||
54 | |||
55 | if ((aio = qemu_opt_get(opts, "aio")) != NULL) { | ||
56 | - if (!strcmp(aio, "native")) { | ||
57 | - *bdrv_flags |= BDRV_O_NATIVE_AIO; | ||
58 | - } else if (!strcmp(aio, "threads")) { | ||
59 | - /* this is the default */ | ||
60 | - } else { | ||
61 | - error_setg(errp, "invalid aio option"); | ||
62 | - return; | ||
63 | + if (bdrv_parse_aio(aio, bdrv_flags) < 0) { | ||
64 | + error_setg(errp, "invalid aio option"); | ||
65 | + return; | ||
48 | } | 66 | } |
49 | } | 67 | } |
50 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_flush_complete(void *opaque, int ret) | ||
51 | |||
52 | aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); | ||
53 | if (ret) { | ||
54 | - if (virtio_blk_handle_rw_error(req, -ret, 0)) { | ||
55 | + if (virtio_blk_handle_rw_error(req, -ret, 0, true)) { | ||
56 | goto out; | ||
57 | } | ||
58 | } | 68 | } |
69 | @@ -XXX,XX +XXX,XX @@ QemuOptsList qemu_common_drive_opts = { | ||
70 | },{ | ||
71 | .name = "aio", | ||
72 | .type = QEMU_OPT_STRING, | ||
73 | - .help = "host AIO implementation (threads, native)", | ||
74 | + .help = "host AIO implementation (threads, native, io_uring)", | ||
75 | },{ | ||
76 | .name = BDRV_OPT_CACHE_WB, | ||
77 | .type = QEMU_OPT_BOOL, | ||
78 | diff --git a/include/block/block.h b/include/block/block.h | ||
79 | index XXXXXXX..XXXXXXX 100644 | ||
80 | --- a/include/block/block.h | ||
81 | +++ b/include/block/block.h | ||
82 | @@ -XXX,XX +XXX,XX @@ void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top, | ||
83 | void bdrv_replace_node(BlockDriverState *from, BlockDriverState *to, | ||
84 | Error **errp); | ||
85 | |||
86 | +int bdrv_parse_aio(const char *mode, int *flags); | ||
87 | int bdrv_parse_cache_mode(const char *mode, int *flags, bool *writethrough); | ||
88 | int bdrv_parse_discard_flags(const char *mode, int *flags); | ||
89 | BdrvChild *bdrv_open_child(const char *filename, | ||
59 | -- | 90 | -- |
60 | 2.20.1 | 91 | 2.24.1 |
61 | 92 | ||
62 | 93 | diff view generated by jsdifflib |
1 | From: Stefano Garzarella <sgarzare@redhat.com> | 1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> |
---|---|---|---|
2 | 2 | ||
3 | The size of data in the virtio_blk_request must be a multiple | 3 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> |
4 | of 512 bytes for IN and OUT requests, or a multiple of the size | 4 | Reviewed-by: Maxim Levitsky <maximlevitsky@gmail.com> |
5 | of struct virtio_blk_discard_write_zeroes for DISCARD and | 5 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> |
6 | WRITE_ZEROES requests. | 6 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
7 | 7 | Message-id: 20200120141858.587874-9-stefanha@redhat.com | |
8 | Reviewed-by: Michael S. Tsirkin <mst@redhat.com> | 8 | Message-Id: <20200120141858.587874-9-stefanha@redhat.com> |
9 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
10 | Reviewed-by: Thomas Huth <thuth@redhat.com> | ||
11 | Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> | ||
12 | Message-id: 20190221103314.58500-8-sgarzare@redhat.com | ||
13 | Message-Id: <20190221103314.58500-8-sgarzare@redhat.com> | ||
14 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 9 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
15 | --- | 10 | --- |
16 | tests/virtio-blk-test.c | 15 ++++++++++++++- | 11 | block/file-posix.c | 98 +++++++++++++++++++++++++++++++++++++--------- |
17 | 1 file changed, 14 insertions(+), 1 deletion(-) | 12 | 1 file changed, 79 insertions(+), 19 deletions(-) |
18 | 13 | ||
19 | diff --git a/tests/virtio-blk-test.c b/tests/virtio-blk-test.c | 14 | diff --git a/block/file-posix.c b/block/file-posix.c |
20 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/tests/virtio-blk-test.c | 16 | --- a/block/file-posix.c |
22 | +++ b/tests/virtio-blk-test.c | 17 | +++ b/block/file-posix.c |
23 | @@ -XXX,XX +XXX,XX @@ static uint64_t virtio_blk_request(QGuestAllocator *alloc, QVirtioDevice *d, | 18 | @@ -XXX,XX +XXX,XX @@ typedef struct BDRVRawState { |
24 | uint64_t addr; | 19 | bool has_write_zeroes:1; |
25 | uint8_t status = 0xFF; | 20 | bool discard_zeroes:1; |
26 | 21 | bool use_linux_aio:1; | |
27 | - g_assert_cmpuint(data_size % 512, ==, 0); | 22 | + bool use_linux_io_uring:1; |
28 | + switch (req->type) { | 23 | bool page_cache_inconsistent:1; |
29 | + case VIRTIO_BLK_T_IN: | 24 | bool has_fallocate; |
30 | + case VIRTIO_BLK_T_OUT: | 25 | bool needs_alignment; |
31 | + g_assert_cmpuint(data_size % 512, ==, 0); | 26 | @@ -XXX,XX +XXX,XX @@ static QemuOptsList raw_runtime_opts = { |
32 | + break; | 27 | { |
33 | + case VIRTIO_BLK_T_DISCARD: | 28 | .name = "aio", |
34 | + case VIRTIO_BLK_T_WRITE_ZEROES: | 29 | .type = QEMU_OPT_STRING, |
35 | + g_assert_cmpuint(data_size % | 30 | - .help = "host AIO implementation (threads, native)", |
36 | + sizeof(struct virtio_blk_discard_write_zeroes), ==, 0); | 31 | + .help = "host AIO implementation (threads, native, io_uring)", |
37 | + break; | 32 | }, |
38 | + default: | 33 | { |
39 | + g_assert_cmpuint(data_size, ==, 0); | 34 | .name = "locking", |
35 | @@ -XXX,XX +XXX,XX @@ static int raw_open_common(BlockDriverState *bs, QDict *options, | ||
36 | goto fail; | ||
37 | } | ||
38 | |||
39 | - aio_default = (bdrv_flags & BDRV_O_NATIVE_AIO) | ||
40 | - ? BLOCKDEV_AIO_OPTIONS_NATIVE | ||
41 | - : BLOCKDEV_AIO_OPTIONS_THREADS; | ||
42 | + if (bdrv_flags & BDRV_O_NATIVE_AIO) { | ||
43 | + aio_default = BLOCKDEV_AIO_OPTIONS_NATIVE; | ||
44 | +#ifdef CONFIG_LINUX_IO_URING | ||
45 | + } else if (bdrv_flags & BDRV_O_IO_URING) { | ||
46 | + aio_default = BLOCKDEV_AIO_OPTIONS_IO_URING; | ||
47 | +#endif | ||
48 | + } else { | ||
49 | + aio_default = BLOCKDEV_AIO_OPTIONS_THREADS; | ||
40 | + } | 50 | + } |
41 | + | 51 | + |
42 | addr = guest_alloc(alloc, sizeof(*req) + data_size); | 52 | aio = qapi_enum_parse(&BlockdevAioOptions_lookup, |
43 | 53 | qemu_opt_get(opts, "aio"), | |
44 | virtio_blk_fix_request(d, req); | 54 | aio_default, &local_err); |
55 | @@ -XXX,XX +XXX,XX @@ static int raw_open_common(BlockDriverState *bs, QDict *options, | ||
56 | ret = -EINVAL; | ||
57 | goto fail; | ||
58 | } | ||
59 | + | ||
60 | s->use_linux_aio = (aio == BLOCKDEV_AIO_OPTIONS_NATIVE); | ||
61 | +#ifdef CONFIG_LINUX_IO_URING | ||
62 | + s->use_linux_io_uring = (aio == BLOCKDEV_AIO_OPTIONS_IO_URING); | ||
63 | +#endif | ||
64 | |||
65 | locking = qapi_enum_parse(&OnOffAuto_lookup, | ||
66 | qemu_opt_get(opts, "locking"), | ||
67 | @@ -XXX,XX +XXX,XX @@ static int raw_open_common(BlockDriverState *bs, QDict *options, | ||
68 | } | ||
69 | #endif /* !defined(CONFIG_LINUX_AIO) */ | ||
70 | |||
71 | +#ifdef CONFIG_LINUX_IO_URING | ||
72 | + if (s->use_linux_io_uring) { | ||
73 | + if (!aio_setup_linux_io_uring(bdrv_get_aio_context(bs), errp)) { | ||
74 | + error_prepend(errp, "Unable to use io_uring: "); | ||
75 | + goto fail; | ||
76 | + } | ||
77 | + } | ||
78 | +#else | ||
79 | + if (s->use_linux_io_uring) { | ||
80 | + error_setg(errp, "aio=io_uring was specified, but is not supported " | ||
81 | + "in this build."); | ||
82 | + ret = -EINVAL; | ||
83 | + goto fail; | ||
84 | + } | ||
85 | +#endif /* !defined(CONFIG_LINUX_IO_URING) */ | ||
86 | + | ||
87 | s->has_discard = true; | ||
88 | s->has_write_zeroes = true; | ||
89 | if ((bs->open_flags & BDRV_O_NOCACHE) != 0) { | ||
90 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset, | ||
91 | return -EIO; | ||
92 | |||
93 | /* | ||
94 | - * Check if the underlying device requires requests to be aligned, | ||
95 | - * and if the request we are trying to submit is aligned or not. | ||
96 | - * If this is the case tell the low-level driver that it needs | ||
97 | - * to copy the buffer. | ||
98 | + * When using O_DIRECT, the request must be aligned to be able to use | ||
99 | + * either libaio or io_uring interface. If not fail back to regular thread | ||
100 | + * pool read/write code which emulates this for us if we | ||
101 | + * set QEMU_AIO_MISALIGNED. | ||
102 | */ | ||
103 | - if (s->needs_alignment) { | ||
104 | - if (!bdrv_qiov_is_aligned(bs, qiov)) { | ||
105 | - type |= QEMU_AIO_MISALIGNED; | ||
106 | + if (s->needs_alignment && !bdrv_qiov_is_aligned(bs, qiov)) { | ||
107 | + type |= QEMU_AIO_MISALIGNED; | ||
108 | +#ifdef CONFIG_LINUX_IO_URING | ||
109 | + } else if (s->use_linux_io_uring) { | ||
110 | + LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs)); | ||
111 | + assert(qiov->size == bytes); | ||
112 | + return luring_co_submit(bs, aio, s->fd, offset, qiov, type); | ||
113 | +#endif | ||
114 | #ifdef CONFIG_LINUX_AIO | ||
115 | - } else if (s->use_linux_aio) { | ||
116 | - LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs)); | ||
117 | - assert(qiov->size == bytes); | ||
118 | - return laio_co_submit(bs, aio, s->fd, offset, qiov, type); | ||
119 | + } else if (s->use_linux_aio) { | ||
120 | + LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs)); | ||
121 | + assert(qiov->size == bytes); | ||
122 | + return laio_co_submit(bs, aio, s->fd, offset, qiov, type); | ||
123 | #endif | ||
124 | - } | ||
125 | } | ||
126 | |||
127 | acb = (RawPosixAIOData) { | ||
128 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, uint64_t offset, | ||
129 | |||
130 | static void raw_aio_plug(BlockDriverState *bs) | ||
131 | { | ||
132 | + BDRVRawState __attribute__((unused)) *s = bs->opaque; | ||
133 | #ifdef CONFIG_LINUX_AIO | ||
134 | - BDRVRawState *s = bs->opaque; | ||
135 | if (s->use_linux_aio) { | ||
136 | LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs)); | ||
137 | laio_io_plug(bs, aio); | ||
138 | } | ||
139 | #endif | ||
140 | +#ifdef CONFIG_LINUX_IO_URING | ||
141 | + if (s->use_linux_io_uring) { | ||
142 | + LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs)); | ||
143 | + luring_io_plug(bs, aio); | ||
144 | + } | ||
145 | +#endif | ||
146 | } | ||
147 | |||
148 | static void raw_aio_unplug(BlockDriverState *bs) | ||
149 | { | ||
150 | + BDRVRawState __attribute__((unused)) *s = bs->opaque; | ||
151 | #ifdef CONFIG_LINUX_AIO | ||
152 | - BDRVRawState *s = bs->opaque; | ||
153 | if (s->use_linux_aio) { | ||
154 | LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs)); | ||
155 | laio_io_unplug(bs, aio); | ||
156 | } | ||
157 | #endif | ||
158 | +#ifdef CONFIG_LINUX_IO_URING | ||
159 | + if (s->use_linux_io_uring) { | ||
160 | + LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs)); | ||
161 | + luring_io_unplug(bs, aio); | ||
162 | + } | ||
163 | +#endif | ||
164 | } | ||
165 | |||
166 | static int raw_co_flush_to_disk(BlockDriverState *bs) | ||
167 | @@ -XXX,XX +XXX,XX @@ static int raw_co_flush_to_disk(BlockDriverState *bs) | ||
168 | .aio_type = QEMU_AIO_FLUSH, | ||
169 | }; | ||
170 | |||
171 | +#ifdef CONFIG_LINUX_IO_URING | ||
172 | + if (s->use_linux_io_uring) { | ||
173 | + LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs)); | ||
174 | + return luring_co_submit(bs, aio, s->fd, 0, NULL, QEMU_AIO_FLUSH); | ||
175 | + } | ||
176 | +#endif | ||
177 | return raw_thread_pool_submit(bs, handle_aiocb_flush, &acb); | ||
178 | } | ||
179 | |||
180 | static void raw_aio_attach_aio_context(BlockDriverState *bs, | ||
181 | AioContext *new_context) | ||
182 | { | ||
183 | + BDRVRawState __attribute__((unused)) *s = bs->opaque; | ||
184 | #ifdef CONFIG_LINUX_AIO | ||
185 | - BDRVRawState *s = bs->opaque; | ||
186 | if (s->use_linux_aio) { | ||
187 | Error *local_err = NULL; | ||
188 | if (!aio_setup_linux_aio(new_context, &local_err)) { | ||
189 | @@ -XXX,XX +XXX,XX @@ static void raw_aio_attach_aio_context(BlockDriverState *bs, | ||
190 | } | ||
191 | } | ||
192 | #endif | ||
193 | +#ifdef CONFIG_LINUX_IO_URING | ||
194 | + if (s->use_linux_io_uring) { | ||
195 | + Error *local_err; | ||
196 | + if (!aio_setup_linux_io_uring(new_context, &local_err)) { | ||
197 | + error_reportf_err(local_err, "Unable to use linux io_uring, " | ||
198 | + "falling back to thread pool: "); | ||
199 | + s->use_linux_io_uring = false; | ||
200 | + } | ||
201 | + } | ||
202 | +#endif | ||
203 | } | ||
204 | |||
205 | static void raw_close(BlockDriverState *bs) | ||
45 | -- | 206 | -- |
46 | 2.20.1 | 207 | 2.24.1 |
47 | 208 | ||
48 | 209 | diff view generated by jsdifflib |
1 | From: Stefano Garzarella <sgarzare@redhat.com> | 1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> |
---|---|---|---|
2 | 2 | ||
3 | Since configurable features for virtio-blk are growing, this patch | 3 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> |
4 | adds host_features field in the struct VirtIOBlock. (as in virtio-net) | 4 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> |
5 | In this way, we can avoid to add new fields for new properties and | 5 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
6 | we can directly set VIRTIO_BLK_F* flags in the host_features. | 6 | Message-id: 20200120141858.587874-10-stefanha@redhat.com |
7 | 7 | Message-Id: <20200120141858.587874-10-stefanha@redhat.com> | |
8 | We update "config-wce" and "scsi" property definition to use the new | ||
9 | host_features field without change the behaviour. | ||
10 | |||
11 | Suggested-by: Michael S. Tsirkin <mst@redhat.com> | ||
12 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
13 | Reviewed-by: Michael S. Tsirkin <mst@redhat.com> | ||
14 | Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> | ||
15 | Message-id: 20190221103314.58500-3-sgarzare@redhat.com | ||
16 | Message-Id: <20190221103314.58500-3-sgarzare@redhat.com> | ||
17 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
18 | --- | 9 | --- |
19 | include/hw/virtio/virtio-blk.h | 3 +-- | 10 | block/io_uring.c | 23 ++++++++++++++++++++--- |
20 | hw/block/virtio-blk.c | 16 +++++++++------- | 11 | block/trace-events | 12 ++++++++++++ |
21 | 2 files changed, 10 insertions(+), 9 deletions(-) | 12 | 2 files changed, 32 insertions(+), 3 deletions(-) |
22 | 13 | ||
23 | diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h | 14 | diff --git a/block/io_uring.c b/block/io_uring.c |
24 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
25 | --- a/include/hw/virtio/virtio-blk.h | 16 | --- a/block/io_uring.c |
26 | +++ b/include/hw/virtio/virtio-blk.h | 17 | +++ b/block/io_uring.c |
27 | @@ -XXX,XX +XXX,XX @@ struct VirtIOBlkConf | 18 | @@ -XXX,XX +XXX,XX @@ |
28 | BlockConf conf; | 19 | #include "block/raw-aio.h" |
29 | IOThread *iothread; | 20 | #include "qemu/coroutine.h" |
30 | char *serial; | 21 | #include "qapi/error.h" |
31 | - uint32_t scsi; | 22 | +#include "trace.h" |
32 | - uint32_t config_wce; | 23 | |
33 | uint32_t request_merging; | 24 | /* io_uring ring size */ |
34 | uint16_t num_queues; | 25 | #define MAX_ENTRIES 128 |
35 | uint16_t queue_size; | 26 | @@ -XXX,XX +XXX,XX @@ static void luring_resubmit_short_read(LuringState *s, LuringAIOCB *luringcb, |
36 | @@ -XXX,XX +XXX,XX @@ typedef struct VirtIOBlock { | 27 | QEMUIOVector *resubmit_qiov; |
37 | bool dataplane_disabled; | 28 | size_t remaining; |
38 | bool dataplane_started; | 29 | |
39 | struct VirtIOBlockDataPlane *dataplane; | 30 | + trace_luring_resubmit_short_read(s, luringcb, nread); |
40 | + uint64_t host_features; | 31 | + |
41 | } VirtIOBlock; | 32 | /* Update read position */ |
42 | 33 | luringcb->total_read = nread; | |
43 | typedef struct VirtIOBlockReq { | 34 | remaining = luringcb->qiov->size - luringcb->total_read; |
44 | diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c | 35 | @@ -XXX,XX +XXX,XX @@ static void luring_process_completions(LuringState *s) |
36 | |||
37 | /* Change counters one-by-one because we can be nested. */ | ||
38 | s->io_q.in_flight--; | ||
39 | + trace_luring_process_completion(s, luringcb, ret); | ||
40 | |||
41 | /* total_read is non-zero only for resubmitted read requests */ | ||
42 | total_bytes = ret + luringcb->total_read; | ||
43 | @@ -XXX,XX +XXX,XX @@ static int ioq_submit(LuringState *s) | ||
44 | QSIMPLEQ_REMOVE_HEAD(&s->io_q.submit_queue, next); | ||
45 | } | ||
46 | ret = io_uring_submit(&s->ring); | ||
47 | + trace_luring_io_uring_submit(s, ret); | ||
48 | /* Prevent infinite loop if submission is refused */ | ||
49 | if (ret <= 0) { | ||
50 | if (ret == -EAGAIN) { | ||
51 | @@ -XXX,XX +XXX,XX @@ static void ioq_init(LuringQueue *io_q) | ||
52 | |||
53 | void luring_io_plug(BlockDriverState *bs, LuringState *s) | ||
54 | { | ||
55 | + trace_luring_io_plug(s); | ||
56 | s->io_q.plugged++; | ||
57 | } | ||
58 | |||
59 | void luring_io_unplug(BlockDriverState *bs, LuringState *s) | ||
60 | { | ||
61 | assert(s->io_q.plugged); | ||
62 | + trace_luring_io_unplug(s, s->io_q.blocked, s->io_q.plugged, | ||
63 | + s->io_q.in_queue, s->io_q.in_flight); | ||
64 | if (--s->io_q.plugged == 0 && | ||
65 | !s->io_q.blocked && s->io_q.in_queue > 0) { | ||
66 | ioq_submit(s); | ||
67 | @@ -XXX,XX +XXX,XX @@ void luring_io_unplug(BlockDriverState *bs, LuringState *s) | ||
68 | static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s, | ||
69 | uint64_t offset, int type) | ||
70 | { | ||
71 | + int ret; | ||
72 | struct io_uring_sqe *sqes = &luringcb->sqeq; | ||
73 | |||
74 | switch (type) { | ||
75 | @@ -XXX,XX +XXX,XX @@ static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s, | ||
76 | |||
77 | QSIMPLEQ_INSERT_TAIL(&s->io_q.submit_queue, luringcb, next); | ||
78 | s->io_q.in_queue++; | ||
79 | - | ||
80 | + trace_luring_do_submit(s, s->io_q.blocked, s->io_q.plugged, | ||
81 | + s->io_q.in_queue, s->io_q.in_flight); | ||
82 | if (!s->io_q.blocked && | ||
83 | (!s->io_q.plugged || | ||
84 | s->io_q.in_flight + s->io_q.in_queue >= MAX_ENTRIES)) { | ||
85 | - return ioq_submit(s); | ||
86 | + ret = ioq_submit(s); | ||
87 | + trace_luring_do_submit_done(s, ret); | ||
88 | + return ret; | ||
89 | } | ||
90 | return 0; | ||
91 | } | ||
92 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn luring_co_submit(BlockDriverState *bs, LuringState *s, int fd, | ||
93 | .qiov = qiov, | ||
94 | .is_read = (type == QEMU_AIO_READ), | ||
95 | }; | ||
96 | - | ||
97 | + trace_luring_co_submit(bs, s, &luringcb, fd, offset, qiov ? qiov->size : 0, | ||
98 | + type); | ||
99 | ret = luring_do_submit(fd, &luringcb, s, offset, type); | ||
100 | + | ||
101 | if (ret < 0) { | ||
102 | return ret; | ||
103 | } | ||
104 | @@ -XXX,XX +XXX,XX @@ LuringState *luring_init(Error **errp) | ||
105 | LuringState *s = g_new0(LuringState, 1); | ||
106 | struct io_uring *ring = &s->ring; | ||
107 | |||
108 | + trace_luring_init_state(s, sizeof(*s)); | ||
109 | + | ||
110 | rc = io_uring_queue_init(MAX_ENTRIES, ring, 0); | ||
111 | if (rc < 0) { | ||
112 | error_setg_errno(errp, errno, "failed to init linux io_uring ring"); | ||
113 | @@ -XXX,XX +XXX,XX @@ void luring_cleanup(LuringState *s) | ||
114 | { | ||
115 | io_uring_queue_exit(&s->ring); | ||
116 | g_free(s); | ||
117 | + trace_luring_cleanup_state(s); | ||
118 | } | ||
119 | diff --git a/block/trace-events b/block/trace-events | ||
45 | index XXXXXXX..XXXXXXX 100644 | 120 | index XXXXXXX..XXXXXXX 100644 |
46 | --- a/hw/block/virtio-blk.c | 121 | --- a/block/trace-events |
47 | +++ b/hw/block/virtio-blk.c | 122 | +++ b/block/trace-events |
48 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_scsi_req(VirtIOBlockReq *req) | 123 | @@ -XXX,XX +XXX,XX @@ qmp_block_stream(void *bs) "bs %p" |
49 | */ | 124 | file_paio_submit(void *acb, void *opaque, int64_t offset, int count, int type) "acb %p opaque %p offset %"PRId64" count %d type %d" |
50 | scsi = (void *)elem->in_sg[elem->in_num - 2].iov_base; | 125 | file_copy_file_range(void *bs, int src, int64_t src_off, int dst, int64_t dst_off, int64_t bytes, int flags, int64_t ret) "bs %p src_fd %d offset %"PRIu64" dst_fd %d offset %"PRIu64" bytes %"PRIu64" flags %d ret %"PRId64 |
51 | 126 | ||
52 | - if (!blk->conf.scsi) { | 127 | +#io_uring.c |
53 | + if (!virtio_has_feature(blk->host_features, VIRTIO_BLK_F_SCSI)) { | 128 | +luring_init_state(void *s, size_t size) "s %p size %zu" |
54 | status = VIRTIO_BLK_S_UNSUPP; | 129 | +luring_cleanup_state(void *s) "%p freed" |
55 | goto fail; | 130 | +luring_io_plug(void *s) "LuringState %p plug" |
56 | } | 131 | +luring_io_unplug(void *s, int blocked, int plugged, int queued, int inflight) "LuringState %p blocked %d plugged %d queued %d inflight %d" |
57 | @@ -XXX,XX +XXX,XX @@ static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features, | 132 | +luring_do_submit(void *s, int blocked, int plugged, int queued, int inflight) "LuringState %p blocked %d plugged %d queued %d inflight %d" |
58 | { | 133 | +luring_do_submit_done(void *s, int ret) "LuringState %p submitted to kernel %d" |
59 | VirtIOBlock *s = VIRTIO_BLK(vdev); | 134 | +luring_co_submit(void *bs, void *s, void *luringcb, int fd, uint64_t offset, size_t nbytes, int type) "bs %p s %p luringcb %p fd %d offset %" PRId64 " nbytes %zd type %d" |
60 | 135 | +luring_process_completion(void *s, void *aiocb, int ret) "LuringState %p luringcb %p ret %d" | |
61 | + /* Firstly sync all virtio-blk possible supported features */ | 136 | +luring_io_uring_submit(void *s, int ret) "LuringState %p ret %d" |
62 | + features |= s->host_features; | 137 | +luring_resubmit_short_read(void *s, void *luringcb, int nread) "LuringState %p luringcb %p nread %d" |
63 | + | 138 | + |
64 | virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX); | 139 | # qcow2.c |
65 | virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY); | 140 | qcow2_add_task(void *co, void *bs, void *pool, const char *action, int cluster_type, uint64_t file_cluster_offset, uint64_t offset, uint64_t bytes, void *qiov, size_t qiov_offset) "co %p bs %p pool %p: %s: cluster_type %d file_cluster_offset %" PRIu64 " offset %" PRIu64 " bytes %" PRIu64 " qiov %p qiov_offset %zu" |
66 | virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY); | 141 | qcow2_writev_start_req(void *co, int64_t offset, int bytes) "co %p offset 0x%" PRIx64 " bytes %d" |
67 | virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE); | ||
68 | if (virtio_has_feature(features, VIRTIO_F_VERSION_1)) { | ||
69 | - if (s->conf.scsi) { | ||
70 | + if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_SCSI)) { | ||
71 | error_setg(errp, "Please set scsi=off for virtio-blk devices in order to use virtio 1.0"); | ||
72 | return 0; | ||
73 | } | ||
74 | @@ -XXX,XX +XXX,XX @@ static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features, | ||
75 | virtio_add_feature(&features, VIRTIO_BLK_F_SCSI); | ||
76 | } | ||
77 | |||
78 | - if (s->conf.config_wce) { | ||
79 | - virtio_add_feature(&features, VIRTIO_BLK_F_CONFIG_WCE); | ||
80 | - } | ||
81 | if (blk_enable_write_cache(s->blk)) { | ||
82 | virtio_add_feature(&features, VIRTIO_BLK_F_WCE); | ||
83 | } | ||
84 | @@ -XXX,XX +XXX,XX @@ static Property virtio_blk_properties[] = { | ||
85 | DEFINE_BLOCK_ERROR_PROPERTIES(VirtIOBlock, conf.conf), | ||
86 | DEFINE_BLOCK_CHS_PROPERTIES(VirtIOBlock, conf.conf), | ||
87 | DEFINE_PROP_STRING("serial", VirtIOBlock, conf.serial), | ||
88 | - DEFINE_PROP_BIT("config-wce", VirtIOBlock, conf.config_wce, 0, true), | ||
89 | + DEFINE_PROP_BIT64("config-wce", VirtIOBlock, host_features, | ||
90 | + VIRTIO_BLK_F_CONFIG_WCE, true), | ||
91 | #ifdef __linux__ | ||
92 | - DEFINE_PROP_BIT("scsi", VirtIOBlock, conf.scsi, 0, false), | ||
93 | + DEFINE_PROP_BIT64("scsi", VirtIOBlock, host_features, | ||
94 | + VIRTIO_BLK_F_SCSI, false), | ||
95 | #endif | ||
96 | DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0, | ||
97 | true), | ||
98 | -- | 142 | -- |
99 | 2.20.1 | 143 | 2.24.1 |
100 | 144 | ||
101 | 145 | diff view generated by jsdifflib |
1 | From: Stefano Garzarella <sgarzare@redhat.com> | 1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> |
---|---|---|---|
2 | 2 | ||
3 | In order to use VirtIOFeature also in other virtio devices, we move | 3 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> |
4 | its declaration and the endof() macro (renamed in virtio_endof()) | 4 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> |
5 | in virtio.h. | 5 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
6 | We add virtio_feature_get_config_size() function to iterate the array | 6 | Message-id: 20200120141858.587874-11-stefanha@redhat.com |
7 | of VirtIOFeature and to return the config size depending on the | 7 | Message-Id: <20200120141858.587874-11-stefanha@redhat.com> |
8 | features enabled. (as virtio_net_set_config_size() did) | ||
9 | |||
10 | Suggested-by: Michael S. Tsirkin <mst@redhat.com> | ||
11 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> | ||
13 | Message-id: 20190221103314.58500-5-sgarzare@redhat.com | ||
14 | Message-Id: <20190221103314.58500-5-sgarzare@redhat.com> | ||
15 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
16 | --- | 9 | --- |
17 | include/hw/virtio/virtio.h | 15 +++++++++++++++ | 10 | block/io_uring.c | 17 ++++++++++++++++- |
18 | hw/net/virtio-net.c | 31 +++++++------------------------ | 11 | 1 file changed, 16 insertions(+), 1 deletion(-) |
19 | hw/virtio/virtio.c | 15 +++++++++++++++ | ||
20 | 3 files changed, 37 insertions(+), 24 deletions(-) | ||
21 | 12 | ||
22 | diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h | 13 | diff --git a/block/io_uring.c b/block/io_uring.c |
23 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
24 | --- a/include/hw/virtio/virtio.h | 15 | --- a/block/io_uring.c |
25 | +++ b/include/hw/virtio/virtio.h | 16 | +++ b/block/io_uring.c |
26 | @@ -XXX,XX +XXX,XX @@ static inline hwaddr vring_align(hwaddr addr, | 17 | @@ -XXX,XX +XXX,XX @@ static void qemu_luring_completion_cb(void *opaque) |
27 | return QEMU_ALIGN_UP(addr, align); | 18 | luring_process_completions_and_submit(s); |
28 | } | 19 | } |
29 | 20 | ||
30 | +/* | 21 | +static bool qemu_luring_poll_cb(void *opaque) |
31 | + * Calculate the number of bytes up to and including the given 'field' of | 22 | +{ |
32 | + * 'container'. | 23 | + LuringState *s = opaque; |
33 | + */ | 24 | + struct io_uring_cqe *cqes; |
34 | +#define virtio_endof(container, field) \ | ||
35 | + (offsetof(container, field) + sizeof_field(container, field)) | ||
36 | + | 25 | + |
37 | +typedef struct VirtIOFeature { | 26 | + if (io_uring_peek_cqe(&s->ring, &cqes) == 0) { |
38 | + uint64_t flags; | 27 | + if (cqes) { |
39 | + size_t end; | 28 | + luring_process_completions_and_submit(s); |
40 | +} VirtIOFeature; | 29 | + return true; |
41 | + | ||
42 | +size_t virtio_feature_get_config_size(VirtIOFeature *features, | ||
43 | + uint64_t host_features); | ||
44 | + | ||
45 | typedef struct VirtQueue VirtQueue; | ||
46 | |||
47 | #define VIRTQUEUE_MAX_SIZE 1024 | ||
48 | diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c | ||
49 | index XXXXXXX..XXXXXXX 100644 | ||
50 | --- a/hw/net/virtio-net.c | ||
51 | +++ b/hw/net/virtio-net.c | ||
52 | @@ -XXX,XX +XXX,XX @@ static inline __virtio16 *virtio_net_rsc_ext_num_dupacks( | ||
53 | |||
54 | #endif | ||
55 | |||
56 | -/* | ||
57 | - * Calculate the number of bytes up to and including the given 'field' of | ||
58 | - * 'container'. | ||
59 | - */ | ||
60 | -#define endof(container, field) \ | ||
61 | - (offsetof(container, field) + sizeof_field(container, field)) | ||
62 | - | ||
63 | -typedef struct VirtIOFeature { | ||
64 | - uint64_t flags; | ||
65 | - size_t end; | ||
66 | -} VirtIOFeature; | ||
67 | - | ||
68 | static VirtIOFeature feature_sizes[] = { | ||
69 | {.flags = 1ULL << VIRTIO_NET_F_MAC, | ||
70 | - .end = endof(struct virtio_net_config, mac)}, | ||
71 | + .end = virtio_endof(struct virtio_net_config, mac)}, | ||
72 | {.flags = 1ULL << VIRTIO_NET_F_STATUS, | ||
73 | - .end = endof(struct virtio_net_config, status)}, | ||
74 | + .end = virtio_endof(struct virtio_net_config, status)}, | ||
75 | {.flags = 1ULL << VIRTIO_NET_F_MQ, | ||
76 | - .end = endof(struct virtio_net_config, max_virtqueue_pairs)}, | ||
77 | + .end = virtio_endof(struct virtio_net_config, max_virtqueue_pairs)}, | ||
78 | {.flags = 1ULL << VIRTIO_NET_F_MTU, | ||
79 | - .end = endof(struct virtio_net_config, mtu)}, | ||
80 | + .end = virtio_endof(struct virtio_net_config, mtu)}, | ||
81 | {.flags = 1ULL << VIRTIO_NET_F_SPEED_DUPLEX, | ||
82 | - .end = endof(struct virtio_net_config, duplex)}, | ||
83 | + .end = virtio_endof(struct virtio_net_config, duplex)}, | ||
84 | {} | ||
85 | }; | ||
86 | |||
87 | @@ -XXX,XX +XXX,XX @@ static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx, | ||
88 | |||
89 | static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features) | ||
90 | { | ||
91 | - int i, config_size = 0; | ||
92 | virtio_add_feature(&host_features, VIRTIO_NET_F_MAC); | ||
93 | |||
94 | - for (i = 0; feature_sizes[i].flags != 0; i++) { | ||
95 | - if (host_features & feature_sizes[i].flags) { | ||
96 | - config_size = MAX(feature_sizes[i].end, config_size); | ||
97 | - } | ||
98 | - } | ||
99 | - n->config_size = config_size; | ||
100 | + n->config_size = virtio_feature_get_config_size(feature_sizes, | ||
101 | + host_features); | ||
102 | } | ||
103 | |||
104 | void virtio_net_set_netclient_name(VirtIONet *n, const char *name, | ||
105 | diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c | ||
106 | index XXXXXXX..XXXXXXX 100644 | ||
107 | --- a/hw/virtio/virtio.c | ||
108 | +++ b/hw/virtio/virtio.c | ||
109 | @@ -XXX,XX +XXX,XX @@ int virtio_set_features(VirtIODevice *vdev, uint64_t val) | ||
110 | return ret; | ||
111 | } | ||
112 | |||
113 | +size_t virtio_feature_get_config_size(VirtIOFeature *feature_sizes, | ||
114 | + uint64_t host_features) | ||
115 | +{ | ||
116 | + size_t config_size = 0; | ||
117 | + int i; | ||
118 | + | ||
119 | + for (i = 0; feature_sizes[i].flags != 0; i++) { | ||
120 | + if (host_features & feature_sizes[i].flags) { | ||
121 | + config_size = MAX(feature_sizes[i].end, config_size); | ||
122 | + } | 30 | + } |
123 | + } | 31 | + } |
124 | + | 32 | + |
125 | + return config_size; | 33 | + return false; |
126 | +} | 34 | +} |
127 | + | 35 | + |
128 | int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id) | 36 | static void ioq_init(LuringQueue *io_q) |
129 | { | 37 | { |
130 | int i, ret; | 38 | QSIMPLEQ_INIT(&io_q->submit_queue); |
39 | @@ -XXX,XX +XXX,XX @@ void luring_attach_aio_context(LuringState *s, AioContext *new_context) | ||
40 | s->aio_context = new_context; | ||
41 | s->completion_bh = aio_bh_new(new_context, qemu_luring_completion_bh, s); | ||
42 | aio_set_fd_handler(s->aio_context, s->ring.ring_fd, false, | ||
43 | - qemu_luring_completion_cb, NULL, NULL, s); | ||
44 | + qemu_luring_completion_cb, NULL, qemu_luring_poll_cb, s); | ||
45 | } | ||
46 | |||
47 | LuringState *luring_init(Error **errp) | ||
131 | -- | 48 | -- |
132 | 2.20.1 | 49 | 2.24.1 |
133 | 50 | ||
134 | 51 | diff view generated by jsdifflib |
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> |
---|---|---|---|
2 | 2 | ||
3 | @iov is used only to initialize @qiov. Let's use new | 3 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> |
4 | qemu_iovec_init_buf() instead, which simplifies the code. | 4 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> |
5 | 5 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | |
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 6 | Message-id: 20200120141858.587874-12-stefanha@redhat.com |
7 | Reviewed-by: Eric Blake <eblake@redhat.com> | 7 | Message-Id: <20200120141858.587874-12-stefanha@redhat.com> |
8 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | Message-id: 20190218140926.333779-18-vsementsov@virtuozzo.com | ||
10 | Message-Id: <20190218140926.333779-18-vsementsov@virtuozzo.com> | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
12 | --- | 9 | --- |
13 | include/hw/ide/internal.h | 1 - | 10 | qemu-io.c | 25 +++++++++++++++++++++---- |
14 | hw/ide/atapi.c | 5 ++--- | 11 | 1 file changed, 21 insertions(+), 4 deletions(-) |
15 | 2 files changed, 2 insertions(+), 4 deletions(-) | ||
16 | 12 | ||
17 | diff --git a/include/hw/ide/internal.h b/include/hw/ide/internal.h | 13 | diff --git a/qemu-io.c b/qemu-io.c |
18 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/include/hw/ide/internal.h | 15 | --- a/qemu-io.c |
20 | +++ b/include/hw/ide/internal.h | 16 | +++ b/qemu-io.c |
21 | @@ -XXX,XX +XXX,XX @@ struct IDEDMAOps { | 17 | @@ -XXX,XX +XXX,XX @@ static void open_help(void) |
22 | 18 | " -C, -- use copy-on-read\n" | |
23 | struct IDEDMA { | 19 | " -n, -- disable host cache, short for -t none\n" |
24 | const struct IDEDMAOps *ops; | 20 | " -U, -- force shared permissions\n" |
25 | - struct iovec iov; | 21 | -" -k, -- use kernel AIO implementation (on Linux only)\n" |
26 | QEMUIOVector qiov; | 22 | +" -k, -- use kernel AIO implementation (Linux only, prefer use of -i)\n" |
27 | BlockAIOCB *aiocb; | 23 | +" -i, -- use AIO mode (threads, native or io_uring)\n" |
28 | }; | 24 | " -t, -- use the given cache mode for the image\n" |
29 | diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c | 25 | " -d, -- use the given discard mode for the image\n" |
30 | index XXXXXXX..XXXXXXX 100644 | 26 | " -o, -- options to be given to the block driver" |
31 | --- a/hw/ide/atapi.c | 27 | @@ -XXX,XX +XXX,XX @@ static int open_f(BlockBackend *blk, int argc, char **argv) |
32 | +++ b/hw/ide/atapi.c | 28 | QDict *opts; |
33 | @@ -XXX,XX +XXX,XX @@ static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret) | 29 | bool force_share = false; |
34 | data_offset = 0; | 30 | |
35 | } | 31 | - while ((c = getopt(argc, argv, "snCro:kt:d:U")) != -1) { |
36 | trace_ide_atapi_cmd_read_dma_cb_aio(s, s->lba, n); | 32 | + while ((c = getopt(argc, argv, "snCro:ki:t:d:U")) != -1) { |
37 | - s->bus->dma->iov.iov_base = (void *)(s->io_buffer + data_offset); | 33 | switch (c) { |
38 | - s->bus->dma->iov.iov_len = n * ATAPI_SECTOR_SIZE; | 34 | case 's': |
39 | - qemu_iovec_init_external(&s->bus->dma->qiov, &s->bus->dma->iov, 1); | 35 | flags |= BDRV_O_SNAPSHOT; |
40 | + qemu_iovec_init_buf(&s->bus->dma->qiov, s->io_buffer + data_offset, | 36 | @@ -XXX,XX +XXX,XX @@ static int open_f(BlockBackend *blk, int argc, char **argv) |
41 | + n * ATAPI_SECTOR_SIZE); | 37 | return -EINVAL; |
42 | 38 | } | |
43 | s->bus->dma->aiocb = ide_buffered_readv(s, (int64_t)s->lba << 2, | 39 | break; |
44 | &s->bus->dma->qiov, n * 4, | 40 | + case 'i': |
41 | + if (bdrv_parse_aio(optarg, &flags) < 0) { | ||
42 | + error_report("Invalid aio option: %s", optarg); | ||
43 | + qemu_opts_reset(&empty_opts); | ||
44 | + return -EINVAL; | ||
45 | + } | ||
46 | + break; | ||
47 | case 'o': | ||
48 | if (imageOpts) { | ||
49 | printf("--image-opts and 'open -o' are mutually exclusive\n"); | ||
50 | @@ -XXX,XX +XXX,XX @@ static void usage(const char *name) | ||
51 | " -n, --nocache disable host cache, short for -t none\n" | ||
52 | " -C, --copy-on-read enable copy-on-read\n" | ||
53 | " -m, --misalign misalign allocations for O_DIRECT\n" | ||
54 | -" -k, --native-aio use kernel AIO implementation (on Linux only)\n" | ||
55 | +" -k, --native-aio use kernel AIO implementation\n" | ||
56 | +" (Linux only, prefer use of -i)\n" | ||
57 | +" -i, --aio=MODE use AIO mode (threads, native or io_uring)\n" | ||
58 | " -t, --cache=MODE use the given cache mode for the image\n" | ||
59 | " -d, --discard=MODE use the given discard mode for the image\n" | ||
60 | " -T, --trace [[enable=]<pattern>][,events=<file>][,file=<file>]\n" | ||
61 | @@ -XXX,XX +XXX,XX @@ static QemuOptsList file_opts = { | ||
62 | int main(int argc, char **argv) | ||
63 | { | ||
64 | int readonly = 0; | ||
65 | - const char *sopt = "hVc:d:f:rsnCmkt:T:U"; | ||
66 | + const char *sopt = "hVc:d:f:rsnCmki:t:T:U"; | ||
67 | const struct option lopt[] = { | ||
68 | { "help", no_argument, NULL, 'h' }, | ||
69 | { "version", no_argument, NULL, 'V' }, | ||
70 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv) | ||
71 | { "copy-on-read", no_argument, NULL, 'C' }, | ||
72 | { "misalign", no_argument, NULL, 'm' }, | ||
73 | { "native-aio", no_argument, NULL, 'k' }, | ||
74 | + { "aio", required_argument, NULL, 'i' }, | ||
75 | { "discard", required_argument, NULL, 'd' }, | ||
76 | { "cache", required_argument, NULL, 't' }, | ||
77 | { "trace", required_argument, NULL, 'T' }, | ||
78 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv) | ||
79 | case 'k': | ||
80 | flags |= BDRV_O_NATIVE_AIO; | ||
81 | break; | ||
82 | + case 'i': | ||
83 | + if (bdrv_parse_aio(optarg, &flags) < 0) { | ||
84 | + error_report("Invalid aio option: %s", optarg); | ||
85 | + exit(1); | ||
86 | + } | ||
87 | + break; | ||
88 | case 't': | ||
89 | if (bdrv_parse_cache_mode(optarg, &flags, &writethrough) < 0) { | ||
90 | error_report("Invalid cache option: %s", optarg); | ||
45 | -- | 91 | -- |
46 | 2.20.1 | 92 | 2.24.1 |
47 | 93 | ||
48 | 94 | diff view generated by jsdifflib |
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> |
---|---|---|---|
2 | 2 | ||
3 | Use new qemu_iovec_init_buf() instead of | 3 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> |
4 | qemu_iovec_init_external( ... , 1), which simplifies the code. | 4 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> |
5 | 5 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | |
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 6 | Message-id: 20200120141858.587874-13-stefanha@redhat.com |
7 | Reviewed-by: Eric Blake <eblake@redhat.com> | 7 | Message-Id: <20200120141858.587874-13-stefanha@redhat.com> |
8 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | Message-id: 20190218140926.333779-13-vsementsov@virtuozzo.com | ||
10 | Message-Id: <20190218140926.333779-13-vsementsov@virtuozzo.com> | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
12 | --- | 9 | --- |
13 | qemu-img.c | 10 ++-------- | 10 | qemu-img-cmds.hx | 4 ++-- |
14 | 1 file changed, 2 insertions(+), 8 deletions(-) | 11 | qemu-img.c | 11 ++++++++++- |
12 | qemu-img.texi | 5 ++++- | ||
13 | 3 files changed, 16 insertions(+), 4 deletions(-) | ||
15 | 14 | ||
15 | diff --git a/qemu-img-cmds.hx b/qemu-img-cmds.hx | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/qemu-img-cmds.hx | ||
18 | +++ b/qemu-img-cmds.hx | ||
19 | @@ -XXX,XX +XXX,XX @@ STEXI | ||
20 | ETEXI | ||
21 | |||
22 | DEF("bench", img_bench, | ||
23 | - "bench [-c count] [-d depth] [-f fmt] [--flush-interval=flush_interval] [-n] [--no-drain] [-o offset] [--pattern=pattern] [-q] [-s buffer_size] [-S step_size] [-t cache] [-w] [-U] filename") | ||
24 | + "bench [-c count] [-d depth] [-f fmt] [--flush-interval=flush_interval] [-n] [--no-drain] [-o offset] [--pattern=pattern] [-q] [-s buffer_size] [-S step_size] [-t cache] [-i aio] [-w] [-U] filename") | ||
25 | STEXI | ||
26 | -@item bench [-c @var{count}] [-d @var{depth}] [-f @var{fmt}] [--flush-interval=@var{flush_interval}] [-n] [--no-drain] [-o @var{offset}] [--pattern=@var{pattern}] [-q] [-s @var{buffer_size}] [-S @var{step_size}] [-t @var{cache}] [-w] [-U] @var{filename} | ||
27 | +@item bench [-c @var{count}] [-d @var{depth}] [-f @var{fmt}] [--flush-interval=@var{flush_interval}] [-n] [--no-drain] [-o @var{offset}] [--pattern=@var{pattern}] [-q] [-s @var{buffer_size}] [-S @var{step_size}] [-t @var{cache}] [-i @var{aio}] [-w] [-U] @var{filename} | ||
28 | ETEXI | ||
29 | |||
30 | DEF("check", img_check, | ||
16 | diff --git a/qemu-img.c b/qemu-img.c | 31 | diff --git a/qemu-img.c b/qemu-img.c |
17 | index XXXXXXX..XXXXXXX 100644 | 32 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/qemu-img.c | 33 | --- a/qemu-img.c |
19 | +++ b/qemu-img.c | 34 | +++ b/qemu-img.c |
20 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn convert_co_read(ImgConvertState *s, int64_t sector_num, | 35 | @@ -XXX,XX +XXX,XX @@ static int img_bench(int argc, char **argv) |
21 | { | 36 | {"force-share", no_argument, 0, 'U'}, |
22 | int n, ret; | 37 | {0, 0, 0, 0} |
23 | QEMUIOVector qiov; | 38 | }; |
24 | - struct iovec iov; | 39 | - c = getopt_long(argc, argv, ":hc:d:f:no:qs:S:t:wU", long_options, NULL); |
25 | 40 | + c = getopt_long(argc, argv, ":hc:d:f:ni:o:qs:S:t:wU", long_options, | |
26 | assert(nb_sectors <= s->buf_sectors); | 41 | + NULL); |
27 | while (nb_sectors > 0) { | 42 | if (c == -1) { |
28 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn convert_co_read(ImgConvertState *s, int64_t sector_num, | 43 | break; |
29 | bs_sectors = s->src_sectors[src_cur]; | 44 | } |
30 | 45 | @@ -XXX,XX +XXX,XX @@ static int img_bench(int argc, char **argv) | |
31 | n = MIN(nb_sectors, bs_sectors - (sector_num - src_cur_offset)); | 46 | case 'n': |
32 | - iov.iov_base = buf; | 47 | flags |= BDRV_O_NATIVE_AIO; |
33 | - iov.iov_len = n << BDRV_SECTOR_BITS; | 48 | break; |
34 | - qemu_iovec_init_external(&qiov, &iov, 1); | 49 | + case 'i': |
35 | + qemu_iovec_init_buf(&qiov, buf, n << BDRV_SECTOR_BITS); | 50 | + ret = bdrv_parse_aio(optarg, &flags); |
36 | 51 | + if (ret < 0) { | |
37 | ret = blk_co_preadv( | 52 | + error_report("Invalid aio option: %s", optarg); |
38 | blk, (sector_num - src_cur_offset) << BDRV_SECTOR_BITS, | 53 | + ret = -1; |
39 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn convert_co_write(ImgConvertState *s, int64_t sector_num, | 54 | + goto out; |
40 | { | 55 | + } |
41 | int ret; | 56 | + break; |
42 | QEMUIOVector qiov; | 57 | case 'o': |
43 | - struct iovec iov; | 58 | { |
44 | 59 | offset = cvtnum(optarg); | |
45 | while (nb_sectors > 0) { | 60 | diff --git a/qemu-img.texi b/qemu-img.texi |
46 | int n = nb_sectors; | 61 | index XXXXXXX..XXXXXXX 100644 |
47 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn convert_co_write(ImgConvertState *s, int64_t sector_num, | 62 | --- a/qemu-img.texi |
48 | (s->compressed && | 63 | +++ b/qemu-img.texi |
49 | !buffer_is_zero(buf, n * BDRV_SECTOR_SIZE))) | 64 | @@ -XXX,XX +XXX,XX @@ Command description: |
50 | { | 65 | Amends the image format specific @var{options} for the image file |
51 | - iov.iov_base = buf; | 66 | @var{filename}. Not all file formats support this operation. |
52 | - iov.iov_len = n << BDRV_SECTOR_BITS; | 67 | |
53 | - qemu_iovec_init_external(&qiov, &iov, 1); | 68 | -@item bench [-c @var{count}] [-d @var{depth}] [-f @var{fmt}] [--flush-interval=@var{flush_interval}] [-n] [--no-drain] [-o @var{offset}] [--pattern=@var{pattern}] [-q] [-s @var{buffer_size}] [-S @var{step_size}] [-t @var{cache}] [-w] [-U] @var{filename} |
54 | + qemu_iovec_init_buf(&qiov, buf, n << BDRV_SECTOR_BITS); | 69 | +@item bench [-c @var{count}] [-d @var{depth}] [-f @var{fmt}] [--flush-interval=@var{flush_interval}] [-n] [-i @var{aio}] [--no-drain] [-o @var{offset}] [--pattern=@var{pattern}] [-q] [-s @var{buffer_size}] [-S @var{step_size}] [-t @var{cache}] [-w] [-U] @var{filename} |
55 | 70 | ||
56 | ret = blk_co_pwritev(s->target, sector_num << BDRV_SECTOR_BITS, | 71 | Run a simple sequential I/O benchmark on the specified image. If @code{-w} is |
57 | n << BDRV_SECTOR_BITS, &qiov, flags); | 72 | specified, a write test is performed, otherwise a read test is performed. |
73 | @@ -XXX,XX +XXX,XX @@ If @code{-n} is specified, the native AIO backend is used if possible. On | ||
74 | Linux, this option only works if @code{-t none} or @code{-t directsync} is | ||
75 | specified as well. | ||
76 | |||
77 | +If @code{-i} is specified, aio option can be used to specify different AIO | ||
78 | +backends: @var{threads}, @var{native} or @var{io_uring}. | ||
79 | + | ||
80 | For write tests, by default a buffer filled with zeros is written. This can be | ||
81 | overridden with a pattern byte specified by @var{pattern}. | ||
82 | |||
58 | -- | 83 | -- |
59 | 2.20.1 | 84 | 2.24.1 |
60 | 85 | ||
61 | 86 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
2 | 1 | ||
3 | Use new qemu_iovec_init_buf() instead of | ||
4 | qemu_iovec_init_external( ... , 1), which simplifies the code. | ||
5 | |||
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
7 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
8 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | Message-id: 20190218140926.333779-14-vsementsov@virtuozzo.com | ||
10 | Message-Id: <20190218140926.333779-14-vsementsov@virtuozzo.com> | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | --- | ||
13 | migration/block.c | 10 +++------- | ||
14 | 1 file changed, 3 insertions(+), 7 deletions(-) | ||
15 | |||
16 | diff --git a/migration/block.c b/migration/block.c | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/migration/block.c | ||
19 | +++ b/migration/block.c | ||
20 | @@ -XXX,XX +XXX,XX @@ typedef struct BlkMigBlock { | ||
21 | BlkMigDevState *bmds; | ||
22 | int64_t sector; | ||
23 | int nr_sectors; | ||
24 | - struct iovec iov; | ||
25 | QEMUIOVector qiov; | ||
26 | BlockAIOCB *aiocb; | ||
27 | |||
28 | @@ -XXX,XX +XXX,XX @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds) | ||
29 | blk->sector = cur_sector; | ||
30 | blk->nr_sectors = nr_sectors; | ||
31 | |||
32 | - blk->iov.iov_base = blk->buf; | ||
33 | - blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE; | ||
34 | - qemu_iovec_init_external(&blk->qiov, &blk->iov, 1); | ||
35 | + qemu_iovec_init_buf(&blk->qiov, blk->buf, nr_sectors * BDRV_SECTOR_SIZE); | ||
36 | |||
37 | blk_mig_lock(); | ||
38 | block_mig_state.submitted++; | ||
39 | @@ -XXX,XX +XXX,XX @@ static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds, | ||
40 | blk->nr_sectors = nr_sectors; | ||
41 | |||
42 | if (is_async) { | ||
43 | - blk->iov.iov_base = blk->buf; | ||
44 | - blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE; | ||
45 | - qemu_iovec_init_external(&blk->qiov, &blk->iov, 1); | ||
46 | + qemu_iovec_init_buf(&blk->qiov, blk->buf, | ||
47 | + nr_sectors * BDRV_SECTOR_SIZE); | ||
48 | |||
49 | blk->aiocb = blk_aio_preadv(bmds->blk, | ||
50 | sector * BDRV_SECTOR_SIZE, | ||
51 | -- | ||
52 | 2.20.1 | ||
53 | |||
54 | diff view generated by jsdifflib |
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> |
---|---|---|---|
2 | 2 | ||
3 | @iov is used only to initialize @qiov. Let's use new | 3 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> |
4 | qemu_iovec_init_buf() instead, which simplifies the code. | 4 | Acked-by: Eric Blake <eblake@redhat.com> |
5 | 5 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> | |
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 6 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
7 | Reviewed-by: Eric Blake <eblake@redhat.com> | 7 | Message-id: 20200120141858.587874-14-stefanha@redhat.com |
8 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | 8 | Message-Id: <20200120141858.587874-14-stefanha@redhat.com> |
9 | Message-id: 20190218140926.333779-17-vsementsov@virtuozzo.com | ||
10 | Message-Id: <20190218140926.333779-17-vsementsov@virtuozzo.com> | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 9 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
12 | --- | 10 | --- |
13 | include/hw/ide/internal.h | 1 - | 11 | docs/interop/qemu-nbd.rst | 4 ++-- |
14 | hw/ide/core.c | 11 ++++++----- | 12 | qemu-nbd.c | 12 ++++-------- |
15 | 2 files changed, 6 insertions(+), 6 deletions(-) | 13 | 2 files changed, 6 insertions(+), 10 deletions(-) |
16 | 14 | ||
17 | diff --git a/include/hw/ide/internal.h b/include/hw/ide/internal.h | 15 | diff --git a/docs/interop/qemu-nbd.rst b/docs/interop/qemu-nbd.rst |
18 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/include/hw/ide/internal.h | 17 | --- a/docs/interop/qemu-nbd.rst |
20 | +++ b/include/hw/ide/internal.h | 18 | +++ b/docs/interop/qemu-nbd.rst |
21 | @@ -XXX,XX +XXX,XX @@ extern const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT]; | 19 | @@ -XXX,XX +XXX,XX @@ driver options if ``--image-opts`` is specified. |
22 | 20 | ||
23 | typedef struct IDEBufferedRequest { | 21 | .. option:: --aio=AIO |
24 | QLIST_ENTRY(IDEBufferedRequest) list; | 22 | |
25 | - struct iovec iov; | 23 | - Set the asynchronous I/O mode between ``threads`` (the default) |
26 | QEMUIOVector qiov; | 24 | - and ``native`` (Linux only). |
27 | QEMUIOVector *original_qiov; | 25 | + Set the asynchronous I/O mode between ``threads`` (the default), |
28 | BlockCompletionFunc *original_cb; | 26 | + ``native`` (Linux only), and ``io_uring`` (Linux 5.1+). |
29 | diff --git a/hw/ide/core.c b/hw/ide/core.c | 27 | |
28 | .. option:: --discard=DISCARD | ||
29 | |||
30 | diff --git a/qemu-nbd.c b/qemu-nbd.c | ||
30 | index XXXXXXX..XXXXXXX 100644 | 31 | index XXXXXXX..XXXXXXX 100644 |
31 | --- a/hw/ide/core.c | 32 | --- a/qemu-nbd.c |
32 | +++ b/hw/ide/core.c | 33 | +++ b/qemu-nbd.c |
33 | @@ -XXX,XX +XXX,XX @@ static void ide_buffered_readv_cb(void *opaque, int ret) | 34 | @@ -XXX,XX +XXX,XX @@ static void usage(const char *name) |
34 | IDEBufferedRequest *req = opaque; | 35 | " '[ID_OR_NAME]'\n" |
35 | if (!req->orphaned) { | 36 | " -n, --nocache disable host cache\n" |
36 | if (!ret) { | 37 | " --cache=MODE set cache mode (none, writeback, ...)\n" |
37 | - qemu_iovec_from_buf(req->original_qiov, 0, req->iov.iov_base, | 38 | -" --aio=MODE set AIO mode (native or threads)\n" |
38 | + assert(req->qiov.size == req->original_qiov->size); | 39 | +" --aio=MODE set AIO mode (native, io_uring or threads)\n" |
39 | + qemu_iovec_from_buf(req->original_qiov, 0, | 40 | " --discard=MODE set discard mode (ignore, unmap)\n" |
40 | + req->qiov.local_iov.iov_base, | 41 | " --detect-zeroes=MODE set detect-zeroes mode (off, on, unmap)\n" |
41 | req->original_qiov->size); | 42 | " --image-opts treat FILE as a full set of image options\n" |
42 | } | 43 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv) |
43 | req->original_cb(req->original_opaque, ret); | 44 | exit(EXIT_FAILURE); |
44 | } | 45 | } |
45 | QLIST_REMOVE(req, list); | 46 | seen_aio = true; |
46 | - qemu_vfree(req->iov.iov_base); | 47 | - if (!strcmp(optarg, "native")) { |
47 | + qemu_vfree(qemu_iovec_buf(&req->qiov)); | 48 | - flags |= BDRV_O_NATIVE_AIO; |
48 | g_free(req); | 49 | - } else if (!strcmp(optarg, "threads")) { |
49 | } | 50 | - /* this is the default */ |
50 | 51 | - } else { | |
51 | @@ -XXX,XX +XXX,XX @@ BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num, | 52 | - error_report("invalid aio mode `%s'", optarg); |
52 | req->original_qiov = iov; | 53 | - exit(EXIT_FAILURE); |
53 | req->original_cb = cb; | 54 | + if (bdrv_parse_aio(optarg, &flags) < 0) { |
54 | req->original_opaque = opaque; | 55 | + error_report("Invalid aio mode '%s'", optarg); |
55 | - req->iov.iov_base = qemu_blockalign(blk_bs(s->blk), iov->size); | 56 | + exit(EXIT_FAILURE); |
56 | - req->iov.iov_len = iov->size; | 57 | } |
57 | - qemu_iovec_init_external(&req->qiov, &req->iov, 1); | 58 | break; |
58 | + qemu_iovec_init_buf(&req->qiov, blk_blockalign(s->blk, iov->size), | 59 | case QEMU_NBD_OPT_DISCARD: |
59 | + iov->size); | ||
60 | |||
61 | aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS, | ||
62 | &req->qiov, 0, ide_buffered_readv_cb, req); | ||
63 | -- | 60 | -- |
64 | 2.20.1 | 61 | 2.24.1 |
65 | 62 | ||
66 | 63 | diff view generated by jsdifflib |
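On the new-series side, the qemu-nbd change above drops its open-coded strcmp() chain in favour of bdrv_parse_aio(), introduced earlier in this series ("blockdev: adds bdrv_parse_aio to use io_uring"). A sketch of the resulting pattern for any front end, assuming the helper is declared in "block/block.h" with the signature used in the hunk (the apply_aio_option() wrapper itself is illustrative):

    #include "qemu/osdep.h"
    #include "qemu/error-report.h"
    #include "block/block.h"

    /* Translate an --aio=MODE string into BDRV_O_* open flags.  "threads"
     * leaves *flags untouched (the default), while "native" and "io_uring"
     * request the corresponding AIO backend; anything else is rejected so
     * the caller can bail out, mirroring the qemu-nbd hunk above. */
    static int apply_aio_option(const char *mode, int *flags)
    {
        if (bdrv_parse_aio(mode, flags) < 0) {
            error_report("Invalid aio mode '%s'", mode);
            return -1;
        }
        return 0;
    }

With a build configured for io_uring, an export can then be started with qemu-nbd --aio=io_uring as described in the qemu-nbd.rst hunk; --aio=native and the default --aio=threads keep their previous meaning.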
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> |
---|---|---|---|
2 | 2 | ||
3 | @iov is used only to initialize @qiov. Let's use the new | 3 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> |
4 | qemu_iovec_init_buf() instead, which simplifies the code. | 4 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> |
5 | 5 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | |
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 6 | Message-id: 20200120141858.587874-15-stefanha@redhat.com |
7 | Reviewed-by: Eric Blake <eblake@redhat.com> | 7 | Message-Id: <20200120141858.587874-15-stefanha@redhat.com> |
8 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | Message-id: 20190218140926.333779-16-vsementsov@virtuozzo.com | ||
10 | Message-Id: <20190218140926.333779-16-vsementsov@virtuozzo.com> | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
12 | --- | 9 | --- |
13 | include/hw/ide/internal.h | 1 - | 10 | tests/qemu-iotests/check | 15 ++++++++++++++- |
14 | hw/ide/atapi.c | 9 ++++----- | 11 | tests/qemu-iotests/common.rc | 14 ++++++++++++++ |
15 | hw/ide/core.c | 8 ++------ | 12 | tests/qemu-iotests/iotests.py | 12 ++++++++++-- |
16 | 3 files changed, 6 insertions(+), 12 deletions(-) | 13 | 3 files changed, 38 insertions(+), 3 deletions(-) |
17 | 14 | ||
18 | diff --git a/include/hw/ide/internal.h b/include/hw/ide/internal.h | 15 | diff --git a/tests/qemu-iotests/check b/tests/qemu-iotests/check |
16 | index XXXXXXX..XXXXXXX 100755 | ||
17 | --- a/tests/qemu-iotests/check | ||
18 | +++ b/tests/qemu-iotests/check | ||
19 | @@ -XXX,XX +XXX,XX @@ sortme=false | ||
20 | expunge=true | ||
21 | have_test_arg=false | ||
22 | cachemode=false | ||
23 | +aiomode=false | ||
24 | |||
25 | tmp="${TEST_DIR}"/$$ | ||
26 | rm -f $tmp.list $tmp.tmp $tmp.sed | ||
27 | @@ -XXX,XX +XXX,XX @@ export IMGFMT_GENERIC=true | ||
28 | export IMGPROTO=file | ||
29 | export IMGOPTS="" | ||
30 | export CACHEMODE="writeback" | ||
31 | +export AIOMODE="threads" | ||
32 | export QEMU_IO_OPTIONS="" | ||
33 | export QEMU_IO_OPTIONS_NO_FMT="" | ||
34 | export CACHEMODE_IS_DEFAULT=true | ||
35 | @@ -XXX,XX +XXX,XX @@ s/ .*//p | ||
36 | CACHEMODE_IS_DEFAULT=false | ||
37 | cachemode=false | ||
38 | continue | ||
39 | + elif $aiomode | ||
40 | + then | ||
41 | + AIOMODE="$r" | ||
42 | + aiomode=false | ||
43 | + continue | ||
44 | fi | ||
45 | |||
46 | xpand=true | ||
47 | @@ -XXX,XX +XXX,XX @@ other options | ||
48 | -n show me, do not run tests | ||
49 | -o options -o options to pass to qemu-img create/convert | ||
50 | -c mode cache mode | ||
51 | + -i mode AIO mode | ||
52 | -makecheck pretty print output for make check | ||
53 | |||
54 | testlist options | ||
55 | @@ -XXX,XX +XXX,XX @@ testlist options | ||
56 | cachemode=true | ||
57 | xpand=false | ||
58 | ;; | ||
59 | + -i) | ||
60 | + aiomode=true | ||
61 | + xpand=false | ||
62 | + ;; | ||
63 | -T) # deprecated timestamp option | ||
64 | xpand=false | ||
65 | ;; | ||
66 | - | ||
67 | -v) | ||
68 | verbose=true | ||
69 | xpand=false | ||
70 | @@ -XXX,XX +XXX,XX @@ done | ||
71 | |||
72 | # Set qemu-io cache mode with $CACHEMODE we have | ||
73 | QEMU_IO_OPTIONS="$QEMU_IO_OPTIONS --cache $CACHEMODE" | ||
74 | +# Set qemu-io aio mode with $AIOMODE we have | ||
75 | +QEMU_IO_OPTIONS="$QEMU_IO_OPTIONS --aio $AIOMODE" | ||
76 | |||
77 | QEMU_IO_OPTIONS_NO_FMT="$QEMU_IO_OPTIONS" | ||
78 | if [ "$IMGOPTSSYNTAX" != "true" ]; then | ||
79 | diff --git a/tests/qemu-iotests/common.rc b/tests/qemu-iotests/common.rc | ||
19 | index XXXXXXX..XXXXXXX 100644 | 80 | index XXXXXXX..XXXXXXX 100644 |
20 | --- a/include/hw/ide/internal.h | 81 | --- a/tests/qemu-iotests/common.rc |
21 | +++ b/include/hw/ide/internal.h | 82 | +++ b/tests/qemu-iotests/common.rc |
22 | @@ -XXX,XX +XXX,XX @@ struct IDEState { | 83 | @@ -XXX,XX +XXX,XX @@ _default_cache_mode() |
23 | int atapi_dma; /* true if dma is requested for the packet cmd */ | 84 | return |
24 | BlockAcctCookie acct; | 85 | fi |
25 | BlockAIOCB *pio_aiocb; | 86 | } |
26 | - struct iovec iov; | 87 | +_supported_aio_modes() |
27 | QEMUIOVector qiov; | 88 | +{ |
28 | QLIST_HEAD(, IDEBufferedRequest) buffered_requests; | 89 | + for mode; do |
29 | /* ATA DMA state */ | 90 | + if [ "$mode" = "$AIOMODE" ]; then |
30 | diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c | 91 | + return |
92 | + fi | ||
93 | + done | ||
94 | + _notrun "not suitable for aio mode: $AIOMODE" | ||
95 | +} | ||
96 | +_default_aio_mode() | ||
97 | +{ | ||
98 | + AIOMODE="$1" | ||
99 | + QEMU_IO="$QEMU_IO --aio $1" | ||
100 | +} | ||
101 | |||
102 | _unsupported_imgopts() | ||
103 | { | ||
104 | diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py | ||
31 | index XXXXXXX..XXXXXXX 100644 | 105 | index XXXXXXX..XXXXXXX 100644 |
32 | --- a/hw/ide/atapi.c | 106 | --- a/tests/qemu-iotests/iotests.py |
33 | +++ b/hw/ide/atapi.c | 107 | +++ b/tests/qemu-iotests/iotests.py |
34 | @@ -XXX,XX +XXX,XX @@ static void cd_read_sector_cb(void *opaque, int ret) | 108 | @@ -XXX,XX +XXX,XX @@ test_dir = os.environ.get('TEST_DIR') |
35 | 109 | sock_dir = os.environ.get('SOCK_DIR') | |
36 | static int cd_read_sector(IDEState *s) | 110 | output_dir = os.environ.get('OUTPUT_DIR', '.') |
37 | { | 111 | cachemode = os.environ.get('CACHEMODE') |
38 | + void *buf; | 112 | +aiomode = os.environ.get('AIOMODE') |
113 | qemu_default_machine = os.environ.get('QEMU_DEFAULT_MACHINE') | ||
114 | |||
115 | socket_scm_helper = os.environ.get('SOCKET_SCM_HELPER', 'socket_scm_helper') | ||
116 | @@ -XXX,XX +XXX,XX @@ class VM(qtest.QEMUQtestMachine): | ||
117 | options.append('file=%s' % path) | ||
118 | options.append('format=%s' % format) | ||
119 | options.append('cache=%s' % cachemode) | ||
120 | + options.append('aio=%s' % aiomode) | ||
121 | |||
122 | if opts: | ||
123 | options.append(opts) | ||
124 | @@ -XXX,XX +XXX,XX @@ def verify_cache_mode(supported_cache_modes=[]): | ||
125 | if supported_cache_modes and (cachemode not in supported_cache_modes): | ||
126 | notrun('not suitable for this cache mode: %s' % cachemode) | ||
127 | |||
128 | +def verify_aio_mode(supported_aio_modes=[]): | ||
129 | + if supported_aio_modes and (aiomode not in supported_aio_modes): | ||
130 | + notrun('not suitable for this aio mode: %s' % aiomode) | ||
39 | + | 131 | + |
40 | if (s->cd_sector_size != 2048 && s->cd_sector_size != 2352) { | 132 | def supports_quorum(): |
41 | block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ); | 133 | return 'quorum' in qemu_img_pipe('--help') |
42 | return -EINVAL; | 134 | |
43 | } | 135 | @@ -XXX,XX +XXX,XX @@ def execute_unittest(output, verbosity, debug): |
44 | 136 | ||
45 | - s->iov.iov_base = (s->cd_sector_size == 2352) ? | 137 | def execute_test(test_function=None, |
46 | - s->io_buffer + 16 : s->io_buffer; | 138 | supported_fmts=[], supported_oses=['linux'], |
47 | - | 139 | - supported_cache_modes=[], unsupported_fmts=[], |
48 | - s->iov.iov_len = ATAPI_SECTOR_SIZE; | 140 | - supported_protocols=[], unsupported_protocols=[]): |
49 | - qemu_iovec_init_external(&s->qiov, &s->iov, 1); | 141 | + supported_cache_modes=[], supported_aio_modes={}, |
50 | + buf = (s->cd_sector_size == 2352) ? s->io_buffer + 16 : s->io_buffer; | 142 | + unsupported_fmts=[], supported_protocols=[], |
51 | + qemu_iovec_init_buf(&s->qiov, buf, ATAPI_SECTOR_SIZE); | 143 | + unsupported_protocols=[]): |
52 | 144 | """Run either unittest or script-style tests.""" | |
53 | trace_cd_read_sector(s->lba); | 145 | |
54 | 146 | # We are using TEST_DIR and QEMU_DEFAULT_MACHINE as proxies to | |
55 | diff --git a/hw/ide/core.c b/hw/ide/core.c | 147 | @@ -XXX,XX +XXX,XX @@ def execute_test(test_function=None, |
56 | index XXXXXXX..XXXXXXX 100644 | 148 | verify_protocol(supported_protocols, unsupported_protocols) |
57 | --- a/hw/ide/core.c | 149 | verify_platform(supported_oses) |
58 | +++ b/hw/ide/core.c | 150 | verify_cache_mode(supported_cache_modes) |
59 | @@ -XXX,XX +XXX,XX @@ static void ide_sector_read(IDEState *s) | 151 | + verify_aio_mode(supported_aio_modes) |
60 | return; | 152 | |
61 | } | 153 | if debug: |
62 | 154 | output = sys.stdout | |
63 | - s->iov.iov_base = s->io_buffer; | ||
64 | - s->iov.iov_len = n * BDRV_SECTOR_SIZE; | ||
65 | - qemu_iovec_init_external(&s->qiov, &s->iov, 1); | ||
66 | + qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE); | ||
67 | |||
68 | block_acct_start(blk_get_stats(s->blk), &s->acct, | ||
69 | n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ); | ||
70 | @@ -XXX,XX +XXX,XX @@ static void ide_sector_write(IDEState *s) | ||
71 | return; | ||
72 | } | ||
73 | |||
74 | - s->iov.iov_base = s->io_buffer; | ||
75 | - s->iov.iov_len = n * BDRV_SECTOR_SIZE; | ||
76 | - qemu_iovec_init_external(&s->qiov, &s->iov, 1); | ||
77 | + qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE); | ||
78 | |||
79 | block_acct_start(blk_get_stats(s->blk), &s->acct, | ||
80 | n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE); | ||
81 | -- | 155 | -- |
82 | 2.20.1 | 156 | 2.24.1 |
83 | 157 | ||
84 | 158 | diff view generated by jsdifflib |
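Taken together, the ./check, common.rc and iotests.py hunks above wire the new AIO mode through the harness the same way the cache mode already is: ./check -i MODE sets and exports AIOMODE (default "threads") and appends --aio $AIOMODE to QEMU_IO_OPTIONS; shell tests can skip themselves with _supported_aio_modes (for example "_supported_aio_modes threads native" in a test that cannot run under io_uring), and Python tests pass supported_aio_modes=[...] so that verify_aio_mode() marks unsupported modes as notrun. The example guard values are illustrative; the option and helper names come from the hunks above.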
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> |
---|---|---|---|
2 | 2 | ||
3 | Use the new QEMU_IOVEC_INIT_BUF() instead of | 3 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> |
4 | qemu_iovec_init_external( ... , 1), which simplifies the code. | 4 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> |
5 | 5 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | |
6 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 6 | Message-id: 20200120141858.587874-16-stefanha@redhat.com |
7 | Reviewed-by: Eric Blake <eblake@redhat.com> | 7 | Message-Id: <20200120141858.587874-16-stefanha@redhat.com> |
8 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | Message-id: 20190218140926.333779-15-vsementsov@virtuozzo.com | ||
10 | Message-Id: <20190218140926.333779-15-vsementsov@virtuozzo.com> | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
12 | --- | 9 | --- |
13 | tests/test-bdrv-drain.c | 29 ++++------------------------- | 10 | tests/qemu-iotests/028 | 2 +- |
14 | 1 file changed, 4 insertions(+), 25 deletions(-) | 11 | tests/qemu-iotests/058 | 2 +- |
15 | 12 | tests/qemu-iotests/089 | 4 ++-- | |
16 | diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c | 13 | tests/qemu-iotests/091 | 4 ++-- |
17 | index XXXXXXX..XXXXXXX 100644 | 14 | tests/qemu-iotests/109 | 2 +- |
18 | --- a/tests/test-bdrv-drain.c | 15 | tests/qemu-iotests/147 | 5 +++-- |
19 | +++ b/tests/test-bdrv-drain.c | 16 | tests/qemu-iotests/181 | 8 ++++---- |
20 | @@ -XXX,XX +XXX,XX @@ static void test_drv_cb_common(enum drain_type drain_type, bool recursive) | 17 | tests/qemu-iotests/183 | 4 ++-- |
21 | BlockAIOCB *acb; | 18 | tests/qemu-iotests/185 | 10 +++++----- |
22 | int aio_ret; | 19 | tests/qemu-iotests/200 | 2 +- |
23 | 20 | tests/qemu-iotests/201 | 8 ++++---- | |
24 | - QEMUIOVector qiov; | 21 | 11 files changed, 26 insertions(+), 25 deletions(-) |
25 | - struct iovec iov = { | 22 | |
26 | - .iov_base = NULL, | 23 | diff --git a/tests/qemu-iotests/028 b/tests/qemu-iotests/028 |
27 | - .iov_len = 0, | 24 | index XXXXXXX..XXXXXXX 100755 |
28 | - }; | 25 | --- a/tests/qemu-iotests/028 |
29 | - qemu_iovec_init_external(&qiov, &iov, 1); | 26 | +++ b/tests/qemu-iotests/028 |
30 | + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0); | 27 | @@ -XXX,XX +XXX,XX @@ echo block-backup |
31 | 28 | echo | |
32 | blk = blk_new(BLK_PERM_ALL, BLK_PERM_ALL); | 29 | |
33 | bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR, | 30 | qemu_comm_method="monitor" |
34 | @@ -XXX,XX +XXX,XX @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread) | 31 | -_launch_qemu -drive file="${TEST_IMG}",cache=${CACHEMODE},id=disk |
35 | AioContext *ctx_a = iothread_get_aio_context(a); | 32 | +_launch_qemu -drive file="${TEST_IMG}",cache=${CACHEMODE},aio=${AIOMODE},id=disk |
36 | AioContext *ctx_b = iothread_get_aio_context(b); | 33 | h=$QEMU_HANDLE |
37 | 34 | if [ "${VALGRIND_QEMU}" == "y" ]; then | |
38 | - QEMUIOVector qiov; | 35 | QEMU_COMM_TIMEOUT=7 |
39 | - struct iovec iov = { | 36 | diff --git a/tests/qemu-iotests/058 b/tests/qemu-iotests/058 |
40 | - .iov_base = NULL, | 37 | index XXXXXXX..XXXXXXX 100755 |
41 | - .iov_len = 0, | 38 | --- a/tests/qemu-iotests/058 |
42 | - }; | 39 | +++ b/tests/qemu-iotests/058 |
43 | - qemu_iovec_init_external(&qiov, &iov, 1); | 40 | @@ -XXX,XX +XXX,XX @@ nbd_snapshot_img="nbd:unix:$nbd_unix_socket" |
44 | + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0); | 41 | converted_image=$TEST_IMG.converted |
45 | 42 | ||
46 | /* bdrv_drain_all() may only be called from the main loop thread */ | 43 | # Use -f raw instead of -f $IMGFMT for the NBD connection |
47 | if (drain_type == BDRV_DRAIN_ALL && drain_thread != 0) { | 44 | -QEMU_IO_NBD="$QEMU_IO -f raw --cache=$CACHEMODE" |
48 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn test_co_delete_by_drain(void *opaque) | 45 | +QEMU_IO_NBD="$QEMU_IO -f raw --cache=$CACHEMODE --aio=$AIOMODE" |
49 | BlockDriverState *bs = blk_bs(blk); | 46 | |
50 | BDRVTestTopState *tts = bs->opaque; | 47 | echo |
51 | void *buffer = g_malloc(65536); | 48 | echo "== preparing image ==" |
52 | - QEMUIOVector qiov; | 49 | diff --git a/tests/qemu-iotests/089 b/tests/qemu-iotests/089 |
53 | - struct iovec iov = { | 50 | index XXXXXXX..XXXXXXX 100755 |
54 | - .iov_base = buffer, | 51 | --- a/tests/qemu-iotests/089 |
55 | - .iov_len = 65536, | 52 | +++ b/tests/qemu-iotests/089 |
56 | - }; | 53 | @@ -XXX,XX +XXX,XX @@ $QEMU_IO -c 'write -P 42 0 512' -c 'write -P 23 512 512' \ |
57 | - | 54 | |
58 | - qemu_iovec_init_external(&qiov, &iov, 1); | 55 | $QEMU_IMG convert -f raw -O $IMGFMT "$TEST_IMG.base" "$TEST_IMG" |
59 | + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buffer, 65536); | 56 | |
60 | 57 | -$QEMU_IO_PROG --cache $CACHEMODE \ | |
61 | /* Pretend some internal write operation from parent to child. | 58 | +$QEMU_IO_PROG --cache $CACHEMODE --aio $AIOMODE \ |
62 | * Important: We have to read from the child, not from the parent! | 59 | -c 'read -P 42 0 512' -c 'read -P 23 512 512' \ |
63 | @@ -XXX,XX +XXX,XX @@ static void test_detach_indirect(bool by_parent_cb) | 60 | -c 'read -P 66 1024 512' "json:{ |
64 | BdrvChild *child_a, *child_b; | 61 | \"driver\": \"$IMGFMT\", |
65 | BlockAIOCB *acb; | 62 | @@ -XXX,XX +XXX,XX @@ $QEMU_IO -c 'write -P 42 0x38000 512' "$TEST_IMG" | _filter_qemu_io |
66 | 63 | ||
67 | - QEMUIOVector qiov; | 64 | # The "image.filename" part tests whether "a": { "b": "c" } and "a.b": "c" do |
68 | - struct iovec iov = { | 65 | # the same (which they should). |
69 | - .iov_base = NULL, | 66 | -$QEMU_IO_PROG --cache $CACHEMODE \ |
70 | - .iov_len = 0, | 67 | +$QEMU_IO_PROG --cache $CACHEMODE --aio $AIOMODE \ |
71 | - }; | 68 | -c 'read -P 42 0x38000 512' "json:{ |
72 | - qemu_iovec_init_external(&qiov, &iov, 1); | 69 | \"driver\": \"$IMGFMT\", |
73 | + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0); | 70 | \"file\": { |
74 | 71 | diff --git a/tests/qemu-iotests/091 b/tests/qemu-iotests/091 | |
75 | if (!by_parent_cb) { | 72 | index XXXXXXX..XXXXXXX 100755 |
76 | detach_by_driver_cb_role = child_file; | 73 | --- a/tests/qemu-iotests/091 |
74 | +++ b/tests/qemu-iotests/091 | ||
75 | @@ -XXX,XX +XXX,XX @@ echo === Starting QEMU VM1 === | ||
76 | echo | ||
77 | |||
78 | qemu_comm_method="monitor" | ||
79 | -_launch_qemu -drive file="${TEST_IMG}",cache=${CACHEMODE},id=disk | ||
80 | +_launch_qemu -drive file="${TEST_IMG}",cache=${CACHEMODE},aio=${AIOMODE},id=disk | ||
81 | h1=$QEMU_HANDLE | ||
82 | |||
83 | echo | ||
84 | echo === Starting QEMU VM2 === | ||
85 | echo | ||
86 | -_launch_qemu -drive file="${TEST_IMG}",cache=${CACHEMODE},id=disk \ | ||
87 | +_launch_qemu -drive file="${TEST_IMG}",cache=${CACHEMODE},aio=${AIOMODE},id=disk \ | ||
88 | -incoming "exec: cat '${MIG_FIFO}'" | ||
89 | h2=$QEMU_HANDLE | ||
90 | |||
91 | diff --git a/tests/qemu-iotests/109 b/tests/qemu-iotests/109 | ||
92 | index XXXXXXX..XXXXXXX 100755 | ||
93 | --- a/tests/qemu-iotests/109 | ||
94 | +++ b/tests/qemu-iotests/109 | ||
95 | @@ -XXX,XX +XXX,XX @@ run_qemu() | ||
96 | local qmp_format="$3" | ||
97 | local qmp_event="$4" | ||
98 | |||
99 | - _launch_qemu -drive file="${source_img}",format=raw,cache=${CACHEMODE},id=src | ||
100 | + _launch_qemu -drive file="${source_img}",format=raw,cache=${CACHEMODE},aio=${AIOMODE},id=src | ||
101 | _send_qemu_cmd $QEMU_HANDLE "{ 'execute': 'qmp_capabilities' }" "return" | ||
102 | |||
103 | _send_qemu_cmd $QEMU_HANDLE \ | ||
104 | diff --git a/tests/qemu-iotests/147 b/tests/qemu-iotests/147 | ||
105 | index XXXXXXX..XXXXXXX 100755 | ||
106 | --- a/tests/qemu-iotests/147 | ||
107 | +++ b/tests/qemu-iotests/147 | ||
108 | @@ -XXX,XX +XXX,XX @@ import socket | ||
109 | import stat | ||
110 | import time | ||
111 | import iotests | ||
112 | -from iotests import cachemode, imgfmt, qemu_img, qemu_nbd, qemu_nbd_early_pipe | ||
113 | +from iotests import cachemode, aiomode, imgfmt, qemu_img, qemu_nbd, qemu_nbd_early_pipe | ||
114 | |||
115 | NBD_PORT_START = 32768 | ||
116 | NBD_PORT_END = NBD_PORT_START + 1024 | ||
117 | @@ -XXX,XX +XXX,XX @@ class BuiltinNBD(NBDBlockdevAddBase): | ||
118 | self.server.add_drive_raw('if=none,id=nbd-export,' + | ||
119 | 'file=%s,' % test_img + | ||
120 | 'format=%s,' % imgfmt + | ||
121 | - 'cache=%s' % cachemode) | ||
122 | + 'cache=%s,' % cachemode + | ||
123 | + 'aio=%s' % aiomode) | ||
124 | self.server.launch() | ||
125 | |||
126 | def tearDown(self): | ||
127 | diff --git a/tests/qemu-iotests/181 b/tests/qemu-iotests/181 | ||
128 | index XXXXXXX..XXXXXXX 100755 | ||
129 | --- a/tests/qemu-iotests/181 | ||
130 | +++ b/tests/qemu-iotests/181 | ||
131 | @@ -XXX,XX +XXX,XX @@ qemu_comm_method="monitor" | ||
132 | |||
133 | if [ "$IMGOPTSSYNTAX" = "true" ]; then | ||
134 | _launch_qemu \ | ||
135 | - -drive "${TEST_IMG}",cache=${CACHEMODE},id=disk | ||
136 | + -drive "${TEST_IMG}",cache=${CACHEMODE},aio=$AIOMODE,id=disk | ||
137 | else | ||
138 | _launch_qemu \ | ||
139 | - -drive file="${TEST_IMG}",cache=${CACHEMODE},driver=$IMGFMT,id=disk | ||
140 | + -drive file="${TEST_IMG}",cache=${CACHEMODE},aio=$AIOMODE,driver=$IMGFMT,id=disk | ||
141 | fi | ||
142 | src=$QEMU_HANDLE | ||
143 | |||
144 | if [ "$IMGOPTSSYNTAX" = "true" ]; then | ||
145 | _launch_qemu \ | ||
146 | - -drive "${TEST_IMG}",cache=${CACHEMODE},id=disk \ | ||
147 | + -drive "${TEST_IMG}",cache=${CACHEMODE},aio=$AIOMODE,id=disk \ | ||
148 | -incoming "unix:${MIG_SOCKET}" | ||
149 | else | ||
150 | _launch_qemu \ | ||
151 | - -drive file="${TEST_IMG}",cache=${CACHEMODE},driver=$IMGFMT,id=disk \ | ||
152 | + -drive file="${TEST_IMG}",cache=${CACHEMODE},aio=$AIOMODE,driver=$IMGFMT,id=disk \ | ||
153 | -incoming "unix:${MIG_SOCKET}" | ||
154 | fi | ||
155 | dest=$QEMU_HANDLE | ||
156 | diff --git a/tests/qemu-iotests/183 b/tests/qemu-iotests/183 | ||
157 | index XXXXXXX..XXXXXXX 100755 | ||
158 | --- a/tests/qemu-iotests/183 | ||
159 | +++ b/tests/qemu-iotests/183 | ||
160 | @@ -XXX,XX +XXX,XX @@ echo | ||
161 | qemu_comm_method="qmp" | ||
162 | |||
163 | _launch_qemu \ | ||
164 | - -drive file="${TEST_IMG}",cache=$CACHEMODE,driver=$IMGFMT,id=disk | ||
165 | + -drive file="${TEST_IMG}",cache=$CACHEMODE,aio=$AIOMODE,driver=$IMGFMT,id=disk | ||
166 | src=$QEMU_HANDLE | ||
167 | _send_qemu_cmd $src "{ 'execute': 'qmp_capabilities' }" 'return' | ||
168 | |||
169 | _launch_qemu \ | ||
170 | - -drive file="${TEST_IMG}.dest",cache=$CACHEMODE,driver=$IMGFMT,id=disk \ | ||
171 | + -drive file="${TEST_IMG}.dest",cache=$CACHEMODE,aio=$AIOMODE,driver=$IMGFMT,id=disk \ | ||
172 | -incoming "unix:${MIG_SOCKET}" | ||
173 | dest=$QEMU_HANDLE | ||
174 | _send_qemu_cmd $dest "{ 'execute': 'qmp_capabilities' }" 'return' | ||
175 | diff --git a/tests/qemu-iotests/185 b/tests/qemu-iotests/185 | ||
176 | index XXXXXXX..XXXXXXX 100755 | ||
177 | --- a/tests/qemu-iotests/185 | ||
178 | +++ b/tests/qemu-iotests/185 | ||
179 | @@ -XXX,XX +XXX,XX @@ echo | ||
180 | qemu_comm_method="qmp" | ||
181 | |||
182 | _launch_qemu \ | ||
183 | - -drive file="${TEST_IMG}.base",cache=$CACHEMODE,driver=$IMGFMT,id=disk | ||
184 | + -drive file="${TEST_IMG}.base",cache=$CACHEMODE,aio=$AIOMODE,driver=$IMGFMT,id=disk | ||
185 | h=$QEMU_HANDLE | ||
186 | _send_qemu_cmd $h "{ 'execute': 'qmp_capabilities' }" 'return' | ||
187 | |||
188 | @@ -XXX,XX +XXX,XX @@ echo === Start active commit job and exit qemu === | ||
189 | echo | ||
190 | |||
191 | _launch_qemu \ | ||
192 | - -drive file="${TEST_IMG}",cache=$CACHEMODE,driver=$IMGFMT,id=disk | ||
193 | + -drive file="${TEST_IMG}",cache=$CACHEMODE,aio=$AIOMODE,driver=$IMGFMT,id=disk | ||
194 | h=$QEMU_HANDLE | ||
195 | _send_qemu_cmd $h "{ 'execute': 'qmp_capabilities' }" 'return' | ||
196 | |||
197 | @@ -XXX,XX +XXX,XX @@ echo === Start mirror job and exit qemu === | ||
198 | echo | ||
199 | |||
200 | _launch_qemu \ | ||
201 | - -drive file="${TEST_IMG}",cache=$CACHEMODE,driver=$IMGFMT,id=disk | ||
202 | + -drive file="${TEST_IMG}",cache=$CACHEMODE,aio=$AIOMODE,driver=$IMGFMT,id=disk | ||
203 | h=$QEMU_HANDLE | ||
204 | _send_qemu_cmd $h "{ 'execute': 'qmp_capabilities' }" 'return' | ||
205 | |||
206 | @@ -XXX,XX +XXX,XX @@ echo === Start backup job and exit qemu === | ||
207 | echo | ||
208 | |||
209 | _launch_qemu \ | ||
210 | - -drive file="${TEST_IMG}",cache=$CACHEMODE,driver=$IMGFMT,id=disk | ||
211 | + -drive file="${TEST_IMG}",cache=$CACHEMODE,aio=$AIOMODE,driver=$IMGFMT,id=disk | ||
212 | h=$QEMU_HANDLE | ||
213 | _send_qemu_cmd $h "{ 'execute': 'qmp_capabilities' }" 'return' | ||
214 | |||
215 | @@ -XXX,XX +XXX,XX @@ echo === Start streaming job and exit qemu === | ||
216 | echo | ||
217 | |||
218 | _launch_qemu \ | ||
219 | - -drive file="${TEST_IMG}",cache=$CACHEMODE,driver=$IMGFMT,id=disk | ||
220 | + -drive file="${TEST_IMG}",cache=$CACHEMODE,aio=$AIOMODE,driver=$IMGFMT,id=disk | ||
221 | h=$QEMU_HANDLE | ||
222 | _send_qemu_cmd $h "{ 'execute': 'qmp_capabilities' }" 'return' | ||
223 | |||
224 | diff --git a/tests/qemu-iotests/200 b/tests/qemu-iotests/200 | ||
225 | index XXXXXXX..XXXXXXX 100755 | ||
226 | --- a/tests/qemu-iotests/200 | ||
227 | +++ b/tests/qemu-iotests/200 | ||
228 | @@ -XXX,XX +XXX,XX @@ echo === Starting QEMU VM === | ||
229 | echo | ||
230 | qemu_comm_method="qmp" | ||
231 | _launch_qemu -object iothread,id=iothread0 $virtio_scsi \ | ||
232 | - -drive file="${TEST_IMG}",media=disk,if=none,cache=$CACHEMODE,id=drive_sysdisk,format=$IMGFMT \ | ||
233 | + -drive file="${TEST_IMG}",media=disk,if=none,cache=$CACHEMODE,aio=$AIOMODE,id=drive_sysdisk,format=$IMGFMT \ | ||
234 | -device scsi-hd,drive=drive_sysdisk,bus=scsi0.0,id=sysdisk,bootindex=0 | ||
235 | h1=$QEMU_HANDLE | ||
236 | |||
237 | diff --git a/tests/qemu-iotests/201 b/tests/qemu-iotests/201 | ||
238 | index XXXXXXX..XXXXXXX 100755 | ||
239 | --- a/tests/qemu-iotests/201 | ||
240 | +++ b/tests/qemu-iotests/201 | ||
241 | @@ -XXX,XX +XXX,XX @@ qemu_comm_method="monitor" | ||
242 | |||
243 | if [ "$IMGOPTSSYNTAX" = "true" ]; then | ||
244 | _launch_qemu \ | ||
245 | - -drive "${TEST_IMG}",cache=${CACHEMODE},id=disk | ||
246 | + -drive "${TEST_IMG}",cache=${CACHEMODE},aio=$AIOMODE,id=disk | ||
247 | else | ||
248 | _launch_qemu \ | ||
249 | - -drive file="${TEST_IMG}",cache=${CACHEMODE},driver=$IMGFMT,id=disk | ||
250 | + -drive file="${TEST_IMG}",cache=${CACHEMODE},aio=$AIOMODE,driver=$IMGFMT,id=disk | ||
251 | fi | ||
252 | src=$QEMU_HANDLE | ||
253 | |||
254 | if [ "$IMGOPTSSYNTAX" = "true" ]; then | ||
255 | _launch_qemu \ | ||
256 | - -drive "${TEST_IMG}",cache=${CACHEMODE},id=disk \ | ||
257 | + -drive "${TEST_IMG}",cache=${CACHEMODE},aio=$AIOMODE,id=disk \ | ||
258 | -incoming "unix:${MIG_SOCKET}" | ||
259 | else | ||
260 | _launch_qemu \ | ||
261 | - -drive file="${TEST_IMG}",cache=${CACHEMODE},driver=$IMGFMT,id=disk \ | ||
262 | + -drive file="${TEST_IMG}",cache=${CACHEMODE},aio=$AIOMODE,driver=$IMGFMT,id=disk \ | ||
263 | -incoming "unix:${MIG_SOCKET}" | ||
264 | fi | ||
265 | dest=$QEMU_HANDLE | ||
77 | -- | 266 | -- |
78 | 2.20.1 | 267 | 2.24.1 |
79 | 268 | ||
80 | 269 | diff view generated by jsdifflib |
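With aio=$AIOMODE threaded into the -drive options of these migration, NBD and block-job tests, a run such as ./check -i io_uring (or -i native) exercises the same scenarios against the selected AIO backend instead of only the thread-pool default; an io_uring run assumes a kernel and QEMU build with io_uring support, and the specific invocation shown here is illustrative.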