The following changes since commit 19b599f7664b2ebfd0f405fb79c14dd241557452:

  Merge remote-tracking branch 'remotes/armbru/tags/pull-error-2018-08-27-v2' into staging (2018-08-27 16:44:20 +0100)

are available in the Git repository at:

  https://git.xanclic.moe/XanClic/qemu.git tags/pull-block-2018-08-31

for you to fetch changes up to 40954cc7831c4f95f9ce6402ae3d6761f44f31ff:

  jobs: remove job_defer_to_main_loop (2018-08-31 16:11:27 +0200)

----------------------------------------------------------------
Block patches:
- (Block) job exit refactoring, part 1
  (removing job_defer_to_main_loop())
- Locking fix for the file-posix block driver
- test-bdrv-drain leak fix

----------------------------------------------------------------

Fam Zheng (1):
  file-posix: Skip effectiveless OFD lock operations

John Snow (9):
  jobs: change start callback to run callback
  jobs: canonize Error object
  jobs: add exit shim
  block/commit: utilize job_exit shim
  block/mirror: utilize job_exit shim
  jobs: utilize job_exit shim
  block/backup: make function variables consistently named
  jobs: remove ret argument to job_completed; privatize it
  jobs: remove job_defer_to_main_loop

Marc-André Lureau (1):
  tests: fix bdrv-drain leak

 include/qemu/job.h        | 70 ++++++++++++++++-----------------
 block/backup.c            | 81 ++++++++++++++++-----------------------
 block/commit.c            | 29 +++++---------
 block/create.c            | 19 +++------
 block/file-posix.c        | 41 +++++++++++++++-----
 block/mirror.c            | 39 ++++++-----------
 block/stream.c            | 29 ++++++--------
 job-qmp.c                 |  5 ++-
 job.c                     | 73 ++++++++++++-----------------------
 tests/test-bdrv-drain.c   | 14 +++----
 tests/test-blockjob-txn.c | 25 +++++-------
 tests/test-blockjob.c     | 17 ++++----
 trace-events              |  2 +-
 13 files changed, 192 insertions(+), 252 deletions(-)

--
2.17.1


The following changes since commit fc3dbb90f2eb069801bfb4cfe9cbc83cf9c5f4a9:

  Merge remote-tracking branch 'remotes/jnsnow/tags/bitmaps-pull-request' into staging (2019-02-21 13:09:33 +0000)

are available in the Git repository at:

  git://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 9a9f4b74fa547b68edb38fa414999836770a4735:

  tests/virtio-blk: add test for DISCARD command (2019-02-22 09:42:17 +0000)

----------------------------------------------------------------
Pull request

----------------------------------------------------------------

Stefano Garzarella (10):
  virtio-blk: add acct_failed param to virtio_blk_handle_rw_error()
  virtio-blk: add host_features field in VirtIOBlock
  virtio-blk: add "discard" and "write-zeroes" properties
  virtio-net: make VirtIOFeature usable for other virtio devices
  virtio-blk: set config size depending on the features enabled
  virtio-blk: add DISCARD and WRITE_ZEROES features
  tests/virtio-blk: change assert on data_size in virtio_blk_request()
  tests/virtio-blk: add virtio_blk_fix_dwz_hdr() function
  tests/virtio-blk: add test for WRITE_ZEROES command
  tests/virtio-blk: add test for DISCARD command

Vladimir Sementsov-Ogievskiy (17):
  block: enhance QEMUIOVector structure
  block/io: use qemu_iovec_init_buf
  block/block-backend: use QEMU_IOVEC_INIT_BUF
  block/backup: use qemu_iovec_init_buf
  block/commit: use QEMU_IOVEC_INIT_BUF
  block/stream: use QEMU_IOVEC_INIT_BUF
  block/parallels: use QEMU_IOVEC_INIT_BUF
  block/qcow: use qemu_iovec_init_buf
  block/qcow2: use qemu_iovec_init_buf
  block/qed: use qemu_iovec_init_buf
  block/vmdk: use qemu_iovec_init_buf
  qemu-img: use qemu_iovec_init_buf
  migration/block: use qemu_iovec_init_buf
  tests/test-bdrv-drain: use QEMU_IOVEC_INIT_BUF
  hw/ide: drop iov field from IDEState
  hw/ide: drop iov field from IDEBufferedRequest
  hw/ide: drop iov field from IDEDMA

 include/hw/ide/internal.h      |   3 -
 include/hw/virtio/virtio-blk.h |   6 +-
 include/hw/virtio/virtio.h     |  15 ++
 include/qemu/iov.h             |  64 ++++++++-
 block/backup.c                 |   5 +-
 block/block-backend.c          |  13 +-
 block/commit.c                 |   7 +-
 block/io.c                     |  89 +++---------
 block/parallels.c              |  13 +-
 block/qcow.c                   |  21 +--
 block/qcow2.c                  |  12 +-
 block/qed-table.c              |  16 +--
 block/qed.c                    |  31 ++---
 block/stream.c                 |   7 +-
 block/vmdk.c                   |   7 +-
 hw/block/virtio-blk.c          | 245 ++++++++++++++++++++++++++++++---
 hw/core/machine.c              |   2 +
 hw/ide/atapi.c                 |  14 +-
 hw/ide/core.c                  |  19 ++-
 hw/net/virtio-net.c            |  31 +----
 hw/virtio/virtio.c             |  15 ++
 migration/block.c              |  10 +-
 qemu-img.c                     |  10 +-
 tests/test-bdrv-drain.c        |  29 +---
 tests/virtio-blk-test.c        | 127 ++++++++++++++++-
 25 files changed, 525 insertions(+), 286 deletions(-)

--
2.20.1
New patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
2
3
Add the possibility of an embedded iovec, for cases when we need only one
local iov.
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
8
Message-id: 20190218140926.333779-2-vsementsov@virtuozzo.com
9
Message-Id: <20190218140926.333779-2-vsementsov@virtuozzo.com>
10
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
11
---
12
include/qemu/iov.h | 64 ++++++++++++++++++++++++++++++++++++++++++++--
13
1 file changed, 62 insertions(+), 2 deletions(-)
14
15
diff --git a/include/qemu/iov.h b/include/qemu/iov.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/include/qemu/iov.h
18
+++ b/include/qemu/iov.h
19
@@ -XXX,XX +XXX,XX @@ size_t iov_discard_back(struct iovec *iov, unsigned int *iov_cnt,
20
typedef struct QEMUIOVector {
21
struct iovec *iov;
22
int niov;
23
- int nalloc;
24
- size_t size;
25
+
26
+ /*
27
+ * For external @iov (qemu_iovec_init_external()) or allocated @iov
28
+ * (qemu_iovec_init()), @size is the cumulative size of iovecs and
29
+ * @local_iov is invalid and unused.
30
+ *
31
+ * For embedded @iov (QEMU_IOVEC_INIT_BUF() or qemu_iovec_init_buf()),
32
+ * @iov is equal to &@local_iov, and @size is valid, as it has same
33
+ * offset and type as @local_iov.iov_len, which is guaranteed by
34
+ * static assertion below.
35
+ *
36
+ * @nalloc is always valid and is -1 both for embedded and external
37
+ * cases. It is included in the union only to ensure the padding prior
38
+ * to the @size field will not result in a 0-length array.
39
+ */
40
+ union {
41
+ struct {
42
+ int nalloc;
43
+ struct iovec local_iov;
44
+ };
45
+ struct {
46
+ char __pad[sizeof(int) + offsetof(struct iovec, iov_len)];
47
+ size_t size;
48
+ };
49
+ };
50
} QEMUIOVector;
51
52
+QEMU_BUILD_BUG_ON(offsetof(QEMUIOVector, size) !=
53
+ offsetof(QEMUIOVector, local_iov.iov_len));
54
+
55
+#define QEMU_IOVEC_INIT_BUF(self, buf, len) \
56
+{ \
57
+ .iov = &(self).local_iov, \
58
+ .niov = 1, \
59
+ .nalloc = -1, \
60
+ .local_iov = { \
61
+ .iov_base = (void *)(buf), /* cast away const */ \
62
+ .iov_len = (len), \
63
+ }, \
64
+}
65
+
66
+/*
67
+ * qemu_iovec_init_buf
68
+ *
69
+ * Initialize embedded QEMUIOVector.
70
+ *
71
+ * Note: "const" is used over @buf pointer to make it simple to pass
72
+ * const pointers, appearing in read functions. Then this "const" is
73
+ * cast away by QEMU_IOVEC_INIT_BUF().
74
+ */
75
+static inline void qemu_iovec_init_buf(QEMUIOVector *qiov,
76
+ const void *buf, size_t len)
77
+{
78
+ *qiov = (QEMUIOVector) QEMU_IOVEC_INIT_BUF(*qiov, buf, len);
79
+}
80
+
81
+static inline void *qemu_iovec_buf(QEMUIOVector *qiov)
82
+{
83
+ /* Only supports embedded iov */
84
+ assert(qiov->nalloc == -1 && qiov->iov == &qiov->local_iov);
85
+
86
+ return qiov->local_iov.iov_base;
87
+}
88
+
89
void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint);
90
void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov);
91
void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len);
92
--
93
2.20.1
94
95
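As a rough illustration of what the new helpers buy us (this example is not
part of the patch; the helper is invented and simply calls bdrv_preadv() the
way later patches in this series do), a single-buffer read used to require a
separate struct iovec that had to stay in scope:

    /* Before: external iovec, two local variables to keep alive. */
    static int read_buf_old(BdrvChild *child, int64_t offset,
                            void *buf, size_t len)
    {
        QEMUIOVector qiov;
        struct iovec iov = {
            .iov_base = buf,
            .iov_len  = len,
        };

        qemu_iovec_init_external(&qiov, &iov, 1);
        return bdrv_preadv(child, offset, &qiov);
    }

    /* After: the iovec is embedded in the QEMUIOVector itself. */
    static int read_buf_new(BdrvChild *child, int64_t offset,
                            void *buf, size_t len)
    {
        QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, len);

        return bdrv_preadv(child, offset, &qiov);
    }

QEMU_IOVEC_INIT_BUF() is for initialization at declaration time;
qemu_iovec_init_buf() covers the case where the buffer only becomes known
later, and qemu_iovec_buf() hands the embedded buffer back (e.g. for
qemu_vfree()).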
New patch
1
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
3
Use new qemu_iovec_init_buf() instead of
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
5
6
While at it, use qemu_try_blockalign0() as well.
7
8
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
9
Reviewed-by: Eric Blake <eblake@redhat.com>
10
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
11
Message-id: 20190218140926.333779-3-vsementsov@virtuozzo.com
12
Message-Id: <20190218140926.333779-3-vsementsov@virtuozzo.com>
13
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
14
---
15
block/io.c | 89 ++++++++++++------------------------------------------
16
1 file changed, 20 insertions(+), 69 deletions(-)
17
18
diff --git a/block/io.c b/block/io.c
19
index XXXXXXX..XXXXXXX 100644
20
--- a/block/io.c
21
+++ b/block/io.c
22
@@ -XXX,XX +XXX,XX @@ static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
23
static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf,
24
int nb_sectors, bool is_write, BdrvRequestFlags flags)
25
{
26
- QEMUIOVector qiov;
27
- struct iovec iov = {
28
- .iov_base = (void *)buf,
29
- .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
30
- };
31
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf,
32
+ nb_sectors * BDRV_SECTOR_SIZE);
33
34
if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
35
return -EINVAL;
36
}
37
38
- qemu_iovec_init_external(&qiov, &iov, 1);
39
return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS,
40
&qiov, is_write, flags);
41
}
42
@@ -XXX,XX +XXX,XX @@ int bdrv_write(BdrvChild *child, int64_t sector_num,
43
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
44
int bytes, BdrvRequestFlags flags)
45
{
46
- QEMUIOVector qiov;
47
- struct iovec iov = {
48
- .iov_base = NULL,
49
- .iov_len = bytes,
50
- };
51
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes);
52
53
- qemu_iovec_init_external(&qiov, &iov, 1);
54
return bdrv_prwv_co(child, offset, &qiov, true,
55
BDRV_REQ_ZERO_WRITE | flags);
56
}
57
@@ -XXX,XX +XXX,XX @@ int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
58
59
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
60
{
61
- QEMUIOVector qiov;
62
- struct iovec iov = {
63
- .iov_base = (void *)buf,
64
- .iov_len = bytes,
65
- };
66
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
67
68
if (bytes < 0) {
69
return -EINVAL;
70
}
71
72
- qemu_iovec_init_external(&qiov, &iov, 1);
73
return bdrv_preadv(child, offset, &qiov);
74
}
75
76
@@ -XXX,XX +XXX,XX @@ int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
77
78
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
79
{
80
- QEMUIOVector qiov;
81
- struct iovec iov = {
82
- .iov_base = (void *) buf,
83
- .iov_len = bytes,
84
- };
85
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
86
87
if (bytes < 0) {
88
return -EINVAL;
89
}
90
91
- qemu_iovec_init_external(&qiov, &iov, 1);
92
return bdrv_pwritev(child, offset, &qiov);
93
}
94
95
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
96
void *bounce_buffer;
97
98
BlockDriver *drv = bs->drv;
99
- struct iovec iov;
100
QEMUIOVector local_qiov;
101
int64_t cluster_offset;
102
int64_t cluster_bytes;
103
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
104
105
if (ret <= 0) {
106
/* Must copy-on-read; use the bounce buffer */
107
- iov.iov_base = bounce_buffer;
108
- iov.iov_len = pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
109
- qemu_iovec_init_external(&local_qiov, &iov, 1);
110
+ pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
111
+ qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);
112
113
ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
114
&local_qiov, 0);
115
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
116
{
117
BlockDriver *drv = bs->drv;
118
QEMUIOVector qiov;
119
- struct iovec iov = {0};
120
+ void *buf = NULL;
121
int ret = 0;
122
bool need_flush = false;
123
int head = 0;
124
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
125
need_flush = true;
126
}
127
num = MIN(num, max_transfer);
128
- iov.iov_len = num;
129
- if (iov.iov_base == NULL) {
130
- iov.iov_base = qemu_try_blockalign(bs, num);
131
- if (iov.iov_base == NULL) {
132
+ if (buf == NULL) {
133
+ buf = qemu_try_blockalign0(bs, num);
134
+ if (buf == NULL) {
135
ret = -ENOMEM;
136
goto fail;
137
}
138
- memset(iov.iov_base, 0, num);
139
}
140
- qemu_iovec_init_external(&qiov, &iov, 1);
141
+ qemu_iovec_init_buf(&qiov, buf, num);
142
143
ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);
144
145
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
146
* all future requests.
147
*/
148
if (num < max_transfer) {
149
- qemu_vfree(iov.iov_base);
150
- iov.iov_base = NULL;
151
+ qemu_vfree(buf);
152
+ buf = NULL;
153
}
154
}
155
156
@@ -XXX,XX +XXX,XX @@ fail:
157
if (ret == 0 && need_flush) {
158
ret = bdrv_co_flush(bs);
159
}
160
- qemu_vfree(iov.iov_base);
161
+ qemu_vfree(buf);
162
return ret;
163
}
164
165
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
166
BlockDriverState *bs = child->bs;
167
uint8_t *buf = NULL;
168
QEMUIOVector local_qiov;
169
- struct iovec iov;
170
uint64_t align = bs->bl.request_alignment;
171
unsigned int head_padding_bytes, tail_padding_bytes;
172
int ret = 0;
173
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
174
assert(flags & BDRV_REQ_ZERO_WRITE);
175
if (head_padding_bytes || tail_padding_bytes) {
176
buf = qemu_blockalign(bs, align);
177
- iov = (struct iovec) {
178
- .iov_base = buf,
179
- .iov_len = align,
180
- };
181
- qemu_iovec_init_external(&local_qiov, &iov, 1);
182
+ qemu_iovec_init_buf(&local_qiov, buf, align);
183
}
184
if (head_padding_bytes) {
185
uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);
186
@@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
187
188
if (offset & (align - 1)) {
189
QEMUIOVector head_qiov;
190
- struct iovec head_iov;
191
192
mark_request_serialising(&req, align);
193
wait_serialising_requests(&req);
194
195
head_buf = qemu_blockalign(bs, align);
196
- head_iov = (struct iovec) {
197
- .iov_base = head_buf,
198
- .iov_len = align,
199
- };
200
- qemu_iovec_init_external(&head_qiov, &head_iov, 1);
201
+ qemu_iovec_init_buf(&head_qiov, head_buf, align);
202
203
bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
204
ret = bdrv_aligned_preadv(child, &req, offset & ~(align - 1), align,
205
@@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
206
207
if ((offset + bytes) & (align - 1)) {
208
QEMUIOVector tail_qiov;
209
- struct iovec tail_iov;
210
size_t tail_bytes;
211
bool waited;
212
213
@@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
214
assert(!waited || !use_local_qiov);
215
216
tail_buf = qemu_blockalign(bs, align);
217
- tail_iov = (struct iovec) {
218
- .iov_base = tail_buf,
219
- .iov_len = align,
220
- };
221
- qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
222
+ qemu_iovec_init_buf(&tail_qiov, tail_buf, align);
223
224
bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
225
ret = bdrv_aligned_preadv(child, &req, (offset + bytes) & ~(align - 1),
226
@@ -XXX,XX +XXX,XX @@ bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
227
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
228
int64_t pos, int size)
229
{
230
- QEMUIOVector qiov;
231
- struct iovec iov = {
232
- .iov_base = (void *) buf,
233
- .iov_len = size,
234
- };
235
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
236
int ret;
237
238
- qemu_iovec_init_external(&qiov, &iov, 1);
239
-
240
ret = bdrv_writev_vmstate(bs, &qiov, pos);
241
if (ret < 0) {
242
return ret;
243
@@ -XXX,XX +XXX,XX @@ int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
244
int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
245
int64_t pos, int size)
246
{
247
- QEMUIOVector qiov;
248
- struct iovec iov = {
249
- .iov_base = buf,
250
- .iov_len = size,
251
- };
252
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
253
int ret;
254
255
- qemu_iovec_init_external(&qiov, &iov, 1);
256
ret = bdrv_readv_vmstate(bs, &qiov, pos);
257
if (ret < 0) {
258
return ret;
259
--
260
2.20.1
261
262
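Besides the mechanical conversions, the write-zeroes path above switches from
a manually memset() allocation tracked in iov.iov_base to a pre-zeroed
allocation wrapped on the spot. A condensed sketch of that pattern, with an
invented helper name (bdrv_driver_pwritev() is the block/io.c-internal call
used in the hunk above):

    static int coroutine_fn write_zeroes_fallback(BlockDriverState *bs,
                                                  int64_t offset, int num,
                                                  BdrvRequestFlags flags)
    {
        QEMUIOVector qiov;
        void *buf = qemu_try_blockalign0(bs, num);   /* zeroed bounce buffer */
        int ret;

        if (buf == NULL) {
            return -ENOMEM;
        }
        qemu_iovec_init_buf(&qiov, buf, num);
        ret = bdrv_driver_pwritev(bs, offset, num, &qiov, flags);
        qemu_vfree(buf);
        return ret;
    }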
New patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
2
3
Use new QEMU_IOVEC_INIT_BUF() instead of
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
9
Message-id: 20190218140926.333779-4-vsementsov@virtuozzo.com
10
Message-Id: <20190218140926.333779-4-vsementsov@virtuozzo.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
13
block/block-backend.c | 13 ++-----------
14
1 file changed, 2 insertions(+), 11 deletions(-)
15
16
diff --git a/block/block-backend.c b/block/block-backend.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/block/block-backend.c
19
+++ b/block/block-backend.c
20
@@ -XXX,XX +XXX,XX @@ static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
21
int64_t bytes, CoroutineEntry co_entry,
22
BdrvRequestFlags flags)
23
{
24
- QEMUIOVector qiov;
25
- struct iovec iov;
26
- BlkRwCo rwco;
27
-
28
- iov = (struct iovec) {
29
- .iov_base = buf,
30
- .iov_len = bytes,
31
- };
32
- qemu_iovec_init_external(&qiov, &iov, 1);
33
-
34
- rwco = (BlkRwCo) {
35
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
36
+ BlkRwCo rwco = {
37
.blk = blk,
38
.offset = offset,
39
.iobuf = &qiov,
40
--
41
2.20.1
42
43
1
From: John Snow <jsnow@redhat.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
Utilize the job_exit shim by not calling job_defer_to_main_loop, and
3
Use new qemu_iovec_init_buf() instead of
4
where applicable, converting the deferred callback into the job_exit
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
5
callback.
6
5
7
This converts backup, stream, create, and the unit tests all at once.
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
8
Most of these jobs do not see any changes to the order in which they
7
Reviewed-by: Eric Blake <eblake@redhat.com>
9
clean up their resources, except the test-blockjob-txn test, which
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
10
now puts down its bs before job_completed is called.
9
Message-id: 20190218140926.333779-5-vsementsov@virtuozzo.com
11
10
Message-Id: <20190218140926.333779-5-vsementsov@virtuozzo.com>
12
This is safe for the same reason the reordering in the mirror job is
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
13
safe, because job_completed no longer runs under two locks, making
14
the unref safe even if it causes a flush.
15
16
Signed-off-by: John Snow <jsnow@redhat.com>
17
Reviewed-by: Max Reitz <mreitz@redhat.com>
18
Message-id: 20180830015734.19765-7-jsnow@redhat.com
19
Signed-off-by: Max Reitz <mreitz@redhat.com>
20
---
12
---
21
block/backup.c | 16 ----------------
13
block/backup.c | 5 +----
22
block/create.c | 14 +++-----------
14
1 file changed, 1 insertion(+), 4 deletions(-)
23
block/stream.c | 22 +++++++---------------
24
tests/test-bdrv-drain.c | 6 ------
25
tests/test-blockjob-txn.c | 11 ++---------
26
tests/test-blockjob.c | 10 ++++------
27
6 files changed, 16 insertions(+), 63 deletions(-)
28
15
29
diff --git a/block/backup.c b/block/backup.c
16
diff --git a/block/backup.c b/block/backup.c
30
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
31
--- a/block/backup.c
18
--- a/block/backup.c
32
+++ b/block/backup.c
19
+++ b/block/backup.c
33
@@ -XXX,XX +XXX,XX @@ static BlockErrorAction backup_error_action(BackupBlockJob *job,
20
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
21
void **bounce_buffer)
22
{
23
int ret;
24
- struct iovec iov;
25
QEMUIOVector qiov;
26
BlockBackend *blk = job->common.blk;
27
int nbytes;
28
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
29
if (!*bounce_buffer) {
30
*bounce_buffer = blk_blockalign(blk, job->cluster_size);
34
}
31
}
35
}
32
- iov.iov_base = *bounce_buffer;
36
33
- iov.iov_len = nbytes;
37
-typedef struct {
34
- qemu_iovec_init_external(&qiov, &iov, 1);
38
- int ret;
35
+ qemu_iovec_init_buf(&qiov, *bounce_buffer, nbytes);
39
-} BackupCompleteData;
36
40
-
37
ret = blk_co_preadv(blk, start, qiov.size, &qiov, read_flags);
41
-static void backup_complete(Job *job, void *opaque)
38
if (ret < 0) {
42
-{
43
- BackupCompleteData *data = opaque;
44
-
45
- job_completed(job, data->ret);
46
- g_free(data);
47
-}
48
-
49
static bool coroutine_fn yield_and_check(BackupBlockJob *job)
50
{
51
uint64_t delay_ns;
52
@@ -XXX,XX +XXX,XX @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
53
static int coroutine_fn backup_run(Job *opaque_job, Error **errp)
54
{
55
BackupBlockJob *job = container_of(opaque_job, BackupBlockJob, common.job);
56
- BackupCompleteData *data;
57
BlockDriverState *bs = blk_bs(job->common.blk);
58
int64_t offset, nb_clusters;
59
int ret = 0;
60
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_run(Job *opaque_job, Error **errp)
61
qemu_co_rwlock_unlock(&job->flush_rwlock);
62
hbitmap_free(job->copy_bitmap);
63
64
- data = g_malloc(sizeof(*data));
65
- data->ret = ret;
66
- job_defer_to_main_loop(&job->common.job, backup_complete, data);
67
return ret;
68
}
69
70
diff --git a/block/create.c b/block/create.c
71
index XXXXXXX..XXXXXXX 100644
72
--- a/block/create.c
73
+++ b/block/create.c
74
@@ -XXX,XX +XXX,XX @@ typedef struct BlockdevCreateJob {
75
Job common;
76
BlockDriver *drv;
77
BlockdevCreateOptions *opts;
78
- int ret;
79
} BlockdevCreateJob;
80
81
-static void blockdev_create_complete(Job *job, void *opaque)
82
-{
83
- BlockdevCreateJob *s = container_of(job, BlockdevCreateJob, common);
84
-
85
- job_completed(job, s->ret);
86
-}
87
-
88
static int coroutine_fn blockdev_create_run(Job *job, Error **errp)
89
{
90
BlockdevCreateJob *s = container_of(job, BlockdevCreateJob, common);
91
+ int ret;
92
93
job_progress_set_remaining(&s->common, 1);
94
- s->ret = s->drv->bdrv_co_create(s->opts, errp);
95
+ ret = s->drv->bdrv_co_create(s->opts, errp);
96
job_progress_update(&s->common, 1);
97
98
qapi_free_BlockdevCreateOptions(s->opts);
99
- job_defer_to_main_loop(&s->common, blockdev_create_complete, NULL);
100
101
- return s->ret;
102
+ return ret;
103
}
104
105
static const JobDriver blockdev_create_job_driver = {
106
diff --git a/block/stream.c b/block/stream.c
107
index XXXXXXX..XXXXXXX 100644
108
--- a/block/stream.c
109
+++ b/block/stream.c
110
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn stream_populate(BlockBackend *blk,
111
return blk_co_preadv(blk, offset, qiov.size, &qiov, BDRV_REQ_COPY_ON_READ);
112
}
113
114
-typedef struct {
115
- int ret;
116
-} StreamCompleteData;
117
-
118
-static void stream_complete(Job *job, void *opaque)
119
+static void stream_exit(Job *job)
120
{
121
StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
122
BlockJob *bjob = &s->common;
123
- StreamCompleteData *data = opaque;
124
BlockDriverState *bs = blk_bs(bjob->blk);
125
BlockDriverState *base = s->base;
126
Error *local_err = NULL;
127
+ int ret = job->ret;
128
129
- if (!job_is_cancelled(job) && bs->backing && data->ret == 0) {
130
+ if (!job_is_cancelled(job) && bs->backing && ret == 0) {
131
const char *base_id = NULL, *base_fmt = NULL;
132
if (base) {
133
base_id = s->backing_file_str;
134
@@ -XXX,XX +XXX,XX @@ static void stream_complete(Job *job, void *opaque)
135
base_fmt = base->drv->format_name;
136
}
137
}
138
- data->ret = bdrv_change_backing_file(bs, base_id, base_fmt);
139
+ ret = bdrv_change_backing_file(bs, base_id, base_fmt);
140
bdrv_set_backing_hd(bs, base, &local_err);
141
if (local_err) {
142
error_report_err(local_err);
143
- data->ret = -EPERM;
144
+ ret = -EPERM;
145
goto out;
146
}
147
}
148
@@ -XXX,XX +XXX,XX @@ out:
149
}
150
151
g_free(s->backing_file_str);
152
- job_completed(job, data->ret);
153
- g_free(data);
154
+ job->ret = ret;
155
}
156
157
static int coroutine_fn stream_run(Job *job, Error **errp)
158
{
159
StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
160
- StreamCompleteData *data;
161
BlockBackend *blk = s->common.blk;
162
BlockDriverState *bs = blk_bs(blk);
163
BlockDriverState *base = s->base;
164
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn stream_run(Job *job, Error **errp)
165
166
out:
167
/* Modify backing chain and close BDSes in main loop */
168
- data = g_malloc(sizeof(*data));
169
- data->ret = ret;
170
- job_defer_to_main_loop(&s->common.job, stream_complete, data);
171
return ret;
172
}
173
174
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver stream_job_driver = {
175
.job_type = JOB_TYPE_STREAM,
176
.free = block_job_free,
177
.run = stream_run,
178
+ .exit = stream_exit,
179
.user_resume = block_job_user_resume,
180
.drain = block_job_drain,
181
},
182
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
183
index XXXXXXX..XXXXXXX 100644
184
--- a/tests/test-bdrv-drain.c
185
+++ b/tests/test-bdrv-drain.c
186
@@ -XXX,XX +XXX,XX @@ typedef struct TestBlockJob {
187
bool should_complete;
188
} TestBlockJob;
189
190
-static void test_job_completed(Job *job, void *opaque)
191
-{
192
- job_completed(job, 0);
193
-}
194
-
195
static int coroutine_fn test_job_run(Job *job, Error **errp)
196
{
197
TestBlockJob *s = container_of(job, TestBlockJob, common.job);
198
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn test_job_run(Job *job, Error **errp)
199
job_pause_point(&s->common.job);
200
}
201
202
- job_defer_to_main_loop(&s->common.job, test_job_completed, NULL);
203
return 0;
204
}
205
206
diff --git a/tests/test-blockjob-txn.c b/tests/test-blockjob-txn.c
207
index XXXXXXX..XXXXXXX 100644
208
--- a/tests/test-blockjob-txn.c
209
+++ b/tests/test-blockjob-txn.c
210
@@ -XXX,XX +XXX,XX @@ typedef struct {
211
int *result;
212
} TestBlockJob;
213
214
-static void test_block_job_complete(Job *job, void *opaque)
215
+static void test_block_job_exit(Job *job)
216
{
217
BlockJob *bjob = container_of(job, BlockJob, job);
218
BlockDriverState *bs = blk_bs(bjob->blk);
219
- int rc = (intptr_t)opaque;
220
221
- if (job_is_cancelled(job)) {
222
- rc = -ECANCELED;
223
- }
224
-
225
- job_completed(job, rc);
226
bdrv_unref(bs);
227
}
228
229
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn test_block_job_run(Job *job, Error **errp)
230
}
231
}
232
233
- job_defer_to_main_loop(job, test_block_job_complete,
234
- (void *)(intptr_t)s->rc);
235
return s->rc;
236
}
237
238
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver test_block_job_driver = {
239
.user_resume = block_job_user_resume,
240
.drain = block_job_drain,
241
.run = test_block_job_run,
242
+ .exit = test_block_job_exit,
243
},
244
};
245
246
diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c
247
index XXXXXXX..XXXXXXX 100644
248
--- a/tests/test-blockjob.c
249
+++ b/tests/test-blockjob.c
250
@@ -XXX,XX +XXX,XX @@ typedef struct CancelJob {
251
bool completed;
252
} CancelJob;
253
254
-static void cancel_job_completed(Job *job, void *opaque)
255
+static void cancel_job_exit(Job *job)
256
{
257
- CancelJob *s = opaque;
258
+ CancelJob *s = container_of(job, CancelJob, common.job);
259
s->completed = true;
260
- job_completed(job, 0);
261
}
262
263
static void cancel_job_complete(Job *job, Error **errp)
264
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn cancel_job_run(Job *job, Error **errp)
265
266
while (!s->should_complete) {
267
if (job_is_cancelled(&s->common.job)) {
268
- goto defer;
269
+ return 0;
270
}
271
272
if (!job_is_ready(&s->common.job) && s->should_converge) {
273
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn cancel_job_run(Job *job, Error **errp)
274
job_sleep_ns(&s->common.job, 100000);
275
}
276
277
- defer:
278
- job_defer_to_main_loop(&s->common.job, cancel_job_completed, s);
279
return 0;
280
}
281
282
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver test_cancel_driver = {
283
.user_resume = block_job_user_resume,
284
.drain = block_job_drain,
285
.run = cancel_job_run,
286
+ .exit = cancel_job_exit,
287
.complete = cancel_job_complete,
288
},
289
};
290
--
39
--
291
2.17.1
40
2.20.1
292
41
293
42
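To summarize the job-exit side of this hunk pair: after the conversion a job's
coroutine only returns its result, and any main-loop cleanup moves into the
.exit callback added earlier in the series. A minimal sketch of the resulting
driver shape (the driver, its job type, and do_work() are hypothetical):

    static void example_job_exit(Job *job)
    {
        /* Runs in the main loop after the coroutine returns; job->ret is
         * already set, so only graph changes and unrefs belong here. */
    }

    static int coroutine_fn example_job_run(Job *job, Error **errp)
    {
        /* No job_defer_to_main_loop(), no job_completed() in here. */
        return do_work(job);    /* hypothetical worker */
    }

    static const JobDriver example_job_driver = {
        .instance_size = sizeof(Job),
        .job_type      = JOB_TYPE_COMMIT,    /* placeholder */
        .run           = example_job_run,
        .exit          = example_job_exit,
    };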
1
From: John Snow <jsnow@redhat.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
Change the manual deferment to commit_complete into the implicit
3
Use new QEMU_IOVEC_INIT_BUF() instead of
4
callback to job_exit, renaming commit_complete to commit_exit.
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
5
5
6
This conversion does change the timing of when job_completed is
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
called to after the bdrv_replace_node and bdrv_unref calls, which
7
Reviewed-by: Eric Blake <eblake@redhat.com>
8
could have implications for bjob->blk which will now be put down
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
9
after this cleanup.
9
Message-id: 20190218140926.333779-6-vsementsov@virtuozzo.com
10
10
Message-Id: <20190218140926.333779-6-vsementsov@virtuozzo.com>
11
Kevin highlights that we did not take any permissions for that backend
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
at job creation time, so it is safe to reorder these operations.
13
14
Signed-off-by: John Snow <jsnow@redhat.com>
15
Reviewed-by: Max Reitz <mreitz@redhat.com>
16
Message-id: 20180830015734.19765-5-jsnow@redhat.com
17
Reviewed-by: Jeff Cody <jcody@redhat.com>
18
Signed-off-by: Max Reitz <mreitz@redhat.com>
19
---
12
---
20
block/commit.c | 22 +++++-----------------
13
block/commit.c | 7 +------
21
1 file changed, 5 insertions(+), 17 deletions(-)
14
1 file changed, 1 insertion(+), 6 deletions(-)
22
15
23
diff --git a/block/commit.c b/block/commit.c
16
diff --git a/block/commit.c b/block/commit.c
24
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
25
--- a/block/commit.c
18
--- a/block/commit.c
26
+++ b/block/commit.c
19
+++ b/block/commit.c
27
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn commit_populate(BlockBackend *bs, BlockBackend *base,
20
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn commit_populate(BlockBackend *bs, BlockBackend *base,
28
return 0;
21
void *buf)
29
}
30
31
-typedef struct {
32
- int ret;
33
-} CommitCompleteData;
34
-
35
-static void commit_complete(Job *job, void *opaque)
36
+static void commit_exit(Job *job)
37
{
22
{
38
CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
39
BlockJob *bjob = &s->common;
40
- CommitCompleteData *data = opaque;
41
BlockDriverState *top = blk_bs(s->top);
42
BlockDriverState *base = blk_bs(s->base);
43
BlockDriverState *commit_top_bs = s->commit_top_bs;
44
- int ret = data->ret;
45
bool remove_commit_top_bs = false;
46
47
/* Make sure commit_top_bs and top stay around until bdrv_replace_node() */
48
@@ -XXX,XX +XXX,XX @@ static void commit_complete(Job *job, void *opaque)
49
* the normal backing chain can be restored. */
50
blk_unref(s->base);
51
52
- if (!job_is_cancelled(job) && ret == 0) {
53
+ if (!job_is_cancelled(job) && job->ret == 0) {
54
/* success */
55
- ret = bdrv_drop_intermediate(s->commit_top_bs, base,
56
- s->backing_file_str);
57
+ job->ret = bdrv_drop_intermediate(s->commit_top_bs, base,
58
+ s->backing_file_str);
59
} else {
60
/* XXX Can (or should) we somehow keep 'consistent read' blocked even
61
* after the failed/cancelled commit job is gone? If we already wrote
62
@@ -XXX,XX +XXX,XX @@ static void commit_complete(Job *job, void *opaque)
63
* bdrv_set_backing_hd() to fail. */
64
block_job_remove_all_bdrv(bjob);
65
66
- job_completed(job, ret);
67
- g_free(data);
68
-
69
/* If bdrv_drop_intermediate() didn't already do that, remove the commit
70
* filter driver from the backing chain. Do this as the final step so that
71
* the 'consistent read' permission can be granted. */
72
@@ -XXX,XX +XXX,XX @@ static void commit_complete(Job *job, void *opaque)
73
static int coroutine_fn commit_run(Job *job, Error **errp)
74
{
75
CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
76
- CommitCompleteData *data;
77
int64_t offset;
78
uint64_t delay_ns = 0;
79
int ret = 0;
23
int ret = 0;
80
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn commit_run(Job *job, Error **errp)
24
- QEMUIOVector qiov;
81
out:
25
- struct iovec iov = {
82
qemu_vfree(buf);
26
- .iov_base = buf,
83
27
- .iov_len = bytes,
84
- data = g_malloc(sizeof(*data));
28
- };
85
- data->ret = ret;
29
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
86
- job_defer_to_main_loop(&s->common.job, commit_complete, data);
30
87
return ret;
31
assert(bytes < SIZE_MAX);
88
}
32
- qemu_iovec_init_external(&qiov, &iov, 1);
89
33
90
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver commit_job_driver = {
34
ret = blk_co_preadv(bs, offset, qiov.size, &qiov, 0);
91
.user_resume = block_job_user_resume,
35
if (ret < 0) {
92
.drain = block_job_drain,
93
.run = commit_run,
94
+ .exit = commit_exit,
95
},
96
};
97
98
--
36
--
99
2.17.1
37
2.20.1
100
38
101
39
1
From: John Snow <jsnow@redhat.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
Presently we codify the entry point for a job as the "start" callback,
3
Use new QEMU_IOVEC_INIT_BUF() instead of
4
but a more apt name would be "run" to clarify the idea that when this
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
5
function returns we consider the job to have "finished," except for
6
any cleanup which occurs in separate callbacks later.
7
5
8
As part of this clarification, change the signature to include an error
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
9
object and a return code. The error ptr is not yet used, and the return
7
Reviewed-by: Eric Blake <eblake@redhat.com>
10
code, while captured, will be overwritten by actions in the job_completed
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
11
function.
9
Message-id: 20190218140926.333779-7-vsementsov@virtuozzo.com
10
Message-Id: <20190218140926.333779-7-vsementsov@virtuozzo.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
13
block/stream.c | 7 +------
14
1 file changed, 1 insertion(+), 6 deletions(-)
12
15
13
Signed-off-by: John Snow <jsnow@redhat.com>
14
Reviewed-by: Max Reitz <mreitz@redhat.com>
15
Message-id: 20180830015734.19765-2-jsnow@redhat.com
16
Reviewed-by: Jeff Cody <jcody@redhat.com>
17
Signed-off-by: Max Reitz <mreitz@redhat.com>
18
---
19
include/qemu/job.h | 2 +-
20
block/backup.c | 7 ++++---
21
block/commit.c | 7 ++++---
22
block/create.c | 8 +++++---
23
block/mirror.c | 10 ++++++----
24
block/stream.c | 7 ++++---
25
job.c | 6 +++---
26
tests/test-bdrv-drain.c | 7 ++++---
27
tests/test-blockjob-txn.c | 16 ++++++++--------
28
tests/test-blockjob.c | 7 ++++---
29
10 files changed, 43 insertions(+), 34 deletions(-)
30
31
diff --git a/include/qemu/job.h b/include/qemu/job.h
32
index XXXXXXX..XXXXXXX 100644
33
--- a/include/qemu/job.h
34
+++ b/include/qemu/job.h
35
@@ -XXX,XX +XXX,XX @@ struct JobDriver {
36
JobType job_type;
37
38
/** Mandatory: Entrypoint for the Coroutine. */
39
- CoroutineEntry *start;
40
+ int coroutine_fn (*run)(Job *job, Error **errp);
41
42
/**
43
* If the callback is not NULL, it will be invoked when the job transitions
44
diff --git a/block/backup.c b/block/backup.c
45
index XXXXXXX..XXXXXXX 100644
46
--- a/block/backup.c
47
+++ b/block/backup.c
48
@@ -XXX,XX +XXX,XX @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
49
bdrv_dirty_iter_free(dbi);
50
}
51
52
-static void coroutine_fn backup_run(void *opaque)
53
+static int coroutine_fn backup_run(Job *opaque_job, Error **errp)
54
{
55
- BackupBlockJob *job = opaque;
56
+ BackupBlockJob *job = container_of(opaque_job, BackupBlockJob, common.job);
57
BackupCompleteData *data;
58
BlockDriverState *bs = blk_bs(job->common.blk);
59
int64_t offset, nb_clusters;
60
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn backup_run(void *opaque)
61
data = g_malloc(sizeof(*data));
62
data->ret = ret;
63
job_defer_to_main_loop(&job->common.job, backup_complete, data);
64
+ return ret;
65
}
66
67
static const BlockJobDriver backup_job_driver = {
68
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver backup_job_driver = {
69
.free = block_job_free,
70
.user_resume = block_job_user_resume,
71
.drain = block_job_drain,
72
- .start = backup_run,
73
+ .run = backup_run,
74
.commit = backup_commit,
75
.abort = backup_abort,
76
.clean = backup_clean,
77
diff --git a/block/commit.c b/block/commit.c
78
index XXXXXXX..XXXXXXX 100644
79
--- a/block/commit.c
80
+++ b/block/commit.c
81
@@ -XXX,XX +XXX,XX @@ static void commit_complete(Job *job, void *opaque)
82
bdrv_unref(top);
83
}
84
85
-static void coroutine_fn commit_run(void *opaque)
86
+static int coroutine_fn commit_run(Job *job, Error **errp)
87
{
88
- CommitBlockJob *s = opaque;
89
+ CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
90
CommitCompleteData *data;
91
int64_t offset;
92
uint64_t delay_ns = 0;
93
@@ -XXX,XX +XXX,XX @@ out:
94
data = g_malloc(sizeof(*data));
95
data->ret = ret;
96
job_defer_to_main_loop(&s->common.job, commit_complete, data);
97
+ return ret;
98
}
99
100
static const BlockJobDriver commit_job_driver = {
101
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver commit_job_driver = {
102
.free = block_job_free,
103
.user_resume = block_job_user_resume,
104
.drain = block_job_drain,
105
- .start = commit_run,
106
+ .run = commit_run,
107
},
108
};
109
110
diff --git a/block/create.c b/block/create.c
111
index XXXXXXX..XXXXXXX 100644
112
--- a/block/create.c
113
+++ b/block/create.c
114
@@ -XXX,XX +XXX,XX @@ static void blockdev_create_complete(Job *job, void *opaque)
115
job_completed(job, s->ret, s->err);
116
}
117
118
-static void coroutine_fn blockdev_create_run(void *opaque)
119
+static int coroutine_fn blockdev_create_run(Job *job, Error **errp)
120
{
121
- BlockdevCreateJob *s = opaque;
122
+ BlockdevCreateJob *s = container_of(job, BlockdevCreateJob, common);
123
124
job_progress_set_remaining(&s->common, 1);
125
s->ret = s->drv->bdrv_co_create(s->opts, &s->err);
126
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn blockdev_create_run(void *opaque)
127
128
qapi_free_BlockdevCreateOptions(s->opts);
129
job_defer_to_main_loop(&s->common, blockdev_create_complete, NULL);
130
+
131
+ return s->ret;
132
}
133
134
static const JobDriver blockdev_create_job_driver = {
135
.instance_size = sizeof(BlockdevCreateJob),
136
.job_type = JOB_TYPE_CREATE,
137
- .start = blockdev_create_run,
138
+ .run = blockdev_create_run,
139
};
140
141
void qmp_blockdev_create(const char *job_id, BlockdevCreateOptions *options,
142
diff --git a/block/mirror.c b/block/mirror.c
143
index XXXXXXX..XXXXXXX 100644
144
--- a/block/mirror.c
145
+++ b/block/mirror.c
146
@@ -XXX,XX +XXX,XX @@ static int mirror_flush(MirrorBlockJob *s)
147
return ret;
148
}
149
150
-static void coroutine_fn mirror_run(void *opaque)
151
+static int coroutine_fn mirror_run(Job *job, Error **errp)
152
{
153
- MirrorBlockJob *s = opaque;
154
+ MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
155
MirrorExitData *data;
156
BlockDriverState *bs = s->mirror_top_bs->backing->bs;
157
BlockDriverState *target_bs = blk_bs(s->target);
158
@@ -XXX,XX +XXX,XX @@ immediate_exit:
159
if (need_drain) {
160
bdrv_drained_begin(bs);
161
}
162
+
163
job_defer_to_main_loop(&s->common.job, mirror_exit, data);
164
+ return ret;
165
}
166
167
static void mirror_complete(Job *job, Error **errp)
168
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver mirror_job_driver = {
169
.free = block_job_free,
170
.user_resume = block_job_user_resume,
171
.drain = block_job_drain,
172
- .start = mirror_run,
173
+ .run = mirror_run,
174
.pause = mirror_pause,
175
.complete = mirror_complete,
176
},
177
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver commit_active_job_driver = {
178
.free = block_job_free,
179
.user_resume = block_job_user_resume,
180
.drain = block_job_drain,
181
- .start = mirror_run,
182
+ .run = mirror_run,
183
.pause = mirror_pause,
184
.complete = mirror_complete,
185
},
186
diff --git a/block/stream.c b/block/stream.c
16
diff --git a/block/stream.c b/block/stream.c
187
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
188
--- a/block/stream.c
18
--- a/block/stream.c
189
+++ b/block/stream.c
19
+++ b/block/stream.c
190
@@ -XXX,XX +XXX,XX @@ out:
20
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn stream_populate(BlockBackend *blk,
191
g_free(data);
21
int64_t offset, uint64_t bytes,
192
}
22
void *buf)
193
194
-static void coroutine_fn stream_run(void *opaque)
195
+static int coroutine_fn stream_run(Job *job, Error **errp)
196
{
23
{
197
- StreamBlockJob *s = opaque;
24
- struct iovec iov = {
198
+ StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
25
- .iov_base = buf,
199
StreamCompleteData *data;
26
- .iov_len = bytes,
200
BlockBackend *blk = s->common.blk;
27
- };
201
BlockDriverState *bs = blk_bs(blk);
28
- QEMUIOVector qiov;
202
@@ -XXX,XX +XXX,XX @@ out:
29
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
203
data = g_malloc(sizeof(*data));
30
204
data->ret = ret;
31
assert(bytes < SIZE_MAX);
205
job_defer_to_main_loop(&s->common.job, stream_complete, data);
32
- qemu_iovec_init_external(&qiov, &iov, 1);
206
+ return ret;
33
207
}
34
/* Copy-on-read the unallocated clusters */
208
35
return blk_co_preadv(blk, offset, qiov.size, &qiov, BDRV_REQ_COPY_ON_READ);
209
static const BlockJobDriver stream_job_driver = {
210
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver stream_job_driver = {
211
.instance_size = sizeof(StreamBlockJob),
212
.job_type = JOB_TYPE_STREAM,
213
.free = block_job_free,
214
- .start = stream_run,
215
+ .run = stream_run,
216
.user_resume = block_job_user_resume,
217
.drain = block_job_drain,
218
},
219
diff --git a/job.c b/job.c
220
index XXXXXXX..XXXXXXX 100644
221
--- a/job.c
222
+++ b/job.c
223
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn job_co_entry(void *opaque)
224
{
225
Job *job = opaque;
226
227
- assert(job && job->driver && job->driver->start);
228
+ assert(job && job->driver && job->driver->run);
229
job_pause_point(job);
230
- job->driver->start(job);
231
+ job->ret = job->driver->run(job, NULL);
232
}
233
234
235
void job_start(Job *job)
236
{
237
assert(job && !job_started(job) && job->paused &&
238
- job->driver && job->driver->start);
239
+ job->driver && job->driver->run);
240
job->co = qemu_coroutine_create(job_co_entry, job);
241
job->pause_count--;
242
job->busy = true;
243
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
244
index XXXXXXX..XXXXXXX 100644
245
--- a/tests/test-bdrv-drain.c
246
+++ b/tests/test-bdrv-drain.c
247
@@ -XXX,XX +XXX,XX @@ static void test_job_completed(Job *job, void *opaque)
248
job_completed(job, 0, NULL);
249
}
250
251
-static void coroutine_fn test_job_start(void *opaque)
252
+static int coroutine_fn test_job_run(Job *job, Error **errp)
253
{
254
- TestBlockJob *s = opaque;
255
+ TestBlockJob *s = container_of(job, TestBlockJob, common.job);
256
257
job_transition_to_ready(&s->common.job);
258
while (!s->should_complete) {
259
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn test_job_start(void *opaque)
260
}
261
262
job_defer_to_main_loop(&s->common.job, test_job_completed, NULL);
263
+ return 0;
264
}
265
266
static void test_job_complete(Job *job, Error **errp)
267
@@ -XXX,XX +XXX,XX @@ BlockJobDriver test_job_driver = {
268
.free = block_job_free,
269
.user_resume = block_job_user_resume,
270
.drain = block_job_drain,
271
- .start = test_job_start,
272
+ .run = test_job_run,
273
.complete = test_job_complete,
274
},
275
};
276
diff --git a/tests/test-blockjob-txn.c b/tests/test-blockjob-txn.c
277
index XXXXXXX..XXXXXXX 100644
278
--- a/tests/test-blockjob-txn.c
279
+++ b/tests/test-blockjob-txn.c
280
@@ -XXX,XX +XXX,XX @@ static void test_block_job_complete(Job *job, void *opaque)
281
bdrv_unref(bs);
282
}
283
284
-static void coroutine_fn test_block_job_run(void *opaque)
285
+static int coroutine_fn test_block_job_run(Job *job, Error **errp)
286
{
287
- TestBlockJob *s = opaque;
288
- BlockJob *job = &s->common;
289
+ TestBlockJob *s = container_of(job, TestBlockJob, common.job);
290
291
while (s->iterations--) {
292
if (s->use_timer) {
293
- job_sleep_ns(&job->job, 0);
294
+ job_sleep_ns(job, 0);
295
} else {
296
- job_yield(&job->job);
297
+ job_yield(job);
298
}
299
300
- if (job_is_cancelled(&job->job)) {
301
+ if (job_is_cancelled(job)) {
302
break;
303
}
304
}
305
306
- job_defer_to_main_loop(&job->job, test_block_job_complete,
307
+ job_defer_to_main_loop(job, test_block_job_complete,
308
(void *)(intptr_t)s->rc);
309
+ return s->rc;
310
}
311
312
typedef struct {
313
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver test_block_job_driver = {
314
.free = block_job_free,
315
.user_resume = block_job_user_resume,
316
.drain = block_job_drain,
317
- .start = test_block_job_run,
318
+ .run = test_block_job_run,
319
},
320
};
321
322
diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c
323
index XXXXXXX..XXXXXXX 100644
324
--- a/tests/test-blockjob.c
325
+++ b/tests/test-blockjob.c
326
@@ -XXX,XX +XXX,XX @@ static void cancel_job_complete(Job *job, Error **errp)
327
s->should_complete = true;
328
}
329
330
-static void coroutine_fn cancel_job_start(void *opaque)
331
+static int coroutine_fn cancel_job_run(Job *job, Error **errp)
332
{
333
- CancelJob *s = opaque;
334
+ CancelJob *s = container_of(job, CancelJob, common.job);
335
336
while (!s->should_complete) {
337
if (job_is_cancelled(&s->common.job)) {
338
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn cancel_job_start(void *opaque)
339
340
defer:
341
job_defer_to_main_loop(&s->common.job, cancel_job_completed, s);
342
+ return 0;
343
}
344
345
static const BlockJobDriver test_cancel_driver = {
346
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver test_cancel_driver = {
347
.free = block_job_free,
348
.user_resume = block_job_user_resume,
349
.drain = block_job_drain,
350
- .start = cancel_job_start,
351
+ .run = cancel_job_run,
352
.complete = cancel_job_complete,
353
},
354
};
355
--
36
--
356
2.17.1
37
2.20.1
357
38
358
39
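One detail worth calling out in the start-to-run conversion: .run receives the
generic Job pointer instead of the driver-specific opaque pointer, so each
driver now recovers its own state with container_of(). A hypothetical
converted callback, for illustration only:

    typedef struct ExampleJob {
        BlockJob common;
        bool should_complete;    /* hypothetical per-job state */
    } ExampleJob;

    static int coroutine_fn example_run(Job *job, Error **errp)
    {
        ExampleJob *s = container_of(job, ExampleJob, common.job);

        while (!s->should_complete) {
            if (job_is_cancelled(job)) {
                return 0;
            }
            job_sleep_ns(job, 100000);
        }
        return 0;
    }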
New patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
2
3
Use new QEMU_IOVEC_INIT_BUF() instead of
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
9
Message-id: 20190218140926.333779-8-vsementsov@virtuozzo.com
10
Message-Id: <20190218140926.333779-8-vsementsov@virtuozzo.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
13
block/parallels.c | 13 +++++--------
14
1 file changed, 5 insertions(+), 8 deletions(-)
15
16
diff --git a/block/parallels.c b/block/parallels.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/block/parallels.c
19
+++ b/block/parallels.c
20
@@ -XXX,XX +XXX,XX @@ static int64_t allocate_clusters(BlockDriverState *bs, int64_t sector_num,
21
if (bs->backing) {
22
int64_t nb_cow_sectors = to_allocate * s->tracks;
23
int64_t nb_cow_bytes = nb_cow_sectors << BDRV_SECTOR_BITS;
24
- QEMUIOVector qiov;
25
- struct iovec iov = {
26
- .iov_len = nb_cow_bytes,
27
- .iov_base = qemu_blockalign(bs, nb_cow_bytes)
28
- };
29
- qemu_iovec_init_external(&qiov, &iov, 1);
30
+ QEMUIOVector qiov =
31
+ QEMU_IOVEC_INIT_BUF(qiov, qemu_blockalign(bs, nb_cow_bytes),
32
+ nb_cow_bytes);
33
34
ret = bdrv_co_preadv(bs->backing, idx * s->tracks * BDRV_SECTOR_SIZE,
35
nb_cow_bytes, &qiov, 0);
36
if (ret < 0) {
37
- qemu_vfree(iov.iov_base);
38
+ qemu_vfree(qemu_iovec_buf(&qiov));
39
return ret;
40
}
41
42
ret = bdrv_co_pwritev(bs->file, s->data_end * BDRV_SECTOR_SIZE,
43
nb_cow_bytes, &qiov, 0);
44
- qemu_vfree(iov.iov_base);
45
+ qemu_vfree(qemu_iovec_buf(&qiov));
46
if (ret < 0) {
47
return ret;
48
}
49
--
50
2.20.1
51
52
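The parallels hunk also shows the companion accessor: since there is no longer
a local struct iovec whose iov_base could be freed, the buffer is fetched back
out of the qiov with qemu_iovec_buf(). A self-contained sketch of the idiom
(the helper and its offsets are invented):

    static int coroutine_fn copy_through_bounce(BlockDriverState *bs,
                                                int64_t offset, int bytes)
    {
        QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov,
                                                qemu_blockalign(bs, bytes),
                                                bytes);
        int ret;

        ret = bdrv_co_preadv(bs->backing, offset, bytes, &qiov, 0);
        if (ret >= 0) {
            ret = bdrv_co_pwritev(bs->file, offset, bytes, &qiov, 0);
        }
        /* qemu_iovec_buf() is only valid for embedded qiovs; it returns the
         * pointer stored by QEMU_IOVEC_INIT_BUF() above. */
        qemu_vfree(qemu_iovec_buf(&qiov));
        return ret;
    }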
New patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
2
3
Use new qemu_iovec_init_buf() instead of
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
9
Message-id: 20190218140926.333779-9-vsementsov@virtuozzo.com
10
Message-Id: <20190218140926.333779-9-vsementsov@virtuozzo.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
13
block/qcow.c | 21 ++++-----------------
14
1 file changed, 4 insertions(+), 17 deletions(-)
15
16
diff --git a/block/qcow.c b/block/qcow.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/block/qcow.c
19
+++ b/block/qcow.c
20
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow_co_preadv(BlockDriverState *bs, uint64_t offset,
21
int offset_in_cluster;
22
int ret = 0, n;
23
uint64_t cluster_offset;
24
- struct iovec hd_iov;
25
QEMUIOVector hd_qiov;
26
uint8_t *buf;
27
void *orig_buf;
28
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow_co_preadv(BlockDriverState *bs, uint64_t offset,
29
if (!cluster_offset) {
30
if (bs->backing) {
31
/* read from the base image */
32
- hd_iov.iov_base = (void *)buf;
33
- hd_iov.iov_len = n;
34
- qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
35
+ qemu_iovec_init_buf(&hd_qiov, buf, n);
36
qemu_co_mutex_unlock(&s->lock);
37
/* qcow2 emits this on bs->file instead of bs->backing */
38
BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
39
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow_co_preadv(BlockDriverState *bs, uint64_t offset,
40
ret = -EIO;
41
break;
42
}
43
- hd_iov.iov_base = (void *)buf;
44
- hd_iov.iov_len = n;
45
- qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
46
+ qemu_iovec_init_buf(&hd_qiov, buf, n);
47
qemu_co_mutex_unlock(&s->lock);
48
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
49
ret = bdrv_co_preadv(bs->file, cluster_offset + offset_in_cluster,
50
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow_co_pwritev(BlockDriverState *bs, uint64_t offset,
51
int offset_in_cluster;
52
uint64_t cluster_offset;
53
int ret = 0, n;
54
- struct iovec hd_iov;
55
QEMUIOVector hd_qiov;
56
uint8_t *buf;
57
void *orig_buf;
58
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow_co_pwritev(BlockDriverState *bs, uint64_t offset,
59
}
60
}
61
62
- hd_iov.iov_base = (void *)buf;
63
- hd_iov.iov_len = n;
64
- qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
65
+ qemu_iovec_init_buf(&hd_qiov, buf, n);
66
qemu_co_mutex_unlock(&s->lock);
67
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
68
ret = bdrv_co_pwritev(bs->file, cluster_offset + offset_in_cluster,
69
@@ -XXX,XX +XXX,XX @@ qcow_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
70
{
71
BDRVQcowState *s = bs->opaque;
72
QEMUIOVector hd_qiov;
73
- struct iovec iov;
74
z_stream strm;
75
int ret, out_len;
76
uint8_t *buf, *out_buf;
77
@@ -XXX,XX +XXX,XX @@ qcow_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
78
}
79
cluster_offset &= s->cluster_offset_mask;
80
81
- iov = (struct iovec) {
82
- .iov_base = out_buf,
83
- .iov_len = out_len,
84
- };
85
- qemu_iovec_init_external(&hd_qiov, &iov, 1);
86
+ qemu_iovec_init_buf(&hd_qiov, out_buf, out_len);
87
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED);
88
ret = bdrv_co_pwritev(bs->file, cluster_offset, out_len, &hd_qiov, 0);
89
if (ret < 0) {
90
--
91
2.20.1
92
93
New patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
2
3
Use new qemu_iovec_init_buf() instead of
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
9
Message-id: 20190218140926.333779-10-vsementsov@virtuozzo.com
10
Message-Id: <20190218140926.333779-10-vsementsov@virtuozzo.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
13
block/qcow2.c | 12 ++----------
14
1 file changed, 2 insertions(+), 10 deletions(-)
15
16
diff --git a/block/qcow2.c b/block/qcow2.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/block/qcow2.c
19
+++ b/block/qcow2.c
20
@@ -XXX,XX +XXX,XX @@ qcow2_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
21
{
22
BDRVQcow2State *s = bs->opaque;
23
QEMUIOVector hd_qiov;
24
- struct iovec iov;
25
int ret;
26
size_t out_len;
27
uint8_t *buf, *out_buf;
28
@@ -XXX,XX +XXX,XX @@ qcow2_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
29
goto fail;
30
}
31
32
- iov = (struct iovec) {
33
- .iov_base = out_buf,
34
- .iov_len = out_len,
35
- };
36
- qemu_iovec_init_external(&hd_qiov, &iov, 1);
37
+ qemu_iovec_init_buf(&hd_qiov, out_buf, out_len);
38
39
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED);
40
ret = bdrv_co_pwritev(bs->file, cluster_offset, out_len, &hd_qiov, 0);
41
@@ -XXX,XX +XXX,XX @@ qcow2_co_preadv_compressed(BlockDriverState *bs,
42
int ret = 0, csize, nb_csectors;
43
uint64_t coffset;
44
uint8_t *buf, *out_buf;
45
- struct iovec iov;
46
QEMUIOVector local_qiov;
47
int offset_in_cluster = offset_into_cluster(s, offset);
48
49
@@ -XXX,XX +XXX,XX @@ qcow2_co_preadv_compressed(BlockDriverState *bs,
50
if (!buf) {
51
return -ENOMEM;
52
}
53
- iov.iov_base = buf;
54
- iov.iov_len = csize;
55
- qemu_iovec_init_external(&local_qiov, &iov, 1);
56
+ qemu_iovec_init_buf(&local_qiov, buf, csize);
57
58
out_buf = qemu_blockalign(bs, s->cluster_size);
59
60
--
61
2.20.1
62
63
New patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
2
3
Use new qemu_iovec_init_buf() instead of
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
9
Message-id: 20190218140926.333779-11-vsementsov@virtuozzo.com
10
Message-Id: <20190218140926.333779-11-vsementsov@virtuozzo.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
13
block/qed-table.c | 16 +++-------------
14
block/qed.c | 31 +++++++++----------------------
15
2 files changed, 12 insertions(+), 35 deletions(-)
16
17
diff --git a/block/qed-table.c b/block/qed-table.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/block/qed-table.c
20
+++ b/block/qed-table.c
21
@@ -XXX,XX +XXX,XX @@
22
/* Called with table_lock held. */
23
static int qed_read_table(BDRVQEDState *s, uint64_t offset, QEDTable *table)
24
{
25
- QEMUIOVector qiov;
26
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(
27
+ qiov, table->offsets, s->header.cluster_size * s->header.table_size);
28
int noffsets;
29
int i, ret;
30
31
- struct iovec iov = {
32
- .iov_base = table->offsets,
33
- .iov_len = s->header.cluster_size * s->header.table_size,
34
- };
35
- qemu_iovec_init_external(&qiov, &iov, 1);
36
-
37
trace_qed_read_table(s, offset, table);
38
39
qemu_co_mutex_unlock(&s->table_lock);
40
@@ -XXX,XX +XXX,XX @@ static int qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
41
unsigned int sector_mask = BDRV_SECTOR_SIZE / sizeof(uint64_t) - 1;
42
unsigned int start, end, i;
43
QEDTable *new_table;
44
- struct iovec iov;
45
QEMUIOVector qiov;
46
size_t len_bytes;
47
int ret;
48
@@ -XXX,XX +XXX,XX @@ static int qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
49
len_bytes = (end - start) * sizeof(uint64_t);
50
51
new_table = qemu_blockalign(s->bs, len_bytes);
52
- iov = (struct iovec) {
53
- .iov_base = new_table->offsets,
54
- .iov_len = len_bytes,
55
- };
56
- qemu_iovec_init_external(&qiov, &iov, 1);
57
+ qemu_iovec_init_buf(&qiov, new_table->offsets, len_bytes);
58
59
/* Byteswap table */
60
for (i = start; i < end; i++) {
61
diff --git a/block/qed.c b/block/qed.c
62
index XXXXXXX..XXXXXXX 100644
63
--- a/block/qed.c
64
+++ b/block/qed.c
65
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn qed_write_header(BDRVQEDState *s)
66
int nsectors = DIV_ROUND_UP(sizeof(QEDHeader), BDRV_SECTOR_SIZE);
67
size_t len = nsectors * BDRV_SECTOR_SIZE;
68
uint8_t *buf;
69
- struct iovec iov;
70
QEMUIOVector qiov;
71
int ret;
72
73
assert(s->allocating_acb || s->allocating_write_reqs_plugged);
74
75
buf = qemu_blockalign(s->bs, len);
76
- iov = (struct iovec) {
77
- .iov_base = buf,
78
- .iov_len = len,
79
- };
80
- qemu_iovec_init_external(&qiov, &iov, 1);
81
+ qemu_iovec_init_buf(&qiov, buf, len);
82
83
ret = bdrv_co_preadv(s->bs->file, 0, qiov.size, &qiov, 0);
84
if (ret < 0) {
85
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn qed_copy_from_backing_file(BDRVQEDState *s,
86
{
87
QEMUIOVector qiov;
88
QEMUIOVector *backing_qiov = NULL;
89
- struct iovec iov;
90
int ret;
91
92
/* Skip copy entirely if there is no work to do */
93
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn qed_copy_from_backing_file(BDRVQEDState *s,
94
return 0;
95
}
96
97
- iov = (struct iovec) {
98
- .iov_base = qemu_blockalign(s->bs, len),
99
- .iov_len = len,
100
- };
101
- qemu_iovec_init_external(&qiov, &iov, 1);
102
+ qemu_iovec_init_buf(&qiov, qemu_blockalign(s->bs, len), len);
103
104
ret = qed_read_backing_file(s, pos, &qiov, &backing_qiov);
105
106
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn qed_copy_from_backing_file(BDRVQEDState *s,
107
}
108
ret = 0;
109
out:
110
- qemu_vfree(iov.iov_base);
111
+ qemu_vfree(qemu_iovec_buf(&qiov));
112
return ret;
113
}
114
115
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
116
BdrvRequestFlags flags)
117
{
118
BDRVQEDState *s = bs->opaque;
119
- QEMUIOVector qiov;
120
- struct iovec iov;
121
+
122
+ /*
123
+ * Zero writes start without an I/O buffer. If a buffer becomes necessary
124
+ * then it will be allocated during request processing.
125
+ */
126
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes);
127
128
/* Fall back if the request is not aligned */
129
if (qed_offset_into_cluster(s, offset) ||
130
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
131
return -ENOTSUP;
132
}
133
134
- /* Zero writes start without an I/O buffer. If a buffer becomes necessary
135
- * then it will be allocated during request processing.
136
- */
137
- iov.iov_base = NULL;
138
- iov.iov_len = bytes;
139
-
140
- qemu_iovec_init_external(&qiov, &iov, 1);
141
return qed_co_request(bs, offset >> BDRV_SECTOR_BITS, &qiov,
142
bytes >> BDRV_SECTOR_BITS,
143
QED_AIOCB_WRITE | QED_AIOCB_ZERO);
144
--
145
2.20.1
146
147
New patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
2
3
Use new qemu_iovec_init_buf() instead of
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
9
Message-id: 20190218140926.333779-12-vsementsov@virtuozzo.com
10
Message-Id: <20190218140926.333779-12-vsementsov@virtuozzo.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
13
block/vmdk.c | 7 +------
14
1 file changed, 1 insertion(+), 6 deletions(-)
15
16
diff --git a/block/vmdk.c b/block/vmdk.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/block/vmdk.c
19
+++ b/block/vmdk.c
20
@@ -XXX,XX +XXX,XX @@ static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset,
21
VmdkGrainMarker *data = NULL;
22
uLongf buf_len;
23
QEMUIOVector local_qiov;
24
- struct iovec iov;
25
int64_t write_offset;
26
int64_t write_end_sector;
27
28
@@ -XXX,XX +XXX,XX @@ static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset,
29
data->size = cpu_to_le32(buf_len);
30
31
n_bytes = buf_len + sizeof(VmdkGrainMarker);
32
- iov = (struct iovec) {
33
- .iov_base = data,
34
- .iov_len = n_bytes,
35
- };
36
- qemu_iovec_init_external(&local_qiov, &iov, 1);
37
+ qemu_iovec_init_buf(&local_qiov, data, n_bytes);
38
39
BLKDBG_EVENT(extent->file, BLKDBG_WRITE_COMPRESSED);
40
} else {
41
--
42
2.20.1
43
44
New patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
2
3
Use new qemu_iovec_init_buf() instead of
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
9
Message-id: 20190218140926.333779-13-vsementsov@virtuozzo.com
10
Message-Id: <20190218140926.333779-13-vsementsov@virtuozzo.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
13
qemu-img.c | 10 ++--------
14
1 file changed, 2 insertions(+), 8 deletions(-)
15
16
diff --git a/qemu-img.c b/qemu-img.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/qemu-img.c
19
+++ b/qemu-img.c
20
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn convert_co_read(ImgConvertState *s, int64_t sector_num,
21
{
22
int n, ret;
23
QEMUIOVector qiov;
24
- struct iovec iov;
25
26
assert(nb_sectors <= s->buf_sectors);
27
while (nb_sectors > 0) {
28
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn convert_co_read(ImgConvertState *s, int64_t sector_num,
29
bs_sectors = s->src_sectors[src_cur];
30
31
n = MIN(nb_sectors, bs_sectors - (sector_num - src_cur_offset));
32
- iov.iov_base = buf;
33
- iov.iov_len = n << BDRV_SECTOR_BITS;
34
- qemu_iovec_init_external(&qiov, &iov, 1);
35
+ qemu_iovec_init_buf(&qiov, buf, n << BDRV_SECTOR_BITS);
36
37
ret = blk_co_preadv(
38
blk, (sector_num - src_cur_offset) << BDRV_SECTOR_BITS,
39
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn convert_co_write(ImgConvertState *s, int64_t sector_num,
40
{
41
int ret;
42
QEMUIOVector qiov;
43
- struct iovec iov;
44
45
while (nb_sectors > 0) {
46
int n = nb_sectors;
47
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn convert_co_write(ImgConvertState *s, int64_t sector_num,
48
(s->compressed &&
49
!buffer_is_zero(buf, n * BDRV_SECTOR_SIZE)))
50
{
51
- iov.iov_base = buf;
52
- iov.iov_len = n << BDRV_SECTOR_BITS;
53
- qemu_iovec_init_external(&qiov, &iov, 1);
54
+ qemu_iovec_init_buf(&qiov, buf, n << BDRV_SECTOR_BITS);
55
56
ret = blk_co_pwritev(s->target, sector_num << BDRV_SECTOR_BITS,
57
n << BDRV_SECTOR_BITS, &qiov, flags);
58
--
59
2.20.1
60
61
New patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
2
3
Use new qemu_iovec_init_buf() instead of
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
9
Message-id: 20190218140926.333779-14-vsementsov@virtuozzo.com
10
Message-Id: <20190218140926.333779-14-vsementsov@virtuozzo.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
13
migration/block.c | 10 +++-------
14
1 file changed, 3 insertions(+), 7 deletions(-)
15
16
diff --git a/migration/block.c b/migration/block.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/migration/block.c
19
+++ b/migration/block.c
20
@@ -XXX,XX +XXX,XX @@ typedef struct BlkMigBlock {
21
BlkMigDevState *bmds;
22
int64_t sector;
23
int nr_sectors;
24
- struct iovec iov;
25
QEMUIOVector qiov;
26
BlockAIOCB *aiocb;
27
28
@@ -XXX,XX +XXX,XX @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
29
blk->sector = cur_sector;
30
blk->nr_sectors = nr_sectors;
31
32
- blk->iov.iov_base = blk->buf;
33
- blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
34
- qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);
35
+ qemu_iovec_init_buf(&blk->qiov, blk->buf, nr_sectors * BDRV_SECTOR_SIZE);
36
37
blk_mig_lock();
38
block_mig_state.submitted++;
39
@@ -XXX,XX +XXX,XX @@ static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
40
blk->nr_sectors = nr_sectors;
41
42
if (is_async) {
43
- blk->iov.iov_base = blk->buf;
44
- blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
45
- qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);
46
+ qemu_iovec_init_buf(&blk->qiov, blk->buf,
47
+ nr_sectors * BDRV_SECTOR_SIZE);
48
49
blk->aiocb = blk_aio_preadv(bmds->blk,
50
sector * BDRV_SECTOR_SIZE,
51
--
52
2.20.1
53
54
1
From: Marc-André Lureau <marcandre.lureau@redhat.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
Spotted by ASAN:
3
Use new QEMU_IOVEC_INIT_BUF() instead of
4
qemu_iovec_init_external( ... , 1), which simplifies the code.
4
5
5
=================================================================
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
6
==5378==ERROR: LeakSanitizer: detected memory leaks
7
Reviewed-by: Eric Blake <eblake@redhat.com>
7
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
8
Direct leak of 65536 byte(s) in 1 object(s) allocated from:
9
Message-id: 20190218140926.333779-15-vsementsov@virtuozzo.com
9
#0 0x7f788f83bc48 in malloc (/lib64/libasan.so.5+0xeec48)
10
Message-Id: <20190218140926.333779-15-vsementsov@virtuozzo.com>
10
#1 0x7f788c9923c5 in g_malloc (/lib64/libglib-2.0.so.0+0x523c5)
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
11
#2 0x5622a1fe37bc in coroutine_trampoline /home/elmarco/src/qq/util/coroutine-ucontext.c:116
12
#3 0x7f788a15d75f in __correctly_grouped_prefixwc (/lib64/libc.so.6+0x4c75f)
13
14
(Broken in commit 4c8158e359d.)
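For context, a hedged sketch of the pattern the ASAN report points at: the test coroutine allocates a scratch buffer for a pretend parent-to-child request and must release it before returning. The function name and the blk handle are placeholders, not the literal test code:

    static void coroutine_fn leak_sketch(BlockBackend *blk)
    {
        void *buffer = g_malloc(65536);
        QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buffer, 65536);

        /* pretend an internal 64 KiB read from the child node */
        blk_co_preadv(blk, 0, 65536, &qiov, 0);

        g_free(buffer);   /* without this, LeakSanitizer flags 65536 bytes */
    }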
15
16
Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
17
Message-id: 20180809114417.28718-3-marcandre.lureau@redhat.com
18
Signed-off-by: Max Reitz <mreitz@redhat.com>
19
---
12
---
20
tests/test-bdrv-drain.c | 1 +
13
tests/test-bdrv-drain.c | 29 ++++-------------------------
21
1 file changed, 1 insertion(+)
14
1 file changed, 4 insertions(+), 25 deletions(-)
22
15
23
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
16
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
24
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
25
--- a/tests/test-bdrv-drain.c
18
--- a/tests/test-bdrv-drain.c
26
+++ b/tests/test-bdrv-drain.c
19
+++ b/tests/test-bdrv-drain.c
20
@@ -XXX,XX +XXX,XX @@ static void test_drv_cb_common(enum drain_type drain_type, bool recursive)
21
BlockAIOCB *acb;
22
int aio_ret;
23
24
- QEMUIOVector qiov;
25
- struct iovec iov = {
26
- .iov_base = NULL,
27
- .iov_len = 0,
28
- };
29
- qemu_iovec_init_external(&qiov, &iov, 1);
30
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);
31
32
blk = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
33
bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
34
@@ -XXX,XX +XXX,XX @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread)
35
AioContext *ctx_a = iothread_get_aio_context(a);
36
AioContext *ctx_b = iothread_get_aio_context(b);
37
38
- QEMUIOVector qiov;
39
- struct iovec iov = {
40
- .iov_base = NULL,
41
- .iov_len = 0,
42
- };
43
- qemu_iovec_init_external(&qiov, &iov, 1);
44
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);
45
46
/* bdrv_drain_all() may only be called from the main loop thread */
47
if (drain_type == BDRV_DRAIN_ALL && drain_thread != 0) {
27
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn test_co_delete_by_drain(void *opaque)
48
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn test_co_delete_by_drain(void *opaque)
28
}
49
BlockDriverState *bs = blk_bs(blk);
29
50
BDRVTestTopState *tts = bs->opaque;
30
dbdd->done = true;
51
void *buffer = g_malloc(65536);
31
+ g_free(buffer);
52
- QEMUIOVector qiov;
32
}
53
- struct iovec iov = {
33
54
- .iov_base = buffer,
34
/**
55
- .iov_len = 65536,
56
- };
57
-
58
- qemu_iovec_init_external(&qiov, &iov, 1);
59
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buffer, 65536);
60
61
/* Pretend some internal write operation from parent to child.
62
* Important: We have to read from the child, not from the parent!
63
@@ -XXX,XX +XXX,XX @@ static void test_detach_indirect(bool by_parent_cb)
64
BdrvChild *child_a, *child_b;
65
BlockAIOCB *acb;
66
67
- QEMUIOVector qiov;
68
- struct iovec iov = {
69
- .iov_base = NULL,
70
- .iov_len = 0,
71
- };
72
- qemu_iovec_init_external(&qiov, &iov, 1);
73
+ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);
74
75
if (!by_parent_cb) {
76
detach_by_driver_cb_role = child_file;
35
--
77
--
36
2.17.1
78
2.20.1
37
79
38
80
1
From: Fam Zheng <famz@redhat.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
If we know we've already locked the bytes, don't do it again; similarly
3
@iov is used only to initialize @qiov. Let's use new
4
don't unlock a byte if we haven't locked it. This doesn't change the
4
qemu_iovec_init_buf() instead, which simplifies the code.
5
behavior, but fixes a corner case explained below.
6
5
7
Libvirt had an error handling bug where an image could get its (ownership,
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
8
file mode, SELinux) permissions changed (RHBZ 1584982) by mistake behind
7
Reviewed-by: Eric Blake <eblake@redhat.com>
9
QEMU. Specifically, an image in use by a Libvirt VM has:
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
9
Message-id: 20190218140926.333779-16-vsementsov@virtuozzo.com
10
Message-Id: <20190218140926.333779-16-vsementsov@virtuozzo.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
13
include/hw/ide/internal.h | 1 -
14
hw/ide/atapi.c | 9 ++++-----
15
hw/ide/core.c | 8 ++------
16
3 files changed, 6 insertions(+), 12 deletions(-)
10
17
11
$ ls -lhZ b.img
18
diff --git a/include/hw/ide/internal.h b/include/hw/ide/internal.h
12
-rw-r--r--. qemu qemu system_u:object_r:svirt_image_t:s0:c600,c690 b.img
13
14
Trying to attach it a second time won't work because of image locking.
15
And after the error, it becomes:
16
17
$ ls -lhZ b.img
18
-rw-r--r--. root root system_u:object_r:virt_image_t:s0 b.img
19
20
Then, we won't be able to do OFD lock operations with the existing fd.
21
In other words, the code such as in blk_detach_dev:
22
23
blk_set_perm(blk, 0, BLK_PERM_ALL, &error_abort);
24
25
can abort() QEMU because of these environmental changes.
26
27
This patch is an easy fix for this, and the change is reasonable
28
in any case, so do it.
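A hedged sketch of the core idea (names simplified; the actual change lives in raw_apply_lock_bytes() in the diff below): only issue an OFD lock or unlock when the byte's tracked state actually needs to change, so a permission update can never fail on bytes that are already in the right state:

    /* Illustrative only: 'locked_perm' is the state we believe we hold,
     * 'new_perm' is the state we want; both are permission bitmasks. */
    static int sync_one_lock_byte(int fd, int off, uint64_t bit,
                                  uint64_t new_perm, uint64_t locked_perm,
                                  bool unlock)
    {
        if ((new_perm & bit) && !(locked_perm & bit)) {
            return qemu_lock_fd(fd, off, 1, false);   /* newly required */
        }
        if (unlock && (locked_perm & bit) && !(new_perm & bit)) {
            return qemu_unlock_fd(fd, off, 1);        /* no longer required */
        }
        return 0;   /* already in the desired state: skip the fcntl() */
    }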
29
30
Signed-off-by: Fam Zheng <famz@redhat.com>
31
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
32
---
33
block/file-posix.c | 41 +++++++++++++++++++++++++++++++----------
34
1 file changed, 31 insertions(+), 10 deletions(-)
35
36
diff --git a/block/file-posix.c b/block/file-posix.c
37
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
38
--- a/block/file-posix.c
20
--- a/include/hw/ide/internal.h
39
+++ b/block/file-posix.c
21
+++ b/include/hw/ide/internal.h
40
@@ -XXX,XX +XXX,XX @@ typedef enum {
22
@@ -XXX,XX +XXX,XX @@ struct IDEState {
41
* file; if @unlock == true, also unlock the unneeded bytes.
23
int atapi_dma; /* true if dma is requested for the packet cmd */
42
* @shared_perm_lock_bits is the mask of all permissions that are NOT shared.
24
BlockAcctCookie acct;
43
*/
25
BlockAIOCB *pio_aiocb;
44
-static int raw_apply_lock_bytes(int fd,
26
- struct iovec iov;
45
+static int raw_apply_lock_bytes(BDRVRawState *s, int fd,
27
QEMUIOVector qiov;
46
uint64_t perm_lock_bits,
28
QLIST_HEAD(, IDEBufferedRequest) buffered_requests;
47
uint64_t shared_perm_lock_bits,
29
/* ATA DMA state */
48
bool unlock, Error **errp)
30
diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c
31
index XXXXXXX..XXXXXXX 100644
32
--- a/hw/ide/atapi.c
33
+++ b/hw/ide/atapi.c
34
@@ -XXX,XX +XXX,XX @@ static void cd_read_sector_cb(void *opaque, int ret)
35
36
static int cd_read_sector(IDEState *s)
49
{
37
{
50
int ret;
38
+ void *buf;
51
int i;
52
+ uint64_t locked_perm, locked_shared_perm;
53
+
39
+
54
+ if (s) {
40
if (s->cd_sector_size != 2048 && s->cd_sector_size != 2352) {
55
+ locked_perm = s->perm;
41
block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
56
+ locked_shared_perm = ~s->shared_perm & BLK_PERM_ALL;
42
return -EINVAL;
57
+ } else {
58
+ /*
59
+ * We don't have the previous bits, just lock/unlock for each of the
60
+ * requested bits.
61
+ */
62
+ if (unlock) {
63
+ locked_perm = BLK_PERM_ALL;
64
+ locked_shared_perm = BLK_PERM_ALL;
65
+ } else {
66
+ locked_perm = 0;
67
+ locked_shared_perm = 0;
68
+ }
69
+ }
70
71
PERM_FOREACH(i) {
72
int off = RAW_LOCK_PERM_BASE + i;
73
- if (perm_lock_bits & (1ULL << i)) {
74
+ uint64_t bit = (1ULL << i);
75
+ if ((perm_lock_bits & bit) && !(locked_perm & bit)) {
76
ret = qemu_lock_fd(fd, off, 1, false);
77
if (ret) {
78
error_setg(errp, "Failed to lock byte %d", off);
79
return ret;
80
}
81
- } else if (unlock) {
82
+ } else if (unlock && (locked_perm & bit) && !(perm_lock_bits & bit)) {
83
ret = qemu_unlock_fd(fd, off, 1);
84
if (ret) {
85
error_setg(errp, "Failed to unlock byte %d", off);
86
@@ -XXX,XX +XXX,XX @@ static int raw_apply_lock_bytes(int fd,
87
}
43
}
88
PERM_FOREACH(i) {
44
89
int off = RAW_LOCK_SHARED_BASE + i;
45
- s->iov.iov_base = (s->cd_sector_size == 2352) ?
90
- if (shared_perm_lock_bits & (1ULL << i)) {
46
- s->io_buffer + 16 : s->io_buffer;
91
+ uint64_t bit = (1ULL << i);
47
-
92
+ if ((shared_perm_lock_bits & bit) && !(locked_shared_perm & bit)) {
48
- s->iov.iov_len = ATAPI_SECTOR_SIZE;
93
ret = qemu_lock_fd(fd, off, 1, false);
49
- qemu_iovec_init_external(&s->qiov, &s->iov, 1);
94
if (ret) {
50
+ buf = (s->cd_sector_size == 2352) ? s->io_buffer + 16 : s->io_buffer;
95
error_setg(errp, "Failed to lock byte %d", off);
51
+ qemu_iovec_init_buf(&s->qiov, buf, ATAPI_SECTOR_SIZE);
96
return ret;
52
97
}
53
trace_cd_read_sector(s->lba);
98
- } else if (unlock) {
54
99
+ } else if (unlock && (locked_shared_perm & bit) &&
55
diff --git a/hw/ide/core.c b/hw/ide/core.c
100
+ !(shared_perm_lock_bits & bit)) {
56
index XXXXXXX..XXXXXXX 100644
101
ret = qemu_unlock_fd(fd, off, 1);
57
--- a/hw/ide/core.c
102
if (ret) {
58
+++ b/hw/ide/core.c
103
error_setg(errp, "Failed to unlock byte %d", off);
59
@@ -XXX,XX +XXX,XX @@ static void ide_sector_read(IDEState *s)
104
@@ -XXX,XX +XXX,XX @@ static int raw_handle_perm_lock(BlockDriverState *bs,
60
return;
105
106
switch (op) {
107
case RAW_PL_PREPARE:
108
- ret = raw_apply_lock_bytes(s->lock_fd, s->perm | new_perm,
109
+ ret = raw_apply_lock_bytes(s, s->lock_fd, s->perm | new_perm,
110
~s->shared_perm | ~new_shared,
111
false, errp);
112
if (!ret) {
113
@@ -XXX,XX +XXX,XX @@ static int raw_handle_perm_lock(BlockDriverState *bs,
114
op = RAW_PL_ABORT;
115
/* fall through to unlock bytes. */
116
case RAW_PL_ABORT:
117
- raw_apply_lock_bytes(s->lock_fd, s->perm, ~s->shared_perm,
118
+ raw_apply_lock_bytes(s, s->lock_fd, s->perm, ~s->shared_perm,
119
true, &local_err);
120
if (local_err) {
121
/* Theoretically the above call only unlocks bytes and it cannot
122
@@ -XXX,XX +XXX,XX @@ static int raw_handle_perm_lock(BlockDriverState *bs,
123
}
124
break;
125
case RAW_PL_COMMIT:
126
- raw_apply_lock_bytes(s->lock_fd, new_perm, ~new_shared,
127
+ raw_apply_lock_bytes(s, s->lock_fd, new_perm, ~new_shared,
128
true, &local_err);
129
if (local_err) {
130
/* Theoretically the above call only unlocks bytes and it cannot
131
@@ -XXX,XX +XXX,XX @@ raw_co_create(BlockdevCreateOptions *options, Error **errp)
132
shared = BLK_PERM_ALL & ~BLK_PERM_RESIZE;
133
134
/* Step one: Take locks */
135
- result = raw_apply_lock_bytes(fd, perm, ~shared, false, errp);
136
+ result = raw_apply_lock_bytes(NULL, fd, perm, ~shared, false, errp);
137
if (result < 0) {
138
goto out_close;
139
}
61
}
140
@@ -XXX,XX +XXX,XX @@ raw_co_create(BlockdevCreateOptions *options, Error **errp)
62
63
- s->iov.iov_base = s->io_buffer;
64
- s->iov.iov_len = n * BDRV_SECTOR_SIZE;
65
- qemu_iovec_init_external(&s->qiov, &s->iov, 1);
66
+ qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);
67
68
block_acct_start(blk_get_stats(s->blk), &s->acct,
69
n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
70
@@ -XXX,XX +XXX,XX @@ static void ide_sector_write(IDEState *s)
71
return;
141
}
72
}
142
73
143
out_unlock:
74
- s->iov.iov_base = s->io_buffer;
144
- raw_apply_lock_bytes(fd, 0, 0, true, &local_err);
75
- s->iov.iov_len = n * BDRV_SECTOR_SIZE;
145
+ raw_apply_lock_bytes(NULL, fd, 0, 0, true, &local_err);
76
- qemu_iovec_init_external(&s->qiov, &s->iov, 1);
146
if (local_err) {
77
+ qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);
147
/* The above call should not fail, and if it does, that does
78
148
* not mean the whole creation operation has failed. So
79
block_acct_start(blk_get_stats(s->blk), &s->acct,
80
n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
149
--
81
--
150
2.17.1
82
2.20.1
151
83
152
84
1
From: John Snow <jsnow@redhat.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
Now that the job infrastructure is handling the job_completed call for
3
@iov is used only to initialize @qiov. Let's use new
4
all implemented jobs, we can remove the interface that allowed jobs to
4
qemu_iovec_init_buf() instead, which simplifies the code.
5
schedule their own completion.
6
5
7
Signed-off-by: John Snow <jsnow@redhat.com>
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
8
Reviewed-by: Max Reitz <mreitz@redhat.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
9
Message-id: 20180830015734.19765-10-jsnow@redhat.com
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
10
Signed-off-by: Max Reitz <mreitz@redhat.com>
9
Message-id: 20190218140926.333779-17-vsementsov@virtuozzo.com
10
Message-Id: <20190218140926.333779-17-vsementsov@virtuozzo.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
11
---
12
---
12
include/qemu/job.h | 17 -----------------
13
include/hw/ide/internal.h | 1 -
13
job.c | 40 ++--------------------------------------
14
hw/ide/core.c | 11 ++++++-----
14
2 files changed, 2 insertions(+), 55 deletions(-)
15
2 files changed, 6 insertions(+), 6 deletions(-)
15
16
16
diff --git a/include/qemu/job.h b/include/qemu/job.h
17
diff --git a/include/hw/ide/internal.h b/include/hw/ide/internal.h
17
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
18
--- a/include/qemu/job.h
19
--- a/include/hw/ide/internal.h
19
+++ b/include/qemu/job.h
20
+++ b/include/hw/ide/internal.h
20
@@ -XXX,XX +XXX,XX @@ void job_finalize(Job *job, Error **errp);
21
@@ -XXX,XX +XXX,XX @@ extern const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT];
21
*/
22
22
void job_dismiss(Job **job, Error **errp);
23
typedef struct IDEBufferedRequest {
23
24
QLIST_ENTRY(IDEBufferedRequest) list;
24
-typedef void JobDeferToMainLoopFn(Job *job, void *opaque);
25
- struct iovec iov;
25
-
26
QEMUIOVector qiov;
26
-/**
27
QEMUIOVector *original_qiov;
27
- * @job: The job
28
BlockCompletionFunc *original_cb;
28
- * @fn: The function to run in the main loop
29
diff --git a/hw/ide/core.c b/hw/ide/core.c
29
- * @opaque: The opaque value that is passed to @fn
30
- *
31
- * This function must be called by the main job coroutine just before it
32
- * returns. @fn is executed in the main loop with the job AioContext acquired.
33
- *
34
- * Block jobs must call bdrv_unref(), bdrv_close(), and anything that uses
35
- * bdrv_drain_all() in the main loop.
36
- *
37
- * The @job AioContext is held while @fn executes.
38
- */
39
-void job_defer_to_main_loop(Job *job, JobDeferToMainLoopFn *fn, void *opaque);
40
-
41
/**
42
* Synchronously finishes the given @job. If @finish is given, it is called to
43
* trigger completion or cancellation of the job.
44
diff --git a/job.c b/job.c
45
index XXXXXXX..XXXXXXX 100644
30
index XXXXXXX..XXXXXXX 100644
46
--- a/job.c
31
--- a/hw/ide/core.c
47
+++ b/job.c
32
+++ b/hw/ide/core.c
48
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn job_co_entry(void *opaque)
33
@@ -XXX,XX +XXX,XX @@ static void ide_buffered_readv_cb(void *opaque, int ret)
49
assert(job && job->driver && job->driver->run);
34
IDEBufferedRequest *req = opaque;
50
job_pause_point(job);
35
if (!req->orphaned) {
51
job->ret = job->driver->run(job, &job->err);
36
if (!ret) {
52
- if (!job->deferred_to_main_loop) {
37
- qemu_iovec_from_buf(req->original_qiov, 0, req->iov.iov_base,
53
- job->deferred_to_main_loop = true;
38
+ assert(req->qiov.size == req->original_qiov->size);
54
- aio_bh_schedule_oneshot(qemu_get_aio_context(),
39
+ qemu_iovec_from_buf(req->original_qiov, 0,
55
- job_exit,
40
+ req->qiov.local_iov.iov_base,
56
- job);
41
req->original_qiov->size);
57
- }
42
}
58
+ job->deferred_to_main_loop = true;
43
req->original_cb(req->original_opaque, ret);
59
+ aio_bh_schedule_oneshot(qemu_get_aio_context(), job_exit, job);
44
}
45
QLIST_REMOVE(req, list);
46
- qemu_vfree(req->iov.iov_base);
47
+ qemu_vfree(qemu_iovec_buf(&req->qiov));
48
g_free(req);
60
}
49
}
61
50
62
51
@@ -XXX,XX +XXX,XX @@ BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
63
@@ -XXX,XX +XXX,XX @@ void job_complete(Job *job, Error **errp)
52
req->original_qiov = iov;
64
job->driver->complete(job, errp);
53
req->original_cb = cb;
65
}
54
req->original_opaque = opaque;
66
55
- req->iov.iov_base = qemu_blockalign(blk_bs(s->blk), iov->size);
67
-
56
- req->iov.iov_len = iov->size;
68
-typedef struct {
57
- qemu_iovec_init_external(&req->qiov, &req->iov, 1);
69
- Job *job;
58
+ qemu_iovec_init_buf(&req->qiov, blk_blockalign(s->blk, iov->size),
70
- JobDeferToMainLoopFn *fn;
59
+ iov->size);
71
- void *opaque;
60
72
-} JobDeferToMainLoopData;
61
aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
73
-
62
&req->qiov, 0, ide_buffered_readv_cb, req);
74
-static void job_defer_to_main_loop_bh(void *opaque)
75
-{
76
- JobDeferToMainLoopData *data = opaque;
77
- Job *job = data->job;
78
- AioContext *aio_context = job->aio_context;
79
-
80
- aio_context_acquire(aio_context);
81
- data->fn(data->job, data->opaque);
82
- aio_context_release(aio_context);
83
-
84
- g_free(data);
85
-}
86
-
87
-void job_defer_to_main_loop(Job *job, JobDeferToMainLoopFn *fn, void *opaque)
88
-{
89
- JobDeferToMainLoopData *data = g_malloc(sizeof(*data));
90
- data->job = job;
91
- data->fn = fn;
92
- data->opaque = opaque;
93
- job->deferred_to_main_loop = true;
94
-
95
- aio_bh_schedule_oneshot(qemu_get_aio_context(),
96
- job_defer_to_main_loop_bh, data);
97
-}
98
-
99
int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
100
{
101
Error *local_err = NULL;
102
--
63
--
103
2.17.1
64
2.20.1
104
65
105
66
New patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
2
3
@iov is used only to initialize @qiov. Let's use new
4
qemu_iovec_init_buf() instead, which simplifies the code.
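A hedged sketch of why the separate field can go away: with qemu_iovec_init_buf() the buffer pointer is recoverable from the QEMUIOVector itself via qemu_iovec_buf(), so nothing else needs to remember it. The bs and len parameters are placeholders:

    static void embedded_buf_sketch(BlockDriverState *bs, size_t len)
    {
        QEMUIOVector qiov;

        qemu_iovec_init_buf(&qiov, qemu_blockalign(bs, len), len);

        /* ... submit the request using &qiov ... */

        /* The same pointer that was allocated above comes back here. */
        qemu_vfree(qemu_iovec_buf(&qiov));
    }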
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Eric Blake <eblake@redhat.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
9
Message-id: 20190218140926.333779-18-vsementsov@virtuozzo.com
10
Message-Id: <20190218140926.333779-18-vsementsov@virtuozzo.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
13
include/hw/ide/internal.h | 1 -
14
hw/ide/atapi.c | 5 ++---
15
2 files changed, 2 insertions(+), 4 deletions(-)
16
17
diff --git a/include/hw/ide/internal.h b/include/hw/ide/internal.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/include/hw/ide/internal.h
20
+++ b/include/hw/ide/internal.h
21
@@ -XXX,XX +XXX,XX @@ struct IDEDMAOps {
22
23
struct IDEDMA {
24
const struct IDEDMAOps *ops;
25
- struct iovec iov;
26
QEMUIOVector qiov;
27
BlockAIOCB *aiocb;
28
};
29
diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/hw/ide/atapi.c
32
+++ b/hw/ide/atapi.c
33
@@ -XXX,XX +XXX,XX @@ static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret)
34
data_offset = 0;
35
}
36
trace_ide_atapi_cmd_read_dma_cb_aio(s, s->lba, n);
37
- s->bus->dma->iov.iov_base = (void *)(s->io_buffer + data_offset);
38
- s->bus->dma->iov.iov_len = n * ATAPI_SECTOR_SIZE;
39
- qemu_iovec_init_external(&s->bus->dma->qiov, &s->bus->dma->iov, 1);
40
+ qemu_iovec_init_buf(&s->bus->dma->qiov, s->io_buffer + data_offset,
41
+ n * ATAPI_SECTOR_SIZE);
42
43
s->bus->dma->aiocb = ide_buffered_readv(s, (int64_t)s->lba << 2,
44
&s->bus->dma->qiov, n * 4,
45
--
46
2.20.1
47
48
1
From: John Snow <jsnow@redhat.com>
1
From: Stefano Garzarella <sgarzare@redhat.com>
2
2
3
Rename opaque_job to job to be consistent with other job implementations.
3
We add an acct_failed param in order to use virtio_blk_handle_rw_error()
4
Rename 'job', the BackupBlockJob object, to 's' to also be consistent.
4
also when it is not required to call block_acct_failed() (e.g. when a discard
5
operation fails).
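A hedged sketch of the intended caller (the real discard path only appears later in this series); the point is simply that the new last argument lets the error handler be reused where no accounting entry was started:

    /* Hypothetical completion path for an operation that was never added
     * to the accounting stats, so acct_failed must be false. */
    if (ret < 0 &&
        virtio_blk_handle_rw_error(req, -ret, false /* is_read */,
                                   false /* acct_failed */)) {
        return;
    }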
5
6
6
Suggested-by: Eric Blake <eblake@redhat.com>
7
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
7
Signed-off-by: John Snow <jsnow@redhat.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
8
Reviewed-by: Max Reitz <mreitz@redhat.com>
9
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
9
Message-id: 20180830015734.19765-8-jsnow@redhat.com
10
Message-id: 20190221103314.58500-2-sgarzare@redhat.com
10
Signed-off-by: Max Reitz <mreitz@redhat.com>
11
Message-Id: <20190221103314.58500-2-sgarzare@redhat.com>
12
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
11
---
13
---
12
block/backup.c | 62 +++++++++++++++++++++++++-------------------------
14
hw/block/virtio-blk.c | 10 ++++++----
13
1 file changed, 31 insertions(+), 31 deletions(-)
15
1 file changed, 6 insertions(+), 4 deletions(-)
14
16
15
diff --git a/block/backup.c b/block/backup.c
17
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
16
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
17
--- a/block/backup.c
19
--- a/hw/block/virtio-blk.c
18
+++ b/block/backup.c
20
+++ b/hw/block/virtio-blk.c
19
@@ -XXX,XX +XXX,XX @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
21
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
20
bdrv_dirty_iter_free(dbi);
21
}
22
}
22
23
23
-static int coroutine_fn backup_run(Job *opaque_job, Error **errp)
24
static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
24
+static int coroutine_fn backup_run(Job *job, Error **errp)
25
- bool is_read)
26
+ bool is_read, bool acct_failed)
25
{
27
{
26
- BackupBlockJob *job = container_of(opaque_job, BackupBlockJob, common.job);
28
VirtIOBlock *s = req->dev;
27
- BlockDriverState *bs = blk_bs(job->common.blk);
29
BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
28
+ BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
30
@@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
29
+ BlockDriverState *bs = blk_bs(s->common.blk);
31
s->rq = req;
30
int64_t offset, nb_clusters;
32
} else if (action == BLOCK_ERROR_ACTION_REPORT) {
31
int ret = 0;
33
virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
32
34
- block_acct_failed(blk_get_stats(s->blk), &req->acct);
33
- QLIST_INIT(&job->inflight_reqs);
35
+ if (acct_failed) {
34
- qemu_co_rwlock_init(&job->flush_rwlock);
36
+ block_acct_failed(blk_get_stats(s->blk), &req->acct);
35
+ QLIST_INIT(&s->inflight_reqs);
37
+ }
36
+ qemu_co_rwlock_init(&s->flush_rwlock);
38
virtio_blk_free_request(req);
37
38
- nb_clusters = DIV_ROUND_UP(job->len, job->cluster_size);
39
- job_progress_set_remaining(&job->common.job, job->len);
40
+ nb_clusters = DIV_ROUND_UP(s->len, s->cluster_size);
41
+ job_progress_set_remaining(job, s->len);
42
43
- job->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
44
- if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
45
- backup_incremental_init_copy_bitmap(job);
46
+ s->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
47
+ if (s->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
48
+ backup_incremental_init_copy_bitmap(s);
49
} else {
50
- hbitmap_set(job->copy_bitmap, 0, nb_clusters);
51
+ hbitmap_set(s->copy_bitmap, 0, nb_clusters);
52
}
39
}
53
40
54
41
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_rw_complete(void *opaque, int ret)
55
- job->before_write.notify = backup_before_write_notify;
42
* the memory until the request is completed (which will
56
- bdrv_add_before_write_notifier(bs, &job->before_write);
43
* happen on the other side of the migration).
57
+ s->before_write.notify = backup_before_write_notify;
44
*/
58
+ bdrv_add_before_write_notifier(bs, &s->before_write);
45
- if (virtio_blk_handle_rw_error(req, -ret, is_read)) {
59
46
+ if (virtio_blk_handle_rw_error(req, -ret, is_read, true)) {
60
- if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
47
continue;
61
+ if (s->sync_mode == MIRROR_SYNC_MODE_NONE) {
62
/* All bits are set in copy_bitmap to allow any cluster to be copied.
63
* This does not actually require them to be copied. */
64
- while (!job_is_cancelled(&job->common.job)) {
65
+ while (!job_is_cancelled(job)) {
66
/* Yield until the job is cancelled. We just let our before_write
67
* notify callback service CoW requests. */
68
- job_yield(&job->common.job);
69
+ job_yield(job);
70
}
71
- } else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
72
- ret = backup_run_incremental(job);
73
+ } else if (s->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
74
+ ret = backup_run_incremental(s);
75
} else {
76
/* Both FULL and TOP SYNC_MODE's require copying.. */
77
- for (offset = 0; offset < job->len;
78
- offset += job->cluster_size) {
79
+ for (offset = 0; offset < s->len;
80
+ offset += s->cluster_size) {
81
bool error_is_read;
82
int alloced = 0;
83
84
- if (yield_and_check(job)) {
85
+ if (yield_and_check(s)) {
86
break;
87
}
88
89
- if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
90
+ if (s->sync_mode == MIRROR_SYNC_MODE_TOP) {
91
int i;
92
int64_t n;
93
94
/* Check to see if these blocks are already in the
95
* backing file. */
96
97
- for (i = 0; i < job->cluster_size;) {
98
+ for (i = 0; i < s->cluster_size;) {
99
/* bdrv_is_allocated() only returns true/false based
100
* on the first set of sectors it comes across that
101
* are are all in the same state.
102
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_run(Job *opaque_job, Error **errp)
103
* needed but at some point that is always the case. */
104
alloced =
105
bdrv_is_allocated(bs, offset + i,
106
- job->cluster_size - i, &n);
107
+ s->cluster_size - i, &n);
108
i += n;
109
110
if (alloced || n == 0) {
111
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_run(Job *opaque_job, Error **errp)
112
if (alloced < 0) {
113
ret = alloced;
114
} else {
115
- ret = backup_do_cow(job, offset, job->cluster_size,
116
+ ret = backup_do_cow(s, offset, s->cluster_size,
117
&error_is_read, false);
118
}
119
if (ret < 0) {
120
/* Depending on error action, fail now or retry cluster */
121
BlockErrorAction action =
122
- backup_error_action(job, error_is_read, -ret);
123
+ backup_error_action(s, error_is_read, -ret);
124
if (action == BLOCK_ERROR_ACTION_REPORT) {
125
break;
126
} else {
127
- offset -= job->cluster_size;
128
+ offset -= s->cluster_size;
129
continue;
130
}
131
}
48
}
132
}
49
}
50
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_flush_complete(void *opaque, int ret)
51
52
aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
53
if (ret) {
54
- if (virtio_blk_handle_rw_error(req, -ret, 0)) {
55
+ if (virtio_blk_handle_rw_error(req, -ret, 0, true)) {
56
goto out;
57
}
133
}
58
}
134
135
- notifier_with_return_remove(&job->before_write);
136
+ notifier_with_return_remove(&s->before_write);
137
138
/* wait until pending backup_do_cow() calls have completed */
139
- qemu_co_rwlock_wrlock(&job->flush_rwlock);
140
- qemu_co_rwlock_unlock(&job->flush_rwlock);
141
- hbitmap_free(job->copy_bitmap);
142
+ qemu_co_rwlock_wrlock(&s->flush_rwlock);
143
+ qemu_co_rwlock_unlock(&s->flush_rwlock);
144
+ hbitmap_free(s->copy_bitmap);
145
146
return ret;
147
}
148
--
59
--
149
2.17.1
60
2.20.1
150
61
151
62
New patch
1
From: Stefano Garzarella <sgarzare@redhat.com>
1
2
3
Since configurable features for virtio-blk are growing, this patch
4
adds a host_features field to struct VirtIOBlock (as in virtio-net).
5
In this way, we can avoid adding new fields for new properties, and
6
we can set VIRTIO_BLK_F* flags directly in host_features.
7
8
We update "config-wce" and "scsi" property definition to use the new
9
host_features field without changing the behaviour.
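A hedged sketch of what adding a feature looks like once the bitmask exists; the DISCARD lines are only an illustration here (that property is actually introduced by a later patch in this series):

    /* Device property: sets or clears a bit directly in host_features. */
    DEFINE_PROP_BIT64("discard", VirtIOBlock, host_features,
                      VIRTIO_BLK_F_DISCARD, true),

    /* Runtime check, no dedicated struct field needed any more. */
    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD)) {
        /* advertise/handle the feature */
    }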
10
11
Suggested-by: Michael S. Tsirkin <mst@redhat.com>
12
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
13
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
14
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
15
Message-id: 20190221103314.58500-3-sgarzare@redhat.com
16
Message-Id: <20190221103314.58500-3-sgarzare@redhat.com>
17
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
18
---
19
include/hw/virtio/virtio-blk.h | 3 +--
20
hw/block/virtio-blk.c | 16 +++++++++-------
21
2 files changed, 10 insertions(+), 9 deletions(-)
22
23
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
24
index XXXXXXX..XXXXXXX 100644
25
--- a/include/hw/virtio/virtio-blk.h
26
+++ b/include/hw/virtio/virtio-blk.h
27
@@ -XXX,XX +XXX,XX @@ struct VirtIOBlkConf
28
BlockConf conf;
29
IOThread *iothread;
30
char *serial;
31
- uint32_t scsi;
32
- uint32_t config_wce;
33
uint32_t request_merging;
34
uint16_t num_queues;
35
uint16_t queue_size;
36
@@ -XXX,XX +XXX,XX @@ typedef struct VirtIOBlock {
37
bool dataplane_disabled;
38
bool dataplane_started;
39
struct VirtIOBlockDataPlane *dataplane;
40
+ uint64_t host_features;
41
} VirtIOBlock;
42
43
typedef struct VirtIOBlockReq {
44
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
45
index XXXXXXX..XXXXXXX 100644
46
--- a/hw/block/virtio-blk.c
47
+++ b/hw/block/virtio-blk.c
48
@@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_scsi_req(VirtIOBlockReq *req)
49
*/
50
scsi = (void *)elem->in_sg[elem->in_num - 2].iov_base;
51
52
- if (!blk->conf.scsi) {
53
+ if (!virtio_has_feature(blk->host_features, VIRTIO_BLK_F_SCSI)) {
54
status = VIRTIO_BLK_S_UNSUPP;
55
goto fail;
56
}
57
@@ -XXX,XX +XXX,XX @@ static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features,
58
{
59
VirtIOBlock *s = VIRTIO_BLK(vdev);
60
61
+ /* Firstly sync all virtio-blk possible supported features */
62
+ features |= s->host_features;
63
+
64
virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX);
65
virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY);
66
virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY);
67
virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE);
68
if (virtio_has_feature(features, VIRTIO_F_VERSION_1)) {
69
- if (s->conf.scsi) {
70
+ if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_SCSI)) {
71
error_setg(errp, "Please set scsi=off for virtio-blk devices in order to use virtio 1.0");
72
return 0;
73
}
74
@@ -XXX,XX +XXX,XX @@ static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features,
75
virtio_add_feature(&features, VIRTIO_BLK_F_SCSI);
76
}
77
78
- if (s->conf.config_wce) {
79
- virtio_add_feature(&features, VIRTIO_BLK_F_CONFIG_WCE);
80
- }
81
if (blk_enable_write_cache(s->blk)) {
82
virtio_add_feature(&features, VIRTIO_BLK_F_WCE);
83
}
84
@@ -XXX,XX +XXX,XX @@ static Property virtio_blk_properties[] = {
85
DEFINE_BLOCK_ERROR_PROPERTIES(VirtIOBlock, conf.conf),
86
DEFINE_BLOCK_CHS_PROPERTIES(VirtIOBlock, conf.conf),
87
DEFINE_PROP_STRING("serial", VirtIOBlock, conf.serial),
88
- DEFINE_PROP_BIT("config-wce", VirtIOBlock, conf.config_wce, 0, true),
89
+ DEFINE_PROP_BIT64("config-wce", VirtIOBlock, host_features,
90
+ VIRTIO_BLK_F_CONFIG_WCE, true),
91
#ifdef __linux__
92
- DEFINE_PROP_BIT("scsi", VirtIOBlock, conf.scsi, 0, false),
93
+ DEFINE_PROP_BIT64("scsi", VirtIOBlock, host_features,
94
+ VIRTIO_BLK_F_SCSI, false),
95
#endif
96
DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0,
97
true),
98
--
99
2.20.1
100
101
1
From: John Snow <jsnow@redhat.com>
1
From: Stefano Garzarella <sgarzare@redhat.com>
2
2
3
All jobs do the same thing when they leave their running loop:
3
In order to avoid migration issues, we enable DISCARD and
4
- Store the return code in a structure
4
WRITE_ZEROES features only for machine types >= 4.0.
5
- wait to receive this structure in the main thread
6
- signal job completion via job_completed
7
5
8
Few jobs do anything beyond exactly this. Consolidate this exit
6
As discussed with Michael S. Tsirkin and Stefan Hajnoczi on the
9
logic for a net reduction in SLOC.
7
list [1], the DISCARD operation should not have security implications
8
(eg. page cache attacks), so we can enable it by default.
10
9
11
More seriously, when we utilize job_defer_to_main_loop_bh to call
10
[1] https://lists.gnu.org/archive/html/qemu-devel/2019-02/msg00504.html
12
a function that calls job_completed, job_finalize_single will run
13
in a context where it has recursively taken the aio_context lock,
14
which can cause hangs if it puts down a reference that causes a flush.
15
11
16
You can observe this in practice by looking at mirror_exit's careful
12
Suggested-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
17
placement of job_completed and bdrv_unref calls.
13
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
14
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
15
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
16
Message-id: 20190221103314.58500-4-sgarzare@redhat.com
17
Message-Id: <20190221103314.58500-4-sgarzare@redhat.com>
18
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
19
---
20
hw/block/virtio-blk.c | 4 ++++
21
hw/core/machine.c | 2 ++
22
2 files changed, 6 insertions(+)
18
23
19
If we centralize job exiting, we can signal job completion from outside
24
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
20
of the aio_context, which should allow for job cleanup code to run with
21
only one lock, which makes cleanup callbacks less tricky to write.
22
23
Signed-off-by: John Snow <jsnow@redhat.com>
24
Reviewed-by: Max Reitz <mreitz@redhat.com>
25
Message-id: 20180830015734.19765-4-jsnow@redhat.com
26
Reviewed-by: Jeff Cody <jcody@redhat.com>
27
Signed-off-by: Max Reitz <mreitz@redhat.com>
28
---
29
include/qemu/job.h | 11 +++++++++++
30
job.c | 18 ++++++++++++++++++
31
2 files changed, 29 insertions(+)
32
33
diff --git a/include/qemu/job.h b/include/qemu/job.h
34
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
35
--- a/include/qemu/job.h
26
--- a/hw/block/virtio-blk.c
36
+++ b/include/qemu/job.h
27
+++ b/hw/block/virtio-blk.c
37
@@ -XXX,XX +XXX,XX @@ struct JobDriver {
28
@@ -XXX,XX +XXX,XX @@ static Property virtio_blk_properties[] = {
38
*/
29
DEFINE_PROP_UINT16("queue-size", VirtIOBlock, conf.queue_size, 128),
39
void (*drain)(Job *job);
30
DEFINE_PROP_LINK("iothread", VirtIOBlock, conf.iothread, TYPE_IOTHREAD,
40
31
IOThread *),
41
+ /**
32
+ DEFINE_PROP_BIT64("discard", VirtIOBlock, host_features,
42
+ * If the callback is not NULL, exit will be invoked from the main thread
33
+ VIRTIO_BLK_F_DISCARD, true),
43
+ * when the job's coroutine has finished, but before transactional
34
+ DEFINE_PROP_BIT64("write-zeroes", VirtIOBlock, host_features,
44
+ * convergence; before @prepare or @abort.
35
+ VIRTIO_BLK_F_WRITE_ZEROES, true),
45
+ *
36
DEFINE_PROP_END_OF_LIST(),
46
+ * FIXME TODO: This callback is only temporary to transition remaining jobs
37
};
47
+ * to prepare/commit/abort/clean callbacks and will be removed before 3.1.
38
48
+ * is released.
39
diff --git a/hw/core/machine.c b/hw/core/machine.c
49
+ */
50
+ void (*exit)(Job *job);
51
+
52
/**
53
* If the callback is not NULL, prepare will be invoked when all the jobs
54
* belonging to the same transaction complete; or upon this job's completion
55
diff --git a/job.c b/job.c
56
index XXXXXXX..XXXXXXX 100644
40
index XXXXXXX..XXXXXXX 100644
57
--- a/job.c
41
--- a/hw/core/machine.c
58
+++ b/job.c
42
+++ b/hw/core/machine.c
59
@@ -XXX,XX +XXX,XX @@ void job_drain(Job *job)
43
@@ -XXX,XX +XXX,XX @@ GlobalProperty hw_compat_3_1[] = {
60
}
44
{ "usb-kbd", "serial", "42" },
61
}
45
{ "usb-mouse", "serial", "42" },
62
46
{ "usb-kbd", "serial", "42" },
63
+static void job_exit(void *opaque)
47
+ { "virtio-blk-device", "discard", "false" },
64
+{
48
+ { "virtio-blk-device", "write-zeroes", "false" },
65
+ Job *job = (Job *)opaque;
49
};
66
+ AioContext *aio_context = job->aio_context;
50
const size_t hw_compat_3_1_len = G_N_ELEMENTS(hw_compat_3_1);
67
+
68
+ if (job->driver->exit) {
69
+ aio_context_acquire(aio_context);
70
+ job->driver->exit(job);
71
+ aio_context_release(aio_context);
72
+ }
73
+ job_completed(job, job->ret);
74
+}
75
76
/**
77
* All jobs must allow a pause point before entering their job proper. This
78
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn job_co_entry(void *opaque)
79
assert(job && job->driver && job->driver->run);
80
job_pause_point(job);
81
job->ret = job->driver->run(job, &job->err);
82
+ if (!job->deferred_to_main_loop) {
83
+ job->deferred_to_main_loop = true;
84
+ aio_bh_schedule_oneshot(qemu_get_aio_context(),
85
+ job_exit,
86
+ job);
87
+ }
88
}
89
90
51
91
--
52
--
92
2.17.1
53
2.20.1
93
54
94
55
1
From: John Snow <jsnow@redhat.com>
1
From: Stefano Garzarella <sgarzare@redhat.com>
2
2
3
Change the manual deferment to mirror_exit into the implicit
3
In order to use VirtIOFeature also in other virtio devices, we move
4
callback to job_exit and the mirror_exit callback.
4
its declaration and the endof() macro (renamed to virtio_endof())
5
to virtio.h.
6
We add the virtio_feature_get_config_size() function to iterate the array
7
of VirtIOFeature and to return the config size depending on the
8
features enabled (as virtio_net_set_config_size() did).
5
9
6
This does change the order of some bdrv_unref calls and job_completed,
10
Suggested-by: Michael S. Tsirkin <mst@redhat.com>
7
but thanks to the new context in which we call .exit, it is safe to
11
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
8
defer the possible flushing of any nodes to the job_finalize_single
12
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
9
cleanup stage.
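To make the virtio_feature_get_config_size() helper described above concrete, here is a hedged sketch with a made-up two-entry table; the real tables are virtio-net's (in this patch) and virtio-blk's (added later in the series), and host_features stands in for whatever feature bits the device exposes:

    /* Each entry says: if this feature bit is set, the config space must
     * extend at least up to and including the named field. */
    static VirtIOFeature example_sizes[] = {
        { .flags = 1ULL << VIRTIO_NET_F_MAC,
          .end   = virtio_endof(struct virtio_net_config, mac) },
        { .flags = 1ULL << VIRTIO_NET_F_MTU,
          .end   = virtio_endof(struct virtio_net_config, mtu) },
        {}
    };

    /* Result: the largest .end among entries whose feature bit is enabled. */
    size_t sz = virtio_feature_get_config_size(example_sizes, host_features);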
13
Message-id: 20190221103314.58500-5-sgarzare@redhat.com
14
Message-Id: <20190221103314.58500-5-sgarzare@redhat.com>
15
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
16
---
17
include/hw/virtio/virtio.h | 15 +++++++++++++++
18
hw/net/virtio-net.c | 31 +++++++------------------------
19
hw/virtio/virtio.c | 15 +++++++++++++++
20
3 files changed, 37 insertions(+), 24 deletions(-)
10
21
11
Signed-off-by: John Snow <jsnow@redhat.com>
22
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
12
Message-id: 20180830015734.19765-6-jsnow@redhat.com
13
Reviewed-by: Max Reitz <mreitz@redhat.com>
14
Reviewed-by: Jeff Cody <jcody@redhat.com>
15
Signed-off-by: Max Reitz <mreitz@redhat.com>
16
---
17
block/mirror.c | 29 +++++++++++------------------
18
1 file changed, 11 insertions(+), 18 deletions(-)
19
20
diff --git a/block/mirror.c b/block/mirror.c
21
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
22
--- a/block/mirror.c
24
--- a/include/hw/virtio/virtio.h
23
+++ b/block/mirror.c
25
+++ b/include/hw/virtio/virtio.h
24
@@ -XXX,XX +XXX,XX @@ static void mirror_wait_for_all_io(MirrorBlockJob *s)
26
@@ -XXX,XX +XXX,XX @@ static inline hwaddr vring_align(hwaddr addr,
25
}
27
return QEMU_ALIGN_UP(addr, align);
26
}
28
}
27
29
28
-typedef struct {
30
+/*
29
- int ret;
31
+ * Calculate the number of bytes up to and including the given 'field' of
30
-} MirrorExitData;
32
+ * 'container'.
33
+ */
34
+#define virtio_endof(container, field) \
35
+ (offsetof(container, field) + sizeof_field(container, field))
36
+
37
+typedef struct VirtIOFeature {
38
+ uint64_t flags;
39
+ size_t end;
40
+} VirtIOFeature;
41
+
42
+size_t virtio_feature_get_config_size(VirtIOFeature *features,
43
+ uint64_t host_features);
44
+
45
typedef struct VirtQueue VirtQueue;
46
47
#define VIRTQUEUE_MAX_SIZE 1024
48
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
49
index XXXXXXX..XXXXXXX 100644
50
--- a/hw/net/virtio-net.c
51
+++ b/hw/net/virtio-net.c
52
@@ -XXX,XX +XXX,XX @@ static inline __virtio16 *virtio_net_rsc_ext_num_dupacks(
53
54
#endif
55
56
-/*
57
- * Calculate the number of bytes up to and including the given 'field' of
58
- * 'container'.
59
- */
60
-#define endof(container, field) \
61
- (offsetof(container, field) + sizeof_field(container, field))
31
-
62
-
32
-static void mirror_exit(Job *job, void *opaque)
63
-typedef struct VirtIOFeature {
33
+static void mirror_exit(Job *job)
64
- uint64_t flags;
65
- size_t end;
66
-} VirtIOFeature;
67
-
68
static VirtIOFeature feature_sizes[] = {
69
{.flags = 1ULL << VIRTIO_NET_F_MAC,
70
- .end = endof(struct virtio_net_config, mac)},
71
+ .end = virtio_endof(struct virtio_net_config, mac)},
72
{.flags = 1ULL << VIRTIO_NET_F_STATUS,
73
- .end = endof(struct virtio_net_config, status)},
74
+ .end = virtio_endof(struct virtio_net_config, status)},
75
{.flags = 1ULL << VIRTIO_NET_F_MQ,
76
- .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
77
+ .end = virtio_endof(struct virtio_net_config, max_virtqueue_pairs)},
78
{.flags = 1ULL << VIRTIO_NET_F_MTU,
79
- .end = endof(struct virtio_net_config, mtu)},
80
+ .end = virtio_endof(struct virtio_net_config, mtu)},
81
{.flags = 1ULL << VIRTIO_NET_F_SPEED_DUPLEX,
82
- .end = endof(struct virtio_net_config, duplex)},
83
+ .end = virtio_endof(struct virtio_net_config, duplex)},
84
{}
85
};
86
87
@@ -XXX,XX +XXX,XX @@ static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
88
89
static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
34
{
90
{
35
MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
91
- int i, config_size = 0;
36
BlockJob *bjob = &s->common;
92
virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
37
- MirrorExitData *data = opaque;
93
38
MirrorBDSOpaque *bs_opaque = s->mirror_top_bs->opaque;
94
- for (i = 0; feature_sizes[i].flags != 0; i++) {
39
AioContext *replace_aio_context = NULL;
95
- if (host_features & feature_sizes[i].flags) {
40
BlockDriverState *src = s->mirror_top_bs->backing->bs;
96
- config_size = MAX(feature_sizes[i].end, config_size);
41
BlockDriverState *target_bs = blk_bs(s->target);
97
- }
42
BlockDriverState *mirror_top_bs = s->mirror_top_bs;
98
- }
43
Error *local_err = NULL;
99
- n->config_size = config_size;
44
+ int ret = job->ret;
100
+ n->config_size = virtio_feature_get_config_size(feature_sizes,
45
101
+ host_features);
46
bdrv_release_dirty_bitmap(src, s->dirty_bitmap);
47
48
- /* Make sure that the source BDS doesn't go away before we called
49
- * job_completed(). */
50
+ /* Make sure that the source BDS doesn't go away during bdrv_replace_node,
51
+ * before we can call bdrv_drained_end */
52
bdrv_ref(src);
53
bdrv_ref(mirror_top_bs);
54
bdrv_ref(target_bs);
55
@@ -XXX,XX +XXX,XX @@ static void mirror_exit(Job *job, void *opaque)
56
bdrv_set_backing_hd(target_bs, backing, &local_err);
57
if (local_err) {
58
error_report_err(local_err);
59
- data->ret = -EPERM;
60
+ ret = -EPERM;
61
}
62
}
63
}
64
@@ -XXX,XX +XXX,XX @@ static void mirror_exit(Job *job, void *opaque)
65
aio_context_acquire(replace_aio_context);
66
}
67
68
- if (s->should_complete && data->ret == 0) {
69
+ if (s->should_complete && ret == 0) {
70
BlockDriverState *to_replace = src;
71
if (s->to_replace) {
72
to_replace = s->to_replace;
73
@@ -XXX,XX +XXX,XX @@ static void mirror_exit(Job *job, void *opaque)
74
bdrv_drained_end(target_bs);
75
if (local_err) {
76
error_report_err(local_err);
77
- data->ret = -EPERM;
78
+ ret = -EPERM;
79
}
80
}
81
if (s->to_replace) {
82
@@ -XXX,XX +XXX,XX @@ static void mirror_exit(Job *job, void *opaque)
83
blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);
84
85
bs_opaque->job = NULL;
86
- job_completed(job, data->ret);
87
88
- g_free(data);
89
bdrv_drained_end(src);
90
bdrv_unref(mirror_top_bs);
91
bdrv_unref(src);
92
+
93
+ job->ret = ret;
94
}
102
}
95
103
96
static void mirror_throttle(MirrorBlockJob *s)
104
void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
97
@@ -XXX,XX +XXX,XX @@ static int mirror_flush(MirrorBlockJob *s)
105
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
98
static int coroutine_fn mirror_run(Job *job, Error **errp)
106
index XXXXXXX..XXXXXXX 100644
99
{
107
--- a/hw/virtio/virtio.c
100
MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
108
+++ b/hw/virtio/virtio.c
101
- MirrorExitData *data;
109
@@ -XXX,XX +XXX,XX @@ int virtio_set_features(VirtIODevice *vdev, uint64_t val)
102
BlockDriverState *bs = s->mirror_top_bs->backing->bs;
103
BlockDriverState *target_bs = blk_bs(s->target);
104
bool need_drain = true;
105
@@ -XXX,XX +XXX,XX @@ immediate_exit:
106
g_free(s->in_flight_bitmap);
107
bdrv_dirty_iter_free(s->dbi);
108
109
- data = g_malloc(sizeof(*data));
110
- data->ret = ret;
111
-
112
if (need_drain) {
113
bdrv_drained_begin(bs);
114
}
115
116
- job_defer_to_main_loop(&s->common.job, mirror_exit, data);
117
return ret;
110
return ret;
118
}
111
}
119
112
120
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver mirror_job_driver = {
113
+size_t virtio_feature_get_config_size(VirtIOFeature *feature_sizes,
121
.user_resume = block_job_user_resume,
114
+ uint64_t host_features)
122
.drain = block_job_drain,
115
+{
123
.run = mirror_run,
116
+ size_t config_size = 0;
124
+ .exit = mirror_exit,
117
+ int i;
125
.pause = mirror_pause,
118
+
126
.complete = mirror_complete,
119
+ for (i = 0; feature_sizes[i].flags != 0; i++) {
127
},
120
+ if (host_features & feature_sizes[i].flags) {
128
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver commit_active_job_driver = {
121
+ config_size = MAX(feature_sizes[i].end, config_size);
129
.user_resume = block_job_user_resume,
122
+ }
130
.drain = block_job_drain,
123
+ }
131
.run = mirror_run,
124
+
132
+ .exit = mirror_exit,
125
+ return config_size;
133
.pause = mirror_pause,
126
+}
134
.complete = mirror_complete,
127
+
135
},
128
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
129
{
130
int i, ret;
136
--
131
--
137
2.17.1
132
2.20.1
138
133
139
134
1
From: John Snow <jsnow@redhat.com>
1
From: Stefano Garzarella <sgarzare@redhat.com>
2
2
3
Jobs presently use both an Error object in the case of the create job,
3
Starting from the DISCARD and WRITE_ZEROES features, we use an array of
4
and char strings in the case of generic errors elsewhere.
4
VirtIOFeature (as virtio-net does) to properly set the config size
5
depending on the features enabled.
5
6
6
Unify the two paths as just j->err, and remove the extra argument from
7
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
7
job_completed. The integer error code for job_completed is kept for now,
8
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
8
to be removed shortly in a separate patch.
9
Message-id: 20190221103314.58500-6-sgarzare@redhat.com
10
Message-Id: <20190221103314.58500-6-sgarzare@redhat.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
13
include/hw/virtio/virtio-blk.h | 1 +
14
hw/block/virtio-blk.c | 31 +++++++++++++++++++++++++------
15
2 files changed, 26 insertions(+), 6 deletions(-)
9
16
10
Signed-off-by: John Snow <jsnow@redhat.com>
17
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
11
Message-id: 20180830015734.19765-3-jsnow@redhat.com
12
[mreitz: Dropped a superfluous g_strdup()]
13
Reviewed-by: Eric Blake <eblake@redhat.com>
14
Signed-off-by: Max Reitz <mreitz@redhat.com>
15
---
16
include/qemu/job.h | 14 ++++++++------
17
block/backup.c | 2 +-
18
block/commit.c | 2 +-
19
block/create.c | 5 ++---
20
block/mirror.c | 2 +-
21
block/stream.c | 2 +-
22
job-qmp.c | 5 +++--
23
job.c | 18 ++++++------------
24
tests/test-bdrv-drain.c | 2 +-
25
tests/test-blockjob-txn.c | 2 +-
26
tests/test-blockjob.c | 2 +-
27
11 files changed, 26 insertions(+), 30 deletions(-)
28
29
diff --git a/include/qemu/job.h b/include/qemu/job.h
30
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
31
--- a/include/qemu/job.h
19
--- a/include/hw/virtio/virtio-blk.h
32
+++ b/include/qemu/job.h
20
+++ b/include/hw/virtio/virtio-blk.h
33
@@ -XXX,XX +XXX,XX @@ typedef struct Job {
21
@@ -XXX,XX +XXX,XX @@ typedef struct VirtIOBlock {
34
/** Estimated progress_current value at the completion of the job */
22
bool dataplane_started;
35
int64_t progress_total;
23
struct VirtIOBlockDataPlane *dataplane;
36
24
uint64_t host_features;
37
- /** Error string for a failed job (NULL if, and only if, job->ret == 0) */
25
+ size_t config_size;
38
- char *error;
26
} VirtIOBlock;
39
-
27
40
/** ret code passed to job_completed. */
28
typedef struct VirtIOBlockReq {
41
int ret;
29
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
42
30
index XXXXXXX..XXXXXXX 100644
43
+ /**
31
--- a/hw/block/virtio-blk.c
44
+ * Error object for a failed job.
32
+++ b/hw/block/virtio-blk.c
45
+ * If job->ret is nonzero and an error object was not set, it will be set
33
@@ -XXX,XX +XXX,XX @@
46
+ * to strerror(-job->ret) during job_completed.
34
#include "hw/virtio/virtio-bus.h"
47
+ */
35
#include "hw/virtio/virtio-access.h"
48
+ Error *err;
36
37
-/* We don't support discard yet, hide associated config fields. */
38
+/* Config size before the discard support (hide associated config fields) */
39
#define VIRTIO_BLK_CFG_SIZE offsetof(struct virtio_blk_config, \
40
max_discard_sectors)
41
+/*
42
+ * Starting from the discard feature, we can use this array to properly
43
+ * set the config size depending on the features enabled.
44
+ */
45
+static VirtIOFeature feature_sizes[] = {
46
+ {.flags = 1ULL << VIRTIO_BLK_F_DISCARD,
47
+ .end = virtio_endof(struct virtio_blk_config, discard_sector_alignment)},
48
+ {.flags = 1ULL << VIRTIO_BLK_F_WRITE_ZEROES,
49
+ .end = virtio_endof(struct virtio_blk_config, write_zeroes_may_unmap)},
50
+ {}
51
+};
49
+
52
+
50
/** The completion function that will be called when the job completes. */
53
+static void virtio_blk_set_config_size(VirtIOBlock *s, uint64_t host_features)
51
BlockCompletionFunc *cb;
54
+{
52
55
+ s->config_size = MAX(VIRTIO_BLK_CFG_SIZE,
53
@@ -XXX,XX +XXX,XX @@ void job_transition_to_ready(Job *job);
56
+ virtio_feature_get_config_size(feature_sizes, host_features));
54
/**
57
+
55
* @job: The job being completed.
58
+ assert(s->config_size <= sizeof(struct virtio_blk_config));
56
* @ret: The status code.
59
+}
57
- * @error: The error message for a failing job (only with @ret < 0). If @ret is
60
58
- * negative, but NULL is given for @error, strerror() is used.
61
static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
59
*
62
VirtIOBlockReq *req)
60
* Marks @job as completed. If @ret is non-zero, the job transaction it is part
63
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
61
* of is aborted. If @ret is zero, the job moves into the WAITING state. If it
64
blkcfg.alignment_offset = 0;
62
* is the last job to complete in its transaction, all jobs in the transaction
65
blkcfg.wce = blk_enable_write_cache(s->blk);
63
* move from WAITING to PENDING.
66
virtio_stw_p(vdev, &blkcfg.num_queues, s->conf.num_queues);
64
*/
67
- memcpy(config, &blkcfg, VIRTIO_BLK_CFG_SIZE);
65
-void job_completed(Job *job, int ret, Error *error);
68
- QEMU_BUILD_BUG_ON(VIRTIO_BLK_CFG_SIZE > sizeof(blkcfg));
66
+void job_completed(Job *job, int ret);
69
+ memcpy(config, &blkcfg, s->config_size);
67
68
/** Asynchronously complete the specified @job. */
69
void job_complete(Job *job, Error **errp);
70
diff --git a/block/backup.c b/block/backup.c
71
index XXXXXXX..XXXXXXX 100644
72
--- a/block/backup.c
73
+++ b/block/backup.c
74
@@ -XXX,XX +XXX,XX @@ static void backup_complete(Job *job, void *opaque)
75
{
76
BackupCompleteData *data = opaque;
77
78
- job_completed(job, data->ret, NULL);
79
+ job_completed(job, data->ret);
80
g_free(data);
81
}
70
}
82
71
83
diff --git a/block/commit.c b/block/commit.c
72
static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
84
index XXXXXXX..XXXXXXX 100644
73
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
85
--- a/block/commit.c
74
VirtIOBlock *s = VIRTIO_BLK(vdev);
86
+++ b/block/commit.c
75
struct virtio_blk_config blkcfg;
87
@@ -XXX,XX +XXX,XX @@ static void commit_complete(Job *job, void *opaque)
76
88
* bdrv_set_backing_hd() to fail. */
77
- memcpy(&blkcfg, config, VIRTIO_BLK_CFG_SIZE);
89
block_job_remove_all_bdrv(bjob);
78
- QEMU_BUILD_BUG_ON(VIRTIO_BLK_CFG_SIZE > sizeof(blkcfg));
90
79
+ memcpy(&blkcfg, config, s->config_size);
91
- job_completed(job, ret, NULL);
80
92
+ job_completed(job, ret);
81
aio_context_acquire(blk_get_aio_context(s->blk));
93
g_free(data);
82
blk_set_enable_write_cache(s->blk, blkcfg.wce != 0);
94
83
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
95
/* If bdrv_drop_intermediate() didn't already do that, remove the commit
84
return;
96
diff --git a/block/create.c b/block/create.c
97
index XXXXXXX..XXXXXXX 100644
98
--- a/block/create.c
99
+++ b/block/create.c
100
@@ -XXX,XX +XXX,XX @@ typedef struct BlockdevCreateJob {
101
BlockDriver *drv;
102
BlockdevCreateOptions *opts;
103
int ret;
104
- Error *err;
105
} BlockdevCreateJob;
106
107
static void blockdev_create_complete(Job *job, void *opaque)
108
{
109
BlockdevCreateJob *s = container_of(job, BlockdevCreateJob, common);
110
111
- job_completed(job, s->ret, s->err);
112
+ job_completed(job, s->ret);
113
}
114
115
static int coroutine_fn blockdev_create_run(Job *job, Error **errp)
116
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn blockdev_create_run(Job *job, Error **errp)
117
BlockdevCreateJob *s = container_of(job, BlockdevCreateJob, common);
118
119
job_progress_set_remaining(&s->common, 1);
120
- s->ret = s->drv->bdrv_co_create(s->opts, &s->err);
121
+ s->ret = s->drv->bdrv_co_create(s->opts, errp);
122
job_progress_update(&s->common, 1);
123
124
qapi_free_BlockdevCreateOptions(s->opts);
125
diff --git a/block/mirror.c b/block/mirror.c
126
index XXXXXXX..XXXXXXX 100644
127
--- a/block/mirror.c
128
+++ b/block/mirror.c
129
@@ -XXX,XX +XXX,XX @@ static void mirror_exit(Job *job, void *opaque)
130
blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);
131
132
bs_opaque->job = NULL;
133
- job_completed(job, data->ret, NULL);
134
+ job_completed(job, data->ret);
135
136
g_free(data);
137
bdrv_drained_end(src);
138
diff --git a/block/stream.c b/block/stream.c
139
index XXXXXXX..XXXXXXX 100644
140
--- a/block/stream.c
141
+++ b/block/stream.c
142
@@ -XXX,XX +XXX,XX @@ out:
143
}
85
}
144
86
145
g_free(s->backing_file_str);
87
- virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK, VIRTIO_BLK_CFG_SIZE);
146
- job_completed(job, data->ret, NULL);
88
+ virtio_blk_set_config_size(s, s->host_features);
147
+ job_completed(job, data->ret);
89
+
148
g_free(data);
90
+ virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK, s->config_size);
149
}
91
150
92
s->blk = conf->conf.blk;
151
diff --git a/job-qmp.c b/job-qmp.c
93
s->rq = NULL;
152
index XXXXXXX..XXXXXXX 100644
153
--- a/job-qmp.c
154
+++ b/job-qmp.c
155
@@ -XXX,XX +XXX,XX @@ static JobInfo *job_query_single(Job *job, Error **errp)
156
.status = job->status,
157
.current_progress = job->progress_current,
158
.total_progress = job->progress_total,
159
- .has_error = !!job->error,
160
- .error = g_strdup(job->error),
161
+ .has_error = !!job->err,
162
+ .error = job->err ? \
163
+ g_strdup(error_get_pretty(job->err)) : NULL,
164
};
165
166
return info;
167
diff --git a/job.c b/job.c
168
index XXXXXXX..XXXXXXX 100644
169
--- a/job.c
170
+++ b/job.c
171
@@ -XXX,XX +XXX,XX @@ void job_unref(Job *job)
172
173
QLIST_REMOVE(job, job_list);
174
175
- g_free(job->error);
176
+ error_free(job->err);
177
g_free(job->id);
178
g_free(job);
179
}
180
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn job_co_entry(void *opaque)
181
182
assert(job && job->driver && job->driver->run);
183
job_pause_point(job);
184
- job->ret = job->driver->run(job, NULL);
185
+ job->ret = job->driver->run(job, &job->err);
186
}
187
188
189
@@ -XXX,XX +XXX,XX @@ static void job_update_rc(Job *job)
190
job->ret = -ECANCELED;
191
}
192
if (job->ret) {
193
- if (!job->error) {
194
- job->error = g_strdup(strerror(-job->ret));
195
+ if (!job->err) {
196
+ error_setg(&job->err, "%s", strerror(-job->ret));
197
}
198
job_state_transition(job, JOB_STATUS_ABORTING);
199
}
200
@@ -XXX,XX +XXX,XX @@ static void job_completed_txn_success(Job *job)
201
}
202
}
203
204
-void job_completed(Job *job, int ret, Error *error)
205
+void job_completed(Job *job, int ret)
206
{
207
assert(job && job->txn && !job_is_completed(job));
208
209
job->ret = ret;
210
- if (error) {
211
- assert(job->ret < 0);
212
- job->error = g_strdup(error_get_pretty(error));
213
- error_free(error);
214
- }
215
-
216
job_update_rc(job);
217
trace_job_completed(job, ret, job->ret);
218
if (job->ret) {
219
@@ -XXX,XX +XXX,XX @@ void job_cancel(Job *job, bool force)
220
}
221
job_cancel_async(job, force);
222
if (!job_started(job)) {
223
- job_completed(job, -ECANCELED, NULL);
224
+ job_completed(job, -ECANCELED);
225
} else if (job->deferred_to_main_loop) {
226
job_completed_txn_abort(job);
227
} else {
228
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
229
index XXXXXXX..XXXXXXX 100644
230
--- a/tests/test-bdrv-drain.c
231
+++ b/tests/test-bdrv-drain.c
232
@@ -XXX,XX +XXX,XX @@ typedef struct TestBlockJob {
233
234
static void test_job_completed(Job *job, void *opaque)
235
{
236
- job_completed(job, 0, NULL);
237
+ job_completed(job, 0);
238
}
239
240
static int coroutine_fn test_job_run(Job *job, Error **errp)
241
diff --git a/tests/test-blockjob-txn.c b/tests/test-blockjob-txn.c
242
index XXXXXXX..XXXXXXX 100644
243
--- a/tests/test-blockjob-txn.c
244
+++ b/tests/test-blockjob-txn.c
245
@@ -XXX,XX +XXX,XX @@ static void test_block_job_complete(Job *job, void *opaque)
246
rc = -ECANCELED;
247
}
248
249
- job_completed(job, rc, NULL);
250
+ job_completed(job, rc);
251
bdrv_unref(bs);
252
}
253
254
diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c
255
index XXXXXXX..XXXXXXX 100644
256
--- a/tests/test-blockjob.c
257
+++ b/tests/test-blockjob.c
258
@@ -XXX,XX +XXX,XX @@ static void cancel_job_completed(Job *job, void *opaque)
259
{
260
CancelJob *s = opaque;
261
s->completed = true;
262
- job_completed(job, 0, NULL);
263
+ job_completed(job, 0);
264
}
265
266
static void cancel_job_complete(Job *job, Error **errp)
267
--
94
--
268
2.17.1
95
2.20.1
269
96
270
97
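
A note on the error convention in the jobs patch above: after this change a failed job always ends up with both a negative job->ret and a populated job->err, because job_update_rc() synthesizes an Error from strerror(-job->ret) whenever the run callback did not set one. The following standalone sketch models only that fallback; struct fake_job and job_update_rc_like() are invented names for the illustration, not QEMU code.

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for QEMU's Job: only the fields the rule touches. */
    struct fake_job {
        int ret;            /* 0 on success, -errno on failure */
        const char *err;    /* NULL unless an error message was set explicitly */
    };

    /* Mimics the fallback documented in the patch: synthesize a message
     * from the return code when the job failed but no error was recorded. */
    static void job_update_rc_like(struct fake_job *job)
    {
        if (job->ret && !job->err) {
            job->err = strerror(-job->ret);
        }
    }

    int main(void)
    {
        struct fake_job ok = { .ret = 0 };
        struct fake_job failed = { .ret = -EIO };

        job_update_rc_like(&ok);
        job_update_rc_like(&failed);

        printf("ok: ret=%d err=%s\n", ok.ret, ok.err ? ok.err : "(none)");
        printf("failed: ret=%d err=%s\n", failed.ret, failed.err);
        return 0;
    }

In QEMU itself the same decision is made with error_setg() inside job_update_rc(), as the job.c hunk above shows.
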
From: Stefano Garzarella <sgarzare@redhat.com>

This patch adds the support of DISCARD and WRITE_ZEROES commands,
that have been introduced in the virtio-blk protocol to have
better performance when using SSD backend.

We support only one segment per request since multiple segments
are not widely used and there are no userspace APIs that allow
applications to submit multiple segments in a single call.

Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
Message-id: 20190221103314.58500-7-sgarzare@redhat.com
Message-Id: <20190221103314.58500-7-sgarzare@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
include/hw/virtio/virtio-blk.h | 2 +
hw/block/virtio-blk.c | 184 +++++++++++++++++++++++++++++++++
2 files changed, 186 insertions(+)

diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -XXX,XX +XXX,XX @@ struct VirtIOBlkConf
uint32_t request_merging;
uint16_t num_queues;
uint16_t queue_size;
+ uint32_t max_discard_sectors;
+ uint32_t max_write_zeroes_sectors;
};

struct VirtIOBlockDataPlane;
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -XXX,XX +XXX,XX @@ out:
aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}

+static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret)
+{
+ VirtIOBlockReq *req = opaque;
+ VirtIOBlock *s = req->dev;
+ bool is_write_zeroes = (virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type) &
+ ~VIRTIO_BLK_T_BARRIER) == VIRTIO_BLK_T_WRITE_ZEROES;
+
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
+ if (ret) {
+ if (virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) {
+ goto out;
+ }
+ }
+
+ virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
+ if (is_write_zeroes) {
+ block_acct_done(blk_get_stats(s->blk), &req->acct);
+ }
+ virtio_blk_free_request(req);
+
+out:
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
+}
+
#ifdef __linux__

typedef struct {
@@ -XXX,XX +XXX,XX @@ static bool virtio_blk_sect_range_ok(VirtIOBlock *dev,
return true;
}

+static uint8_t virtio_blk_handle_discard_write_zeroes(VirtIOBlockReq *req,
+ struct virtio_blk_discard_write_zeroes *dwz_hdr, bool is_write_zeroes)
+{
+ VirtIOBlock *s = req->dev;
+ VirtIODevice *vdev = VIRTIO_DEVICE(s);
+ uint64_t sector;
+ uint32_t num_sectors, flags, max_sectors;
+ uint8_t err_status;
+ int bytes;
+
+ sector = virtio_ldq_p(vdev, &dwz_hdr->sector);
+ num_sectors = virtio_ldl_p(vdev, &dwz_hdr->num_sectors);
+ flags = virtio_ldl_p(vdev, &dwz_hdr->flags);
+ max_sectors = is_write_zeroes ? s->conf.max_write_zeroes_sectors :
+ s->conf.max_discard_sectors;
+
+ /*
+ * max_sectors is at most BDRV_REQUEST_MAX_SECTORS, this check
+ * make us sure that "num_sectors << BDRV_SECTOR_BITS" can fit in
+ * the integer variable.
+ */
+ if (unlikely(num_sectors > max_sectors)) {
+ err_status = VIRTIO_BLK_S_IOERR;
+ goto err;
+ }
+
+ bytes = num_sectors << BDRV_SECTOR_BITS;
+
+ if (unlikely(!virtio_blk_sect_range_ok(s, sector, bytes))) {
+ err_status = VIRTIO_BLK_S_IOERR;
+ goto err;
+ }
+
+ /*
+ * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for discard
+ * and write zeroes commands if any unknown flag is set.
+ */
+ if (unlikely(flags & ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
+ err_status = VIRTIO_BLK_S_UNSUPP;
+ goto err;
+ }
+
+ if (is_write_zeroes) { /* VIRTIO_BLK_T_WRITE_ZEROES */
+ int blk_aio_flags = 0;
+
+ if (flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) {
+ blk_aio_flags |= BDRV_REQ_MAY_UNMAP;
+ }
+
+ block_acct_start(blk_get_stats(s->blk), &req->acct, bytes,
+ BLOCK_ACCT_WRITE);
+
+ blk_aio_pwrite_zeroes(s->blk, sector << BDRV_SECTOR_BITS,
+ bytes, blk_aio_flags,
+ virtio_blk_discard_write_zeroes_complete, req);
+ } else { /* VIRTIO_BLK_T_DISCARD */
+ /*
+ * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for
+ * discard commands if the unmap flag is set.
+ */
+ if (unlikely(flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
+ err_status = VIRTIO_BLK_S_UNSUPP;
+ goto err;
+ }
+
+ blk_aio_pdiscard(s->blk, sector << BDRV_SECTOR_BITS, bytes,
+ virtio_blk_discard_write_zeroes_complete, req);
+ }
+
+ return VIRTIO_BLK_S_OK;
+
+err:
+ if (is_write_zeroes) {
+ block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
+ }
+ return err_status;
+}
+
static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
uint32_t type;
@@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
virtio_blk_free_request(req);
break;
}
+ /*
+ * VIRTIO_BLK_T_DISCARD and VIRTIO_BLK_T_WRITE_ZEROES are defined with
+ * VIRTIO_BLK_T_OUT flag set. We masked this flag in the switch statement,
+ * so we must mask it for these requests, then we will check if it is set.
+ */
+ case VIRTIO_BLK_T_DISCARD & ~VIRTIO_BLK_T_OUT:
+ case VIRTIO_BLK_T_WRITE_ZEROES & ~VIRTIO_BLK_T_OUT:
+ {
+ struct virtio_blk_discard_write_zeroes dwz_hdr;
+ size_t out_len = iov_size(out_iov, out_num);
+ bool is_write_zeroes = (type & ~VIRTIO_BLK_T_BARRIER) ==
+ VIRTIO_BLK_T_WRITE_ZEROES;
+ uint8_t err_status;
+
+ /*
+ * Unsupported if VIRTIO_BLK_T_OUT is not set or the request contains
+ * more than one segment.
+ */
+ if (unlikely(!(type & VIRTIO_BLK_T_OUT) ||
+ out_len > sizeof(dwz_hdr))) {
+ virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
+ virtio_blk_free_request(req);
+ return 0;
+ }
+
+ if (unlikely(iov_to_buf(out_iov, out_num, 0, &dwz_hdr,
+ sizeof(dwz_hdr)) != sizeof(dwz_hdr))) {
+ virtio_error(vdev, "virtio-blk discard/write_zeroes header"
+ " too short");
+ return -1;
+ }
+
+ err_status = virtio_blk_handle_discard_write_zeroes(req, &dwz_hdr,
+ is_write_zeroes);
+ if (err_status != VIRTIO_BLK_S_OK) {
+ virtio_blk_req_complete(req, err_status);
+ virtio_blk_free_request(req);
+ }
+
+ break;
+ }
default:
virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
virtio_blk_free_request(req);
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
blkcfg.alignment_offset = 0;
blkcfg.wce = blk_enable_write_cache(s->blk);
virtio_stw_p(vdev, &blkcfg.num_queues, s->conf.num_queues);
+ if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD)) {
+ virtio_stl_p(vdev, &blkcfg.max_discard_sectors,
+ s->conf.max_discard_sectors);
+ virtio_stl_p(vdev, &blkcfg.discard_sector_alignment,
+ blk_size >> BDRV_SECTOR_BITS);
+ /*
+ * We support only one segment per request since multiple segments
+ * are not widely used and there are no userspace APIs that allow
+ * applications to submit multiple segments in a single call.
+ */
+ virtio_stl_p(vdev, &blkcfg.max_discard_seg, 1);
+ }
+ if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES)) {
+ virtio_stl_p(vdev, &blkcfg.max_write_zeroes_sectors,
+ s->conf.max_write_zeroes_sectors);
+ blkcfg.write_zeroes_may_unmap = 1;
+ virtio_stl_p(vdev, &blkcfg.max_write_zeroes_seg, 1);
+ }
memcpy(config, &blkcfg, s->config_size);
}

@@ -XXX,XX +XXX,XX @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
return;
}

+ if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD) &&
+ (!conf->max_discard_sectors ||
+ conf->max_discard_sectors > BDRV_REQUEST_MAX_SECTORS)) {
+ error_setg(errp, "invalid max-discard-sectors property (%" PRIu32 ")"
+ ", must be between 1 and %d",
+ conf->max_discard_sectors, (int)BDRV_REQUEST_MAX_SECTORS);
+ return;
+ }
+
+ if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES) &&
+ (!conf->max_write_zeroes_sectors ||
+ conf->max_write_zeroes_sectors > BDRV_REQUEST_MAX_SECTORS)) {
+ error_setg(errp, "invalid max-write-zeroes-sectors property (%" PRIu32
+ "), must be between 1 and %d",
+ conf->max_write_zeroes_sectors,
+ (int)BDRV_REQUEST_MAX_SECTORS);
+ return;
+ }
+
virtio_blk_set_config_size(s, s->host_features);

virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK, s->config_size);
@@ -XXX,XX +XXX,XX @@ static Property virtio_blk_properties[] = {
VIRTIO_BLK_F_DISCARD, true),
DEFINE_PROP_BIT64("write-zeroes", VirtIOBlock, host_features,
VIRTIO_BLK_F_WRITE_ZEROES, true),
+ DEFINE_PROP_UINT32("max-discard-sectors", VirtIOBlock,
+ conf.max_discard_sectors, BDRV_REQUEST_MAX_SECTORS),
+ DEFINE_PROP_UINT32("max-write-zeroes-sectors", VirtIOBlock,
+ conf.max_write_zeroes_sectors, BDRV_REQUEST_MAX_SECTORS),
DEFINE_PROP_END_OF_LIST(),
};

--
2.20.1
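
To see the request validation from the patch above in isolation: a discard or write-zeroes header is rejected when its sector count exceeds the configured maximum (fed by the new max-discard-sectors / max-write-zeroes-sectors properties), when the range falls outside the device, or when an unknown flag is set. The sketch below is a self-contained approximation of those checks; struct dwz_hdr, check_dwz_request() and the flag constant are local stand-ins for this sketch, not the QEMU definitions.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define WRITE_ZEROES_FLAG_UNMAP (1u << 0)   /* assumed flag bit for the sketch */

    /* Same layout as struct virtio_blk_discard_write_zeroes (one segment). */
    struct dwz_hdr {
        uint64_t sector;
        uint32_t num_sectors;
        uint32_t flags;
    };

    /* Hypothetical helper mirroring the bounds/flag checks in the patch. */
    static bool check_dwz_request(const struct dwz_hdr *h, uint64_t dev_sectors,
                                  uint32_t max_sectors, bool is_write_zeroes)
    {
        if (h->num_sectors > max_sectors) {
            return false;   /* VIRTIO_BLK_S_IOERR in the patch */
        }
        if (h->sector + h->num_sectors > dev_sectors) {
            return false;   /* out of range */
        }
        if (h->flags & ~WRITE_ZEROES_FLAG_UNMAP) {
            return false;   /* unknown flag: VIRTIO_BLK_S_UNSUPP */
        }
        if (!is_write_zeroes && (h->flags & WRITE_ZEROES_FLAG_UNMAP)) {
            return false;   /* unmap is only accepted for write zeroes */
        }
        return true;
    }

    int main(void)
    {
        struct dwz_hdr ok = { .sector = 0, .num_sectors = 8, .flags = 0 };
        struct dwz_hdr bad = { .sector = 0, .num_sectors = 1 << 20, .flags = 0 };

        printf("ok: %d\n", check_dwz_request(&ok, 2048, 4096, false));
        printf("too large: %d\n", check_dwz_request(&bad, 2048, 4096, false));
        return 0;
    }

The real handler additionally accounts the write-zeroes case against the block statistics and submits the work with blk_aio_pwrite_zeroes() or blk_aio_pdiscard(), as the diff above shows.
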
From: Stefano Garzarella <sgarzare@redhat.com>

The size of data in the virtio_blk_request must be a multiple
of 512 bytes for IN and OUT requests, or a multiple of the size
of struct virtio_blk_discard_write_zeroes for DISCARD and
WRITE_ZEROES requests.

Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
Message-id: 20190221103314.58500-8-sgarzare@redhat.com
Message-Id: <20190221103314.58500-8-sgarzare@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
tests/virtio-blk-test.c | 15 ++++++++++++++-
1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/tests/virtio-blk-test.c b/tests/virtio-blk-test.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/virtio-blk-test.c
+++ b/tests/virtio-blk-test.c
@@ -XXX,XX +XXX,XX @@ static uint64_t virtio_blk_request(QGuestAllocator *alloc, QVirtioDevice *d,
uint64_t addr;
uint8_t status = 0xFF;

- g_assert_cmpuint(data_size % 512, ==, 0);
+ switch (req->type) {
+ case VIRTIO_BLK_T_IN:
+ case VIRTIO_BLK_T_OUT:
+ g_assert_cmpuint(data_size % 512, ==, 0);
+ break;
+ case VIRTIO_BLK_T_DISCARD:
+ case VIRTIO_BLK_T_WRITE_ZEROES:
+ g_assert_cmpuint(data_size %
+ sizeof(struct virtio_blk_discard_write_zeroes), ==, 0);
+ break;
+ default:
+ g_assert_cmpuint(data_size, ==, 0);
+ }
+
addr = guest_alloc(alloc, sizeof(*req) + data_size);

virtio_blk_fix_request(d, req);
--
2.20.1
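
The rule the test now asserts can be restated as a small standalone check: IN/OUT payloads are multiples of 512 bytes, discard/write-zeroes payloads are multiples of the 16-byte header, and all other request types carry no payload. Everything below (type constants, helper name, local header struct) is invented for the sketch.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for struct virtio_blk_discard_write_zeroes (16 bytes). */
    struct dwz_hdr {
        uint64_t sector;
        uint32_t num_sectors;
        uint32_t flags;
    };

    enum { T_IN, T_OUT, T_DISCARD, T_WRITE_ZEROES, T_OTHER };

    /* Mirrors the switch added to virtio_blk_request() in the test. */
    static bool data_size_ok(int type, uint64_t data_size)
    {
        switch (type) {
        case T_IN:
        case T_OUT:
            return data_size % 512 == 0;
        case T_DISCARD:
        case T_WRITE_ZEROES:
            return data_size % sizeof(struct dwz_hdr) == 0;
        default:
            return data_size == 0;
        }
    }

    int main(void)
    {
        printf("%d %d %d\n",
               data_size_ok(T_IN, 1024),                         /* 1 */
               data_size_ok(T_DISCARD, sizeof(struct dwz_hdr)),  /* 1 */
               data_size_ok(T_OUT, 100));                        /* 0 */
        return 0;
    }
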
From: John Snow <jsnow@redhat.com>

Jobs are now expected to return their retcode on the stack, from the
.run callback, so we can remove that argument.

job_cancel does not need to set -ECANCELED because job_completed will
update the return code itself if the job was canceled.

While we're here, make job_completed static to job.c and remove it from
job.h; move the documentation of return code to the .run() callback and
to the job->ret property, accordingly.

Signed-off-by: John Snow <jsnow@redhat.com>
Message-id: 20180830015734.19765-9-jsnow@redhat.com
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
include/qemu/job.h | 28 +++++++++++++++-------------
job.c | 11 ++++++-----
trace-events | 2 +-
3 files changed, 22 insertions(+), 19 deletions(-)

diff --git a/include/qemu/job.h b/include/qemu/job.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -XXX,XX +XXX,XX @@ typedef struct Job {
/** Estimated progress_current value at the completion of the job */
int64_t progress_total;

- /** ret code passed to job_completed. */
+ /**
+ * Return code from @run and/or @prepare callback(s).
+ * Not final until the job has reached the CONCLUDED status.
+ * 0 on success, -errno on failure.
+ */
int ret;

/**
@@ -XXX,XX +XXX,XX @@ struct JobDriver {
/** Enum describing the operation */
JobType job_type;

- /** Mandatory: Entrypoint for the Coroutine. */
+ /**
+ * Mandatory: Entrypoint for the Coroutine.
+ *
+ * This callback will be invoked when moving from CREATED to RUNNING.
+ *
+ * If this callback returns nonzero, the job transaction it is part of is
+ * aborted. If it returns zero, the job moves into the WAITING state. If it
+ * is the last job to complete in its transaction, all jobs in the
+ * transaction move from WAITING to PENDING.
+ */
int coroutine_fn (*run)(Job *job, Error **errp);

/**
@@ -XXX,XX +XXX,XX @@ void job_early_fail(Job *job);
/** Moves the @job from RUNNING to READY */
void job_transition_to_ready(Job *job);

-/**
- * @job: The job being completed.
- * @ret: The status code.
- *
- * Marks @job as completed. If @ret is non-zero, the job transaction it is part
- * of is aborted. If @ret is zero, the job moves into the WAITING state. If it
- * is the last job to complete in its transaction, all jobs in the transaction
- * move from WAITING to PENDING.
- */
-void job_completed(Job *job, int ret);
-
/** Asynchronously complete the specified @job. */
void job_complete(Job *job, Error **errp);

diff --git a/job.c b/job.c
index XXXXXXX..XXXXXXX 100644
--- a/job.c
+++ b/job.c
@@ -XXX,XX +XXX,XX @@ void job_drain(Job *job)
}
}

+static void job_completed(Job *job);
+
static void job_exit(void *opaque)
{
Job *job = (Job *)opaque;
@@ -XXX,XX +XXX,XX @@ static void job_exit(void *opaque)
job->driver->exit(job);
aio_context_release(aio_context);
}
- job_completed(job, job->ret);
+ job_completed(job);
}

/**
@@ -XXX,XX +XXX,XX @@ static void job_completed_txn_success(Job *job)
}
}

-void job_completed(Job *job, int ret)
+static void job_completed(Job *job)
{
assert(job && job->txn && !job_is_completed(job));

- job->ret = ret;
job_update_rc(job);
- trace_job_completed(job, ret, job->ret);
+ trace_job_completed(job, job->ret);
if (job->ret) {
job_completed_txn_abort(job);
} else {
@@ -XXX,XX +XXX,XX @@ void job_cancel(Job *job, bool force)
}
job_cancel_async(job, force);
if (!job_started(job)) {
- job_completed(job, -ECANCELED);
+ job_completed(job);
} else if (job->deferred_to_main_loop) {
job_completed_txn_abort(job);
} else {
diff --git a/trace-events b/trace-events
index XXXXXXX..XXXXXXX 100644
--- a/trace-events
+++ b/trace-events
@@ -XXX,XX +XXX,XX @@ gdbstub_err_checksum_incorrect(uint8_t expected, uint8_t got) "got command packe
# job.c
job_state_transition(void *job, int ret, const char *legal, const char *s0, const char *s1) "job %p (ret: %d) attempting %s transition (%s-->%s)"
job_apply_verb(void *job, const char *state, const char *verb, const char *legal) "job %p in state %s; applying verb %s (%s)"
-job_completed(void *job, int ret, int jret) "job %p ret %d corrected ret %d"
+job_completed(void *job, int ret) "job %p ret %d"

# job-qmp.c
qmp_job_cancel(void *job) "job %p"
--
2.17.1


From: Stefano Garzarella <sgarzare@redhat.com>

This function is useful to fix the endianness of struct
virtio_blk_discard_write_zeroes headers.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
Message-id: 20190221103314.58500-9-sgarzare@redhat.com
Message-Id: <20190221103314.58500-9-sgarzare@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
tests/virtio-blk-test.c | 23 +++++++++++++++++------
1 file changed, 17 insertions(+), 6 deletions(-)

diff --git a/tests/virtio-blk-test.c b/tests/virtio-blk-test.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/virtio-blk-test.c
+++ b/tests/virtio-blk-test.c
@@ -XXX,XX +XXX,XX @@ typedef struct QVirtioBlkReq {
uint8_t status;
} QVirtioBlkReq;

+#ifdef HOST_WORDS_BIGENDIAN
+const bool host_is_big_endian = true;
+#else
+const bool host_is_big_endian; /* false */
+#endif
+
static char *drive_create(void)
{
int fd, ret;
@@ -XXX,XX +XXX,XX @@ static QVirtioPCIDevice *virtio_blk_pci_init(QPCIBus *bus, int slot)

static inline void virtio_blk_fix_request(QVirtioDevice *d, QVirtioBlkReq *req)
{
-#ifdef HOST_WORDS_BIGENDIAN
- const bool host_is_big_endian = true;
-#else
- const bool host_is_big_endian = false;
-#endif
-
if (qvirtio_is_big_endian(d) != host_is_big_endian) {
req->type = bswap32(req->type);
req->ioprio = bswap32(req->ioprio);
@@ -XXX,XX +XXX,XX @@ static inline void virtio_blk_fix_request(QVirtioDevice *d, QVirtioBlkReq *req)
}
}

+
+static inline void virtio_blk_fix_dwz_hdr(QVirtioDevice *d,
+ struct virtio_blk_discard_write_zeroes *dwz_hdr)
+{
+ if (qvirtio_is_big_endian(d) != host_is_big_endian) {
+ dwz_hdr->sector = bswap64(dwz_hdr->sector);
+ dwz_hdr->num_sectors = bswap32(dwz_hdr->num_sectors);
+ dwz_hdr->flags = bswap32(dwz_hdr->flags);
+ }
+}
+
static uint64_t virtio_blk_request(QGuestAllocator *alloc, QVirtioDevice *d,
QVirtioBlkReq *req, uint64_t data_size)
{
--
2.20.1
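
virtio_blk_fix_dwz_hdr() in the patch above swaps the three header fields only when guest and host endianness differ. The standalone version below performs the same field-by-field swap using GCC/Clang byte-swap builtins so it can be compiled on its own; the swap_needed flag stands in for the qvirtio_is_big_endian(d) != host_is_big_endian test, which needs the qtest framework.

    #include <stdint.h>
    #include <stdio.h>

    struct dwz_hdr {
        uint64_t sector;
        uint32_t num_sectors;
        uint32_t flags;
    };

    /* Same field-by-field swap as virtio_blk_fix_dwz_hdr(), with the
     * endianness comparison replaced by an explicit flag for the sketch. */
    static void fix_dwz_hdr(struct dwz_hdr *h, int swap_needed)
    {
        if (swap_needed) {
            h->sector = __builtin_bswap64(h->sector);
            h->num_sectors = __builtin_bswap32(h->num_sectors);
            h->flags = __builtin_bswap32(h->flags);
        }
    }

    int main(void)
    {
        struct dwz_hdr h = { .sector = 1, .num_sectors = 8, .flags = 0 };

        fix_dwz_hdr(&h, 1);
        printf("sector=%llx num_sectors=%x\n",
               (unsigned long long)h.sector, h.num_sectors);
        return 0;
    }
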
From: Stefano Garzarella <sgarzare@redhat.com>

If the WRITE_ZEROES feature is enabled, we check this command
in the test_basic().

Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Acked-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
Message-id: 20190221103314.58500-10-sgarzare@redhat.com
Message-Id: <20190221103314.58500-10-sgarzare@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
tests/virtio-blk-test.c | 62 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 62 insertions(+)

diff --git a/tests/virtio-blk-test.c b/tests/virtio-blk-test.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/virtio-blk-test.c
+++ b/tests/virtio-blk-test.c
@@ -XXX,XX +XXX,XX @@ static void test_basic(QVirtioDevice *dev, QGuestAllocator *alloc,

guest_free(alloc, req_addr);

+ if (features & (1u << VIRTIO_BLK_F_WRITE_ZEROES)) {
+ struct virtio_blk_discard_write_zeroes dwz_hdr;
+ void *expected;
+
+ /*
+ * WRITE_ZEROES request on the same sector of previous test where
+ * we wrote "TEST".
+ */
+ req.type = VIRTIO_BLK_T_WRITE_ZEROES;
+ req.data = (char *) &dwz_hdr;
+ dwz_hdr.sector = 0;
+ dwz_hdr.num_sectors = 1;
+ dwz_hdr.flags = 0;
+
+ virtio_blk_fix_dwz_hdr(dev, &dwz_hdr);
+
+ req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr));
+
+ free_head = qvirtqueue_add(vq, req_addr, 16, false, true);
+ qvirtqueue_add(vq, req_addr + 16, sizeof(dwz_hdr), false, true);
+ qvirtqueue_add(vq, req_addr + 16 + sizeof(dwz_hdr), 1, true, false);
+
+ qvirtqueue_kick(dev, vq, free_head);
+
+ qvirtio_wait_used_elem(dev, vq, free_head, NULL,
+ QVIRTIO_BLK_TIMEOUT_US);
+ status = readb(req_addr + 16 + sizeof(dwz_hdr));
+ g_assert_cmpint(status, ==, 0);
+
+ guest_free(alloc, req_addr);
+
+ /* Read request to check if the sector contains all zeroes */
+ req.type = VIRTIO_BLK_T_IN;
+ req.ioprio = 1;
+ req.sector = 0;
+ req.data = g_malloc0(512);
+
+ req_addr = virtio_blk_request(alloc, dev, &req, 512);
+
+ g_free(req.data);
+
+ free_head = qvirtqueue_add(vq, req_addr, 16, false, true);
+ qvirtqueue_add(vq, req_addr + 16, 512, true, true);
+ qvirtqueue_add(vq, req_addr + 528, 1, true, false);
+
+ qvirtqueue_kick(dev, vq, free_head);
+
+ qvirtio_wait_used_elem(dev, vq, free_head, NULL,
+ QVIRTIO_BLK_TIMEOUT_US);
+ status = readb(req_addr + 528);
+ g_assert_cmpint(status, ==, 0);
+
+ data = g_malloc(512);
+ expected = g_malloc0(512);
+ memread(req_addr + 16, data, 512);
+ g_assert_cmpmem(data, 512, expected, 512);
+ g_free(expected);
+ g_free(data);
+
+ guest_free(alloc, req_addr);
+ }
+
if (features & (1u << VIRTIO_F_ANY_LAYOUT)) {
/* Write and read with 2 descriptor layout */
/* Write request */
--
2.20.1
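
The raw offsets used by the test above (16, 16 + sizeof(dwz_hdr), 528) follow from the virtio-blk request framing: a 16-byte header holding type, ioprio and sector, then the payload, then a single status byte. The sketch below only spells those offsets out; the struct name is invented here, and the real test builds the buffer by hand with guest_alloc() and qvirtqueue_add().

    #include <stdint.h>
    #include <stdio.h>

    /* Header layout shared by all virtio-blk requests (16 bytes). */
    struct blk_req_header {
        uint32_t type;
        uint32_t ioprio;
        uint64_t sector;
    };

    int main(void)
    {
        const size_t header = sizeof(struct blk_req_header);   /* 16 */
        const size_t dwz_payload = 16;                          /* discard/write-zeroes header */
        const size_t sector_payload = 512;                      /* one sector of data */

        printf("payload starts at: %zu\n", header);
        printf("status byte (dwz request) at: %zu\n", header + dwz_payload);      /* 32 */
        printf("status byte (512-byte read) at: %zu\n", header + sector_payload); /* 528 */
        return 0;
    }
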
From: Stefano Garzarella <sgarzare@redhat.com>

If the DISCARD feature is enabled, we try this command in the
test_basic(), checking only the status returned by the request.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
Message-id: 20190221103314.58500-11-sgarzare@redhat.com
Message-Id: <20190221103314.58500-11-sgarzare@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
tests/virtio-blk-test.c | 27 +++++++++++++++++++++++++++
1 file changed, 27 insertions(+)

diff --git a/tests/virtio-blk-test.c b/tests/virtio-blk-test.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/virtio-blk-test.c
+++ b/tests/virtio-blk-test.c
@@ -XXX,XX +XXX,XX @@ static void test_basic(QVirtioDevice *dev, QGuestAllocator *alloc,
guest_free(alloc, req_addr);
}

+ if (features & (1u << VIRTIO_BLK_F_DISCARD)) {
+ struct virtio_blk_discard_write_zeroes dwz_hdr;
+
+ req.type = VIRTIO_BLK_T_DISCARD;
+ req.data = (char *) &dwz_hdr;
+ dwz_hdr.sector = 0;
+ dwz_hdr.num_sectors = 1;
+ dwz_hdr.flags = 0;
+
+ virtio_blk_fix_dwz_hdr(dev, &dwz_hdr);
+
+ req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr));
+
+ free_head = qvirtqueue_add(vq, req_addr, 16, false, true);
+ qvirtqueue_add(vq, req_addr + 16, sizeof(dwz_hdr), false, true);
+ qvirtqueue_add(vq, req_addr + 16 + sizeof(dwz_hdr), 1, true, false);
+
+ qvirtqueue_kick(dev, vq, free_head);
+
+ qvirtio_wait_used_elem(dev, vq, free_head, NULL,
+ QVIRTIO_BLK_TIMEOUT_US);
+ status = readb(req_addr + 16 + sizeof(dwz_hdr));
+ g_assert_cmpint(status, ==, 0);
+
+ guest_free(alloc, req_addr);
+ }
+
if (features & (1u << VIRTIO_F_ANY_LAYOUT)) {
/* Write and read with 2 descriptor layout */
/* Write request */
--
2.20.1
diff view generated by jsdifflib