Cover letter, old series:

The following changes since commit a0def594286d9110a6035e02eef558cf3cf5d847:

  Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging (2017-01-30 10:23:20 +0000)

are available in the git repository at:

  https://github.com/codyprime/qemu-kvm-jtc.git tags/block-pull-request

for you to fetch changes up to acf6e5f0962c4be670d4a93ede77423512521876:

  sheepdog: reorganize check for overlapping requests (2017-02-01 00:17:20 -0500)

----------------------------------------------------------------
Block patches
----------------------------------------------------------------

Paolo Bonzini (5):
  sheepdog: remove unused cancellation support
  sheepdog: reorganize coroutine flow
  sheepdog: do not use BlockAIOCB
  sheepdog: simplify inflight_aio_head management
  sheepdog: reorganize check for overlapping requests

 block/sheepdog.c | 289 ++++++++++++++++---------------------------------
 1 file changed, 84 insertions(+), 205 deletions(-)

--
2.9.3

Cover letter, new series:

The following changes since commit 7260438b7056469610ee166f7abe9ff8a26b8b16:

  Merge remote-tracking branch 'remotes/palmer/tags/riscv-for-master-3.2-part2' into staging (2019-01-14 11:41:43 +0000)

are available in the Git repository at:

  git://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to fef1660132b0f25bf2d275d7f986ddcfe19a4426:

  aio-posix: Fix concurrent aio_poll/set_fd_handler. (2019-01-14 14:09:41 +0000)

----------------------------------------------------------------
Pull request

No user-visible changes.

----------------------------------------------------------------

Remy Noel (2):
  aio-posix: Unregister fd from ctx epoll when removing fd_handler.
  aio-posix: Fix concurrent aio_poll/set_fd_handler.

 util/aio-posix.c | 90 +++++++++++++++++++++++++++++-------------------
 util/aio-win32.c | 67 ++++++++++++++++-------------------
 2 files changed, 84 insertions(+), 73 deletions(-)

--
2.20.1
Deleted patch: sheepdog: remove unused cancellation support

From: Paolo Bonzini <pbonzini@redhat.com>

SheepdogAIOCB is internal to sheepdog.c, hence it is never canceled.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20161129113245.32724-2-pbonzini@redhat.com
Signed-off-by: Jeff Cody <jcody@redhat.com>
---
 block/sheepdog.c | 52 ----------------------------------------------------
 1 file changed, 52 deletions(-)

diff --git a/block/sheepdog.c b/block/sheepdog.c
index XXXXXXX..XXXXXXX 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -XXX,XX +XXX,XX @@ struct SheepdogAIOCB {
     Coroutine *coroutine;
     void (*aio_done_func)(SheepdogAIOCB *);

-    bool cancelable;
     int nr_pending;

     uint32_t min_affect_data_idx;
@@ -XXX,XX +XXX,XX @@ static inline void free_aio_req(BDRVSheepdogState *s, AIOReq *aio_req)
 {
     SheepdogAIOCB *acb = aio_req->aiocb;

-    acb->cancelable = false;
     QLIST_REMOVE(aio_req, aio_siblings);
     g_free(aio_req);

@@ -XXX,XX +XXX,XX @@ static void coroutine_fn sd_finish_aiocb(SheepdogAIOCB *acb)
     qemu_aio_unref(acb);
 }

-/*
- * Check whether the specified acb can be canceled
- *
- * We can cancel aio when any request belonging to the acb is:
- * - Not processed by the sheepdog server.
- * - Not linked to the inflight queue.
- */
-static bool sd_acb_cancelable(const SheepdogAIOCB *acb)
-{
-    BDRVSheepdogState *s = acb->common.bs->opaque;
-    AIOReq *aioreq;
-
-    if (!acb->cancelable) {
-        return false;
-    }
-
-    QLIST_FOREACH(aioreq, &s->inflight_aio_head, aio_siblings) {
-        if (aioreq->aiocb == acb) {
-            return false;
-        }
-    }
-
-    return true;
-}
-
-static void sd_aio_cancel(BlockAIOCB *blockacb)
-{
-    SheepdogAIOCB *acb = (SheepdogAIOCB *)blockacb;
-    BDRVSheepdogState *s = acb->common.bs->opaque;
-    AIOReq *aioreq, *next;
-
-    if (sd_acb_cancelable(acb)) {
-        /* Remove outstanding requests from failed queue. */
-        QLIST_FOREACH_SAFE(aioreq, &s->failed_aio_head, aio_siblings,
-                           next) {
-            if (aioreq->aiocb == acb) {
-                free_aio_req(s, aioreq);
-            }
-        }
-
-        assert(acb->nr_pending == 0);
-        if (acb->common.cb) {
-            acb->common.cb(acb->common.opaque, -ECANCELED);
-        }
-        sd_finish_aiocb(acb);
-    }
-}
-
 static const AIOCBInfo sd_aiocb_info = {
     .aiocb_size = sizeof(SheepdogAIOCB),
-    .cancel_async = sd_aio_cancel,
 };

 static SheepdogAIOCB *sd_aio_setup(BlockDriverState *bs, QEMUIOVector *qiov,
@@ -XXX,XX +XXX,XX @@ static SheepdogAIOCB *sd_aio_setup(BlockDriverState *bs, QEMUIOVector *qiov,
     acb->nb_sectors = nb_sectors;

     acb->aio_done_func = NULL;
-    acb->cancelable = true;
     acb->coroutine = qemu_coroutine_self();
     acb->ret = 0;
     acb->nr_pending = 0;
--
2.9.3
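A note on the hook being deleted here: .cancel_async in AIOCBInfo is how a
block driver opts in to external cancellation of its AIOCBs. For contrast, a
minimal sketch of the shape of a driver that does keep the hook (FooAIOCB and
foo_aio_cancel are hypothetical names; only aiocb_size, cancel_async and the
embedded BlockAIOCB come from the patch above):

    /* Hypothetical driver, sketch only. */
    typedef struct FooAIOCB {
        BlockAIOCB common;      /* public part, must come first */
        bool cancelled;         /* checked by the completion path */
    } FooAIOCB;

    static void foo_aio_cancel(BlockAIOCB *blockacb)
    {
        FooAIOCB *acb = (FooAIOCB *)blockacb;
        acb->cancelled = true;  /* completion then reports -ECANCELED */
    }

    static const AIOCBInfo foo_aiocb_info = {
        .aiocb_size   = sizeof(FooAIOCB),
        .cancel_async = foo_aio_cancel,
    };

Because SheepdogAIOCB never leaves sheepdog.c, no caller can ever request
cancellation, so sd_aio_cancel, sd_acb_cancelable and the cancelable flag
were dead code.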
Deleted patch: sheepdog: reorganize coroutine flow

From: Paolo Bonzini <pbonzini@redhat.com>

Delimit co_recv's lifetime clearly in aio_read_response.

Do a simple qemu_coroutine_enter in aio_read_response, letting
sd_co_writev call sd_write_done.

Handle nr_pending in the same way in sd_co_rw_vector,
sd_write_done and sd_co_flush_to_disk.

Remove sd_co_rw_vector's return value; just leave with no
pending requests.

[Jeff: added missing 'return' back, spotted by Paolo after
series was applied.]

Signed-off-by: Jeff Cody <jcody@redhat.com>
---
 block/sheepdog.c | 115 ++++++++++++++++++++-----------------------------------
 1 file changed, 42 insertions(+), 73 deletions(-)

diff --git a/block/sheepdog.c b/block/sheepdog.c
index XXXXXXX..XXXXXXX 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -XXX,XX +XXX,XX @@ struct SheepdogAIOCB {
     enum AIOCBState aiocb_type;

     Coroutine *coroutine;
-    void (*aio_done_func)(SheepdogAIOCB *);
-
     int nr_pending;

     uint32_t min_affect_data_idx;
@@ -XXX,XX +XXX,XX @@ static const char * sd_strerror(int err)
 *
 * 1. In sd_co_rw_vector, we send the I/O requests to the server and
 *    link the requests to the inflight_list in the
- *    BDRVSheepdogState. The function exits without waiting for
+ *    BDRVSheepdogState. The function yields while waiting for
 *    receiving the response.
 *
 * 2. We receive the response in aio_read_response, the fd handler to
- *    the sheepdog connection. If metadata update is needed, we send
- *    the write request to the vdi object in sd_write_done, the write
- *    completion function. We switch back to sd_co_readv/writev after
- *    all the requests belonging to the AIOCB are finished.
+ *    the sheepdog connection. We switch back to sd_co_readv/sd_writev
+ *    after all the requests belonging to the AIOCB are finished. If
+ *    needed, sd_co_writev will send another requests for the vdi object.
 */

 static inline AIOReq *alloc_aio_req(BDRVSheepdogState *s, SheepdogAIOCB *acb,
@@ -XXX,XX +XXX,XX @@ static inline void free_aio_req(BDRVSheepdogState *s, AIOReq *aio_req)
     acb->nr_pending--;
 }

-static void coroutine_fn sd_finish_aiocb(SheepdogAIOCB *acb)
-{
-    qemu_coroutine_enter(acb->coroutine);
-    qemu_aio_unref(acb);
-}
-
 static const AIOCBInfo sd_aiocb_info = {
     .aiocb_size = sizeof(SheepdogAIOCB),
 };
@@ -XXX,XX +XXX,XX @@ static SheepdogAIOCB *sd_aio_setup(BlockDriverState *bs, QEMUIOVector *qiov,
     acb->sector_num = sector_num;
     acb->nb_sectors = nb_sectors;

-    acb->aio_done_func = NULL;
     acb->coroutine = qemu_coroutine_self();
     acb->ret = 0;
     acb->nr_pending = 0;
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn aio_read_response(void *opaque)

     switch (acb->aiocb_type) {
     case AIOCB_WRITE_UDATA:
-        /* this coroutine context is no longer suitable for co_recv
-         * because we may send data to update vdi objects */
-        s->co_recv = NULL;
         if (!is_data_obj(aio_req->oid)) {
             break;
         }
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn aio_read_response(void *opaque)
         }
     }

+    /* No more data for this aio_req (reload_inode below uses its own file
+     * descriptor handler which doesn't use co_recv).
+     */
+    s->co_recv = NULL;
+
     switch (rsp.result) {
     case SD_RES_SUCCESS:
         break;
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn aio_read_response(void *opaque)
             aio_req->oid = vid_to_vdi_oid(s->inode.vdi_id);
         }
         resend_aioreq(s, aio_req);
-        goto out;
+        return;
     default:
         acb->ret = -EIO;
         error_report("%s", sd_strerror(rsp.result));
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn aio_read_response(void *opaque)
         * We've finished all requests which belong to the AIOCB, so
         * we can switch back to sd_co_readv/writev now.
         */
-        acb->aio_done_func(acb);
+        qemu_coroutine_enter(acb->coroutine);
     }
-out:
-    s->co_recv = NULL;
+
     return;
+
 err:
-    s->co_recv = NULL;
     reconnect_to_sdog(opaque);
 }

@@ -XXX,XX +XXX,XX @@ static int sd_truncate(BlockDriverState *bs, int64_t offset)
 /*
 * This function is called after writing data objects. If we need to
 * update metadata, this sends a write request to the vdi object.
- * Otherwise, this switches back to sd_co_readv/writev.
 */
 static void coroutine_fn sd_write_done(SheepdogAIOCB *acb)
 {
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn sd_write_done(SheepdogAIOCB *acb)
     mx = acb->max_dirty_data_idx;
     if (mn <= mx) {
         /* we need to update the vdi object. */
+        ++acb->nr_pending;
         offset = sizeof(s->inode) - sizeof(s->inode.data_vdi_id) +
             mn * sizeof(s->inode.data_vdi_id[0]);
         data_len = (mx - mn + 1) * sizeof(s->inode.data_vdi_id[0]);
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn sd_write_done(SheepdogAIOCB *acb)
                                 data_len, offset, 0, false, 0, offset);
         QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings);
         add_aio_request(s, aio_req, &iov, 1, AIOCB_WRITE_UDATA);
-
-        acb->aio_done_func = sd_finish_aiocb;
-        acb->aiocb_type = AIOCB_WRITE_UDATA;
-        return;
+        if (--acb->nr_pending) {
+            qemu_coroutine_yield();
+        }
     }
-
-    sd_finish_aiocb(acb);
 }

 /* Delete current working VDI on the snapshot chain */
@@ -XXX,XX +XXX,XX @@ out:
 * Returns 1 when we need to wait a response, 0 when there is no sent
 * request and -errno in error cases.
 */
-static int coroutine_fn sd_co_rw_vector(void *p)
+static void coroutine_fn sd_co_rw_vector(void *p)
 {
     SheepdogAIOCB *acb = p;
     int ret = 0;
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn sd_co_rw_vector(void *p)
         ret = sd_create_branch(s);
         if (ret) {
             acb->ret = -EIO;
-            goto out;
+            return;
         }
     }

@@ -XXX,XX +XXX,XX @@ static int coroutine_fn sd_co_rw_vector(void *p)
         idx++;
         done += len;
     }
-out:
-    if (!--acb->nr_pending) {
-        return acb->ret;
+    if (--acb->nr_pending) {
+        qemu_coroutine_yield();
     }
-    return 1;
 }

 static bool check_overlapping_aiocb(BDRVSheepdogState *s, SheepdogAIOCB *aiocb)
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int sd_co_writev(BlockDriverState *bs, int64_t sector_num,
     }

     acb = sd_aio_setup(bs, qiov, sector_num, nb_sectors);
-    acb->aio_done_func = sd_write_done;
     acb->aiocb_type = AIOCB_WRITE_UDATA;

 retry:
@@ -XXX,XX +XXX,XX @@ retry:
         goto retry;
     }

-    ret = sd_co_rw_vector(acb);
-    if (ret <= 0) {
-        QLIST_REMOVE(acb, aiocb_siblings);
-        qemu_co_queue_restart_all(&s->overlapping_queue);
-        qemu_aio_unref(acb);
-        return ret;
-    }
-
-    qemu_coroutine_yield();
+    sd_co_rw_vector(acb);
+    sd_write_done(acb);

     QLIST_REMOVE(acb, aiocb_siblings);
     qemu_co_queue_restart_all(&s->overlapping_queue);
-
-    return acb->ret;
+    ret = acb->ret;
+    qemu_aio_unref(acb);
+    return ret;
 }

 static coroutine_fn int sd_co_readv(BlockDriverState *bs, int64_t sector_num,
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int sd_co_readv(BlockDriverState *bs, int64_t sector_num,

     acb = sd_aio_setup(bs, qiov, sector_num, nb_sectors);
     acb->aiocb_type = AIOCB_READ_UDATA;
-    acb->aio_done_func = sd_finish_aiocb;

 retry:
     if (check_overlapping_aiocb(s, acb)) {
@@ -XXX,XX +XXX,XX @@ retry:
         goto retry;
     }

-    ret = sd_co_rw_vector(acb);
-    if (ret <= 0) {
-        QLIST_REMOVE(acb, aiocb_siblings);
-        qemu_co_queue_restart_all(&s->overlapping_queue);
-        qemu_aio_unref(acb);
-        return ret;
-    }
-
-    qemu_coroutine_yield();
+    sd_co_rw_vector(acb);

     QLIST_REMOVE(acb, aiocb_siblings);
     qemu_co_queue_restart_all(&s->overlapping_queue);
-    return acb->ret;
+    ret = acb->ret;
+    qemu_aio_unref(acb);
+    return ret;
 }

 static int coroutine_fn sd_co_flush_to_disk(BlockDriverState *bs)
 {
     BDRVSheepdogState *s = bs->opaque;
     SheepdogAIOCB *acb;
+    int ret;
     AIOReq *aio_req;

     if (s->cache_flags != SD_FLAG_CMD_CACHE) {
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn sd_co_flush_to_disk(BlockDriverState *bs)

     acb = sd_aio_setup(bs, NULL, 0, 0);
     acb->aiocb_type = AIOCB_FLUSH_CACHE;
-    acb->aio_done_func = sd_finish_aiocb;

+    acb->nr_pending++;
     aio_req = alloc_aio_req(s, acb, vid_to_vdi_oid(s->inode.vdi_id),
                             0, 0, 0, false, 0, 0);
     QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings);
     add_aio_request(s, aio_req, NULL, 0, acb->aiocb_type);

-    qemu_coroutine_yield();
-    return acb->ret;
+    if (--acb->nr_pending) {
+        qemu_coroutine_yield();
+    }
+    ret = acb->ret;
+    qemu_aio_unref(acb);
+    return ret;
 }

 static int sd_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int sd_co_pdiscard(BlockDriverState *bs, int64_t offset,
     acb = sd_aio_setup(bs, &discard_iov, offset >> BDRV_SECTOR_BITS,
                        count >> BDRV_SECTOR_BITS);
     acb->aiocb_type = AIOCB_DISCARD_OBJ;
-    acb->aio_done_func = sd_finish_aiocb;

 retry:
     if (check_overlapping_aiocb(s, acb)) {
@@ -XXX,XX +XXX,XX @@ retry:
         goto retry;
     }

-    ret = sd_co_rw_vector(acb);
-    if (ret <= 0) {
-        QLIST_REMOVE(acb, aiocb_siblings);
-        qemu_co_queue_restart_all(&s->overlapping_queue);
-        qemu_aio_unref(acb);
-        return ret;
-    }
-
-    qemu_coroutine_yield();
+    sd_co_rw_vector(acb);

     QLIST_REMOVE(acb, aiocb_siblings);
     qemu_co_queue_restart_all(&s->overlapping_queue);
-
-    return acb->ret;
+    ret = acb->ret;
+    qemu_aio_unref(acb);
+    return ret;
 }

 static coroutine_fn int64_t
--
2.9.3
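The recurring pattern this patch converges on is a per-AIOCB in-flight
counter driven from two places only: the submitting coroutine and the
response handler. A condensed sketch of the two halves, using only names
from the patch above:

    /* Submitting coroutine (sd_write_done / sd_co_rw_vector /
     * sd_co_flush_to_disk): the ++/-- pair brackets the submission so a
     * completion racing in between cannot see nr_pending drop to zero
     * before the coroutine has decided whether to yield.
     */
    ++acb->nr_pending;
    add_aio_request(s, aio_req, &iov, 1, AIOCB_WRITE_UDATA);
    if (--acb->nr_pending) {
        qemu_coroutine_yield();   /* re-entered by the last completion */
    }

    /* Response handler (aio_read_response): */
    free_aio_req(s, aio_req);     /* drops acb->nr_pending by one */
    if (!acb->nr_pending) {
        qemu_coroutine_enter(acb->coroutine);
    }

This is what lets sd_co_rw_vector lose its return value and the callers
lose the aio_done_func indirection: control simply returns to the coroutine
once the counter hits zero.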
Old patch: sheepdog: reorganize check for overlapping requests

From: Paolo Bonzini <pbonzini@redhat.com>

Wrap the code that was copied repeatedly in the two functions,
sd_aio_setup and sd_aio_complete.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20161129113245.32724-6-pbonzini@redhat.com
Signed-off-by: Jeff Cody <jcody@redhat.com>
---
 block/sheepdog.c | 66 ++++++++++++++++++++++++++------------------------
 1 file changed, 30 insertions(+), 36 deletions(-)

diff --git a/block/sheepdog.c b/block/sheepdog.c
index XXXXXXX..XXXXXXX 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -XXX,XX +XXX,XX @@ static inline AIOReq *alloc_aio_req(BDRVSheepdogState *s, SheepdogAIOCB *acb,
     return aio_req;
 }

+static void wait_for_overlapping_aiocb(BDRVSheepdogState *s, SheepdogAIOCB *acb)
+{
+    SheepdogAIOCB *cb;
+
+retry:
+    QLIST_FOREACH(cb, &s->inflight_aiocb_head, aiocb_siblings) {
+        if (AIOCBOverlapping(acb, cb)) {
+            qemu_co_queue_wait(&s->overlapping_queue);
+            goto retry;
+        }
+    }
+}
+
 static void sd_aio_setup(SheepdogAIOCB *acb, BDRVSheepdogState *s,
                          QEMUIOVector *qiov, int64_t sector_num, int nb_sectors,
                          int type)
@@ -XXX,XX +XXX,XX @@ static void sd_aio_setup(SheepdogAIOCB *acb, BDRVSheepdogState *s,
     acb->min_dirty_data_idx = UINT32_MAX;
     acb->max_dirty_data_idx = 0;
     acb->aiocb_type = type;
+
+    if (type == AIOCB_FLUSH_CACHE) {
+        return;
+    }
+
+    wait_for_overlapping_aiocb(s, acb);
+    QLIST_INSERT_HEAD(&s->inflight_aiocb_head, acb, aiocb_siblings);
 }

 /* Return -EIO in case of error, file descriptor on success */
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn sd_co_rw_vector(SheepdogAIOCB *acb)
     }
 }

-static bool check_overlapping_aiocb(BDRVSheepdogState *s, SheepdogAIOCB *aiocb)
+static void sd_aio_complete(SheepdogAIOCB *acb)
 {
-    SheepdogAIOCB *cb;
-
-    QLIST_FOREACH(cb, &s->inflight_aiocb_head, aiocb_siblings) {
-        if (AIOCBOverlapping(aiocb, cb)) {
-            return true;
-        }
+    if (acb->aiocb_type == AIOCB_FLUSH_CACHE) {
+        return;
     }

-    QLIST_INSERT_HEAD(&s->inflight_aiocb_head, aiocb, aiocb_siblings);
-    return false;
+    QLIST_REMOVE(acb, aiocb_siblings);
+    qemu_co_queue_restart_all(&acb->s->overlapping_queue);
 }

 static coroutine_fn int sd_co_writev(BlockDriverState *bs, int64_t sector_num,
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int sd_co_writev(BlockDriverState *bs, int64_t sector_num,
     }

     sd_aio_setup(&acb, s, qiov, sector_num, nb_sectors, AIOCB_WRITE_UDATA);
-
-retry:
-    if (check_overlapping_aiocb(s, &acb)) {
-        qemu_co_queue_wait(&s->overlapping_queue);
-        goto retry;
-    }
-
     sd_co_rw_vector(&acb);
     sd_write_done(&acb);
+    sd_aio_complete(&acb);

-    QLIST_REMOVE(&acb, aiocb_siblings);
-    qemu_co_queue_restart_all(&s->overlapping_queue);
     return acb.ret;
 }

@@ -XXX,XX +XXX,XX @@ static coroutine_fn int sd_co_readv(BlockDriverState *bs, int64_t sector_num,
     BDRVSheepdogState *s = bs->opaque;

     sd_aio_setup(&acb, s, qiov, sector_num, nb_sectors, AIOCB_READ_UDATA);
-
-retry:
-    if (check_overlapping_aiocb(s, &acb)) {
-        qemu_co_queue_wait(&s->overlapping_queue);
-        goto retry;
-    }
-
     sd_co_rw_vector(&acb);
+    sd_aio_complete(&acb);

-    QLIST_REMOVE(&acb, aiocb_siblings);
-    qemu_co_queue_restart_all(&s->overlapping_queue);
     return acb.ret;
 }

@@ -XXX,XX +XXX,XX @@ static int coroutine_fn sd_co_flush_to_disk(BlockDriverState *bs)
     if (--acb.nr_pending) {
         qemu_coroutine_yield();
     }
+
+    sd_aio_complete(&acb);
     return acb.ret;
 }

@@ -XXX,XX +XXX,XX @@ static coroutine_fn int sd_co_pdiscard(BlockDriverState *bs, int64_t offset,
     }
     sd_aio_setup(&acb, s, &discard_iov, offset >> BDRV_SECTOR_BITS,
                  count >> BDRV_SECTOR_BITS, AIOCB_DISCARD_OBJ);
-
-retry:
-    if (check_overlapping_aiocb(s, &acb)) {
-        qemu_co_queue_wait(&s->overlapping_queue);
-        goto retry;
-    }
-
     sd_co_rw_vector(&acb);
+    sd_aio_complete(&acb);

-    QLIST_REMOVE(&acb, aiocb_siblings);
-    qemu_co_queue_restart_all(&s->overlapping_queue);
     return acb.ret;
 }

--
2.9.3

New patch: aio-posix: Unregister fd from ctx epoll when removing fd_handler.

From: Remy Noel <remy.noel@blade-group.com>

Cleaning the events will cause aio_epoll_update to unregister the fd.
Otherwise, the fd is kept registered until it is destroyed.

Signed-off-by: Remy Noel <remy.noel@blade-group.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20181220152030.28035-2-remy.noel@blade-group.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 util/aio-posix.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/util/aio-posix.c b/util/aio-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx,
             QLIST_REMOVE(node, node);
             deleted = true;
         }
+        /* Clean events in order to unregister fd from the ctx epoll. */
+        node->pfd.events = 0;
+
         poll_disable_change = -!node->io_poll;
     } else {
         poll_disable_change = !io_poll - (node && !node->io_poll);
--
2.20.1
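The three added lines above rely on how aio_epoll_update treats a handler
whose event mask is empty; that function is not shown in the patch. A rough
sketch of the intended effect, assuming epoll_ctl bookkeeping along these
lines inside util/aio-posix.c (sketch only, not the actual implementation):

    if (!node->pfd.events) {
        /* handler removed: drop the fd from the epoll set */
        epoll_ctl(ctx->epollfd, EPOLL_CTL_DEL, node->pfd.fd, NULL);
    } else {
        struct epoll_event event = {
            .data.ptr = node,
            .events   = epoll_events_from_pfd(node->pfd.events),
        };
        epoll_ctl(ctx->epollfd, is_new ? EPOLL_CTL_ADD : EPOLL_CTL_MOD,
                  node->pfd.fd, &event);
    }

Before the fix, a removed handler kept its old event mask, so its fd stayed
registered with epoll until the AioHandler was finally freed.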
Old patch: sheepdog: do not use BlockAIOCB

From: Paolo Bonzini <pbonzini@redhat.com>

Sheepdog's AIOCB are completely internal entities for a group of
requests and do not need dynamic allocation.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20161129113245.32724-4-pbonzini@redhat.com
Signed-off-by: Jeff Cody <jcody@redhat.com>
---
 block/sheepdog.c | 99 ++++++++++++++++++++++----------------------------
 1 file changed, 39 insertions(+), 60 deletions(-)

diff --git a/block/sheepdog.c b/block/sheepdog.c
index XXXXXXX..XXXXXXX 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -XXX,XX +XXX,XX @@ static inline size_t count_data_objs(const struct SheepdogInode *inode)
 } while (0)

 typedef struct SheepdogAIOCB SheepdogAIOCB;
+typedef struct BDRVSheepdogState BDRVSheepdogState;

 typedef struct AIOReq {
     SheepdogAIOCB *aiocb;
@@ -XXX,XX +XXX,XX @@ enum AIOCBState {
         || y->max_affect_data_idx < x->min_affect_data_idx))

 struct SheepdogAIOCB {
-    BlockAIOCB common;
+    BDRVSheepdogState *s;

     QEMUIOVector *qiov;

@@ -XXX,XX +XXX,XX @@ struct SheepdogAIOCB {
     QLIST_ENTRY(SheepdogAIOCB) aiocb_siblings;
 };

-typedef struct BDRVSheepdogState {
+struct BDRVSheepdogState {
     BlockDriverState *bs;
     AioContext *aio_context;

@@ -XXX,XX +XXX,XX @@ typedef struct BDRVSheepdogState {

     CoQueue overlapping_queue;
     QLIST_HEAD(inflight_aiocb_head, SheepdogAIOCB) inflight_aiocb_head;
-} BDRVSheepdogState;
+};

 typedef struct BDRVSheepdogReopenState {
     int fd;
@@ -XXX,XX +XXX,XX @@ static inline void free_aio_req(BDRVSheepdogState *s, AIOReq *aio_req)
     acb->nr_pending--;
 }

-static const AIOCBInfo sd_aiocb_info = {
-    .aiocb_size = sizeof(SheepdogAIOCB),
-};
-
-static SheepdogAIOCB *sd_aio_setup(BlockDriverState *bs, QEMUIOVector *qiov,
-                                   int64_t sector_num, int nb_sectors)
+static void sd_aio_setup(SheepdogAIOCB *acb, BDRVSheepdogState *s,
+                         QEMUIOVector *qiov, int64_t sector_num, int nb_sectors,
+                         int type)
 {
-    SheepdogAIOCB *acb;
     uint32_t object_size;
-    BDRVSheepdogState *s = bs->opaque;

     object_size = (UINT32_C(1) << s->inode.block_size_shift);

-    acb = qemu_aio_get(&sd_aiocb_info, bs, NULL, NULL);
+    acb->s = s;

     acb->qiov = qiov;

@@ -XXX,XX +XXX,XX @@ static SheepdogAIOCB *sd_aio_setup(BlockDriverState *bs, QEMUIOVector *qiov,

     acb->min_dirty_data_idx = UINT32_MAX;
     acb->max_dirty_data_idx = 0;
-
-    return acb;
+    acb->aiocb_type = type;
 }

 /* Return -EIO in case of error, file descriptor on success */
@@ -XXX,XX +XXX,XX @@ static int sd_truncate(BlockDriverState *bs, int64_t offset)
 */
 static void coroutine_fn sd_write_done(SheepdogAIOCB *acb)
 {
-    BDRVSheepdogState *s = acb->common.bs->opaque;
+    BDRVSheepdogState *s = acb->s;
     struct iovec iov;
     AIOReq *aio_req;
     uint32_t offset, data_len, mn, mx;
@@ -XXX,XX +XXX,XX @@ out:
 * Returns 1 when we need to wait a response, 0 when there is no sent
 * request and -errno in error cases.
 */
-static void coroutine_fn sd_co_rw_vector(void *p)
+static void coroutine_fn sd_co_rw_vector(SheepdogAIOCB *acb)
 {
-    SheepdogAIOCB *acb = p;
     int ret = 0;
     unsigned long len, done = 0, total = acb->nb_sectors * BDRV_SECTOR_SIZE;
     unsigned long idx;
     uint32_t object_size;
     uint64_t oid;
     uint64_t offset;
-    BDRVSheepdogState *s = acb->common.bs->opaque;
+    BDRVSheepdogState *s = acb->s;
     SheepdogInode *inode = &s->inode;
     AIOReq *aio_req;

@@ -XXX,XX +XXX,XX @@ static bool check_overlapping_aiocb(BDRVSheepdogState *s, SheepdogAIOCB *aiocb)
 static coroutine_fn int sd_co_writev(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, QEMUIOVector *qiov)
 {
-    SheepdogAIOCB *acb;
+    SheepdogAIOCB acb;
     int ret;
     int64_t offset = (sector_num + nb_sectors) * BDRV_SECTOR_SIZE;
     BDRVSheepdogState *s = bs->opaque;
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int sd_co_writev(BlockDriverState *bs, int64_t sector_num,
         }
     }

-    acb = sd_aio_setup(bs, qiov, sector_num, nb_sectors);
-    acb->aiocb_type = AIOCB_WRITE_UDATA;
+    sd_aio_setup(&acb, s, qiov, sector_num, nb_sectors, AIOCB_WRITE_UDATA);

 retry:
-    if (check_overlapping_aiocb(s, acb)) {
+    if (check_overlapping_aiocb(s, &acb)) {
         qemu_co_queue_wait(&s->overlapping_queue);
         goto retry;
     }

-    sd_co_rw_vector(acb);
-    sd_write_done(acb);
+    sd_co_rw_vector(&acb);
+    sd_write_done(&acb);

-    QLIST_REMOVE(acb, aiocb_siblings);
+    QLIST_REMOVE(&acb, aiocb_siblings);
     qemu_co_queue_restart_all(&s->overlapping_queue);
-    ret = acb->ret;
-    qemu_aio_unref(acb);
-    return ret;
+    return acb.ret;
 }

 static coroutine_fn int sd_co_readv(BlockDriverState *bs, int64_t sector_num,
                                     int nb_sectors, QEMUIOVector *qiov)
 {
-    SheepdogAIOCB *acb;
-    int ret;
+    SheepdogAIOCB acb;
     BDRVSheepdogState *s = bs->opaque;

-    acb = sd_aio_setup(bs, qiov, sector_num, nb_sectors);
-    acb->aiocb_type = AIOCB_READ_UDATA;
+    sd_aio_setup(&acb, s, qiov, sector_num, nb_sectors, AIOCB_READ_UDATA);

 retry:
-    if (check_overlapping_aiocb(s, acb)) {
+    if (check_overlapping_aiocb(s, &acb)) {
         qemu_co_queue_wait(&s->overlapping_queue);
         goto retry;
     }

-    sd_co_rw_vector(acb);
+    sd_co_rw_vector(&acb);

-    QLIST_REMOVE(acb, aiocb_siblings);
+    QLIST_REMOVE(&acb, aiocb_siblings);
     qemu_co_queue_restart_all(&s->overlapping_queue);
-    ret = acb->ret;
-    qemu_aio_unref(acb);
-    return ret;
+    return acb.ret;
 }

 static int coroutine_fn sd_co_flush_to_disk(BlockDriverState *bs)
 {
     BDRVSheepdogState *s = bs->opaque;
-    SheepdogAIOCB *acb;
-    int ret;
+    SheepdogAIOCB acb;
     AIOReq *aio_req;

     if (s->cache_flags != SD_FLAG_CMD_CACHE) {
         return 0;
     }

-    acb = sd_aio_setup(bs, NULL, 0, 0);
-    acb->aiocb_type = AIOCB_FLUSH_CACHE;
+    sd_aio_setup(&acb, s, NULL, 0, 0, AIOCB_FLUSH_CACHE);

-    acb->nr_pending++;
-    aio_req = alloc_aio_req(s, acb, vid_to_vdi_oid(s->inode.vdi_id),
+    acb.nr_pending++;
+    aio_req = alloc_aio_req(s, &acb, vid_to_vdi_oid(s->inode.vdi_id),
                             0, 0, 0, false, 0, 0);
     QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings);
-    add_aio_request(s, aio_req, NULL, 0, acb->aiocb_type);
+    add_aio_request(s, aio_req, NULL, 0, acb.aiocb_type);

-    if (--acb->nr_pending) {
+    if (--acb.nr_pending) {
         qemu_coroutine_yield();
     }
-    ret = acb->ret;
-    qemu_aio_unref(acb);
-    return ret;
+    return acb.ret;
 }

 static int sd_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
@@ -XXX,XX +XXX,XX @@ static int sd_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
 static coroutine_fn int sd_co_pdiscard(BlockDriverState *bs, int64_t offset,
                                        int count)
 {
-    SheepdogAIOCB *acb;
+    SheepdogAIOCB acb;
     BDRVSheepdogState *s = bs->opaque;
-    int ret;
     QEMUIOVector discard_iov;
     struct iovec iov;
     uint32_t zero = 0;
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int sd_co_pdiscard(BlockDriverState *bs, int64_t offset,
     if (!QEMU_IS_ALIGNED(offset | count, BDRV_SECTOR_SIZE)) {
         return -ENOTSUP;
     }
-    acb = sd_aio_setup(bs, &discard_iov, offset >> BDRV_SECTOR_BITS,
-                       count >> BDRV_SECTOR_BITS);
-    acb->aiocb_type = AIOCB_DISCARD_OBJ;
+    sd_aio_setup(&acb, s, &discard_iov, offset >> BDRV_SECTOR_BITS,
+                 count >> BDRV_SECTOR_BITS, AIOCB_DISCARD_OBJ);

 retry:
-    if (check_overlapping_aiocb(s, acb)) {
+    if (check_overlapping_aiocb(s, &acb)) {
         qemu_co_queue_wait(&s->overlapping_queue);
         goto retry;
     }

-    sd_co_rw_vector(acb);
+    sd_co_rw_vector(&acb);

-    QLIST_REMOVE(acb, aiocb_siblings);
+    QLIST_REMOVE(&acb, aiocb_siblings);
     qemu_co_queue_restart_all(&s->overlapping_queue);
-    ret = acb->ret;
-    qemu_aio_unref(acb);
-    return ret;
+    return acb.ret;
 }

 static coroutine_fn int64_t
--
2.9.3

New patch: aio-posix: Fix concurrent aio_poll/set_fd_handler.

From: Remy Noel <remy.noel@blade-group.com>

It is possible for an io_poll callback to be concurrently executed along
with an aio_set_fd_handler call. This can cause all sorts of problems,
like a NULL callback or a bad opaque pointer.

This changes aio_set_fd_handler so that it no longer modifies existing
handler entries and instead always inserts new ones after they have been
properly initialised.

Tested-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Remy Noel <remy.noel@blade-group.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20181220152030.28035-3-remy.noel@blade-group.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 util/aio-posix.c | 89 ++++++++++++++++++++++++++++--------------------
 util/aio-win32.c | 67 ++++++++++++++++--------------------
 2 files changed, 82 insertions(+), 74 deletions(-)

diff --git a/util/aio-posix.c b/util/aio-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -XXX,XX +XXX,XX @@ static AioHandler *find_aio_handler(AioContext *ctx, int fd)
     return NULL;
 }

+static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
+{
+    /* If the GSource is in the process of being destroyed then
+     * g_source_remove_poll() causes an assertion failure.  Skip
+     * removal in that case, because glib cleans up its state during
+     * destruction anyway.
+     */
+    if (!g_source_is_destroyed(&ctx->source)) {
+        g_source_remove_poll(&ctx->source, &node->pfd);
+    }
+
+    /* If a read is in progress, just mark the node as deleted */
+    if (qemu_lockcnt_count(&ctx->list_lock)) {
+        node->deleted = 1;
+        node->pfd.revents = 0;
+        return false;
+    }
+    /* Otherwise, delete it for real.  We can't just mark it as
+     * deleted because deleted nodes are only cleaned up while
+     * no one is walking the handlers list.
+     */
+    QLIST_REMOVE(node, node);
+    return true;
+}
+
 void aio_set_fd_handler(AioContext *ctx,
                         int fd,
                         bool is_external,
@@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx,
                         void *opaque)
 {
     AioHandler *node;
+    AioHandler *new_node = NULL;
     bool is_new = false;
     bool deleted = false;
     int poll_disable_change;
@@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx,
             qemu_lockcnt_unlock(&ctx->list_lock);
             return;
         }
-
-        /* If the GSource is in the process of being destroyed then
-         * g_source_remove_poll() causes an assertion failure.  Skip
-         * removal in that case, because glib cleans up its state during
-         * destruction anyway.
-         */
-        if (!g_source_is_destroyed(&ctx->source)) {
-            g_source_remove_poll(&ctx->source, &node->pfd);
-        }
-
-        /* If a read is in progress, just mark the node as deleted */
-        if (qemu_lockcnt_count(&ctx->list_lock)) {
-            node->deleted = 1;
-            node->pfd.revents = 0;
-        } else {
-            /* Otherwise, delete it for real.  We can't just mark it as
-             * deleted because deleted nodes are only cleaned up while
-             * no one is walking the handlers list.
-             */
-            QLIST_REMOVE(node, node);
-            deleted = true;
-        }
         /* Clean events in order to unregister fd from the ctx epoll. */
         node->pfd.events = 0;

@@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx,
     } else {
         poll_disable_change = !io_poll - (node && !node->io_poll);
         if (node == NULL) {
-            /* Alloc and insert if it's not already there */
-            node = g_new0(AioHandler, 1);
-            node->pfd.fd = fd;
-            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
-
-            g_source_add_poll(&ctx->source, &node->pfd);
             is_new = true;
         }
+        /* Alloc and insert if it's not already there */
+        new_node = g_new0(AioHandler, 1);

         /* Update handler with latest information */
-        node->io_read = io_read;
-        node->io_write = io_write;
-        node->io_poll = io_poll;
-        node->opaque = opaque;
-        node->is_external = is_external;
+        new_node->io_read = io_read;
+        new_node->io_write = io_write;
+        new_node->io_poll = io_poll;
+        new_node->opaque = opaque;
+        new_node->is_external = is_external;

-        node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
-        node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
+        if (is_new) {
+            new_node->pfd.fd = fd;
+        } else {
+            new_node->pfd = node->pfd;
+        }
+        g_source_add_poll(&ctx->source, &new_node->pfd);
+
+        new_node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
+        new_node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
+
+        QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, new_node, node);
+    }
+    if (node) {
+        deleted = aio_remove_fd_handler(ctx, node);
     }

     /* No need to order poll_disable_cnt writes against other updates;
@@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx,
     atomic_set(&ctx->poll_disable_cnt,
                atomic_read(&ctx->poll_disable_cnt) + poll_disable_change);

-    aio_epoll_update(ctx, node, is_new);
+    if (new_node) {
+        aio_epoll_update(ctx, new_node, is_new);
+    } else if (node) {
+        /* Unregister deleted fd_handler */
+        aio_epoll_update(ctx, node, false);
+    }
     qemu_lockcnt_unlock(&ctx->list_lock);
     aio_notify(ctx);

diff --git a/util/aio-win32.c b/util/aio-win32.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-win32.c
+++ b/util/aio-win32.c
@@ -XXX,XX +XXX,XX @@ struct AioHandler {
     QLIST_ENTRY(AioHandler) node;
 };

+static void aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
+{
+    /* If aio_poll is in progress, just mark the node as deleted */
+    if (qemu_lockcnt_count(&ctx->list_lock)) {
+        node->deleted = 1;
+        node->pfd.revents = 0;
+    } else {
+        /* Otherwise, delete it for real.  We can't just mark it as
+         * deleted because deleted nodes are only cleaned up after
+         * releasing the list_lock.
+         */
+        QLIST_REMOVE(node, node);
+        g_free(node);
+    }
+}
+
 void aio_set_fd_handler(AioContext *ctx,
                         int fd,
                         bool is_external,
@@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx,
                         void *opaque)
 {
     /* fd is a SOCKET in our case */
-    AioHandler *node;
+    AioHandler *old_node;
+    AioHandler *node = NULL;

     qemu_lockcnt_lock(&ctx->list_lock);
-    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
-        if (node->pfd.fd == fd && !node->deleted) {
+    QLIST_FOREACH(old_node, &ctx->aio_handlers, node) {
+        if (old_node->pfd.fd == fd && !old_node->deleted) {
             break;
         }
     }

-    /* Are we deleting the fd handler? */
-    if (!io_read && !io_write) {
-        if (node) {
-            /* If aio_poll is in progress, just mark the node as deleted */
-            if (qemu_lockcnt_count(&ctx->list_lock)) {
-                node->deleted = 1;
-                node->pfd.revents = 0;
-            } else {
-                /* Otherwise, delete it for real. We can't just mark it as
-                 * deleted because deleted nodes are only cleaned up after
-                 * releasing the list_lock.
-                 */
-                QLIST_REMOVE(node, node);
-                g_free(node);
-            }
-        }
-    } else {
+    if (io_read || io_write) {
         HANDLE event;
         long bitmask = 0;

-        if (node == NULL) {
-            /* Alloc and insert if it's not already there */
-            node = g_new0(AioHandler, 1);
-            node->pfd.fd = fd;
-            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
-        }
+        /* Alloc and insert if it's not already there */
+        node = g_new0(AioHandler, 1);
+        node->pfd.fd = fd;

         node->pfd.events = 0;
         if (node->io_read) {
@@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx,
             bitmask |= FD_WRITE | FD_CONNECT;
         }

+        QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
         event = event_notifier_get_handle(&ctx->notifier);
         WSAEventSelect(node->pfd.fd, event, bitmask);
     }
+    if (old_node) {
+        aio_remove_fd_handler(ctx, old_node);
+    }

     qemu_lockcnt_unlock(&ctx->list_lock);
     aio_notify(ctx);
@@ -XXX,XX +XXX,XX @@ void aio_set_event_notifier(AioContext *ctx,
     if (node) {
         g_source_remove_poll(&ctx->source, &node->pfd);

-        /* aio_poll is in progress, just mark the node as deleted */
-        if (qemu_lockcnt_count(&ctx->list_lock)) {
-            node->deleted = 1;
-            node->pfd.revents = 0;
-        } else {
-            /* Otherwise, delete it for real. We can't just mark it as
-             * deleted because deleted nodes are only cleaned up after
-             * releasing the list_lock.
-             */
-            QLIST_REMOVE(node, node);
-            g_free(node);
-        }
+        aio_remove_fd_handler(ctx, node);
     }
 } else {
     if (node == NULL) {
--
2.20.1
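The fix in the new patch follows the usual publish-then-retire discipline:
a handler visible to concurrent readers (aio_poll() walking
ctx->aio_handlers and possibly calling node->io_poll(node->opaque)) is
never modified in place; a fully initialised replacement is linked in first
and the old node is retired afterwards. In outline (names from the patch;
the pfd copying and poll_disable_cnt bookkeeping are omitted):

    AioHandler *new_node = g_new0(AioHandler, 1);
    new_node->io_read  = io_read;      /* initialise completely ...    */
    new_node->io_write = io_write;
    new_node->io_poll  = io_poll;
    new_node->opaque   = opaque;
    QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, new_node, node); /* publish */
    if (node) {
        deleted = aio_remove_fd_handler(ctx, node);  /* retire old entry */
    }

A reader therefore always sees either the complete old (io_poll, opaque)
pair or the complete new one, never a half-updated mix.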
Deleted patch: sheepdog: simplify inflight_aio_head management

From: Paolo Bonzini <pbonzini@redhat.com>

Add to the list in add_aio_request and, indirectly, resend_aioreq.  Inline
free_aio_req in the caller, it does not simply undo alloc_aio_req's job.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20161129113245.32724-5-pbonzini@redhat.com
Signed-off-by: Jeff Cody <jcody@redhat.com>
---
 block/sheepdog.c | 23 ++++++-----------------
 1 file changed, 6 insertions(+), 17 deletions(-)

diff --git a/block/sheepdog.c b/block/sheepdog.c
index XXXXXXX..XXXXXXX 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -XXX,XX +XXX,XX @@ static inline AIOReq *alloc_aio_req(BDRVSheepdogState *s, SheepdogAIOCB *acb,
     return aio_req;
 }

-static inline void free_aio_req(BDRVSheepdogState *s, AIOReq *aio_req)
-{
-    SheepdogAIOCB *acb = aio_req->aiocb;
-
-    QLIST_REMOVE(aio_req, aio_siblings);
-    g_free(aio_req);
-
-    acb->nr_pending--;
-}
-
 static void sd_aio_setup(SheepdogAIOCB *acb, BDRVSheepdogState *s,
                          QEMUIOVector *qiov, int64_t sector_num, int nb_sectors,
                          int type)
@@ -XXX,XX +XXX,XX @@ static coroutine_fn void reconnect_to_sdog(void *opaque)
     while (!QLIST_EMPTY(&s->failed_aio_head)) {
         aio_req = QLIST_FIRST(&s->failed_aio_head);
         QLIST_REMOVE(aio_req, aio_siblings);
-        QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings);
         resend_aioreq(s, aio_req);
     }
 }
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn aio_read_response(void *opaque)
      */
     s->co_recv = NULL;

+    QLIST_REMOVE(aio_req, aio_siblings);
     switch (rsp.result) {
     case SD_RES_SUCCESS:
         break;
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn aio_read_response(void *opaque)
         break;
     }

-    free_aio_req(s, aio_req);
-    if (!acb->nr_pending) {
+    g_free(aio_req);
+
+    if (!--acb->nr_pending) {
         /*
          * We've finished all requests which belong to the AIOCB, so
          * we can switch back to sd_co_readv/writev now.
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
     uint64_t old_oid = aio_req->base_oid;
     bool create = aio_req->create;

+    QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings);
+
     if (!nr_copies) {
         error_report("bug");
     }
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn sd_write_done(SheepdogAIOCB *acb)
     iov.iov_len = sizeof(s->inode);
     aio_req = alloc_aio_req(s, acb, vid_to_vdi_oid(s->inode.vdi_id),
                             data_len, offset, 0, false, 0, offset);
-    QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings);
     add_aio_request(s, aio_req, &iov, 1, AIOCB_WRITE_UDATA);
     if (--acb->nr_pending) {
         qemu_coroutine_yield();
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn sd_co_rw_vector(SheepdogAIOCB *acb)
                                 old_oid,
                                 acb->aiocb_type == AIOCB_DISCARD_OBJ ?
                                 0 : done);
-        QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings);
-
         add_aio_request(s, aio_req, acb->qiov->iov, acb->qiov->niov,
                         acb->aiocb_type);
 done:
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn sd_co_flush_to_disk(BlockDriverState *bs)
     acb.nr_pending++;
     aio_req = alloc_aio_req(s, &acb, vid_to_vdi_oid(s->inode.vdi_id),
                             0, 0, 0, false, 0, 0);
-    QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings);
     add_aio_request(s, aio_req, NULL, 0, acb.aiocb_type);

     if (--acb.nr_pending) {
--
2.9.3
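With this patch the in-flight list gains a single producer and a single
consumer: add_aio_request() links every request (including resends, which
go through resend_aioreq() and back into add_aio_request()) onto
inflight_aio_head, and aio_read_response() unlinks it when the reply
arrives. The resulting life cycle, condensed from the hunks above:

    /* submit (add_aio_request): */
    QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings);

    /* complete (aio_read_response): */
    QLIST_REMOVE(aio_req, aio_siblings);
    g_free(aio_req);
    if (!--acb->nr_pending) {
        qemu_coroutine_enter(acb->coroutine);
    }

free_aio_req() could be dropped because it no longer paired with
alloc_aio_req(): allocation no longer inserts into any list, so completion
is just the unlink plus g_free() shown above.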