The following changes since commit f90ea7ba7c5ae7010ee0ce062207ae42530f57d6:

  Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20171012' into staging (2017-10-12 17:06:50 +0100)

are available in the git repository at:

  git://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to b867eaa17b3940760f51134e409cb0580dd3dde3:

  block/throttle.c: add bdrv_co_drain_begin/end callbacks (2017-10-13 12:38:41 +0100)

----------------------------------------------------------------

----------------------------------------------------------------

Manos Pitsidianakis (3):
  block: add bdrv_co_drain_end callback
  block: rename bdrv_co_drain to bdrv_co_drain_begin
  block/throttle.c: add bdrv_co_drain_begin/end callbacks

 include/block/block_int.h | 13 ++++++++++---
 block/io.c                | 48 +++++++++++++++++++++++++++++++++--------------
 block/qed.c               |  6 +++---
 block/throttle.c          | 18 ++++++++++++++++++
 4 files changed, 65 insertions(+), 20 deletions(-)

--
2.13.6

From: Manos Pitsidianakis <el13635@mail.ntua.gr>

BlockDriverState has a bdrv_co_drain() callback but no equivalent for
the end of the drain. The throttle driver (block/throttle.c) needs a way
to mark the end of the drain in order to toggle io_limits_disabled
correctly, thus bdrv_co_drain_end is needed.

Signed-off-by: Manos Pitsidianakis <el13635@mail.ntua.gr>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/block/block_int.h | 11 +++++++++--
 block/io.c                | 48 +++++++++++++++++++++++++++++++++--------------
 2 files changed, 43 insertions(+), 16 deletions(-)

diff --git a/include/block/block_int.h b/include/block/block_int.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -XXX,XX +XXX,XX @@ struct BlockDriver {
     int (*bdrv_probe_geometry)(BlockDriverState *bs, HDGeometry *geo);

     /**
-     * Drain and stop any internal sources of requests in the driver, and
-     * remain so until next I/O callback (e.g. bdrv_co_writev) is called.
+     * bdrv_co_drain is called if implemented in the beginning of a
+     * drain operation to drain and stop any internal sources of requests in
+     * the driver.
+     * bdrv_co_drain_end is called if implemented at the end of the drain.
+     *
+     * They should be used by the driver to e.g. manage scheduled I/O
+     * requests, or toggle an internal state. After the end of the drain new
+     * requests will continue normally.
      */
     void coroutine_fn (*bdrv_co_drain)(BlockDriverState *bs);
+    void coroutine_fn (*bdrv_co_drain_end)(BlockDriverState *bs);

     void (*bdrv_add_child)(BlockDriverState *parent, BlockDriverState *child,
                            Error **errp);
diff --git a/block/io.c b/block/io.c
index XXXXXXX..XXXXXXX 100644
--- a/block/io.c
+++ b/block/io.c
@@ -XXX,XX +XXX,XX @@ typedef struct {
     Coroutine *co;
     BlockDriverState *bs;
     bool done;
+    bool begin;
 } BdrvCoDrainData;

 static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
     BdrvCoDrainData *data = opaque;
     BlockDriverState *bs = data->bs;

-    bs->drv->bdrv_co_drain(bs);
+    if (data->begin) {
+        bs->drv->bdrv_co_drain(bs);
+    } else {
+        bs->drv->bdrv_co_drain_end(bs);
+    }

     /* Set data->done before reading bs->wakeup. */
     atomic_mb_set(&data->done, true);
     bdrv_wakeup(bs);
 }

-static void bdrv_drain_invoke(BlockDriverState *bs)
+static void bdrv_drain_invoke(BlockDriverState *bs, bool begin)
 {
-    BdrvCoDrainData data = { .bs = bs, .done = false };
+    BdrvCoDrainData data = { .bs = bs, .done = false, .begin = begin};

-    if (!bs->drv || !bs->drv->bdrv_co_drain) {
+    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain) ||
+            (!begin && !bs->drv->bdrv_co_drain_end)) {
         return;
     }

@@ -XXX,XX +XXX,XX @@ static void bdrv_drain_invoke(BlockDriverState *bs)
     BDRV_POLL_WHILE(bs, !data.done);
 }

-static bool bdrv_drain_recurse(BlockDriverState *bs)
+static bool bdrv_drain_recurse(BlockDriverState *bs, bool begin)
 {
     BdrvChild *child, *tmp;
     bool waited;

-    waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0);
-
     /* Ensure any pending metadata writes are submitted to bs->file. */
-    bdrv_drain_invoke(bs);
+    bdrv_drain_invoke(bs, begin);
+
+    /* Wait for drained requests to finish */
+    waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0);

     QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) {
         BlockDriverState *bs = child->bs;
@@ -XXX,XX +XXX,XX @@ static bool bdrv_drain_recurse(BlockDriverState *bs)
             */
            bdrv_ref(bs);
        }
-        waited |= bdrv_drain_recurse(bs);
+        waited |= bdrv_drain_recurse(bs, begin);
        if (in_main_loop) {
            bdrv_unref(bs);
        }
@@ -XXX,XX +XXX,XX @@ static void bdrv_co_drain_bh_cb(void *opaque)
     BlockDriverState *bs = data->bs;

     bdrv_dec_in_flight(bs);
-    bdrv_drained_begin(bs);
+    if (data->begin) {
+        bdrv_drained_begin(bs);
+    } else {
+        bdrv_drained_end(bs);
+    }
+
     data->done = true;
     aio_co_wake(co);
 }

-static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
+static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
+                                                bool begin)
 {
     BdrvCoDrainData data;

@@ -XXX,XX +XXX,XX @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
         .co = qemu_coroutine_self(),
         .bs = bs,
         .done = false,
+        .begin = begin,
     };
     bdrv_inc_in_flight(bs);
     aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
 void bdrv_drained_begin(BlockDriverState *bs)
 {
     if (qemu_in_coroutine()) {
-        bdrv_co_yield_to_drain(bs);
+        bdrv_co_yield_to_drain(bs, true);
         return;
     }

@@ -XXX,XX +XXX,XX @@ void bdrv_drained_begin(BlockDriverState *bs)
         bdrv_parent_drained_begin(bs);
     }

-    bdrv_drain_recurse(bs);
+    bdrv_drain_recurse(bs, true);
 }

 void bdrv_drained_end(BlockDriverState *bs)
 {
+    if (qemu_in_coroutine()) {
+        bdrv_co_yield_to_drain(bs, false);
+        return;
+    }
     assert(bs->quiesce_counter > 0);
     if (atomic_fetch_dec(&bs->quiesce_counter) > 1) {
         return;
     }

     bdrv_parent_drained_end(bs);
+    bdrv_drain_recurse(bs, false);
     aio_enable_external(bdrv_get_aio_context(bs));
 }

@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_begin(void)
         aio_context_acquire(aio_context);
         for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
             if (aio_context == bdrv_get_aio_context(bs)) {
-                waited |= bdrv_drain_recurse(bs);
+                waited |= bdrv_drain_recurse(bs, true);
             }
         }
         aio_context_release(aio_context);
@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_end(void)
         aio_context_acquire(aio_context);
         aio_enable_external(aio_context);
         bdrv_parent_drained_end(bs);
+        bdrv_drain_recurse(bs, false);
         aio_context_release(aio_context);
     }

--
2.13.6

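The commit message above spells out the driver-side contract: bdrv_co_drain
runs (if implemented) when a drained section begins, and the new
bdrv_co_drain_end runs when it ends. As a reader's sketch only, a driver with
an internal source of requests could hook the pair like this; the "blkdemo"
driver, its state field and the pause/resume idea are hypothetical, and only
the callback names and signatures come from the patch:

    /* hypothetical driver, sketch only, not part of this series */
    #include "block/block_int.h"

    typedef struct BlkDemoState {
        bool drain_paused;          /* true while a drained section is open */
    } BlkDemoState;

    static void coroutine_fn blkdemo_co_drain(BlockDriverState *bs)
    {
        BlkDemoState *s = bs->opaque;

        /* beginning of drain: stop generating new internal requests */
        s->drain_paused = true;
    }

    static void coroutine_fn blkdemo_co_drain_end(BlockDriverState *bs)
    {
        BlkDemoState *s = bs->opaque;

        /* end of drain: internal requests may be generated again */
        s->drain_paused = false;
    }

    static BlockDriver bdrv_blkdemo = {
        .format_name       = "blkdemo",
        .instance_size     = sizeof(BlkDemoState),
        .bdrv_co_drain     = blkdemo_co_drain,      /* drain begins */
        .bdrv_co_drain_end = blkdemo_co_drain_end,  /* drain ends   */
    };

Both callbacks stay optional: bdrv_drain_invoke() above checks each function
pointer separately before scheduling the coroutine that calls it.
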
From: Manos Pitsidianakis <el13635@mail.ntua.gr>

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Manos Pitsidianakis <el13635@mail.ntua.gr>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/block/block_int.h | 4 ++--
 block/io.c                | 4 ++--
 block/qed.c               | 6 +++---
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/include/block/block_int.h b/include/block/block_int.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -XXX,XX +XXX,XX @@ struct BlockDriver {
     int (*bdrv_probe_geometry)(BlockDriverState *bs, HDGeometry *geo);

     /**
-     * bdrv_co_drain is called if implemented in the beginning of a
+     * bdrv_co_drain_begin is called if implemented in the beginning of a
      * drain operation to drain and stop any internal sources of requests in
      * the driver.
      * bdrv_co_drain_end is called if implemented at the end of the drain.
@@ -XXX,XX +XXX,XX @@ struct BlockDriver {
      * requests, or toggle an internal state. After the end of the drain new
      * requests will continue normally.
      */
-    void coroutine_fn (*bdrv_co_drain)(BlockDriverState *bs);
+    void coroutine_fn (*bdrv_co_drain_begin)(BlockDriverState *bs);
     void coroutine_fn (*bdrv_co_drain_end)(BlockDriverState *bs);

     void (*bdrv_add_child)(BlockDriverState *parent, BlockDriverState *child,
                            Error **errp);
diff --git a/block/io.c b/block/io.c
index XXXXXXX..XXXXXXX 100644
--- a/block/io.c
+++ b/block/io.c
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
     BlockDriverState *bs = data->bs;

     if (data->begin) {
-        bs->drv->bdrv_co_drain(bs);
+        bs->drv->bdrv_co_drain_begin(bs);
     } else {
         bs->drv->bdrv_co_drain_end(bs);
     }
@@ -XXX,XX +XXX,XX @@ static void bdrv_drain_invoke(BlockDriverState *bs, bool begin)
 {
     BdrvCoDrainData data = { .bs = bs, .done = false, .begin = begin};

-    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain) ||
+    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
             (!begin && !bs->drv->bdrv_co_drain_end)) {
         return;
     }
diff --git a/block/qed.c b/block/qed.c
index XXXXXXX..XXXXXXX 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -XXX,XX +XXX,XX @@ static bool qed_plug_allocating_write_reqs(BDRVQEDState *s)
     assert(!s->allocating_write_reqs_plugged);
     if (s->allocating_acb != NULL) {
         /* Another allocating write came concurrently. This cannot happen
-         * from bdrv_qed_co_drain, but it can happen when the timer runs.
+         * from bdrv_qed_co_drain_begin, but it can happen when the timer runs.
          */
         qemu_co_mutex_unlock(&s->table_lock);
         return false;
@@ -XXX,XX +XXX,XX @@ static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
     }
 }

-static void coroutine_fn bdrv_qed_co_drain(BlockDriverState *bs)
+static void coroutine_fn bdrv_qed_co_drain_begin(BlockDriverState *bs)
 {
     BDRVQEDState *s = bs->opaque;

@@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_qed = {
     .bdrv_check = bdrv_qed_check,
     .bdrv_detach_aio_context = bdrv_qed_detach_aio_context,
     .bdrv_attach_aio_context = bdrv_qed_attach_aio_context,
-    .bdrv_co_drain = bdrv_qed_co_drain,
+    .bdrv_co_drain_begin = bdrv_qed_co_drain_begin,
 };

 static void bdrv_qed_init(void)
--
2.13.6

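After this rename the two hooks form a symmetric pair, bdrv_co_drain_begin and
bdrv_co_drain_end, and block/qed.c is the only driver this patch has to touch.
For any other driver the conversion is mechanical; a sketch with a placeholder
"mydrv" name (not a real driver) would look like:

    /* sketch only, "mydrv" is illustrative; requires block/block_int.h */
    static void coroutine_fn mydrv_co_drain_begin(BlockDriverState *bs)
    {
        /* quiesce internal sources of requests here */
    }

    static BlockDriver bdrv_mydrv = {
        .format_name         = "mydrv",
        .bdrv_co_drain_begin = mydrv_co_drain_begin,
        /* .bdrv_co_drain_end stays unset when there is nothing to undo */
    };
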
From: Manos Pitsidianakis <el13635@mail.ntua.gr>

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Manos Pitsidianakis <el13635@mail.ntua.gr>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/throttle.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/block/throttle.c b/block/throttle.c
index XXXXXXX..XXXXXXX 100644
--- a/block/throttle.c
+++ b/block/throttle.c
@@ -XXX,XX +XXX,XX @@ static bool throttle_recurse_is_first_non_filter(BlockDriverState *bs,
     return bdrv_recurse_is_first_non_filter(bs->file->bs, candidate);
 }

+static void coroutine_fn throttle_co_drain_begin(BlockDriverState *bs)
+{
+    ThrottleGroupMember *tgm = bs->opaque;
+    if (atomic_fetch_inc(&tgm->io_limits_disabled) == 0) {
+        throttle_group_restart_tgm(tgm);
+    }
+}
+
+static void coroutine_fn throttle_co_drain_end(BlockDriverState *bs)
+{
+    ThrottleGroupMember *tgm = bs->opaque;
+    assert(tgm->io_limits_disabled);
+    atomic_dec(&tgm->io_limits_disabled);
+}
+
 static BlockDriver bdrv_throttle = {
     .format_name = "throttle",
     .protocol_name = "throttle",
@@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_throttle = {
     .bdrv_reopen_abort = throttle_reopen_abort,
     .bdrv_co_get_block_status = bdrv_co_get_block_status_from_file,

+    .bdrv_co_drain_begin = throttle_co_drain_begin,
+    .bdrv_co_drain_end = throttle_co_drain_end,
+
     .is_filter = true,
 };

--
2.13.6

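The throttle driver is the user this series was written for: while a drained
section is open, I/O limits should not hold queued requests back, otherwise
throttling could delay exactly the requests the drain is waiting on. A sketch
of the calling side, with a made-up function name and no error handling, shows
where the two new callbacks end up being invoked:

    /* sketch only: "reconfigure_under_drain" is illustrative, not QEMU API;
     * bdrv_drained_begin()/bdrv_drained_end() come from block/block.h */
    static void reconfigure_under_drain(BlockDriverState *throttle_bs)
    {
        /* enters the drained section; throttle_co_drain_begin() runs, so
         * io_limits_disabled is incremented and queued throttled requests
         * are restarted and allowed to finish */
        bdrv_drained_begin(throttle_bs);

        /* ... graph manipulation, snapshot, reopen, etc. ... */

        /* leaves the drained section; throttle_co_drain_end() decrements
         * io_limits_disabled and throttling resumes for new requests */
        bdrv_drained_end(throttle_bs);
    }
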