The following changes since commit 20d6c7312f1b812bb9c750f4087f69ac8485cc90:

  Merge remote-tracking branch 'remotes/palmer/tags/riscv-for-master-3.2-part1' into staging (2019-01-03 13:26:30 +0000)

are available in the Git repository at:

  git://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 39a0408e768cd00142f5b57d27ab234282bf4df5:

  dmg: don't skip zero chunk (2019-01-04 11:15:09 +0000)

----------------------------------------------------------------
Pull request

Bug fixes for the .dmg image file format.

----------------------------------------------------------------

Julio Faracco (1):
      dmg: Fixing wrong dmg block type value for block terminator.

yuchenlin (3):
      dmg: fix binary search
      dmg: use enumeration type instead of hard coding number
      dmg: don't skip zero chunk

 block/dmg.c | 31 ++++++++++++++++++++-----------
 1 file changed, 20 insertions(+), 11 deletions(-)

--
2.20.1

From: Julio Faracco <jcfaracco@gmail.com>

This is a trivial patch to fix a wrong value for the block terminator.
The old value, 0x7fffffff, was wrong. It did not affect the code, because
the QEMU dmg block driver does not handle the block terminator right now;
nevertheless, it should be fixed.
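
For context, the full block-type enumeration in block/dmg.c reads roughly
as follows once the terminator is fixed (an illustrative sketch recalled
from the tree, not part of this patch; only the UDLE line changes in the
diff below):

    enum {
        UDZE = 0,           /* Zeroes */
        UDRW,               /* RAW type */
        UDIG,               /* Ignore */
        UDCO = 0x80000004,  /* ADC compression, not handled by QEMU */
        UDZO,               /* Zlib */
        UDBZ,               /* Bzip2 */
        ULFO,               /* LZFSE */
        UDCM = 0x7ffffffe,  /* Comments */
        UDLE = 0xffffffff   /* Last Entry: the on-disk terminator value */
    };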

Signed-off-by: Julio Faracco <jcfaracco@gmail.com>
Reviewed-by: yuchenlin <yuchenlin@synology.com>
Message-id: 20181228145055.18039-1-jcfaracco@gmail.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/dmg.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/dmg.c b/block/dmg.c
index XXXXXXX..XXXXXXX 100644
--- a/block/dmg.c
+++ b/block/dmg.c
@@ -XXX,XX +XXX,XX @@ enum {
     UDBZ,
     ULFO,
     UDCM = 0x7ffffffe, /* Comments */
-    UDLE /* Last Entry */
+    UDLE = 0xffffffff /* Last Entry */
 };
 
 static int dmg_probe(const uint8_t *buf, int buf_size, const char *filename)
32
static void nfs_process_write(void *arg);
33
34
+/* Called with QemuMutex held. */
35
static void nfs_set_events(NFSClient *client)
36
{
37
int ev = nfs_which_events(client->context);
38
@@ -XXX,XX +XXX,XX @@ static void nfs_process_read(void *arg)
39
{
40
NFSClient *client = arg;
41
42
- aio_context_acquire(client->aio_context);
43
+ qemu_mutex_lock(&client->mutex);
44
nfs_service(client->context, POLLIN);
45
nfs_set_events(client);
46
- aio_context_release(client->aio_context);
47
+ qemu_mutex_unlock(&client->mutex);
48
}
49
50
static void nfs_process_write(void *arg)
51
{
52
NFSClient *client = arg;
53
54
- aio_context_acquire(client->aio_context);
55
+ qemu_mutex_lock(&client->mutex);
56
nfs_service(client->context, POLLOUT);
57
nfs_set_events(client);
58
- aio_context_release(client->aio_context);
59
+ qemu_mutex_unlock(&client->mutex);
60
}
61
62
static void nfs_co_init_task(BlockDriverState *bs, NFSRPC *task)
63
@@ -XXX,XX +XXX,XX @@ static void nfs_co_generic_bh_cb(void *opaque)
64
aio_co_wake(task->co);
65
}
66
67
+/* Called (via nfs_service) with QemuMutex held. */
68
static void
69
nfs_co_generic_cb(int ret, struct nfs_context *nfs, void *data,
70
void *private_data)
71
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, uint64_t offset,
72
nfs_co_init_task(bs, &task);
73
task.iov = iov;
74
75
+ qemu_mutex_lock(&client->mutex);
76
if (nfs_pread_async(client->context, client->fh,
77
offset, bytes, nfs_co_generic_cb, &task) != 0) {
78
+ qemu_mutex_unlock(&client->mutex);
79
return -ENOMEM;
80
}
81
82
nfs_set_events(client);
83
+ qemu_mutex_unlock(&client->mutex);
84
while (!task.complete) {
85
qemu_coroutine_yield();
86
}
87
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, uint64_t offset,
88
buf = iov->iov[0].iov_base;
89
}
90
91
+ qemu_mutex_lock(&client->mutex);
92
if (nfs_pwrite_async(client->context, client->fh,
93
offset, bytes, buf,
94
nfs_co_generic_cb, &task) != 0) {
95
+ qemu_mutex_unlock(&client->mutex);
96
if (my_buffer) {
97
g_free(buf);
98
}
99
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, uint64_t offset,
100
}
101
102
nfs_set_events(client);
103
+ qemu_mutex_unlock(&client->mutex);
104
while (!task.complete) {
105
qemu_coroutine_yield();
106
}
107
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn nfs_co_flush(BlockDriverState *bs)
108
109
nfs_co_init_task(bs, &task);
110
111
+ qemu_mutex_lock(&client->mutex);
112
if (nfs_fsync_async(client->context, client->fh, nfs_co_generic_cb,
113
&task) != 0) {
114
+ qemu_mutex_unlock(&client->mutex);
115
return -ENOMEM;
116
}
117
118
nfs_set_events(client);
119
+ qemu_mutex_unlock(&client->mutex);
120
while (!task.complete) {
121
qemu_coroutine_yield();
122
}
123
@@ -XXX,XX +XXX,XX @@ static void nfs_file_close(BlockDriverState *bs)
124
{
125
NFSClient *client = bs->opaque;
126
nfs_client_close(client);
127
+ qemu_mutex_destroy(&client->mutex);
128
}
129
130
static NFSServer *nfs_config(QDict *options, Error **errp)
131
@@ -XXX,XX +XXX,XX @@ static int nfs_file_open(BlockDriverState *bs, QDict *options, int flags,
132
if (ret < 0) {
133
return ret;
134
}
135
+ qemu_mutex_init(&client->mutex);
136
bs->total_sectors = ret;
137
ret = 0;
138
return ret;
139
@@ -XXX,XX +XXX,XX @@ static int nfs_has_zero_init(BlockDriverState *bs)
140
return client->has_zero_init;
141
}
142
143
+/* Called (via nfs_service) with QemuMutex held. */
144
static void
145
nfs_get_allocated_file_size_cb(int ret, struct nfs_context *nfs, void *data,
146
void *private_data)
147
--
29
--
148
2.9.3
30
2.20.1
149
31
150
32
diff view generated by jsdifflib
From: yuchenlin <npes87184@gmail.com>

There is a possible hang in the original binary search implementation:
if chunk1 = 4 and chunk2 = 5, then chunk3 = (4 + 5) / 2 = 4, and if we
take the else branch, chunk1 stays 4, so the same iteration repeats
forever.
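
As an illustration, here is the fixed search as a self-contained sketch
(simplified: plain arrays instead of BDRVDMGState, and the upper bound is
clamped to n_chunks - 1 so the sketch never indexes past the arrays):

    /* Return the index of the chunk covering sector_num, or n_chunks on
     * error. With inclusive bounds, every branch moves an endpoint past
     * chunk3, so the interval shrinks each iteration and cannot hang. */
    static uint32_t search_chunk(const uint64_t *sectors,
                                 const uint64_t *sectorcounts,
                                 uint32_t n_chunks, uint64_t sector_num)
    {
        uint32_t chunk1 = 0, chunk2, chunk3;

        if (n_chunks == 0) {
            return n_chunks;
        }
        chunk2 = n_chunks - 1;
        while (chunk1 <= chunk2) {
            chunk3 = (chunk1 + chunk2) / 2;
            if (sectors[chunk3] > sector_num) {
                if (chunk3 == 0) {
                    break;              /* sector_num precedes every chunk */
                }
                chunk2 = chunk3 - 1;    /* the old code kept chunk2 == chunk3 */
            } else if (sectors[chunk3] + sectorcounts[chunk3] > sector_num) {
                return chunk3;          /* sector_num falls inside chunk3 */
            } else {
                chunk1 = chunk3 + 1;    /* the old code kept chunk1 == chunk3 */
            }
        }
        return n_chunks;                /* error: no chunk covers sector_num */
    }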

Signed-off-by: yuchenlin <npes87184@gmail.com>
Message-id: 20190103114700.9686-2-npes87184@gmail.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/dmg.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/block/dmg.c b/block/dmg.c
index XXXXXXX..XXXXXXX 100644
--- a/block/dmg.c
+++ b/block/dmg.c
@@ -XXX,XX +XXX,XX @@ static inline uint32_t search_chunk(BDRVDMGState *s, uint64_t sector_num)
 {
     /* binary search */
     uint32_t chunk1 = 0, chunk2 = s->n_chunks, chunk3;
-    while (chunk1 != chunk2) {
+    while (chunk1 <= chunk2) {
         chunk3 = (chunk1 + chunk2) / 2;
         if (s->sectors[chunk3] > sector_num) {
-            chunk2 = chunk3;
+            if (chunk3 == 0) {
+                goto err;
+            }
+            chunk2 = chunk3 - 1;
         } else if (s->sectors[chunk3] + s->sectorcounts[chunk3] > sector_num) {
             return chunk3;
         } else {
-            chunk1 = chunk3;
+            chunk1 = chunk3 + 1;
         }
     }
 
+err:
     return s->n_chunks; /* error */
 }
 
--
2.20.1

From: yuchenlin <npes87184@gmail.com>

Signed-off-by: yuchenlin <npes87184@gmail.com>
Reviewed-by: Julio Faracco <jcfaracco@gmail.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20190103114700.9686-3-npes87184@gmail.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/dmg.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/block/dmg.c b/block/dmg.c
index XXXXXXX..XXXXXXX 100644
--- a/block/dmg.c
+++ b/block/dmg.c
@@ -XXX,XX +XXX,XX @@ static int dmg_read_mish_block(BDRVDMGState *s, DmgHeaderState *ds,
 
         /* all-zeroes sector (type 2) does not need to be "uncompressed" and can
          * therefore be unbounded. */
-        if (s->types[i] != 2 && s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
+        if (s->types[i] != UDIG && s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
             error_report("sector count %" PRIu64 " for chunk %" PRIu32
                          " is larger than max (%u)",
                          s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
@@ -XXX,XX +XXX,XX @@ dmg_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
         /* Special case: current chunk is all zeroes. Do not perform a memcpy as
          * s->uncompressed_chunk may be too small to cover the large all-zeroes
          * section. dmg_read_chunk is called to find s->current_chunk */
-        if (s->types[s->current_chunk] == 2) { /* all zeroes block entry */
+        if (s->types[s->current_chunk] == UDIG) { /* all zeroes block entry */
             qemu_iovec_memset(qiov, i * 512, 0, 512);
             continue;
         }
--
2.20.1

From: yuchenlin <npes87184@gmail.com>

A dmg file has many tables, each of which describes: "start from sector
XXX to sector XXX, the compression method is XXX, and the compressed
data resides at XXX".

Each sector of the expanded file should be covered by a table, and the
table gives the offset in the dmg of the compressed (or raw, depending
on the type) data.

For example:

[-----------The expanded file------------]
[---bzip table ---]/* zeros */[---zlib---]
                    ^
                    | if we want to read this sector.

we will find the bzip table which contains this sector, get the
compressed data offset, read it from the dmg, uncompress it, and finally
write it to the expanded file.

If we skip the zero chunk (table), some sectors have no covering table,
which makes search_chunk() return s->n_chunks and dmg_read_chunk()
return -1, finally causing dmg_co_preadv() to return EIO.

See:

[-----------The expanded file------------]
[---bzip table ---]/* zeros */[---zlib---]
                        ^
                        | if we want to read this sector.

Oops, we cannot find the table that contains it...

In the original implementation, we don't have a zero table; when we try
to read a sector inside the zero chunk, we get EIO and skip reading.

After this patch, we treat a zero chunk the same as an ignore chunk: it
directly writes zeroes, so no sector is left without a table.

After this patch:

[-----------The expanded file------------]
[---bzip table ---][--zeros--][---zlib---]
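
In other words, the patch restores the invariant that consecutive chunks
cover the expanded file without holes. A hypothetical checker for that
invariant (illustrative only, not part of this patch) could read:

    /* Every sector must belong to some chunk: each chunk has to start
     * exactly where the previous one ended, otherwise search_chunk()
     * returns s->n_chunks for the uncovered sectors and reads fail
     * with EIO. */
    static bool chunks_cover_disk(const BDRVDMGState *s)
    {
        for (uint32_t i = 1; i < s->n_chunks; i++) {
            if (s->sectors[i] != s->sectors[i - 1] + s->sectorcounts[i - 1]) {
                return false;   /* hole between chunk i - 1 and chunk i */
            }
        }
        return true;
    }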

Signed-off-by: yuchenlin <npes87184@gmail.com>
Reviewed-by: Julio Faracco <jcfaracco@gmail.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20190103114700.9686-4-npes87184@gmail.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/dmg.c | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/block/dmg.c b/block/dmg.c
index XXXXXXX..XXXXXXX 100644
--- a/block/dmg.c
+++ b/block/dmg.c
@@ -XXX,XX +XXX,XX @@ static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
     case UDRW: /* copy */
         uncompressed_sectors = DIV_ROUND_UP(s->lengths[chunk], 512);
         break;
-    case UDIG: /* zero */
+    case UDZE: /* zero */
+    case UDIG: /* ignore */
         /* as the all-zeroes block may be large, it is treated specially: the
          * sector is not copied from a large buffer, a simple memset is used
          * instead. Therefore uncompressed_sectors does not need to be set. */
@@ -XXX,XX +XXX,XX @@ typedef struct DmgHeaderState {
 static bool dmg_is_known_block_type(uint32_t entry_type)
 {
     switch (entry_type) {
+    case UDZE: /* zeros */
     case UDRW: /* uncompressed */
-    case UDIG: /* zeroes */
+    case UDIG: /* ignore */
     case UDZO: /* zlib */
         return true;
     case UDBZ: /* bzip2 */
@@ -XXX,XX +XXX,XX @@ static int dmg_read_mish_block(BDRVDMGState *s, DmgHeaderState *ds,
         /* sector count */
         s->sectorcounts[i] = buff_read_uint64(buffer, offset + 0x10);
 
-        /* all-zeroes sector (type 2) does not need to be "uncompressed" and can
-         * therefore be unbounded. */
-        if (s->types[i] != UDIG && s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
+        /* all-zeroes sector (type UDZE and UDIG) does not need to be
+         * "uncompressed" and can therefore be unbounded. */
+        if (s->types[i] != UDZE && s->types[i] != UDIG
+            && s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
             error_report("sector count %" PRIu64 " for chunk %" PRIu32
                          " is larger than max (%u)",
                          s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
@@ -XXX,XX +XXX,XX @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
             return -1;
         }
         break;
-    case UDIG: /* zero */
+    case UDZE: /* zeros */
+    case UDIG: /* ignore */
         /* see dmg_read, it is treated specially. No buffer needs to be
          * pre-filled, the zeroes can be set directly. */
         break;
@@ -XXX,XX +XXX,XX @@ dmg_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
         /* Special case: current chunk is all zeroes. Do not perform a memcpy as
          * s->uncompressed_chunk may be too small to cover the large all-zeroes
          * section. dmg_read_chunk is called to find s->current_chunk */
-        if (s->types[s->current_chunk] == UDIG) { /* all zeroes block entry */
+        if (s->types[s->current_chunk] == UDZE
+            || s->types[s->current_chunk] == UDIG) { /* all zeroes block entry */
             qemu_iovec_memset(qiov, i * 512, 0, 512);
             continue;
         }
--
2.20.1