The following changes since commit d992f2f1368ceb92e6bfd8efece174110f4236ff:

  Merge remote-tracking branch 'remotes/artyom/tags/pull-sun4v-20170226' into staging (2017-02-26 22:40:23 +0000)

are available in the git repository at:

  git://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 1ab17f9f5c63c2798d707aeb22588e4fcc17b2cd:

  tests-aio-multithread: use atomic_read properly (2017-02-27 14:00:53 +0000)

----------------------------------------------------------------

----------------------------------------------------------------

Paolo Bonzini (4):
  curl: do not use aio_context_acquire/release
  nfs: do not use aio_context_acquire/release
  iscsi: do not use aio_context_acquire/release
  tests-aio-multithread: use atomic_read properly

 block/curl.c                 | 24 ++++++++-----
 block/iscsi.c                | 83 ++++++++++++++++++++++++++++++++++----------
 block/nfs.c                  | 23 +++++++++---
 tests/test-aio-multithread.c |  4 +--
 4 files changed, 100 insertions(+), 34 deletions(-)

--
2.9.3
From: Paolo Bonzini <pbonzini@redhat.com>

Now that all bottom halves and callbacks take care of taking the
AioContext lock, we can migrate some users away from it and to a
specific QemuMutex or CoMutex.

Protect BDRVCURLState access with a QemuMutex.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20170222180725.28611-2-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/curl.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/block/curl.c b/block/curl.c
index XXXXXXX..XXXXXXX 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -XXX,XX +XXX,XX @@ typedef struct BDRVCURLState {
     char *cookie;
     bool accept_range;
     AioContext *aio_context;
+    QemuMutex mutex;
     char *username;
     char *password;
     char *proxyusername;
@@ -XXX,XX +XXX,XX @@ static int curl_find_buf(BDRVCURLState *s, size_t start, size_t len,
     return FIND_RET_NONE;
 }

+/* Called with s->mutex held. */
 static void curl_multi_check_completion(BDRVCURLState *s)
 {
     int msgs_in_queue;
@@ -XXX,XX +XXX,XX @@ static void curl_multi_check_completion(BDRVCURLState *s)
                 continue;
             }

+            qemu_mutex_unlock(&s->mutex);
             acb->common.cb(acb->common.opaque, -EPROTO);
+            qemu_mutex_lock(&s->mutex);
             qemu_aio_unref(acb);
             state->acb[i] = NULL;
         }
@@ -XXX,XX +XXX,XX @@ static void curl_multi_check_completion(BDRVCURLState *s)
         }
     }
 }

+/* Called with s->mutex held. */
 static void curl_multi_do_locked(CURLState *s)
 {
     CURLSocket *socket, *next_socket;
@@ -XXX,XX +XXX,XX @@ static void curl_multi_do(void *arg)
 {
     CURLState *s = (CURLState *)arg;

-    aio_context_acquire(s->s->aio_context);
+    qemu_mutex_lock(&s->s->mutex);
     curl_multi_do_locked(s);
-    aio_context_release(s->s->aio_context);
+    qemu_mutex_unlock(&s->s->mutex);
 }

 static void curl_multi_read(void *arg)
 {
     CURLState *s = (CURLState *)arg;

-    aio_context_acquire(s->s->aio_context);
+    qemu_mutex_lock(&s->s->mutex);
     curl_multi_do_locked(s);
     curl_multi_check_completion(s->s);
-    aio_context_release(s->s->aio_context);
+    qemu_mutex_unlock(&s->s->mutex);
 }

 static void curl_multi_timeout_do(void *arg)
@@ -XXX,XX +XXX,XX @@ static void curl_multi_timeout_do(void *arg)
         return;
     }

-    aio_context_acquire(s->aio_context);
+    qemu_mutex_lock(&s->mutex);
     curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);

     curl_multi_check_completion(s);
-    aio_context_release(s->aio_context);
+    qemu_mutex_unlock(&s->mutex);
 #else
     abort();
 #endif
@@ -XXX,XX +XXX,XX @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
     curl_easy_cleanup(state->curl);
     state->curl = NULL;

+    qemu_mutex_init(&s->mutex);
     curl_attach_aio_context(bs, bdrv_get_aio_context(bs));

     qemu_opts_del(opts);
@@ -XXX,XX +XXX,XX @@ static void curl_readv_bh_cb(void *p)
     CURLAIOCB *acb = p;
     BlockDriverState *bs = acb->common.bs;
     BDRVCURLState *s = bs->opaque;
-    AioContext *ctx = bdrv_get_aio_context(bs);

     size_t start = acb->sector_num * BDRV_SECTOR_SIZE;
     size_t end;

-    aio_context_acquire(ctx);
+    qemu_mutex_lock(&s->mutex);

     // In case we have the requested data already (e.g. read-ahead),
     // we can just call the callback and be done.
@@ -XXX,XX +XXX,XX @@ static void curl_readv_bh_cb(void *p)
     curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);

 out:
-    aio_context_release(ctx);
+    qemu_mutex_unlock(&s->mutex);
     if (ret != -EINPROGRESS) {
         acb->common.cb(acb->common.opaque, ret);
         qemu_aio_unref(acb);
@@ -XXX,XX +XXX,XX @@ static void curl_close(BlockDriverState *bs)

     DPRINTF("CURL: Close\n");
     curl_detach_aio_context(bs);
+    qemu_mutex_destroy(&s->mutex);

     g_free(s->cookie);
     g_free(s->url);
--
2.9.3
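The subtle point in this patch is the unlock/lock pair around acb->common.cb():
curl completions run user callbacks that may re-enter the driver and take the
same mutex, so the lock has to be dropped across them. Below is a minimal
self-contained pthreads sketch of that pattern; DriverState, complete_request
and my_cb are hypothetical names, not the actual block/curl.c code (which uses
QemuMutex rather than bare pthreads):

    #include <pthread.h>
    #include <stdio.h>

    /* Hypothetical driver state standing in for BDRVCURLState. */
    typedef struct {
        pthread_mutex_t mutex;   /* plays the role of s->mutex */
        int inflight;            /* protected by mutex */
    } DriverState;

    typedef void (*CompletionCB)(void *opaque, int ret);

    /* Called with state->mutex held, like curl_multi_check_completion().
     * The mutex is dropped around the user callback because the callback
     * may re-enter the driver and try to take the same mutex. */
    static void complete_request(DriverState *state, CompletionCB cb,
                                 void *opaque)
    {
        state->inflight--;
        pthread_mutex_unlock(&state->mutex);
        cb(opaque, 0);
        pthread_mutex_lock(&state->mutex);
    }

    static void my_cb(void *opaque, int ret)
    {
        printf("request done, ret=%d\n", ret);
    }

    int main(void)
    {
        DriverState state = { .inflight = 1 };

        pthread_mutex_init(&state.mutex, NULL);
        pthread_mutex_lock(&state.mutex);
        complete_request(&state, my_cb, NULL);
        pthread_mutex_unlock(&state.mutex);
        pthread_mutex_destroy(&state.mutex);
        return 0;
    }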
From: Paolo Bonzini <pbonzini@redhat.com>

Now that all bottom halves and callbacks take care of taking the
AioContext lock, we can migrate some users away from it and to a
specific QemuMutex or CoMutex.

Protect libnfs calls with a QemuMutex. Callbacks are invoked
using bottom halves, so we don't even have to drop it around
callback invocations.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20170222180725.28611-3-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/nfs.c | 23 +++++++++++++++++++----
 1 file changed, 19 insertions(+), 4 deletions(-)

diff --git a/block/nfs.c b/block/nfs.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -XXX,XX +XXX,XX @@ typedef struct NFSClient {
     int events;
     bool has_zero_init;
     AioContext *aio_context;
+    QemuMutex mutex;
     blkcnt_t st_blocks;
     bool cache_used;
     NFSServer *server;
@@ -XXX,XX +XXX,XX @@ static void nfs_parse_filename(const char *filename, QDict *options,
 static void nfs_process_read(void *arg);
 static void nfs_process_write(void *arg);

+/* Called with QemuMutex held. */
 static void nfs_set_events(NFSClient *client)
 {
     int ev = nfs_which_events(client->context);
@@ -XXX,XX +XXX,XX @@ static void nfs_process_read(void *arg)
 {
     NFSClient *client = arg;

-    aio_context_acquire(client->aio_context);
+    qemu_mutex_lock(&client->mutex);
     nfs_service(client->context, POLLIN);
     nfs_set_events(client);
-    aio_context_release(client->aio_context);
+    qemu_mutex_unlock(&client->mutex);
 }

 static void nfs_process_write(void *arg)
 {
     NFSClient *client = arg;

-    aio_context_acquire(client->aio_context);
+    qemu_mutex_lock(&client->mutex);
     nfs_service(client->context, POLLOUT);
     nfs_set_events(client);
-    aio_context_release(client->aio_context);
+    qemu_mutex_unlock(&client->mutex);
 }

 static void nfs_co_init_task(BlockDriverState *bs, NFSRPC *task)
@@ -XXX,XX +XXX,XX @@ static void nfs_co_generic_bh_cb(void *opaque)
     aio_co_wake(task->co);
 }

+/* Called (via nfs_service) with QemuMutex held. */
 static void
 nfs_co_generic_cb(int ret, struct nfs_context *nfs, void *data,
                   void *private_data)
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, uint64_t offset,
     nfs_co_init_task(bs, &task);
     task.iov = iov;

+    qemu_mutex_lock(&client->mutex);
     if (nfs_pread_async(client->context, client->fh,
                         offset, bytes, nfs_co_generic_cb, &task) != 0) {
+        qemu_mutex_unlock(&client->mutex);
         return -ENOMEM;
     }

     nfs_set_events(client);
+    qemu_mutex_unlock(&client->mutex);
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, uint64_t offset,
         buf = iov->iov[0].iov_base;
     }

+    qemu_mutex_lock(&client->mutex);
     if (nfs_pwrite_async(client->context, client->fh,
                          offset, bytes, buf,
                          nfs_co_generic_cb, &task) != 0) {
+        qemu_mutex_unlock(&client->mutex);
         if (my_buffer) {
             g_free(buf);
         }
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, uint64_t offset,
     }

     nfs_set_events(client);
+    qemu_mutex_unlock(&client->mutex);
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn nfs_co_flush(BlockDriverState *bs)

     nfs_co_init_task(bs, &task);

+    qemu_mutex_lock(&client->mutex);
     if (nfs_fsync_async(client->context, client->fh, nfs_co_generic_cb,
                         &task) != 0) {
+        qemu_mutex_unlock(&client->mutex);
         return -ENOMEM;
     }

     nfs_set_events(client);
+    qemu_mutex_unlock(&client->mutex);
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -XXX,XX +XXX,XX @@ static void nfs_file_close(BlockDriverState *bs)
 {
     NFSClient *client = bs->opaque;
     nfs_client_close(client);
+    qemu_mutex_destroy(&client->mutex);
 }

 static NFSServer *nfs_config(QDict *options, Error **errp)
@@ -XXX,XX +XXX,XX @@ static int nfs_file_open(BlockDriverState *bs, QDict *options, int flags,
     if (ret < 0) {
         return ret;
     }
+    qemu_mutex_init(&client->mutex);
     bs->total_sectors = ret;
     ret = 0;
     return ret;
@@ -XXX,XX +XXX,XX @@ static int nfs_has_zero_init(BlockDriverState *bs)
     return client->has_zero_init;
 }

+/* Called (via nfs_service) with QemuMutex held. */
 static void
 nfs_get_allocated_file_size_cb(int ret, struct nfs_context *nfs, void *data,
                                void *private_data)
--
2.9.3
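The locking discipline the coroutine paths end up with is: take the mutex,
issue the async call (unlocking on the error path), update the poll events,
drop the mutex, then wait for the bottom half to mark the task complete. A
rough self-contained pthreads analogue of that shape; Client and
fake_pread_async are hypothetical stand-ins for NFSClient and nfs_pread_async,
and the condition variable stands in for the coroutine yield/wake:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical client; the mutex protects the (non-thread-safe)
     * library context, as client->mutex protects libnfs calls. */
    typedef struct {
        pthread_mutex_t mutex;
        pthread_cond_t cond;    /* stands in for the coroutine wakeup */
        bool complete;          /* stands in for task.complete */
    } Client;

    /* Stands in for nfs_pread_async() plus its bottom-half completion;
     * here the request simply completes immediately, under the lock. */
    static int fake_pread_async(Client *client)
    {
        client->complete = true;
        pthread_cond_signal(&client->cond);
        return 0;
    }

    static int do_read(Client *client)
    {
        pthread_mutex_lock(&client->mutex);
        if (fake_pread_async(client) != 0) {
            pthread_mutex_unlock(&client->mutex);  /* error path unlocks */
            return -1;
        }
        /* Don't hold the lock while blocked: pthread_cond_wait drops and
         * retakes the mutex, loosely mirroring how the real code unlocks
         * before its qemu_coroutine_yield() loop. */
        while (!client->complete) {
            pthread_cond_wait(&client->cond, &client->mutex);
        }
        pthread_mutex_unlock(&client->mutex);
        return 0;
    }

    int main(void)
    {
        Client client = { .complete = false };

        pthread_mutex_init(&client.mutex, NULL);
        pthread_cond_init(&client.cond, NULL);
        printf("read returned %d\n", do_read(&client));
        pthread_cond_destroy(&client.cond);
        pthread_mutex_destroy(&client.mutex);
        return 0;
    }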
From: Paolo Bonzini <pbonzini@redhat.com>

Now that all bottom halves and callbacks take care of taking the
AioContext lock, we can migrate some users away from it and to a
specific QemuMutex or CoMutex.

Protect libiscsi calls with a QemuMutex. Callbacks are invoked
using bottom halves, so we don't even have to drop it around
callback invocations.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20170222180725.28611-4-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/iscsi.c | 83 +++++++++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 64 insertions(+), 19 deletions(-)

diff --git a/block/iscsi.c b/block/iscsi.c
index XXXXXXX..XXXXXXX 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -XXX,XX +XXX,XX @@ typedef struct IscsiLun {
     int events;
     QEMUTimer *nop_timer;
     QEMUTimer *event_timer;
+    QemuMutex mutex;
     struct scsi_inquiry_logical_block_provisioning lbp;
     struct scsi_inquiry_block_limits bl;
     unsigned char *zeroblock;
@@ -XXX,XX +XXX,XX @@ static int iscsi_translate_sense(struct scsi_sense *sense)
     return ret;
 }

+/* Called (via iscsi_service) with QemuMutex held. */
 static void
 iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
                     void *command_data, void *opaque)
@@ -XXX,XX +XXX,XX @@ static const AIOCBInfo iscsi_aiocb_info = {
 static void iscsi_process_read(void *arg);
 static void iscsi_process_write(void *arg);

+/* Called with QemuMutex held. */
 static void
 iscsi_set_events(IscsiLun *iscsilun)
 {
@@ -XXX,XX +XXX,XX @@ iscsi_process_read(void *arg)
     IscsiLun *iscsilun = arg;
     struct iscsi_context *iscsi = iscsilun->iscsi;

-    aio_context_acquire(iscsilun->aio_context);
+    qemu_mutex_lock(&iscsilun->mutex);
     iscsi_service(iscsi, POLLIN);
     iscsi_set_events(iscsilun);
-    aio_context_release(iscsilun->aio_context);
+    qemu_mutex_unlock(&iscsilun->mutex);
 }

 static void
@@ -XXX,XX +XXX,XX @@ iscsi_process_write(void *arg)
     IscsiLun *iscsilun = arg;
     struct iscsi_context *iscsi = iscsilun->iscsi;

-    aio_context_acquire(iscsilun->aio_context);
+    qemu_mutex_lock(&iscsilun->mutex);
     iscsi_service(iscsi, POLLOUT);
     iscsi_set_events(iscsilun);
-    aio_context_release(iscsilun->aio_context);
+    qemu_mutex_unlock(&iscsilun->mutex);
 }

 static int64_t sector_lun2qemu(int64_t sector, IscsiLun *iscsilun)
@@ -XXX,XX +XXX,XX @@ iscsi_co_writev_flags(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
     uint64_t lba;
     uint32_t num_sectors;
     bool fua = flags & BDRV_REQ_FUA;
+    int r = 0;

     if (fua) {
         assert(iscsilun->dpofua);
@@ -XXX,XX +XXX,XX @@ iscsi_co_writev_flags(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
     lba = sector_qemu2lun(sector_num, iscsilun);
     num_sectors = sector_qemu2lun(nb_sectors, iscsilun);
     iscsi_co_init_iscsitask(iscsilun, &iTask);
+    qemu_mutex_lock(&iscsilun->mutex);
 retry:
     if (iscsilun->use_16_for_rw) {
 #if LIBISCSI_API_VERSION >= (20160603)
@@ -XXX,XX +XXX,XX @@ retry:
 #endif
     while (!iTask.complete) {
         iscsi_set_events(iscsilun);
+        qemu_mutex_unlock(&iscsilun->mutex);
         qemu_coroutine_yield();
+        qemu_mutex_lock(&iscsilun->mutex);
     }

     if (iTask.task != NULL) {
@@ -XXX,XX +XXX,XX @@ retry:

     if (iTask.status != SCSI_STATUS_GOOD) {
         iscsi_allocmap_set_invalid(iscsilun, sector_num, nb_sectors);
-        return iTask.err_code;
+        r = iTask.err_code;
+        goto out_unlock;
     }

     iscsi_allocmap_set_allocated(iscsilun, sector_num, nb_sectors);

-    return 0;
+out_unlock:
+    qemu_mutex_unlock(&iscsilun->mutex);
+    return r;
 }


@@ -XXX,XX +XXX,XX @@ static int64_t coroutine_fn iscsi_co_get_block_status(BlockDriverState *bs,
         goto out;
     }

+    qemu_mutex_lock(&iscsilun->mutex);
 retry:
     if (iscsi_get_lba_status_task(iscsilun->iscsi, iscsilun->lun,
                                   sector_qemu2lun(sector_num, iscsilun),
                                   8 + 16, iscsi_co_generic_cb,
                                   &iTask) == NULL) {
         ret = -ENOMEM;
-        goto out;
+        goto out_unlock;
     }

     while (!iTask.complete) {
         iscsi_set_events(iscsilun);
+        qemu_mutex_unlock(&iscsilun->mutex);
         qemu_coroutine_yield();
+        qemu_mutex_lock(&iscsilun->mutex);
     }

     if (iTask.do_retry) {
@@ -XXX,XX +XXX,XX @@ retry:
          * because the device is busy or the cmd is not
          * supported) we pretend all blocks are allocated
          * for backwards compatibility */
-        goto out;
+        goto out_unlock;
     }

     lbas = scsi_datain_unmarshall(iTask.task);
     if (lbas == NULL) {
         ret = -EIO;
-        goto out;
+        goto out_unlock;
     }

     lbasd = &lbas->descriptors[0];

     if (sector_qemu2lun(sector_num, iscsilun) != lbasd->lba) {
         ret = -EIO;
-        goto out;
+        goto out_unlock;
     }

     *pnum = sector_lun2qemu(lbasd->num_blocks, iscsilun);
@@ -XXX,XX +XXX,XX @@ retry:
     if (*pnum > nb_sectors) {
         *pnum = nb_sectors;
     }
+out_unlock:
+    qemu_mutex_unlock(&iscsilun->mutex);
 out:
     if (iTask.task != NULL) {
         scsi_free_scsi_task(iTask.task);
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn iscsi_co_readv(BlockDriverState *bs,
     num_sectors = sector_qemu2lun(nb_sectors, iscsilun);

     iscsi_co_init_iscsitask(iscsilun, &iTask);
+    qemu_mutex_lock(&iscsilun->mutex);
 retry:
     if (iscsilun->use_16_for_rw) {
 #if LIBISCSI_API_VERSION >= (20160603)
@@ -XXX,XX +XXX,XX @@ retry:
 #endif
     while (!iTask.complete) {
         iscsi_set_events(iscsilun);
+        qemu_mutex_unlock(&iscsilun->mutex);
         qemu_coroutine_yield();
+        qemu_mutex_lock(&iscsilun->mutex);
     }

     if (iTask.task != NULL) {
@@ -XXX,XX +XXX,XX @@ retry:
         iTask.complete = 0;
         goto retry;
     }
+    qemu_mutex_unlock(&iscsilun->mutex);

     if (iTask.status != SCSI_STATUS_GOOD) {
         return iTask.err_code;
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn iscsi_co_flush(BlockDriverState *bs)
     struct IscsiTask iTask;

     iscsi_co_init_iscsitask(iscsilun, &iTask);
+    qemu_mutex_lock(&iscsilun->mutex);
 retry:
     if (iscsi_synchronizecache10_task(iscsilun->iscsi, iscsilun->lun, 0, 0, 0,
                                       0, iscsi_co_generic_cb, &iTask) == NULL) {
@@ -XXX,XX +XXX,XX @@ retry:

     while (!iTask.complete) {
         iscsi_set_events(iscsilun);
+        qemu_mutex_unlock(&iscsilun->mutex);
         qemu_coroutine_yield();
+        qemu_mutex_lock(&iscsilun->mutex);
     }

     if (iTask.task != NULL) {
@@ -XXX,XX +XXX,XX @@ retry:
         iTask.complete = 0;
         goto retry;
     }
+    qemu_mutex_unlock(&iscsilun->mutex);

     if (iTask.status != SCSI_STATUS_GOOD) {
         return iTask.err_code;
@@ -XXX,XX +XXX,XX @@ retry:
     }

 #ifdef __linux__
+/* Called (via iscsi_service) with QemuMutex held. */
 static void
 iscsi_aio_ioctl_cb(struct iscsi_context *iscsi, int status,
                    void *command_data, void *opaque)
@@ -XXX,XX +XXX,XX @@ static BlockAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
     acb->task->expxferlen = acb->ioh->dxfer_len;

     data.size = 0;
+    qemu_mutex_lock(&iscsilun->mutex);
     if (acb->task->xfer_dir == SCSI_XFER_WRITE) {
         if (acb->ioh->iovec_count == 0) {
             data.data = acb->ioh->dxferp;
@@ -XXX,XX +XXX,XX @@ static BlockAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
                      iscsi_aio_ioctl_cb,
                      (data.size > 0) ? &data : NULL,
                      acb) != 0) {
+        qemu_mutex_unlock(&iscsilun->mutex);
         scsi_free_scsi_task(acb->task);
         qemu_aio_unref(acb);
         return NULL;
@@ -XXX,XX +XXX,XX @@ static BlockAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
     }

     iscsi_set_events(iscsilun);
+    qemu_mutex_unlock(&iscsilun->mutex);

     return &acb->common;
 }
@@ -XXX,XX +XXX,XX @@ coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int count)
     IscsiLun *iscsilun = bs->opaque;
     struct IscsiTask iTask;
     struct unmap_list list;
+    int r = 0;

     if (!is_byte_request_lun_aligned(offset, count, iscsilun)) {
         return -ENOTSUP;
@@ -XXX,XX +XXX,XX @@ coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int count)
     list.num = count / iscsilun->block_size;

     iscsi_co_init_iscsitask(iscsilun, &iTask);
+    qemu_mutex_lock(&iscsilun->mutex);
 retry:
     if (iscsi_unmap_task(iscsilun->iscsi, iscsilun->lun, 0, 0, &list, 1,
                          iscsi_co_generic_cb, &iTask) == NULL) {
-        return -ENOMEM;
+        r = -ENOMEM;
+        goto out_unlock;
     }

     while (!iTask.complete) {
         iscsi_set_events(iscsilun);
+        qemu_mutex_unlock(&iscsilun->mutex);
         qemu_coroutine_yield();
+        qemu_mutex_lock(&iscsilun->mutex);
     }

     if (iTask.task != NULL) {
@@ -XXX,XX +XXX,XX @@ retry:
         /* the target might fail with a check condition if it
            is not happy with the alignment of the UNMAP request
            we silently fail in this case */
-        return 0;
+        goto out_unlock;
     }

     if (iTask.status != SCSI_STATUS_GOOD) {
-        return iTask.err_code;
+        r = iTask.err_code;
+        goto out_unlock;
     }

     iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS,
                                count >> BDRV_SECTOR_BITS);

-    return 0;
+out_unlock:
+    qemu_mutex_unlock(&iscsilun->mutex);
+    return r;
 }

 static int
@@ -XXX,XX +XXX,XX @@ coroutine_fn iscsi_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
     uint64_t lba;
     uint32_t nb_blocks;
     bool use_16_for_ws = iscsilun->use_16_for_rw;
+    int r = 0;

     if (!is_byte_request_lun_aligned(offset, count, iscsilun)) {
         return -ENOTSUP;
@@ -XXX,XX +XXX,XX @@ coroutine_fn iscsi_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
         }
     }

+    qemu_mutex_lock(&iscsilun->mutex);
     iscsi_co_init_iscsitask(iscsilun, &iTask);
 retry:
     if (use_16_for_ws) {
@@ -XXX,XX +XXX,XX @@ retry:

     while (!iTask.complete) {
         iscsi_set_events(iscsilun);
+        qemu_mutex_unlock(&iscsilun->mutex);
         qemu_coroutine_yield();
+        qemu_mutex_lock(&iscsilun->mutex);
     }

     if (iTask.status == SCSI_STATUS_CHECK_CONDITION &&
@@ -XXX,XX +XXX,XX @@ retry:
         /* WRITE SAME is not supported by the target */
         iscsilun->has_write_same = false;
         scsi_free_scsi_task(iTask.task);
-        return -ENOTSUP;
+        r = -ENOTSUP;
+        goto out_unlock;
     }

     if (iTask.task != NULL) {
@@ -XXX,XX +XXX,XX @@ retry:
     if (iTask.status != SCSI_STATUS_GOOD) {
         iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS,
                                    count >> BDRV_SECTOR_BITS);
-        return iTask.err_code;
+        r = iTask.err_code;
+        goto out_unlock;
     }

     if (flags & BDRV_REQ_MAY_UNMAP) {
@@ -XXX,XX +XXX,XX @@ retry:
                                    count >> BDRV_SECTOR_BITS);
     }

-    return 0;
+out_unlock:
+    qemu_mutex_unlock(&iscsilun->mutex);
+    return r;
 }

 static void apply_chap(struct iscsi_context *iscsi, QemuOpts *opts,
@@ -XXX,XX +XXX,XX @@ static void iscsi_nop_timed_event(void *opaque)
 {
     IscsiLun *iscsilun = opaque;

-    aio_context_acquire(iscsilun->aio_context);
+    qemu_mutex_lock(&iscsilun->mutex);
     if (iscsi_get_nops_in_flight(iscsilun->iscsi) >= MAX_NOP_FAILURES) {
         error_report("iSCSI: NOP timeout. Reconnecting...");
         iscsilun->request_timed_out = true;
@@ -XXX,XX +XXX,XX @@ static void iscsi_nop_timed_event(void *opaque)
     iscsi_set_events(iscsilun);

 out:
-    aio_context_release(iscsilun->aio_context);
+    qemu_mutex_unlock(&iscsilun->mutex);
 }

 static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)
@@ -XXX,XX +XXX,XX @@ static int iscsi_open(BlockDriverState *bs, QDict *options, int flags,
     scsi_free_scsi_task(task);
     task = NULL;

+    qemu_mutex_init(&iscsilun->mutex);
     iscsi_attach_aio_context(bs, iscsilun->aio_context);

     /* Guess the internal cluster (page) size of the iscsi target by the means
@@ -XXX,XX +XXX,XX @@ static void iscsi_close(BlockDriverState *bs)
     iscsi_destroy_context(iscsi);
     g_free(iscsilun->zeroblock);
     iscsi_allocmap_free(iscsilun);
+    qemu_mutex_destroy(&iscsilun->mutex);
     memset(iscsilun, 0, sizeof(IscsiLun));
 }

--
2.9.3
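The recurring shape in the hunks above is the wait loop: the mutex is released
before each qemu_coroutine_yield() and retaken afterwards, so iTask state is
only ever examined under the lock while other requests can make progress
during the yield. A small thread-based sketch of that unlock/yield/lock loop;
Lun, completer and wait_for_completion are hypothetical names, with
sched_yield() standing in for the coroutine yield:

    #include <pthread.h>
    #include <sched.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical LUN state standing in for IscsiLun. */
    typedef struct {
        pthread_mutex_t mutex;
        bool complete;          /* stands in for iTask.complete */
    } Lun;

    /* Plays the role of iscsi_co_generic_cb(): runs under the mutex. */
    static void *completer(void *arg)
    {
        Lun *lun = arg;

        pthread_mutex_lock(&lun->mutex);
        lun->complete = true;
        pthread_mutex_unlock(&lun->mutex);
        return NULL;
    }

    /* Mirrors the patch: never hold the mutex while yielding, retake it
     * before re-checking state the callback may have changed. */
    static void wait_for_completion(Lun *lun)
    {
        pthread_mutex_lock(&lun->mutex);
        while (!lun->complete) {
            pthread_mutex_unlock(&lun->mutex);
            sched_yield();      /* stands in for qemu_coroutine_yield() */
            pthread_mutex_lock(&lun->mutex);
        }
        pthread_mutex_unlock(&lun->mutex);
    }

    int main(void)
    {
        Lun lun = { .complete = false };
        pthread_t tid;

        pthread_mutex_init(&lun.mutex, NULL);
        pthread_create(&tid, NULL, completer, &lun);
        wait_for_completion(&lun);
        pthread_join(tid, NULL);
        pthread_mutex_destroy(&lun.mutex);
        printf("task complete\n");
        return 0;
    }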
From: Paolo Bonzini <pbonzini@redhat.com>

nodes[id].next is written by other threads. If atomic_read is not used
(matching atomic_set in mcs_mutex_lock!) the compiler can optimize the
whole "if" away!

Reported-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Tested-by: Greg Kurz <groug@kaod.org>
Message-id: 20170227111726.9237-1-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 tests/test-aio-multithread.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/test-aio-multithread.c b/tests/test-aio-multithread.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/test-aio-multithread.c
+++ b/tests/test-aio-multithread.c
@@ -XXX,XX +XXX,XX @@ static void mcs_mutex_lock(void)
 static void mcs_mutex_unlock(void)
 {
     int next;
-    if (nodes[id].next == -1) {
+    if (atomic_read(&nodes[id].next) == -1) {
         if (atomic_read(&mutex_head) == id &&
             atomic_cmpxchg(&mutex_head, id, -1) == id) {
             /* Last item in the list, exit. */
@@ -XXX,XX +XXX,XX @@ static void mcs_mutex_unlock(void)
     }

     /* Wake up the next in line. */
-    next = nodes[id].next;
+    next = atomic_read(&nodes[id].next);
     nodes[next].locked = 0;
     qemu_futex_wake(&nodes[next].locked, 1);
 }
--
2.9.3
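To see why the plain load is dangerous: with no atomic or volatile
qualification, the compiler may assume no other thread modifies
nodes[id].next, so repeated reads can be collapsed into one and a spin on the
value can legally become an infinite loop. A compile-ready sketch of the
difference using C11 atomics, with atomic_load_explicit playing the role of
QEMU's atomic_read from qemu/atomic.h; the names are illustrative, not the
test's actual code:

    #include <stdatomic.h>

    static int plain_next = -1;           /* racy: written by another thread */
    static _Atomic int atomic_next = -1;  /* safe counterpart */

    /* With a plain load the compiler may hoist the read out of the loop;
     * if plain_next is -1 on entry this can become an infinite loop,
     * because a concurrent non-atomic write would be a data race. */
    void broken_wait(void)
    {
        while (plain_next == -1) {
            /* spin */
        }
    }

    /* A relaxed atomic load forces a fresh read on every iteration, so a
     * store from another thread is eventually observed. */
    void fixed_wait(void)
    {
        while (atomic_load_explicit(&atomic_next, memory_order_relaxed) == -1) {
            /* spin */
        }
    }

    /* The writer side, matching the atomic_set in mcs_mutex_lock. */
    void set_next(int v)
    {
        atomic_store_explicit(&atomic_next, v, memory_order_relaxed);
    }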