The following changes since commit 7260438b7056469610ee166f7abe9ff8a26b8b16:

  Merge remote-tracking branch 'remotes/palmer/tags/riscv-for-master-3.2-part2' into staging (2019-01-14 11:41:43 +0000)

are available in the Git repository at:

  git://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to fef1660132b0f25bf2d275d7f986ddcfe19a4426:

  aio-posix: Fix concurrent aio_poll/set_fd_handler. (2019-01-14 14:09:41 +0000)

----------------------------------------------------------------
Pull request

No user-visible changes.

----------------------------------------------------------------

Remy Noel (2):
  aio-posix: Unregister fd from ctx epoll when removing fd_handler.
  aio-posix: Fix concurrent aio_poll/set_fd_handler.

 util/aio-posix.c | 90 +++++++++++++++++++++++++++++-------------------
 util/aio-win32.c | 67 ++++++++++++++++-------------------
 2 files changed, 84 insertions(+), 73 deletions(-)

--
2.20.1

From: Remy Noel <remy.noel@blade-group.com>

Cleaning the events will cause aio_epoll_update to unregister the fd.
Otherwise, the fd is kept registered until it is destroyed.
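
For illustration, a minimal sketch of the mechanism (not the QEMU code;
the helper name and its parameters are invented for this example). An
epoll-based loop can treat an update that requests no events as a
deregistration, which is exactly what clearing pfd.events relies on:

#include <stdbool.h>
#include <stdint.h>
#include <sys/epoll.h>

/* Hypothetical update helper: an empty event mask drops the fd's
 * registration instead of leaving a stale entry in the epoll set. */
static int epoll_update(int epfd, int fd, uint32_t events, bool registered)
{
    struct epoll_event ev = { .events = events, .data = { .fd = fd } };

    if (events == 0) {
        /* Nothing to wait for any more: unregister the fd. */
        return registered ? epoll_ctl(epfd, EPOLL_CTL_DEL, fd, NULL) : 0;
    }
    return epoll_ctl(epfd, registered ? EPOLL_CTL_MOD : EPOLL_CTL_ADD,
                     fd, &ev);
}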

Signed-off-by: Remy Noel <remy.noel@blade-group.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20181220152030.28035-2-remy.noel@blade-group.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 util/aio-posix.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/util/aio-posix.c b/util/aio-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx,
             QLIST_REMOVE(node, node);
             deleted = true;
         }
+        /* Clean events in order to unregister fd from the ctx epoll. */
+        node->pfd.events = 0;
+
 
         poll_disable_change = -!node->io_poll;
     } else {
         poll_disable_change = !io_poll - (node && !node->io_poll);
--
2.20.1

From: Remy Noel <remy.noel@blade-group.com>

It is possible for an io_poll callback to be executed concurrently with
aio_set_fd_handler. This can cause all sorts of problems, like a NULL
callback or a bad opaque pointer.

This changes aio_set_fd_handler so that it no longer modifies existing
handler entries; instead, it always inserts a fully initialised
replacement entry and retires the old one.
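
In a minimal sketch (illustrative types and names, not the QEMU code,
which uses QLIST_INSERT_HEAD_RCU and a lockcnt-protected list), the
pattern is: fully initialise a replacement node, publish it, and only
then retire the old node, so a concurrent reader never observes a
half-updated handler:

#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical handler entry; the field names are made up. */
typedef struct Handler {
    int fd;
    void (*io_read)(void *opaque);
    void *opaque;
    bool deleted;
    struct Handler *next;
} Handler;

/* Replace 'old' (may be NULL) with a freshly built node.  The old node
 * is never modified in place, only marked deleted; it is freed later,
 * once no reader can still hold a pointer to it. */
static void replace_handler(Handler **head, Handler *old, int fd,
                            void (*io_read)(void *), void *opaque)
{
    Handler *new_node = calloc(1, sizeof(*new_node));

    new_node->fd = fd;
    new_node->io_read = io_read;
    new_node->opaque = opaque;

    /* Publish only after full initialisation; QEMU's RCU list insert
     * provides the write barrier this step needs. */
    new_node->next = *head;
    *head = new_node;

    if (old) {
        old->deleted = true;
    }
}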

Tested-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Remy Noel <remy.noel@blade-group.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20181220152030.28035-3-remy.noel@blade-group.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 util/aio-posix.c | 89 ++++++++++++++++++++++++++++--------------------
 util/aio-win32.c | 67 ++++++++++++++++--------------------
 2 files changed, 82 insertions(+), 74 deletions(-)

diff --git a/util/aio-posix.c b/util/aio-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -XXX,XX +XXX,XX @@ static AioHandler *find_aio_handler(AioContext *ctx, int fd)
     return NULL;
 }
 
+static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
+{
+    /* If the GSource is in the process of being destroyed then
+     * g_source_remove_poll() causes an assertion failure. Skip
+     * removal in that case, because glib cleans up its state during
+     * destruction anyway.
+     */
+    if (!g_source_is_destroyed(&ctx->source)) {
+        g_source_remove_poll(&ctx->source, &node->pfd);
+    }
+
+    /* If a read is in progress, just mark the node as deleted */
+    if (qemu_lockcnt_count(&ctx->list_lock)) {
+        node->deleted = 1;
+        node->pfd.revents = 0;
+        return false;
+    }
+    /* Otherwise, delete it for real. We can't just mark it as
+     * deleted because deleted nodes are only cleaned up while
+     * no one is walking the handlers list.
+     */
+    QLIST_REMOVE(node, node);
+    return true;
+}
+
 void aio_set_fd_handler(AioContext *ctx,
                         int fd,
                         bool is_external,
@@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx,
                         void *opaque)
 {
     AioHandler *node;
+    AioHandler *new_node = NULL;
     bool is_new = false;
     bool deleted = false;
     int poll_disable_change;
@@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx,
             qemu_lockcnt_unlock(&ctx->list_lock);
             return;
         }
-
-        /* If the GSource is in the process of being destroyed then
-         * g_source_remove_poll() causes an assertion failure. Skip
-         * removal in that case, because glib cleans up its state during
-         * destruction anyway.
-         */
-        if (!g_source_is_destroyed(&ctx->source)) {
-            g_source_remove_poll(&ctx->source, &node->pfd);
-        }
-
-        /* If a read is in progress, just mark the node as deleted */
-        if (qemu_lockcnt_count(&ctx->list_lock)) {
-            node->deleted = 1;
-            node->pfd.revents = 0;
-        } else {
-            /* Otherwise, delete it for real. We can't just mark it as
-             * deleted because deleted nodes are only cleaned up while
-             * no one is walking the handlers list.
-             */
-            QLIST_REMOVE(node, node);
-            deleted = true;
-        }
         /* Clean events in order to unregister fd from the ctx epoll. */
         node->pfd.events = 0;
 
@@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx,
     } else {
         poll_disable_change = !io_poll - (node && !node->io_poll);
         if (node == NULL) {
-            /* Alloc and insert if it's not already there */
-            node = g_new0(AioHandler, 1);
-            node->pfd.fd = fd;
-            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
-
-            g_source_add_poll(&ctx->source, &node->pfd);
             is_new = true;
         }
+        /* Alloc and insert if it's not already there */
+        new_node = g_new0(AioHandler, 1);
 
         /* Update handler with latest information */
-        node->io_read = io_read;
-        node->io_write = io_write;
-        node->io_poll = io_poll;
-        node->opaque = opaque;
-        node->is_external = is_external;
+        new_node->io_read = io_read;
+        new_node->io_write = io_write;
+        new_node->io_poll = io_poll;
+        new_node->opaque = opaque;
+        new_node->is_external = is_external;
 
-        node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
-        node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
+        if (is_new) {
+            new_node->pfd.fd = fd;
+        } else {
+            new_node->pfd = node->pfd;
+        }
+        g_source_add_poll(&ctx->source, &new_node->pfd);
+
+        new_node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
+        new_node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
+
+        QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, new_node, node);
+    }
+    if (node) {
+        deleted = aio_remove_fd_handler(ctx, node);
     }
 
     /* No need to order poll_disable_cnt writes against other updates;
@@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx,
     atomic_set(&ctx->poll_disable_cnt,
                atomic_read(&ctx->poll_disable_cnt) + poll_disable_change);
 
-    aio_epoll_update(ctx, node, is_new);
+    if (new_node) {
+        aio_epoll_update(ctx, new_node, is_new);
+    } else if (node) {
+        /* Unregister deleted fd_handler */
+        aio_epoll_update(ctx, node, false);
+    }
     qemu_lockcnt_unlock(&ctx->list_lock);
     aio_notify(ctx);
 
diff --git a/util/aio-win32.c b/util/aio-win32.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-win32.c
+++ b/util/aio-win32.c
@@ -XXX,XX +XXX,XX @@ struct AioHandler {
     QLIST_ENTRY(AioHandler) node;
 };
 
+static void aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
+{
+    /* If aio_poll is in progress, just mark the node as deleted */
+    if (qemu_lockcnt_count(&ctx->list_lock)) {
+        node->deleted = 1;
+        node->pfd.revents = 0;
+    } else {
+        /* Otherwise, delete it for real. We can't just mark it as
+         * deleted because deleted nodes are only cleaned up after
+         * releasing the list_lock.
+         */
+        QLIST_REMOVE(node, node);
+        g_free(node);
+    }
+}
+
 void aio_set_fd_handler(AioContext *ctx,
                         int fd,
                         bool is_external,
@@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx,
                         void *opaque)
 {
     /* fd is a SOCKET in our case */
-    AioHandler *node;
+    AioHandler *old_node;
+    AioHandler *node = NULL;
 
     qemu_lockcnt_lock(&ctx->list_lock);
-    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
-        if (node->pfd.fd == fd && !node->deleted) {
+    QLIST_FOREACH(old_node, &ctx->aio_handlers, node) {
+        if (old_node->pfd.fd == fd && !old_node->deleted) {
             break;
         }
     }
 
-    /* Are we deleting the fd handler? */
-    if (!io_read && !io_write) {
-        if (node) {
-            /* If aio_poll is in progress, just mark the node as deleted */
-            if (qemu_lockcnt_count(&ctx->list_lock)) {
-                node->deleted = 1;
-                node->pfd.revents = 0;
-            } else {
-                /* Otherwise, delete it for real. We can't just mark it as
-                 * deleted because deleted nodes are only cleaned up after
-                 * releasing the list_lock.
-                 */
-                QLIST_REMOVE(node, node);
-                g_free(node);
-            }
-        }
-    } else {
+    if (io_read || io_write) {
         HANDLE event;
         long bitmask = 0;
 
-        if (node == NULL) {
-            /* Alloc and insert if it's not already there */
-            node = g_new0(AioHandler, 1);
-            node->pfd.fd = fd;
-            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
-        }
+        /* Alloc and insert if it's not already there */
+        node = g_new0(AioHandler, 1);
+        node->pfd.fd = fd;
 
         node->pfd.events = 0;
         if (node->io_read) {
@@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx,
             bitmask |= FD_WRITE | FD_CONNECT;
         }
 
+        QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
         event = event_notifier_get_handle(&ctx->notifier);
         WSAEventSelect(node->pfd.fd, event, bitmask);
     }
+    if (old_node) {
+        aio_remove_fd_handler(ctx, old_node);
+    }
 
     qemu_lockcnt_unlock(&ctx->list_lock);
     aio_notify(ctx);
@@ -XXX,XX +XXX,XX @@ void aio_set_event_notifier(AioContext *ctx,
         if (node) {
             g_source_remove_poll(&ctx->source, &node->pfd);
 
-            /* aio_poll is in progress, just mark the node as deleted */
-            if (qemu_lockcnt_count(&ctx->list_lock)) {
-                node->deleted = 1;
-                node->pfd.revents = 0;
-            } else {
-                /* Otherwise, delete it for real. We can't just mark it as
-                 * deleted because deleted nodes are only cleaned up after
-                 * releasing the list_lock.
-                 */
-                QLIST_REMOVE(node, node);
-                g_free(node);
-            }
+            aio_remove_fd_handler(ctx, node);
         }
     } else {
         if (node == NULL) {
--
2.20.1