The following changes since commit 136c67e07869227b21b3f627316e03679ce7b738:

  Merge remote-tracking branch 'remotes/bkoppelmann/tags/pull-tricore-2018-03-02' into staging (2018-03-02 16:56:20 +0000)

are available in the Git repository at:

  git://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 23500c6a9409efc80d696aede0629bfbe7556a90:

  README: Document 'git-publish' workflow (2018-03-05 09:03:17 +0000)

----------------------------------------------------------------
Pull request

Mostly patches that are only indirectly related to the block layer, but I've
reviewed them and there is no maintainer.

----------------------------------------------------------------

Fam Zheng (2):
  Add a git-publish configuration file
  README: Document 'git-publish' workflow

Su Hang (3):
  util/uri.c: Coding style check, Only whitespace involved
  util/uri.c: remove brackets that wrap `return` statement's content.
  util/uri.c: wrap single statement blocks with braces {}

Thomas Huth (1):
  tests/libqos: Check for valid dev pointer when looking for PCI devices

 tests/libqos/virtio-pci.c | 4 +-
 util/uri.c | 1733 ++++++++++++++++++++++++---------------------
 .gitpublish | 51 ++
 README | 31 +-
 4 files changed, 1014 insertions(+), 805 deletions(-)
 create mode 100644 .gitpublish

--
2.14.3

The following changes since commit 56f9e46b841c7be478ca038d8d4085d776ab4b0d:

  Merge remote-tracking branch 'remotes/armbru/tags/pull-qapi-2017-02-20' into staging (2017-02-20 17:42:47 +0000)

are available in the git repository at:

  git://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to a7b91d35bab97a2d3e779d0c64c9b837b52a6cf7:

  coroutine-lock: make CoRwlock thread-safe and fair (2017-02-21 11:39:40 +0000)

----------------------------------------------------------------
Pull request

v2:
 * Rebased to resolve scsi conflicts

----------------------------------------------------------------

Paolo Bonzini (24):
  block: move AioContext, QEMUTimer, main-loop to libqemuutil
  aio: introduce aio_co_schedule and aio_co_wake
  block-backend: allow blk_prw from coroutine context
  test-thread-pool: use generic AioContext infrastructure
  io: add methods to set I/O handlers on AioContext
  io: make qio_channel_yield aware of AioContexts
  nbd: convert to use qio_channel_yield
  coroutine-lock: reschedule coroutine on the AioContext it was running on
  blkdebug: reschedule coroutine on the AioContext it is running on
  qed: introduce qed_aio_start_io and qed_aio_next_io_cb
  aio: push aio_context_acquire/release down to dispatching
  block: explicitly acquire aiocontext in timers that need it
  block: explicitly acquire aiocontext in callbacks that need it
  block: explicitly acquire aiocontext in bottom halves that need it
  block: explicitly acquire aiocontext in aio callbacks that need it
  aio-posix: partially inline aio_dispatch into aio_poll
  async: remove unnecessary inc/dec pairs
  block: document fields protected by AioContext lock
  coroutine-lock: make CoMutex thread-safe
  coroutine-lock: add limited spinning to CoMutex
  test-aio-multithread: add performance comparison with thread-based mutexes
  coroutine-lock: place CoMutex before CoQueue in header
  coroutine-lock: add mutex argument to CoQueue APIs
  coroutine-lock: make CoRwlock thread-safe and fair

 Makefile.objs | 4 -
 stubs/Makefile.objs | 1 +
 tests/Makefile.include | 19 +-
 util/Makefile.objs | 6 +-
 block/nbd-client.h | 2 +-
 block/qed.h | 3 +
 include/block/aio.h | 38 ++-
 include/block/block_int.h | 64 +++--
 include/io/channel.h | 72 +++++-
 include/qemu/coroutine.h | 84 ++++---
 include/qemu/coroutine_int.h | 11 +-
 include/sysemu/block-backend.h | 14 +-
 tests/iothread.h | 25 ++
 block/backup.c | 2 +-
 block/blkdebug.c | 9 +-
 block/blkreplay.c | 2 +-
 block/block-backend.c | 13 +-
 block/curl.c | 44 +++-
 block/gluster.c | 9 +-
 block/io.c | 42 +---
 block/iscsi.c | 15 +-
 block/linux-aio.c | 10 +-
 block/mirror.c | 12 +-
 block/nbd-client.c | 119 +++++----
 block/nfs.c | 9 +-
 block/qcow2-cluster.c | 4 +-
 block/qed-cluster.c | 2 +
 block/qed-table.c | 12 +-
 block/qed.c | 58 +++--
 block/sheepdog.c | 31 +--
 block/ssh.c | 29 +--
 block/throttle-groups.c | 4 +-
 block/win32-aio.c | 9 +-
 dma-helpers.c | 2 +
 hw/9pfs/9p.c | 2 +-
 hw/block/virtio-blk.c | 19 +-
 hw/scsi/scsi-bus.c | 2 +
 hw/scsi/scsi-disk.c | 15 ++
 hw/scsi/scsi-generic.c | 20 +-
 hw/scsi/virtio-scsi.c | 7 +
 io/channel-command.c | 13 +
 io/channel-file.c | 11 +
 io/channel-socket.c | 16 +-
 io/channel-tls.c | 12 +
 io/channel-watch.c | 6 +
 io/channel.c | 97 ++++++--
 nbd/client.c | 2 +-
 nbd/common.c | 9 +-
 nbd/server.c | 94 +++-----
 stubs/linux-aio.c | 32 +++
 stubs/set-fd-handler.c | 11 -
 tests/iothread.c | 91 +++++++
 tests/test-aio-multithread.c | 463 ++++++++++++++++++++++++++++++++++++
 tests/test-thread-pool.c | 12 +-
 aio-posix.c => util/aio-posix.c | 62 ++---
 aio-win32.c => util/aio-win32.c | 30 +--
 util/aiocb.c | 55 +++++
 async.c => util/async.c | 84 ++++++-
 iohandler.c => util/iohandler.c | 0
 main-loop.c => util/main-loop.c | 0
 util/qemu-coroutine-lock.c | 254 ++++++++++++++++++--
 util/qemu-coroutine-sleep.c | 2 +-
 util/qemu-coroutine.c | 8 +
 qemu-timer.c => util/qemu-timer.c | 0
 thread-pool.c => util/thread-pool.c | 8 +-
 trace-events | 11 -
 util/trace-events | 17 +-
 67 files changed, 1712 insertions(+), 533 deletions(-)
 create mode 100644 tests/iothread.h
 create mode 100644 stubs/linux-aio.c
 create mode 100644 tests/iothread.c
 create mode 100644 tests/test-aio-multithread.c
 rename aio-posix.c => util/aio-posix.c (94%)
 rename aio-win32.c => util/aio-win32.c (95%)
 create mode 100644 util/aiocb.c
 rename async.c => util/async.c (82%)
 rename iohandler.c => util/iohandler.c (100%)
 rename main-loop.c => util/main-loop.c (100%)
 rename qemu-timer.c => util/qemu-timer.c (100%)
 rename thread-pool.c => util/thread-pool.c (97%)

--
2.9.3
diff view generated by jsdifflib
New patch
From: Paolo Bonzini <pbonzini@redhat.com>

AioContext is fairly self contained, the only dependency is QEMUTimer but
that in turn doesn't need anything else. So move them out of block-obj-y
to avoid introducing a dependency from io/ to block-obj-y.

main-loop and its dependency iohandler also need to be moved, because
later in this series io/ will call iohandler_get_aio_context.

[Changed copyright "the QEMU team" to "other QEMU contributors" as
suggested by Daniel Berrange and agreed by Paolo.
--Stefan]

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Message-id: 20170213135235.12274-2-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 Makefile.objs | 4 ---
 stubs/Makefile.objs | 1 +
 tests/Makefile.include | 11 ++++----
 util/Makefile.objs | 6 +++-
 block/io.c | 29 -------------------
 stubs/linux-aio.c | 32 +++++++++++++++++++++
 stubs/set-fd-handler.c | 11 --------
 aio-posix.c => util/aio-posix.c | 2 +-
 aio-win32.c => util/aio-win32.c | 0
 util/aiocb.c | 55 +++++++++++++++++++++++++++++++++++++
 async.c => util/async.c | 3 +-
 iohandler.c => util/iohandler.c | 0
 main-loop.c => util/main-loop.c | 0
 qemu-timer.c => util/qemu-timer.c | 0
 thread-pool.c => util/thread-pool.c | 2 +-
 trace-events | 11 --------
 util/trace-events | 11 ++++++++
 17 files changed, 114 insertions(+), 64 deletions(-)
 create mode 100644 stubs/linux-aio.c
 rename aio-posix.c => util/aio-posix.c (99%)
 rename aio-win32.c => util/aio-win32.c (100%)
 create mode 100644 util/aiocb.c
 rename async.c => util/async.c (99%)
 rename iohandler.c => util/iohandler.c (100%)
 rename main-loop.c => util/main-loop.c (100%)
 rename qemu-timer.c => util/qemu-timer.c (100%)
 rename thread-pool.c => util/thread-pool.c (99%)
diff --git a/Makefile.objs b/Makefile.objs
48
index XXXXXXX..XXXXXXX 100644
49
--- a/Makefile.objs
50
+++ b/Makefile.objs
51
@@ -XXX,XX +XXX,XX @@ chardev-obj-y = chardev/
52
#######################################################################
53
# block-obj-y is code used by both qemu system emulation and qemu-img
54
55
-block-obj-y = async.o thread-pool.o
56
block-obj-y += nbd/
57
block-obj-y += block.o blockjob.o
58
-block-obj-y += main-loop.o iohandler.o qemu-timer.o
59
-block-obj-$(CONFIG_POSIX) += aio-posix.o
60
-block-obj-$(CONFIG_WIN32) += aio-win32.o
61
block-obj-y += block/
62
block-obj-y += qemu-io-cmds.o
63
block-obj-$(CONFIG_REPLICATION) += replication.o
64
diff --git a/stubs/Makefile.objs b/stubs/Makefile.objs
65
index XXXXXXX..XXXXXXX 100644
66
--- a/stubs/Makefile.objs
67
+++ b/stubs/Makefile.objs
68
@@ -XXX,XX +XXX,XX @@ stub-obj-y += get-vm-name.o
69
stub-obj-y += iothread.o
70
stub-obj-y += iothread-lock.o
71
stub-obj-y += is-daemonized.o
72
+stub-obj-$(CONFIG_LINUX_AIO) += linux-aio.o
73
stub-obj-y += machine-init-done.o
74
stub-obj-y += migr-blocker.o
75
stub-obj-y += monitor.o
76
diff --git a/tests/Makefile.include b/tests/Makefile.include
77
index XXXXXXX..XXXXXXX 100644
78
--- a/tests/Makefile.include
79
+++ b/tests/Makefile.include
80
@@ -XXX,XX +XXX,XX @@ check-unit-y += tests/test-visitor-serialization$(EXESUF)
81
check-unit-y += tests/test-iov$(EXESUF)
82
gcov-files-test-iov-y = util/iov.c
83
check-unit-y += tests/test-aio$(EXESUF)
84
+gcov-files-test-aio-y = util/async.c util/qemu-timer.o
85
+gcov-files-test-aio-$(CONFIG_WIN32) += util/aio-win32.c
86
+gcov-files-test-aio-$(CONFIG_POSIX) += util/aio-posix.c
87
check-unit-y += tests/test-throttle$(EXESUF)
88
gcov-files-test-aio-$(CONFIG_WIN32) = aio-win32.c
89
gcov-files-test-aio-$(CONFIG_POSIX) = aio-posix.c
90
@@ -XXX,XX +XXX,XX @@ tests/check-qjson$(EXESUF): tests/check-qjson.o $(test-util-obj-y)
91
tests/check-qom-interface$(EXESUF): tests/check-qom-interface.o $(test-qom-obj-y)
92
tests/check-qom-proplist$(EXESUF): tests/check-qom-proplist.o $(test-qom-obj-y)
93
94
-tests/test-char$(EXESUF): tests/test-char.o qemu-timer.o \
95
-    $(test-util-obj-y) $(qtest-obj-y) $(test-block-obj-y) $(chardev-obj-y)
96
+tests/test-char$(EXESUF): tests/test-char.o $(test-util-obj-y) $(qtest-obj-y) $(test-io-obj-y) $(chardev-obj-y)
97
tests/test-coroutine$(EXESUF): tests/test-coroutine.o $(test-block-obj-y)
98
tests/test-aio$(EXESUF): tests/test-aio.o $(test-block-obj-y)
99
tests/test-throttle$(EXESUF): tests/test-throttle.o $(test-block-obj-y)
100
@@ -XXX,XX +XXX,XX @@ tests/test-vmstate$(EXESUF): tests/test-vmstate.o \
101
    migration/vmstate.o migration/qemu-file.o \
102
migration/qemu-file-channel.o migration/qjson.o \
103
    $(test-io-obj-y)
104
-tests/test-timed-average$(EXESUF): tests/test-timed-average.o qemu-timer.o \
105
-    $(test-util-obj-y)
106
+tests/test-timed-average$(EXESUF): tests/test-timed-average.o $(test-util-obj-y)
107
tests/test-base64$(EXESUF): tests/test-base64.o \
108
    libqemuutil.a libqemustub.a
109
tests/ptimer-test$(EXESUF): tests/ptimer-test.o tests/ptimer-test-stubs.o hw/core/ptimer.o libqemustub.a
110
@@ -XXX,XX +XXX,XX @@ tests/usb-hcd-ehci-test$(EXESUF): tests/usb-hcd-ehci-test.o $(libqos-usb-obj-y)
111
tests/usb-hcd-xhci-test$(EXESUF): tests/usb-hcd-xhci-test.o $(libqos-usb-obj-y)
112
tests/pc-cpu-test$(EXESUF): tests/pc-cpu-test.o
113
tests/postcopy-test$(EXESUF): tests/postcopy-test.o
114
-tests/vhost-user-test$(EXESUF): tests/vhost-user-test.o qemu-timer.o \
115
+tests/vhost-user-test$(EXESUF): tests/vhost-user-test.o $(test-util-obj-y) \
116
    $(qtest-obj-y) $(test-io-obj-y) $(libqos-virtio-obj-y) $(libqos-pc-obj-y) \
117
    $(chardev-obj-y)
118
tests/qemu-iotests/socket_scm_helper$(EXESUF): tests/qemu-iotests/socket_scm_helper.o
119
diff --git a/util/Makefile.objs b/util/Makefile.objs
120
index XXXXXXX..XXXXXXX 100644
121
--- a/util/Makefile.objs
122
+++ b/util/Makefile.objs
123
@@ -XXX,XX +XXX,XX @@
124
util-obj-y = osdep.o cutils.o unicode.o qemu-timer-common.o
125
util-obj-y += bufferiszero.o
126
util-obj-y += lockcnt.o
127
+util-obj-y += aiocb.o async.o thread-pool.o qemu-timer.o
128
+util-obj-y += main-loop.o iohandler.o
129
+util-obj-$(CONFIG_POSIX) += aio-posix.o
130
util-obj-$(CONFIG_POSIX) += compatfd.o
131
util-obj-$(CONFIG_POSIX) += event_notifier-posix.o
132
util-obj-$(CONFIG_POSIX) += mmap-alloc.o
133
util-obj-$(CONFIG_POSIX) += oslib-posix.o
134
util-obj-$(CONFIG_POSIX) += qemu-openpty.o
135
util-obj-$(CONFIG_POSIX) += qemu-thread-posix.o
136
-util-obj-$(CONFIG_WIN32) += event_notifier-win32.o
137
util-obj-$(CONFIG_POSIX) += memfd.o
138
+util-obj-$(CONFIG_WIN32) += aio-win32.o
139
+util-obj-$(CONFIG_WIN32) += event_notifier-win32.o
140
util-obj-$(CONFIG_WIN32) += oslib-win32.o
141
util-obj-$(CONFIG_WIN32) += qemu-thread-win32.o
142
util-obj-y += envlist.o path.o module.o
143
diff --git a/block/io.c b/block/io.c
144
index XXXXXXX..XXXXXXX 100644
145
--- a/block/io.c
146
+++ b/block/io.c
147
@@ -XXX,XX +XXX,XX @@ BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
148
return &acb->common;
149
}
150
151
-void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
152
- BlockCompletionFunc *cb, void *opaque)
153
-{
154
- BlockAIOCB *acb;
155
-
156
- acb = g_malloc(aiocb_info->aiocb_size);
157
- acb->aiocb_info = aiocb_info;
158
- acb->bs = bs;
159
- acb->cb = cb;
160
- acb->opaque = opaque;
161
- acb->refcnt = 1;
162
- return acb;
163
-}
164
-
165
-void qemu_aio_ref(void *p)
166
-{
167
- BlockAIOCB *acb = p;
168
- acb->refcnt++;
169
-}
170
-
171
-void qemu_aio_unref(void *p)
172
-{
173
- BlockAIOCB *acb = p;
174
- assert(acb->refcnt > 0);
175
- if (--acb->refcnt == 0) {
176
- g_free(acb);
177
- }
178
-}
179
-
180
/**************************************************************/
181
/* Coroutine block device emulation */
182
183
diff --git a/stubs/linux-aio.c b/stubs/linux-aio.c
184
new file mode 100644
185
index XXXXXXX..XXXXXXX
186
--- /dev/null
187
+++ b/stubs/linux-aio.c
188
@@ -XXX,XX +XXX,XX @@
189
+/*
190
+ * Linux native AIO support.
191
+ *
192
+ * Copyright (C) 2009 IBM, Corp.
193
+ * Copyright (C) 2009 Red Hat, Inc.
194
+ *
195
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
196
+ * See the COPYING file in the top-level directory.
197
+ */
198
+#include "qemu/osdep.h"
199
+#include "block/aio.h"
200
+#include "block/raw-aio.h"
201
+
202
+void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
203
+{
204
+ abort();
205
+}
206
+
207
+void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
208
+{
209
+ abort();
210
+}
211
+
212
+LinuxAioState *laio_init(void)
213
+{
214
+ abort();
215
+}
216
+
217
+void laio_cleanup(LinuxAioState *s)
218
+{
219
+ abort();
220
+}
221
diff --git a/stubs/set-fd-handler.c b/stubs/set-fd-handler.c
222
index XXXXXXX..XXXXXXX 100644
223
--- a/stubs/set-fd-handler.c
224
+++ b/stubs/set-fd-handler.c
225
@@ -XXX,XX +XXX,XX @@ void qemu_set_fd_handler(int fd,
226
{
227
abort();
228
}
229
-
230
-void aio_set_fd_handler(AioContext *ctx,
231
- int fd,
232
- bool is_external,
233
- IOHandler *io_read,
234
- IOHandler *io_write,
235
- AioPollFn *io_poll,
236
- void *opaque)
237
-{
238
- abort();
239
-}
240
diff --git a/aio-posix.c b/util/aio-posix.c
241
similarity index 99%
242
rename from aio-posix.c
243
rename to util/aio-posix.c
244
index XXXXXXX..XXXXXXX 100644
245
--- a/aio-posix.c
246
+++ b/util/aio-posix.c
247
@@ -XXX,XX +XXX,XX @@
248
#include "qemu/rcu_queue.h"
249
#include "qemu/sockets.h"
250
#include "qemu/cutils.h"
251
-#include "trace-root.h"
252
+#include "trace.h"
253
#ifdef CONFIG_EPOLL_CREATE1
254
#include <sys/epoll.h>
255
#endif
256
diff --git a/aio-win32.c b/util/aio-win32.c
257
similarity index 100%
258
rename from aio-win32.c
259
rename to util/aio-win32.c
260
diff --git a/util/aiocb.c b/util/aiocb.c
261
new file mode 100644
262
index XXXXXXX..XXXXXXX
263
--- /dev/null
264
+++ b/util/aiocb.c
265
@@ -XXX,XX +XXX,XX @@
266
+/*
267
+ * BlockAIOCB allocation
268
+ *
269
+ * Copyright (c) 2003-2017 Fabrice Bellard and other QEMU contributors
270
+ *
271
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
272
+ * of this software and associated documentation files (the "Software"), to deal
273
+ * in the Software without restriction, including without limitation the rights
274
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
275
+ * copies of the Software, and to permit persons to whom the Software is
276
+ * furnished to do so, subject to the following conditions:
277
+ *
278
+ * The above copyright notice and this permission notice shall be included in
279
+ * all copies or substantial portions of the Software.
280
+ *
281
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
282
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
283
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
284
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
285
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
286
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
287
+ * THE SOFTWARE.
288
+ */
289
+
290
+#include "qemu/osdep.h"
291
+#include "block/aio.h"
292
+
293
+void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
294
+ BlockCompletionFunc *cb, void *opaque)
295
+{
296
+ BlockAIOCB *acb;
297
+
298
+ acb = g_malloc(aiocb_info->aiocb_size);
299
+ acb->aiocb_info = aiocb_info;
300
+ acb->bs = bs;
301
+ acb->cb = cb;
302
+ acb->opaque = opaque;
303
+ acb->refcnt = 1;
304
+ return acb;
305
+}
306
+
307
+void qemu_aio_ref(void *p)
308
+{
309
+ BlockAIOCB *acb = p;
310
+ acb->refcnt++;
311
+}
312
+
313
+void qemu_aio_unref(void *p)
314
+{
315
+ BlockAIOCB *acb = p;
316
+ assert(acb->refcnt > 0);
317
+ if (--acb->refcnt == 0) {
318
+ g_free(acb);
319
+ }
320
+}
321
diff --git a/async.c b/util/async.c
322
similarity index 99%
323
rename from async.c
324
rename to util/async.c
325
index XXXXXXX..XXXXXXX 100644
326
--- a/async.c
327
+++ b/util/async.c
328
@@ -XXX,XX +XXX,XX @@
329
/*
330
- * QEMU System Emulator
331
+ * Data plane event loop
332
*
333
* Copyright (c) 2003-2008 Fabrice Bellard
334
+ * Copyright (c) 2009-2017 QEMU contributors
335
*
336
* Permission is hereby granted, free of charge, to any person obtaining a copy
337
* of this software and associated documentation files (the "Software"), to deal
338
diff --git a/iohandler.c b/util/iohandler.c
339
similarity index 100%
340
rename from iohandler.c
341
rename to util/iohandler.c
342
diff --git a/main-loop.c b/util/main-loop.c
343
similarity index 100%
344
rename from main-loop.c
345
rename to util/main-loop.c
346
diff --git a/qemu-timer.c b/util/qemu-timer.c
347
similarity index 100%
348
rename from qemu-timer.c
349
rename to util/qemu-timer.c
350
diff --git a/thread-pool.c b/util/thread-pool.c
351
similarity index 99%
352
rename from thread-pool.c
353
rename to util/thread-pool.c
354
index XXXXXXX..XXXXXXX 100644
355
--- a/thread-pool.c
356
+++ b/util/thread-pool.c
357
@@ -XXX,XX +XXX,XX @@
358
#include "qemu/queue.h"
359
#include "qemu/thread.h"
360
#include "qemu/coroutine.h"
361
-#include "trace-root.h"
362
+#include "trace.h"
363
#include "block/thread-pool.h"
364
#include "qemu/main-loop.h"
365
366
diff --git a/trace-events b/trace-events
367
index XXXXXXX..XXXXXXX 100644
368
--- a/trace-events
369
+++ b/trace-events
370
@@ -XXX,XX +XXX,XX @@
371
#
372
# The <format-string> should be a sprintf()-compatible format string.
373
374
-# aio-posix.c
375
-run_poll_handlers_begin(void *ctx, int64_t max_ns) "ctx %p max_ns %"PRId64
376
-run_poll_handlers_end(void *ctx, bool progress) "ctx %p progress %d"
377
-poll_shrink(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
378
-poll_grow(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
379
-
380
-# thread-pool.c
381
-thread_pool_submit(void *pool, void *req, void *opaque) "pool %p req %p opaque %p"
382
-thread_pool_complete(void *pool, void *req, void *opaque, int ret) "pool %p req %p opaque %p ret %d"
383
-thread_pool_cancel(void *req, void *opaque) "req %p opaque %p"
384
-
385
# ioport.c
386
cpu_in(unsigned int addr, char size, unsigned int val) "addr %#x(%c) value %u"
387
cpu_out(unsigned int addr, char size, unsigned int val) "addr %#x(%c) value %u"
388
diff --git a/util/trace-events b/util/trace-events
389
index XXXXXXX..XXXXXXX 100644
390
--- a/util/trace-events
391
+++ b/util/trace-events
392
@@ -XXX,XX +XXX,XX @@
393
# See docs/tracing.txt for syntax documentation.
394
395
+# util/aio-posix.c
396
+run_poll_handlers_begin(void *ctx, int64_t max_ns) "ctx %p max_ns %"PRId64
397
+run_poll_handlers_end(void *ctx, bool progress) "ctx %p progress %d"
398
+poll_shrink(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
399
+poll_grow(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
400
+
401
+# util/thread-pool.c
402
+thread_pool_submit(void *pool, void *req, void *opaque) "pool %p req %p opaque %p"
403
+thread_pool_complete(void *pool, void *req, void *opaque, int ret) "pool %p req %p opaque %p ret %d"
404
+thread_pool_cancel(void *req, void *opaque) "req %p opaque %p"
405
+
406
# util/buffer.c
407
buffer_resize(const char *buf, size_t olen, size_t len) "%s: old %zd, new %zd"
408
buffer_move_empty(const char *buf, size_t len, const char *from) "%s: %zd bytes from %s"
409
--
410
2.9.3
411
412
diff view generated by jsdifflib
New patch
From: Paolo Bonzini <pbonzini@redhat.com>

aio_co_wake provides the infrastructure to start a coroutine on a "home"
AioContext. It will be used by CoMutex and CoQueue, so that coroutines
don't jump from one context to another when they go to sleep on a
mutex or waitqueue. However, it can also be used as a more efficient
alternative to one-shot bottom halves, and saves the effort of tracking
which AioContext a coroutine is running on.

aio_co_schedule is the part of aio_co_wake that starts a coroutine
on a remote AioContext, but it is also useful to implement e.g.
bdrv_set_aio_context callbacks.

The implementation of aio_co_schedule is based on a lock-free
multiple-producer, single-consumer queue. The multiple producers use
cmpxchg to add to a LIFO stack. The consumer (a per-AioContext bottom
half) grabs all items added so far, inverts the list to make it FIFO,
and goes through it one item at a time until it's empty. The data
structure was inspired by OSv, which uses it in the very code we'll
"port" to QEMU for the thread-safe CoMutex.

Most of the new code is really tests.
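
A minimal standalone sketch of that push/reverse scheme, using C11 atomics
instead of QEMU's atomic helpers and QSLIST macros (illustrative only; the
real implementation is the aio_co_schedule()/co_schedule_bh_cb() pair added
to util/async.c below):

/* Illustrative multiple-producer, single-consumer list, mirroring the
 * aio_co_schedule() idea: producers push with a CAS loop (LIFO); the single
 * consumer grabs the whole list with an exchange and reverses it so that
 * items run in FIFO order. */
#include <stdatomic.h>
#include <stddef.h>

struct node {
    struct node *next;
};

static _Atomic(struct node *) sched_head;

/* Called from any thread; equivalent to scheduling one coroutine. */
static void mpsc_push(struct node *n)
{
    struct node *old = atomic_load(&sched_head);
    do {
        n->next = old;
    } while (!atomic_compare_exchange_weak(&sched_head, &old, n));
    /* In QEMU this is where qemu_bh_schedule(ctx->co_schedule_bh) runs. */
}

/* Called only from the consumer (the per-context bottom half). */
static void mpsc_drain(void (*run)(struct node *))
{
    struct node *reversed = atomic_exchange(&sched_head, NULL);
    struct node *fifo = NULL;

    while (reversed) {          /* invert the LIFO stack into FIFO order */
        struct node *n = reversed;
        reversed = n->next;
        n->next = fifo;
        fifo = n;
    }
    while (fifo) {              /* enter each queued item in order */
        struct node *n = fifo;
        fifo = n->next;
        run(n);
    }
}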

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Message-id: 20170213135235.12274-3-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 tests/Makefile.include | 8 +-
 include/block/aio.h | 32 +++++++
 include/qemu/coroutine_int.h | 11 ++-
 tests/iothread.h | 25 +++++
 tests/iothread.c | 91 ++++++++++++++
 tests/test-aio-multithread.c | 213 +++++++++++++++++++++++++++++++++++++++
 util/async.c | 65 +++++++++++
 util/qemu-coroutine.c | 8 ++
 util/trace-events | 4 +
 9 files changed, 453 insertions(+), 4 deletions(-)
 create mode 100644 tests/iothread.h
 create mode 100644 tests/iothread.c
 create mode 100644 tests/test-aio-multithread.c
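
As a rough, hypothetical illustration of how the new API pairs a request
coroutine with its completion callback (the MyRequest type and helper names
are invented for the example; only aio_co_wake(), qemu_coroutine_self() and
qemu_coroutine_yield() are real QEMU functions):

#include "qemu/coroutine.h"
#include "block/aio.h"

typedef struct MyRequest {
    Coroutine *co;      /* coroutine waiting for completion */
    int ret;
} MyRequest;

/* Runs in whatever thread/AioContext delivers the completion. */
static void my_request_complete(MyRequest *req, int ret)
{
    req->ret = ret;
    aio_co_wake(req->co);   /* resume req->co on its home AioContext */
}

/* Runs in coroutine context. */
static int coroutine_fn my_request_wait(MyRequest *req)
{
    req->co = qemu_coroutine_self();
    qemu_coroutine_yield();  /* woken by my_request_complete() */
    return req->ret;
}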
diff --git a/tests/Makefile.include b/tests/Makefile.include
44
index XXXXXXX..XXXXXXX 100644
45
--- a/tests/Makefile.include
46
+++ b/tests/Makefile.include
47
@@ -XXX,XX +XXX,XX @@ check-unit-y += tests/test-aio$(EXESUF)
48
gcov-files-test-aio-y = util/async.c util/qemu-timer.o
49
gcov-files-test-aio-$(CONFIG_WIN32) += util/aio-win32.c
50
gcov-files-test-aio-$(CONFIG_POSIX) += util/aio-posix.c
51
+check-unit-y += tests/test-aio-multithread$(EXESUF)
52
+gcov-files-test-aio-multithread-y = $(gcov-files-test-aio-y)
53
+gcov-files-test-aio-multithread-y += util/qemu-coroutine.c tests/iothread.c
54
check-unit-y += tests/test-throttle$(EXESUF)
55
-gcov-files-test-aio-$(CONFIG_WIN32) = aio-win32.c
56
-gcov-files-test-aio-$(CONFIG_POSIX) = aio-posix.c
57
check-unit-y += tests/test-thread-pool$(EXESUF)
58
gcov-files-test-thread-pool-y = thread-pool.c
59
gcov-files-test-hbitmap-y = util/hbitmap.c
60
@@ -XXX,XX +XXX,XX @@ test-qapi-obj-y = tests/test-qapi-visit.o tests/test-qapi-types.o \
61
    $(test-qom-obj-y)
62
test-crypto-obj-y = $(crypto-obj-y) $(test-qom-obj-y)
63
test-io-obj-y = $(io-obj-y) $(test-crypto-obj-y)
64
-test-block-obj-y = $(block-obj-y) $(test-io-obj-y)
65
+test-block-obj-y = $(block-obj-y) $(test-io-obj-y) tests/iothread.o
66
67
tests/check-qint$(EXESUF): tests/check-qint.o $(test-util-obj-y)
68
tests/check-qstring$(EXESUF): tests/check-qstring.o $(test-util-obj-y)
69
@@ -XXX,XX +XXX,XX @@ tests/check-qom-proplist$(EXESUF): tests/check-qom-proplist.o $(test-qom-obj-y)
70
tests/test-char$(EXESUF): tests/test-char.o $(test-util-obj-y) $(qtest-obj-y) $(test-io-obj-y) $(chardev-obj-y)
71
tests/test-coroutine$(EXESUF): tests/test-coroutine.o $(test-block-obj-y)
72
tests/test-aio$(EXESUF): tests/test-aio.o $(test-block-obj-y)
73
+tests/test-aio-multithread$(EXESUF): tests/test-aio-multithread.o $(test-block-obj-y)
74
tests/test-throttle$(EXESUF): tests/test-throttle.o $(test-block-obj-y)
75
tests/test-blockjob$(EXESUF): tests/test-blockjob.o $(test-block-obj-y) $(test-util-obj-y)
76
tests/test-blockjob-txn$(EXESUF): tests/test-blockjob-txn.o $(test-block-obj-y) $(test-util-obj-y)
77
diff --git a/include/block/aio.h b/include/block/aio.h
78
index XXXXXXX..XXXXXXX 100644
79
--- a/include/block/aio.h
80
+++ b/include/block/aio.h
81
@@ -XXX,XX +XXX,XX @@ typedef void QEMUBHFunc(void *opaque);
82
typedef bool AioPollFn(void *opaque);
83
typedef void IOHandler(void *opaque);
84
85
+struct Coroutine;
86
struct ThreadPool;
87
struct LinuxAioState;
88
89
@@ -XXX,XX +XXX,XX @@ struct AioContext {
90
bool notified;
91
EventNotifier notifier;
92
93
+ QSLIST_HEAD(, Coroutine) scheduled_coroutines;
94
+ QEMUBH *co_schedule_bh;
95
+
96
/* Thread pool for performing work and receiving completion callbacks.
97
* Has its own locking.
98
*/
99
@@ -XXX,XX +XXX,XX @@ static inline bool aio_node_check(AioContext *ctx, bool is_external)
100
}
101
102
/**
103
+ * aio_co_schedule:
104
+ * @ctx: the aio context
105
+ * @co: the coroutine
106
+ *
107
+ * Start a coroutine on a remote AioContext.
108
+ *
109
+ * The coroutine must not be entered by anyone else while aio_co_schedule()
110
+ * is active. In addition the coroutine must have yielded unless ctx
111
+ * is the context in which the coroutine is running (i.e. the value of
112
+ * qemu_get_current_aio_context() from the coroutine itself).
113
+ */
114
+void aio_co_schedule(AioContext *ctx, struct Coroutine *co);
115
+
116
+/**
117
+ * aio_co_wake:
118
+ * @co: the coroutine
119
+ *
120
+ * Restart a coroutine on the AioContext where it was running last, thus
121
+ * preventing coroutines from jumping from one context to another when they
122
+ * go to sleep.
123
+ *
124
+ * aio_co_wake may be executed either in coroutine or non-coroutine
125
+ * context. The coroutine must not be entered by anyone else while
126
+ * aio_co_wake() is active.
127
+ */
128
+void aio_co_wake(struct Coroutine *co);
129
+
130
+/**
131
* Return the AioContext whose event loop runs in the current thread.
132
*
133
* If called from an IOThread this will be the IOThread's AioContext. If
134
diff --git a/include/qemu/coroutine_int.h b/include/qemu/coroutine_int.h
135
index XXXXXXX..XXXXXXX 100644
136
--- a/include/qemu/coroutine_int.h
137
+++ b/include/qemu/coroutine_int.h
138
@@ -XXX,XX +XXX,XX @@ struct Coroutine {
139
CoroutineEntry *entry;
140
void *entry_arg;
141
Coroutine *caller;
142
+
143
+ /* Only used when the coroutine has terminated. */
144
QSLIST_ENTRY(Coroutine) pool_next;
145
+
146
size_t locks_held;
147
148
- /* Coroutines that should be woken up when we yield or terminate */
149
+ /* Coroutines that should be woken up when we yield or terminate.
150
+ * Only used when the coroutine is running.
151
+ */
152
QSIMPLEQ_HEAD(, Coroutine) co_queue_wakeup;
153
+
154
+ /* Only used when the coroutine has yielded. */
155
+ AioContext *ctx;
156
QSIMPLEQ_ENTRY(Coroutine) co_queue_next;
157
+ QSLIST_ENTRY(Coroutine) co_scheduled_next;
158
};
159
160
Coroutine *qemu_coroutine_new(void);
161
diff --git a/tests/iothread.h b/tests/iothread.h
162
new file mode 100644
163
index XXXXXXX..XXXXXXX
164
--- /dev/null
165
+++ b/tests/iothread.h
166
@@ -XXX,XX +XXX,XX @@
167
+/*
168
+ * Event loop thread implementation for unit tests
169
+ *
170
+ * Copyright Red Hat Inc., 2013, 2016
171
+ *
172
+ * Authors:
173
+ * Stefan Hajnoczi <stefanha@redhat.com>
174
+ * Paolo Bonzini <pbonzini@redhat.com>
175
+ *
176
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
177
+ * See the COPYING file in the top-level directory.
178
+ */
179
+#ifndef TEST_IOTHREAD_H
180
+#define TEST_IOTHREAD_H
181
+
182
+#include "block/aio.h"
183
+#include "qemu/thread.h"
184
+
185
+typedef struct IOThread IOThread;
186
+
187
+IOThread *iothread_new(void);
188
+void iothread_join(IOThread *iothread);
189
+AioContext *iothread_get_aio_context(IOThread *iothread);
190
+
191
+#endif
192
diff --git a/tests/iothread.c b/tests/iothread.c
193
new file mode 100644
194
index XXXXXXX..XXXXXXX
195
--- /dev/null
196
+++ b/tests/iothread.c
197
@@ -XXX,XX +XXX,XX @@
198
+/*
199
+ * Event loop thread implementation for unit tests
200
+ *
201
+ * Copyright Red Hat Inc., 2013, 2016
202
+ *
203
+ * Authors:
204
+ * Stefan Hajnoczi <stefanha@redhat.com>
205
+ * Paolo Bonzini <pbonzini@redhat.com>
206
+ *
207
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
208
+ * See the COPYING file in the top-level directory.
209
+ *
210
+ */
211
+
212
+#include "qemu/osdep.h"
213
+#include "qapi/error.h"
214
+#include "block/aio.h"
215
+#include "qemu/main-loop.h"
216
+#include "qemu/rcu.h"
217
+#include "iothread.h"
218
+
219
+struct IOThread {
220
+ AioContext *ctx;
221
+
222
+ QemuThread thread;
223
+ QemuMutex init_done_lock;
224
+ QemuCond init_done_cond; /* is thread initialization done? */
225
+ bool stopping;
226
+};
227
+
228
+static __thread IOThread *my_iothread;
229
+
230
+AioContext *qemu_get_current_aio_context(void)
231
+{
232
+ return my_iothread ? my_iothread->ctx : qemu_get_aio_context();
233
+}
234
+
235
+static void *iothread_run(void *opaque)
236
+{
237
+ IOThread *iothread = opaque;
238
+
239
+ rcu_register_thread();
240
+
241
+ my_iothread = iothread;
242
+ qemu_mutex_lock(&iothread->init_done_lock);
243
+ iothread->ctx = aio_context_new(&error_abort);
244
+ qemu_cond_signal(&iothread->init_done_cond);
245
+ qemu_mutex_unlock(&iothread->init_done_lock);
246
+
247
+ while (!atomic_read(&iothread->stopping)) {
248
+ aio_poll(iothread->ctx, true);
249
+ }
250
+
251
+ rcu_unregister_thread();
252
+ return NULL;
253
+}
254
+
255
+void iothread_join(IOThread *iothread)
256
+{
257
+ iothread->stopping = true;
258
+ aio_notify(iothread->ctx);
259
+ qemu_thread_join(&iothread->thread);
260
+ qemu_cond_destroy(&iothread->init_done_cond);
261
+ qemu_mutex_destroy(&iothread->init_done_lock);
262
+ aio_context_unref(iothread->ctx);
263
+ g_free(iothread);
264
+}
265
+
266
+IOThread *iothread_new(void)
267
+{
268
+ IOThread *iothread = g_new0(IOThread, 1);
269
+
270
+ qemu_mutex_init(&iothread->init_done_lock);
271
+ qemu_cond_init(&iothread->init_done_cond);
272
+ qemu_thread_create(&iothread->thread, NULL, iothread_run,
273
+ iothread, QEMU_THREAD_JOINABLE);
274
+
275
+ /* Wait for initialization to complete */
276
+ qemu_mutex_lock(&iothread->init_done_lock);
277
+ while (iothread->ctx == NULL) {
278
+ qemu_cond_wait(&iothread->init_done_cond,
279
+ &iothread->init_done_lock);
280
+ }
281
+ qemu_mutex_unlock(&iothread->init_done_lock);
282
+ return iothread;
283
+}
284
+
285
+AioContext *iothread_get_aio_context(IOThread *iothread)
286
+{
287
+ return iothread->ctx;
288
+}
289
diff --git a/tests/test-aio-multithread.c b/tests/test-aio-multithread.c
290
new file mode 100644
291
index XXXXXXX..XXXXXXX
292
--- /dev/null
293
+++ b/tests/test-aio-multithread.c
294
@@ -XXX,XX +XXX,XX @@
295
+/*
296
+ * AioContext multithreading tests
297
+ *
298
+ * Copyright Red Hat, Inc. 2016
299
+ *
300
+ * Authors:
301
+ * Paolo Bonzini <pbonzini@redhat.com>
302
+ *
303
+ * This work is licensed under the terms of the GNU LGPL, version 2 or later.
304
+ * See the COPYING.LIB file in the top-level directory.
305
+ */
306
+
307
+#include "qemu/osdep.h"
308
+#include <glib.h>
309
+#include "block/aio.h"
310
+#include "qapi/error.h"
311
+#include "qemu/coroutine.h"
312
+#include "qemu/thread.h"
313
+#include "qemu/error-report.h"
314
+#include "iothread.h"
315
+
316
+/* AioContext management */
317
+
318
+#define NUM_CONTEXTS 5
319
+
320
+static IOThread *threads[NUM_CONTEXTS];
321
+static AioContext *ctx[NUM_CONTEXTS];
322
+static __thread int id = -1;
323
+
324
+static QemuEvent done_event;
325
+
326
+/* Run a function synchronously on a remote iothread. */
327
+
328
+typedef struct CtxRunData {
329
+ QEMUBHFunc *cb;
330
+ void *arg;
331
+} CtxRunData;
332
+
333
+static void ctx_run_bh_cb(void *opaque)
334
+{
335
+ CtxRunData *data = opaque;
336
+
337
+ data->cb(data->arg);
338
+ qemu_event_set(&done_event);
339
+}
340
+
341
+static void ctx_run(int i, QEMUBHFunc *cb, void *opaque)
342
+{
343
+ CtxRunData data = {
344
+ .cb = cb,
345
+ .arg = opaque
346
+ };
347
+
348
+ qemu_event_reset(&done_event);
349
+ aio_bh_schedule_oneshot(ctx[i], ctx_run_bh_cb, &data);
350
+ qemu_event_wait(&done_event);
351
+}
352
+
353
+/* Starting the iothreads. */
354
+
355
+static void set_id_cb(void *opaque)
356
+{
357
+ int *i = opaque;
358
+
359
+ id = *i;
360
+}
361
+
362
+static void create_aio_contexts(void)
363
+{
364
+ int i;
365
+
366
+ for (i = 0; i < NUM_CONTEXTS; i++) {
367
+ threads[i] = iothread_new();
368
+ ctx[i] = iothread_get_aio_context(threads[i]);
369
+ }
370
+
371
+ qemu_event_init(&done_event, false);
372
+ for (i = 0; i < NUM_CONTEXTS; i++) {
373
+ ctx_run(i, set_id_cb, &i);
374
+ }
375
+}
376
+
377
+/* Stopping the iothreads. */
378
+
379
+static void join_aio_contexts(void)
380
+{
381
+ int i;
382
+
383
+ for (i = 0; i < NUM_CONTEXTS; i++) {
384
+ aio_context_ref(ctx[i]);
385
+ }
386
+ for (i = 0; i < NUM_CONTEXTS; i++) {
387
+ iothread_join(threads[i]);
388
+ }
389
+ for (i = 0; i < NUM_CONTEXTS; i++) {
390
+ aio_context_unref(ctx[i]);
391
+ }
392
+ qemu_event_destroy(&done_event);
393
+}
394
+
395
+/* Basic test for the stuff above. */
396
+
397
+static void test_lifecycle(void)
398
+{
399
+ create_aio_contexts();
400
+ join_aio_contexts();
401
+}
402
+
403
+/* aio_co_schedule test. */
404
+
405
+static Coroutine *to_schedule[NUM_CONTEXTS];
406
+
407
+static bool now_stopping;
408
+
409
+static int count_retry;
410
+static int count_here;
411
+static int count_other;
412
+
413
+static bool schedule_next(int n)
414
+{
415
+ Coroutine *co;
416
+
417
+ co = atomic_xchg(&to_schedule[n], NULL);
418
+ if (!co) {
419
+ atomic_inc(&count_retry);
420
+ return false;
421
+ }
422
+
423
+ if (n == id) {
424
+ atomic_inc(&count_here);
425
+ } else {
426
+ atomic_inc(&count_other);
427
+ }
428
+
429
+ aio_co_schedule(ctx[n], co);
430
+ return true;
431
+}
432
+
433
+static void finish_cb(void *opaque)
434
+{
435
+ schedule_next(id);
436
+}
437
+
438
+static coroutine_fn void test_multi_co_schedule_entry(void *opaque)
439
+{
440
+ g_assert(to_schedule[id] == NULL);
441
+ atomic_mb_set(&to_schedule[id], qemu_coroutine_self());
442
+
443
+ while (!atomic_mb_read(&now_stopping)) {
444
+ int n;
445
+
446
+ n = g_test_rand_int_range(0, NUM_CONTEXTS);
447
+ schedule_next(n);
448
+ qemu_coroutine_yield();
449
+
450
+ g_assert(to_schedule[id] == NULL);
451
+ atomic_mb_set(&to_schedule[id], qemu_coroutine_self());
452
+ }
453
+}
454
+
455
+
456
+static void test_multi_co_schedule(int seconds)
457
+{
458
+ int i;
459
+
460
+ count_here = count_other = count_retry = 0;
461
+ now_stopping = false;
462
+
463
+ create_aio_contexts();
464
+ for (i = 0; i < NUM_CONTEXTS; i++) {
465
+ Coroutine *co1 = qemu_coroutine_create(test_multi_co_schedule_entry, NULL);
466
+ aio_co_schedule(ctx[i], co1);
467
+ }
468
+
469
+ g_usleep(seconds * 1000000);
470
+
471
+ atomic_mb_set(&now_stopping, true);
472
+ for (i = 0; i < NUM_CONTEXTS; i++) {
473
+ ctx_run(i, finish_cb, NULL);
474
+ to_schedule[i] = NULL;
475
+ }
476
+
477
+ join_aio_contexts();
478
+ g_test_message("scheduled %d, queued %d, retry %d, total %d\n",
479
+ count_other, count_here, count_retry,
480
+ count_here + count_other + count_retry);
481
+}
482
+
483
+static void test_multi_co_schedule_1(void)
484
+{
485
+ test_multi_co_schedule(1);
486
+}
487
+
488
+static void test_multi_co_schedule_10(void)
489
+{
490
+ test_multi_co_schedule(10);
491
+}
492
+
493
+/* End of tests. */
494
+
495
+int main(int argc, char **argv)
496
+{
497
+ init_clocks();
498
+
499
+ g_test_init(&argc, &argv, NULL);
500
+ g_test_add_func("/aio/multi/lifecycle", test_lifecycle);
501
+ if (g_test_quick()) {
502
+ g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_1);
503
+ } else {
504
+ g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_10);
505
+ }
506
+ return g_test_run();
507
+}
508
diff --git a/util/async.c b/util/async.c
509
index XXXXXXX..XXXXXXX 100644
510
--- a/util/async.c
511
+++ b/util/async.c
512
@@ -XXX,XX +XXX,XX @@
513
#include "qemu/main-loop.h"
514
#include "qemu/atomic.h"
515
#include "block/raw-aio.h"
516
+#include "qemu/coroutine_int.h"
517
+#include "trace.h"
518
519
/***********************************************************/
520
/* bottom halves (can be seen as timers which expire ASAP) */
521
@@ -XXX,XX +XXX,XX @@ aio_ctx_finalize(GSource *source)
522
}
523
#endif
524
525
+ assert(QSLIST_EMPTY(&ctx->scheduled_coroutines));
526
+ qemu_bh_delete(ctx->co_schedule_bh);
527
+
528
qemu_lockcnt_lock(&ctx->list_lock);
529
assert(!qemu_lockcnt_count(&ctx->list_lock));
530
while (ctx->first_bh) {
531
@@ -XXX,XX +XXX,XX @@ static bool event_notifier_poll(void *opaque)
532
return atomic_read(&ctx->notified);
533
}
534
535
+static void co_schedule_bh_cb(void *opaque)
536
+{
537
+ AioContext *ctx = opaque;
538
+ QSLIST_HEAD(, Coroutine) straight, reversed;
539
+
540
+ QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines);
541
+ QSLIST_INIT(&straight);
542
+
543
+ while (!QSLIST_EMPTY(&reversed)) {
544
+ Coroutine *co = QSLIST_FIRST(&reversed);
545
+ QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next);
546
+ QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next);
547
+ }
548
+
549
+ while (!QSLIST_EMPTY(&straight)) {
550
+ Coroutine *co = QSLIST_FIRST(&straight);
551
+ QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
552
+ trace_aio_co_schedule_bh_cb(ctx, co);
553
+ qemu_coroutine_enter(co);
554
+ }
555
+}
556
+
557
AioContext *aio_context_new(Error **errp)
558
{
559
int ret;
560
@@ -XXX,XX +XXX,XX @@ AioContext *aio_context_new(Error **errp)
561
}
562
g_source_set_can_recurse(&ctx->source, true);
563
qemu_lockcnt_init(&ctx->list_lock);
564
+
565
+ ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx);
566
+ QSLIST_INIT(&ctx->scheduled_coroutines);
567
+
568
aio_set_event_notifier(ctx, &ctx->notifier,
569
false,
570
(EventNotifierHandler *)
571
@@ -XXX,XX +XXX,XX @@ fail:
572
return NULL;
573
}
574
575
+void aio_co_schedule(AioContext *ctx, Coroutine *co)
576
+{
577
+ trace_aio_co_schedule(ctx, co);
578
+ QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
579
+ co, co_scheduled_next);
580
+ qemu_bh_schedule(ctx->co_schedule_bh);
581
+}
582
+
583
+void aio_co_wake(struct Coroutine *co)
584
+{
585
+ AioContext *ctx;
586
+
587
+ /* Read coroutine before co->ctx. Matches smp_wmb in
588
+ * qemu_coroutine_enter.
589
+ */
590
+ smp_read_barrier_depends();
591
+ ctx = atomic_read(&co->ctx);
592
+
593
+ if (ctx != qemu_get_current_aio_context()) {
594
+ aio_co_schedule(ctx, co);
595
+ return;
596
+ }
597
+
598
+ if (qemu_in_coroutine()) {
599
+ Coroutine *self = qemu_coroutine_self();
600
+ assert(self != co);
601
+ QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
602
+ } else {
603
+ aio_context_acquire(ctx);
604
+ qemu_coroutine_enter(co);
605
+ aio_context_release(ctx);
606
+ }
607
+}
608
+
609
void aio_context_ref(AioContext *ctx)
610
{
611
g_source_ref(&ctx->source);
612
diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c
613
index XXXXXXX..XXXXXXX 100644
614
--- a/util/qemu-coroutine.c
615
+++ b/util/qemu-coroutine.c
616
@@ -XXX,XX +XXX,XX @@
617
#include "qemu/atomic.h"
618
#include "qemu/coroutine.h"
619
#include "qemu/coroutine_int.h"
620
+#include "block/aio.h"
621
622
enum {
623
POOL_BATCH_SIZE = 64,
624
@@ -XXX,XX +XXX,XX @@ void qemu_coroutine_enter(Coroutine *co)
625
}
626
627
co->caller = self;
628
+ co->ctx = qemu_get_current_aio_context();
629
+
630
+ /* Store co->ctx before anything that stores co. Matches
631
+ * barrier in aio_co_wake.
632
+ */
633
+ smp_wmb();
634
+
635
ret = qemu_coroutine_switch(self, co, COROUTINE_ENTER);
636
637
qemu_co_queue_run_restart(co);
638
diff --git a/util/trace-events b/util/trace-events
639
index XXXXXXX..XXXXXXX 100644
640
--- a/util/trace-events
641
+++ b/util/trace-events
642
@@ -XXX,XX +XXX,XX @@ run_poll_handlers_end(void *ctx, bool progress) "ctx %p progress %d"
643
poll_shrink(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
644
poll_grow(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
645
646
+# util/async.c
647
+aio_co_schedule(void *ctx, void *co) "ctx %p co %p"
648
+aio_co_schedule_bh_cb(void *ctx, void *co) "ctx %p co %p"
649
+
650
# util/thread-pool.c
651
thread_pool_submit(void *pool, void *req, void *opaque) "pool %p req %p opaque %p"
652
thread_pool_complete(void *pool, void *req, void *opaque, int ret) "pool %p req %p opaque %p ret %d"
653
--
654
2.9.3
655
656
diff view generated by jsdifflib
From: Thomas Huth <thuth@redhat.com>

dev could be NULL if the PCI device can not be found due to some
reasons, so we must not dereference the pointer in this case.

Signed-off-by: Thomas Huth <thuth@redhat.com>
Message-id: 1519713884-2346-1-git-send-email-thuth@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 tests/libqos/virtio-pci.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/tests/libqos/virtio-pci.c b/tests/libqos/virtio-pci.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/libqos/virtio-pci.c
+++ b/tests/libqos/virtio-pci.c
@@ -XXX,XX +XXX,XX @@ QVirtioPCIDevice *qvirtio_pci_device_find(QPCIBus *bus, uint16_t device_type)
     qvirtio_pci_foreach(bus, device_type, false, 0,
                         qvirtio_pci_assign_device, &dev);

-    dev->vdev.bus = &qvirtio_pci;
+    if (dev) {
+        dev->vdev.bus = &qvirtio_pci;
+    }

     return dev;
 }
--
2.14.3

From: Paolo Bonzini <pbonzini@redhat.com>

qcow2_create2 calls this. Do not run a nested event loop, as that
breaks when aio_co_wake tries to queue the coroutine on the co_queue_wakeup
list of the currently running one.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Message-id: 20170213135235.12274-4-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/block-backend.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/block/block-backend.c b/block/block-backend.c
index XXXXXXX..XXXXXXX 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -XXX,XX +XXX,XX @@ static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
 {
     QEMUIOVector qiov;
     struct iovec iov;
-    Coroutine *co;
     BlkRwCo rwco;

     iov = (struct iovec) {
@@ -XXX,XX +XXX,XX @@ static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
         .ret = NOT_DONE,
     };

-    co = qemu_coroutine_create(co_entry, &rwco);
-    qemu_coroutine_enter(co);
-    BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE);
+    if (qemu_in_coroutine()) {
+        /* Fast-path if already in coroutine context */
+        co_entry(&rwco);
+    } else {
+        Coroutine *co = qemu_coroutine_create(co_entry, &rwco);
+        qemu_coroutine_enter(co);
+        BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE);
+    }

     return rwco.ret;
 }
--
2.9.3
diff view generated by jsdifflib
New patch
1
From: Paolo Bonzini <pbonzini@redhat.com>
1
2
3
Once the thread pool starts using aio_co_wake, it will also need
4
qemu_get_current_aio_context(). Make test-thread-pool create
5
an AioContext with qemu_init_main_loop, so that stubs/iothread.c
6
and tests/iothread.c can provide the rest.
7
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
9
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
10
Reviewed-by: Fam Zheng <famz@redhat.com>
11
Message-id: 20170213135235.12274-5-pbonzini@redhat.com
12
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
13
---
14
tests/test-thread-pool.c | 12 +++---------
15
1 file changed, 3 insertions(+), 9 deletions(-)
16
17
diff --git a/tests/test-thread-pool.c b/tests/test-thread-pool.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/tests/test-thread-pool.c
20
+++ b/tests/test-thread-pool.c
21
@@ -XXX,XX +XXX,XX @@
22
#include "qapi/error.h"
23
#include "qemu/timer.h"
24
#include "qemu/error-report.h"
25
+#include "qemu/main-loop.h"
26
27
static AioContext *ctx;
28
static ThreadPool *pool;
29
@@ -XXX,XX +XXX,XX @@ static void test_cancel_async(void)
30
int main(int argc, char **argv)
31
{
32
int ret;
33
- Error *local_error = NULL;
34
35
- init_clocks();
36
-
37
- ctx = aio_context_new(&local_error);
38
- if (!ctx) {
39
- error_reportf_err(local_error, "Failed to create AIO Context: ");
40
- exit(1);
41
- }
42
+ qemu_init_main_loop(&error_abort);
43
+ ctx = qemu_get_current_aio_context();
44
pool = aio_get_thread_pool(ctx);
45
46
g_test_init(&argc, &argv, NULL);
47
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
48
49
ret = g_test_run();
50
51
- aio_context_unref(ctx);
52
return ret;
53
}
54
--
55
2.9.3
56
57
diff view generated by jsdifflib
New patch
1
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
3
This is in preparation for making qio_channel_yield work on
4
AioContexts other than the main one.
5
6
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
7
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
8
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
9
Reviewed-by: Fam Zheng <famz@redhat.com>
10
Message-id: 20170213135235.12274-6-pbonzini@redhat.com
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
13
include/io/channel.h | 25 +++++++++++++++++++++++++
14
io/channel-command.c | 13 +++++++++++++
15
io/channel-file.c | 11 +++++++++++
16
io/channel-socket.c | 16 +++++++++++-----
17
io/channel-tls.c | 12 ++++++++++++
18
io/channel-watch.c | 6 ++++++
19
io/channel.c | 11 +++++++++++
20
7 files changed, 89 insertions(+), 5 deletions(-)
21
22
diff --git a/include/io/channel.h b/include/io/channel.h
23
index XXXXXXX..XXXXXXX 100644
24
--- a/include/io/channel.h
25
+++ b/include/io/channel.h
26
@@ -XXX,XX +XXX,XX @@
27
28
#include "qemu-common.h"
29
#include "qom/object.h"
30
+#include "block/aio.h"
31
32
#define TYPE_QIO_CHANNEL "qio-channel"
33
#define QIO_CHANNEL(obj) \
34
@@ -XXX,XX +XXX,XX @@ struct QIOChannelClass {
35
off_t offset,
36
int whence,
37
Error **errp);
38
+ void (*io_set_aio_fd_handler)(QIOChannel *ioc,
39
+ AioContext *ctx,
40
+ IOHandler *io_read,
41
+ IOHandler *io_write,
42
+ void *opaque);
43
};
44
45
/* General I/O handling functions */
46
@@ -XXX,XX +XXX,XX @@ void qio_channel_yield(QIOChannel *ioc,
47
void qio_channel_wait(QIOChannel *ioc,
48
GIOCondition condition);
49
50
+/**
51
+ * qio_channel_set_aio_fd_handler:
52
+ * @ioc: the channel object
53
+ * @ctx: the AioContext to set the handlers on
54
+ * @io_read: the read handler
55
+ * @io_write: the write handler
56
+ * @opaque: the opaque value passed to the handler
57
+ *
58
+ * This is used internally by qio_channel_yield(). It can
59
+ * be used by channel implementations to forward the handlers
60
+ * to another channel (e.g. from #QIOChannelTLS to the
61
+ * underlying socket).
62
+ */
63
+void qio_channel_set_aio_fd_handler(QIOChannel *ioc,
64
+ AioContext *ctx,
65
+ IOHandler *io_read,
66
+ IOHandler *io_write,
67
+ void *opaque);
68
+
69
#endif /* QIO_CHANNEL_H */
70
diff --git a/io/channel-command.c b/io/channel-command.c
71
index XXXXXXX..XXXXXXX 100644
72
--- a/io/channel-command.c
73
+++ b/io/channel-command.c
74
@@ -XXX,XX +XXX,XX @@ static int qio_channel_command_close(QIOChannel *ioc,
75
}
76
77
78
+static void qio_channel_command_set_aio_fd_handler(QIOChannel *ioc,
79
+ AioContext *ctx,
80
+ IOHandler *io_read,
81
+ IOHandler *io_write,
82
+ void *opaque)
83
+{
84
+ QIOChannelCommand *cioc = QIO_CHANNEL_COMMAND(ioc);
85
+ aio_set_fd_handler(ctx, cioc->readfd, false, io_read, NULL, NULL, opaque);
86
+ aio_set_fd_handler(ctx, cioc->writefd, false, NULL, io_write, NULL, opaque);
87
+}
88
+
89
+
90
static GSource *qio_channel_command_create_watch(QIOChannel *ioc,
91
GIOCondition condition)
92
{
93
@@ -XXX,XX +XXX,XX @@ static void qio_channel_command_class_init(ObjectClass *klass,
94
ioc_klass->io_set_blocking = qio_channel_command_set_blocking;
95
ioc_klass->io_close = qio_channel_command_close;
96
ioc_klass->io_create_watch = qio_channel_command_create_watch;
97
+ ioc_klass->io_set_aio_fd_handler = qio_channel_command_set_aio_fd_handler;
98
}
99
100
static const TypeInfo qio_channel_command_info = {
101
diff --git a/io/channel-file.c b/io/channel-file.c
102
index XXXXXXX..XXXXXXX 100644
103
--- a/io/channel-file.c
104
+++ b/io/channel-file.c
105
@@ -XXX,XX +XXX,XX @@ static int qio_channel_file_close(QIOChannel *ioc,
106
}
107
108
109
+static void qio_channel_file_set_aio_fd_handler(QIOChannel *ioc,
110
+ AioContext *ctx,
111
+ IOHandler *io_read,
112
+ IOHandler *io_write,
113
+ void *opaque)
114
+{
115
+ QIOChannelFile *fioc = QIO_CHANNEL_FILE(ioc);
116
+ aio_set_fd_handler(ctx, fioc->fd, false, io_read, io_write, NULL, opaque);
117
+}
118
+
119
static GSource *qio_channel_file_create_watch(QIOChannel *ioc,
120
GIOCondition condition)
121
{
122
@@ -XXX,XX +XXX,XX @@ static void qio_channel_file_class_init(ObjectClass *klass,
123
ioc_klass->io_seek = qio_channel_file_seek;
124
ioc_klass->io_close = qio_channel_file_close;
125
ioc_klass->io_create_watch = qio_channel_file_create_watch;
126
+ ioc_klass->io_set_aio_fd_handler = qio_channel_file_set_aio_fd_handler;
127
}
128
129
static const TypeInfo qio_channel_file_info = {
130
diff --git a/io/channel-socket.c b/io/channel-socket.c
131
index XXXXXXX..XXXXXXX 100644
132
--- a/io/channel-socket.c
133
+++ b/io/channel-socket.c
134
@@ -XXX,XX +XXX,XX @@ qio_channel_socket_set_blocking(QIOChannel *ioc,
135
qemu_set_block(sioc->fd);
136
} else {
137
qemu_set_nonblock(sioc->fd);
138
-#ifdef WIN32
139
- WSAEventSelect(sioc->fd, ioc->event,
140
- FD_READ | FD_ACCEPT | FD_CLOSE |
141
- FD_CONNECT | FD_WRITE | FD_OOB);
142
-#endif
143
}
144
return 0;
145
}
146
@@ -XXX,XX +XXX,XX @@ qio_channel_socket_shutdown(QIOChannel *ioc,
147
return 0;
148
}
149
150
+static void qio_channel_socket_set_aio_fd_handler(QIOChannel *ioc,
151
+ AioContext *ctx,
152
+ IOHandler *io_read,
153
+ IOHandler *io_write,
154
+ void *opaque)
155
+{
156
+ QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
157
+ aio_set_fd_handler(ctx, sioc->fd, false, io_read, io_write, NULL, opaque);
158
+}
159
+
160
static GSource *qio_channel_socket_create_watch(QIOChannel *ioc,
161
GIOCondition condition)
162
{
163
@@ -XXX,XX +XXX,XX @@ static void qio_channel_socket_class_init(ObjectClass *klass,
164
ioc_klass->io_set_cork = qio_channel_socket_set_cork;
165
ioc_klass->io_set_delay = qio_channel_socket_set_delay;
166
ioc_klass->io_create_watch = qio_channel_socket_create_watch;
167
+ ioc_klass->io_set_aio_fd_handler = qio_channel_socket_set_aio_fd_handler;
168
}
169
170
static const TypeInfo qio_channel_socket_info = {
171
diff --git a/io/channel-tls.c b/io/channel-tls.c
172
index XXXXXXX..XXXXXXX 100644
173
--- a/io/channel-tls.c
174
+++ b/io/channel-tls.c
175
@@ -XXX,XX +XXX,XX @@ static int qio_channel_tls_close(QIOChannel *ioc,
176
return qio_channel_close(tioc->master, errp);
177
}
178
179
+static void qio_channel_tls_set_aio_fd_handler(QIOChannel *ioc,
180
+ AioContext *ctx,
181
+ IOHandler *io_read,
182
+ IOHandler *io_write,
183
+ void *opaque)
184
+{
185
+ QIOChannelTLS *tioc = QIO_CHANNEL_TLS(ioc);
186
+
187
+ qio_channel_set_aio_fd_handler(tioc->master, ctx, io_read, io_write, opaque);
188
+}
189
+
190
static GSource *qio_channel_tls_create_watch(QIOChannel *ioc,
191
GIOCondition condition)
192
{
193
@@ -XXX,XX +XXX,XX @@ static void qio_channel_tls_class_init(ObjectClass *klass,
194
ioc_klass->io_close = qio_channel_tls_close;
195
ioc_klass->io_shutdown = qio_channel_tls_shutdown;
196
ioc_klass->io_create_watch = qio_channel_tls_create_watch;
197
+ ioc_klass->io_set_aio_fd_handler = qio_channel_tls_set_aio_fd_handler;
198
}
199
200
static const TypeInfo qio_channel_tls_info = {
201
diff --git a/io/channel-watch.c b/io/channel-watch.c
202
index XXXXXXX..XXXXXXX 100644
203
--- a/io/channel-watch.c
204
+++ b/io/channel-watch.c
205
@@ -XXX,XX +XXX,XX @@ GSource *qio_channel_create_socket_watch(QIOChannel *ioc,
206
GSource *source;
207
QIOChannelSocketSource *ssource;
208
209
+#ifdef WIN32
210
+ WSAEventSelect(socket, ioc->event,
211
+ FD_READ | FD_ACCEPT | FD_CLOSE |
212
+ FD_CONNECT | FD_WRITE | FD_OOB);
213
+#endif
214
+
215
source = g_source_new(&qio_channel_socket_source_funcs,
216
sizeof(QIOChannelSocketSource));
217
ssource = (QIOChannelSocketSource *)source;
218
diff --git a/io/channel.c b/io/channel.c
219
index XXXXXXX..XXXXXXX 100644
220
--- a/io/channel.c
221
+++ b/io/channel.c
222
@@ -XXX,XX +XXX,XX @@ GSource *qio_channel_create_watch(QIOChannel *ioc,
223
}
224
225
226
+void qio_channel_set_aio_fd_handler(QIOChannel *ioc,
227
+ AioContext *ctx,
228
+ IOHandler *io_read,
229
+ IOHandler *io_write,
230
+ void *opaque)
231
+{
232
+ QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
233
+
234
+ klass->io_set_aio_fd_handler(ioc, ctx, io_read, io_write, opaque);
235
+}
236
+
237
guint qio_channel_add_watch(QIOChannel *ioc,
238
GIOCondition condition,
239
QIOChannelFunc func,
240
--
241
2.9.3
242
243
diff view generated by jsdifflib
New patch
1
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
3
Support separate coroutines for reading and writing, and place the
4
read/write handlers on the AioContext that the QIOChannel is registered
5
with.
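
The qio_channel_attach_aio_context() documentation added below spells out
the hand-off sequence; expressed as an editor's sketch (the wrapper name is
invented, while the called functions come from this series and existing
QEMU API), moving a yielded coroutine and its channel to another AioContext
looks roughly like this:

#include "block/aio.h"
#include "io/channel.h"

/* Sketch of the migration sequence described in the new documentation;
 * not code from the patch itself. */
static void move_channel(QIOChannel *ioc, Coroutine *co,
                         AioContext *old_ctx, AioContext *new_ctx)
{
    /* Stop I/O handlers in the old context while the coroutine is yielded. */
    aio_context_acquire(old_ctx);
    qio_channel_detach_aio_context(ioc);
    aio_context_release(old_ctx);

    /* Re-register in the new context and resume the coroutine there. */
    aio_context_acquire(new_ctx);
    qio_channel_attach_aio_context(ioc, new_ctx);
    aio_context_release(new_ctx);
    aio_co_schedule(new_ctx, co);
}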
6
7
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
9
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
10
Reviewed-by: Fam Zheng <famz@redhat.com>
11
Message-id: 20170213135235.12274-7-pbonzini@redhat.com
12
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
13
---
14
include/io/channel.h | 47 ++++++++++++++++++++++++++--
15
io/channel.c | 86 +++++++++++++++++++++++++++++++++++++++-------------
16
2 files changed, 109 insertions(+), 24 deletions(-)
17
18
diff --git a/include/io/channel.h b/include/io/channel.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/include/io/channel.h
21
+++ b/include/io/channel.h
22
@@ -XXX,XX +XXX,XX @@
23
24
#include "qemu-common.h"
25
#include "qom/object.h"
26
+#include "qemu/coroutine.h"
27
#include "block/aio.h"
28
29
#define TYPE_QIO_CHANNEL "qio-channel"
30
@@ -XXX,XX +XXX,XX @@ struct QIOChannel {
31
Object parent;
32
unsigned int features; /* bitmask of QIOChannelFeatures */
33
char *name;
34
+ AioContext *ctx;
35
+ Coroutine *read_coroutine;
36
+ Coroutine *write_coroutine;
37
#ifdef _WIN32
38
HANDLE event; /* For use with GSource on Win32 */
39
#endif
40
@@ -XXX,XX +XXX,XX @@ guint qio_channel_add_watch(QIOChannel *ioc,
41
42
43
/**
44
+ * qio_channel_attach_aio_context:
45
+ * @ioc: the channel object
46
+ * @ctx: the #AioContext to set the handlers on
47
+ *
48
+ * Request that qio_channel_yield() sets I/O handlers on
49
+ * the given #AioContext. If @ctx is %NULL, qio_channel_yield()
50
+ * uses QEMU's main thread event loop.
51
+ *
52
+ * You can move a #QIOChannel from one #AioContext to another even if
53
+ * I/O handlers are set for a coroutine. However, #QIOChannel provides
54
+ * no synchronization between the calls to qio_channel_yield() and
55
+ * qio_channel_attach_aio_context().
56
+ *
57
+ * Therefore you should first call qio_channel_detach_aio_context()
58
+ * to ensure that the coroutine is not entered concurrently. Then,
59
+ * while the coroutine has yielded, call qio_channel_attach_aio_context(),
60
+ * and then aio_co_schedule() to place the coroutine on the new
61
+ * #AioContext. The calls to qio_channel_detach_aio_context()
62
+ * and qio_channel_attach_aio_context() should be protected with
63
+ * aio_context_acquire() and aio_context_release().
64
+ */
65
+void qio_channel_attach_aio_context(QIOChannel *ioc,
66
+ AioContext *ctx);
67
+
68
+/**
69
+ * qio_channel_detach_aio_context:
70
+ * @ioc: the channel object
71
+ *
72
+ * Disable any I/O handlers set by qio_channel_yield(). With the
73
+ * help of aio_co_schedule(), this allows moving a coroutine that was
74
+ * paused by qio_channel_yield() to another context.
75
+ */
76
+void qio_channel_detach_aio_context(QIOChannel *ioc);
77
+
78
+/**
79
* qio_channel_yield:
80
* @ioc: the channel object
81
* @condition: the I/O condition to wait for
82
*
83
- * Yields execution from the current coroutine until
84
- * the condition indicated by @condition becomes
85
- * available.
86
+ * Yields execution from the current coroutine until the condition
87
+ * indicated by @condition becomes available. @condition must
88
+ * be either %G_IO_IN or %G_IO_OUT; it cannot contain both. In
89
+ * addition, no two coroutines can be waiting on the same condition
90
+ * and channel at the same time.
91
*
92
* This must only be called from coroutine context
93
*/
94
diff --git a/io/channel.c b/io/channel.c
95
index XXXXXXX..XXXXXXX 100644
96
--- a/io/channel.c
97
+++ b/io/channel.c
98
@@ -XXX,XX +XXX,XX @@
99
#include "qemu/osdep.h"
100
#include "io/channel.h"
101
#include "qapi/error.h"
102
-#include "qemu/coroutine.h"
103
+#include "qemu/main-loop.h"
104
105
bool qio_channel_has_feature(QIOChannel *ioc,
106
QIOChannelFeature feature)
107
@@ -XXX,XX +XXX,XX @@ off_t qio_channel_io_seek(QIOChannel *ioc,
108
}
109
110
111
-typedef struct QIOChannelYieldData QIOChannelYieldData;
112
-struct QIOChannelYieldData {
113
- QIOChannel *ioc;
114
- Coroutine *co;
115
-};
116
+static void qio_channel_set_aio_fd_handlers(QIOChannel *ioc);
117
118
+static void qio_channel_restart_read(void *opaque)
119
+{
120
+ QIOChannel *ioc = opaque;
121
+ Coroutine *co = ioc->read_coroutine;
122
+
123
+ ioc->read_coroutine = NULL;
124
+ qio_channel_set_aio_fd_handlers(ioc);
125
+ aio_co_wake(co);
126
+}
127
128
-static gboolean qio_channel_yield_enter(QIOChannel *ioc,
129
- GIOCondition condition,
130
- gpointer opaque)
131
+static void qio_channel_restart_write(void *opaque)
132
{
133
- QIOChannelYieldData *data = opaque;
134
- qemu_coroutine_enter(data->co);
135
- return FALSE;
136
+ QIOChannel *ioc = opaque;
137
+ Coroutine *co = ioc->write_coroutine;
138
+
139
+ ioc->write_coroutine = NULL;
140
+ qio_channel_set_aio_fd_handlers(ioc);
141
+ aio_co_wake(co);
142
}
143
144
+static void qio_channel_set_aio_fd_handlers(QIOChannel *ioc)
145
+{
146
+ IOHandler *rd_handler = NULL, *wr_handler = NULL;
147
+ AioContext *ctx;
148
+
149
+ if (ioc->read_coroutine) {
150
+ rd_handler = qio_channel_restart_read;
151
+ }
152
+ if (ioc->write_coroutine) {
153
+ wr_handler = qio_channel_restart_write;
154
+ }
155
+
156
+ ctx = ioc->ctx ? ioc->ctx : iohandler_get_aio_context();
157
+ qio_channel_set_aio_fd_handler(ioc, ctx, rd_handler, wr_handler, ioc);
158
+}
159
+
160
+void qio_channel_attach_aio_context(QIOChannel *ioc,
161
+ AioContext *ctx)
162
+{
163
+ AioContext *old_ctx;
164
+ if (ioc->ctx == ctx) {
165
+ return;
166
+ }
167
+
168
+ old_ctx = ioc->ctx ? ioc->ctx : iohandler_get_aio_context();
169
+ qio_channel_set_aio_fd_handler(ioc, old_ctx, NULL, NULL, NULL);
170
+ ioc->ctx = ctx;
171
+ qio_channel_set_aio_fd_handlers(ioc);
172
+}
173
+
174
+void qio_channel_detach_aio_context(QIOChannel *ioc)
175
+{
176
+ ioc->read_coroutine = NULL;
177
+ ioc->write_coroutine = NULL;
178
+ qio_channel_set_aio_fd_handlers(ioc);
179
+ ioc->ctx = NULL;
180
+}
181
182
void coroutine_fn qio_channel_yield(QIOChannel *ioc,
183
GIOCondition condition)
184
{
185
- QIOChannelYieldData data;
186
-
187
assert(qemu_in_coroutine());
188
- data.ioc = ioc;
189
- data.co = qemu_coroutine_self();
190
- qio_channel_add_watch(ioc,
191
- condition,
192
- qio_channel_yield_enter,
193
- &data,
194
- NULL);
195
+ if (condition == G_IO_IN) {
196
+ assert(!ioc->read_coroutine);
197
+ ioc->read_coroutine = qemu_coroutine_self();
198
+ } else if (condition == G_IO_OUT) {
199
+ assert(!ioc->write_coroutine);
200
+ ioc->write_coroutine = qemu_coroutine_self();
201
+ } else {
202
+ abort();
203
+ }
204
+ qio_channel_set_aio_fd_handlers(ioc);
205
qemu_coroutine_yield();
206
}
207
208
--
209
2.9.3
210
211
diff view generated by jsdifflib
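The documentation added above prescribes a specific hand-off when moving a channel between AioContexts: detach first, then, while the coroutine is still yielded, attach to the new context and reschedule the coroutine there. A rough sketch of that sequence, assuming the caller owns hypothetical variables ioc, co, old_ctx and new_ctx (this is one plausible arrangement of the locking, not code taken from the patch):

static void move_channel_to(QIOChannel *ioc, Coroutine *co,
                            AioContext *old_ctx, AioContext *new_ctx)
{
    aio_context_acquire(old_ctx);
    qio_channel_detach_aio_context(ioc);  /* old handlers removed; co stays yielded */
    aio_context_release(old_ctx);

    aio_context_acquire(new_ctx);
    qio_channel_attach_aio_context(ioc, new_ctx);
    aio_co_schedule(new_ctx, co);         /* co will be entered from new_ctx */
    aio_context_release(new_ctx);
}

Inside the coroutine itself the wait remains a single call, e.g. qio_channel_yield(ioc, G_IO_IN) to block until the channel is readable; per the contract above, only one coroutine may wait for G_IO_IN and one for G_IO_OUT on a given channel at any time.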
From: Su Hang <suhang16@mails.ucas.ac.cn>

Run `clang-format -i util/uri.c` first, then change a few pieces of code
back manually, to make sure only whitespace is involved.

Signed-off-by: Su Hang <suhang16@mails.ucas.ac.cn>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Message-id: 1519533358-13759-2-git-send-email-suhang16@mails.ucas.ac.cn
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
util/uri.c | 1450 ++++++++++++++++++++++++++++++------------------------------
1 file changed, 726 insertions(+), 724 deletions(-)

From: Paolo Bonzini <pbonzini@redhat.com>

In the client, read the reply headers from a coroutine, switching the
read side between the "read header" coroutine and the I/O coroutine that
reads the body of the reply.

In the server, if the server can read more requests it will create a new
"read request" coroutine as soon as a request has been read. Otherwise,
the new coroutine is created in nbd_request_put.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
Message-id: 20170213135235.12274-8-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
block/nbd-client.h | 2 +-
block/nbd-client.c | 117 ++++++++++++++++++++++++-----------------------------
nbd/client.c | 2 +-
nbd/common.c | 9 +----
nbd/server.c | 94 +++++++++++++-----------------------------
5 files changed, 83 insertions(+), 141 deletions(-)

diff --git a/util/uri.c b/util/uri.c
25
diff --git a/block/nbd-client.h b/block/nbd-client.h
15
index XXXXXXX..XXXXXXX 100644
26
index XXXXXXX..XXXXXXX 100644
16
--- a/util/uri.c
27
--- a/block/nbd-client.h
17
+++ b/util/uri.c
28
+++ b/block/nbd-client.h
18
@@ -XXX,XX +XXX,XX @@ static void uri_clean(URI *uri);
29
@@ -XXX,XX +XXX,XX @@ typedef struct NBDClientSession {
19
*/
30
20
#define IS_ALPHA(x) (IS_LOWALPHA(x) || IS_UPALPHA(x))
31
CoMutex send_mutex;
21
32
CoQueue free_sema;
22
-
33
- Coroutine *send_coroutine;
23
/*
34
+ Coroutine *read_reply_co;
24
* lowalpha = "a" | "b" | "c" | "d" | "e" | "f" | "g" | "h" | "i" | "j" |
35
int in_flight;
25
* "k" | "l" | "m" | "n" | "o" | "p" | "q" | "r" | "s" | "t" |
36
26
@@ -XXX,XX +XXX,XX @@ static void uri_clean(URI *uri);
37
Coroutine *recv_coroutine[MAX_NBD_REQUESTS];
27
* mark = "-" | "_" | "." | "!" | "~" | "*" | "'" | "(" | ")"
38
diff --git a/block/nbd-client.c b/block/nbd-client.c
28
*/
39
index XXXXXXX..XXXXXXX 100644
29
40
--- a/block/nbd-client.c
30
-#define IS_MARK(x) (((x) == '-') || ((x) == '_') || ((x) == '.') || \
41
+++ b/block/nbd-client.c
31
- ((x) == '!') || ((x) == '~') || ((x) == '*') || ((x) == '\'') || \
42
@@ -XXX,XX +XXX,XX @@
32
+#define IS_MARK(x) (((x) == '-') || ((x) == '_') || ((x) == '.') || \
43
#define HANDLE_TO_INDEX(bs, handle) ((handle) ^ ((uint64_t)(intptr_t)bs))
33
+ ((x) == '!') || ((x) == '~') || ((x) == '*') || ((x) == '\'') || \
44
#define INDEX_TO_HANDLE(bs, index) ((index) ^ ((uint64_t)(intptr_t)bs))
34
((x) == '(') || ((x) == ')'))
45
35
46
-static void nbd_recv_coroutines_enter_all(NBDClientSession *s)
36
/*
47
+static void nbd_recv_coroutines_enter_all(BlockDriverState *bs)
37
* unwise = "{" | "}" | "|" | "\" | "^" | "`"
48
{
38
*/
49
+ NBDClientSession *s = nbd_get_client_session(bs);
39
50
int i;
40
-#define IS_UNWISE(p) \
51
41
- (((*(p) == '{')) || ((*(p) == '}')) || ((*(p) == '|')) || \
52
for (i = 0; i < MAX_NBD_REQUESTS; i++) {
42
- ((*(p) == '\\')) || ((*(p) == '^')) || ((*(p) == '[')) || \
53
@@ -XXX,XX +XXX,XX @@ static void nbd_recv_coroutines_enter_all(NBDClientSession *s)
43
- ((*(p) == ']')) || ((*(p) == '`')))
54
qemu_coroutine_enter(s->recv_coroutine[i]);
44
+#define IS_UNWISE(p) \
55
}
45
+ (((*(p) == '{')) || ((*(p) == '}')) || ((*(p) == '|')) || \
56
}
46
+ ((*(p) == '\\')) || ((*(p) == '^')) || ((*(p) == '[')) || \
57
+ BDRV_POLL_WHILE(bs, s->read_reply_co);
47
+ ((*(p) == ']')) || ((*(p) == '`')))
58
}
48
/*
59
49
* reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" | "$" | "," |
60
static void nbd_teardown_connection(BlockDriverState *bs)
50
* "[" | "]"
61
@@ -XXX,XX +XXX,XX @@ static void nbd_teardown_connection(BlockDriverState *bs)
51
*/
62
qio_channel_shutdown(client->ioc,
52
63
QIO_CHANNEL_SHUTDOWN_BOTH,
53
-#define IS_RESERVED(x) (((x) == ';') || ((x) == '/') || ((x) == '?') || \
64
NULL);
54
- ((x) == ':') || ((x) == '@') || ((x) == '&') || ((x) == '=') || \
65
- nbd_recv_coroutines_enter_all(client);
55
- ((x) == '+') || ((x) == '$') || ((x) == ',') || ((x) == '[') || \
66
+ nbd_recv_coroutines_enter_all(bs);
56
- ((x) == ']'))
67
57
+#define IS_RESERVED(x) (((x) == ';') || ((x) == '/') || ((x) == '?') || \
68
nbd_client_detach_aio_context(bs);
58
+ ((x) == ':') || ((x) == '@') || ((x) == '&') || ((x) == '=') || \
69
object_unref(OBJECT(client->sioc));
59
+ ((x) == '+') || ((x) == '$') || ((x) == ',') || ((x) == '[') || \
70
@@ -XXX,XX +XXX,XX @@ static void nbd_teardown_connection(BlockDriverState *bs)
60
+ ((x) == ']'))
71
client->ioc = NULL;
61
72
}
62
/*
73
63
* unreserved = alphanum | mark
74
-static void nbd_reply_ready(void *opaque)
64
@@ -XXX,XX +XXX,XX @@ static void uri_clean(URI *uri);
75
+static coroutine_fn void nbd_read_reply_entry(void *opaque)
65
* Skip to next pointer char, handle escaped sequences
76
{
66
*/
77
- BlockDriverState *bs = opaque;
67
78
- NBDClientSession *s = nbd_get_client_session(bs);
68
-#define NEXT(p) ((*p == '%')? p += 3 : p++)
79
+ NBDClientSession *s = opaque;
69
+#define NEXT(p) ((*p == '%') ? p += 3 : p++)
80
uint64_t i;
70
71
/*
72
* Productions from the spec.
73
@@ -XXX,XX +XXX,XX @@ static void uri_clean(URI *uri);
74
* path = [ abs_path | opaque_part ]
75
*/
76
77
-
78
/************************************************************************
79
- *                                    *
80
- * RFC 3986 parser                *
81
- *                                    *
82
+ * *
83
+ * RFC 3986 parser *
84
+ * *
85
************************************************************************/
86
87
#define ISA_DIGIT(p) ((*(p) >= '0') && (*(p) <= '9'))
88
-#define ISA_ALPHA(p) (((*(p) >= 'a') && (*(p) <= 'z')) ||        \
89
+#define ISA_ALPHA(p) (((*(p) >= 'a') && (*(p) <= 'z')) || \
90
((*(p) >= 'A') && (*(p) <= 'Z')))
91
-#define ISA_HEXDIG(p)                            \
92
- (ISA_DIGIT(p) || ((*(p) >= 'a') && (*(p) <= 'f')) ||        \
93
- ((*(p) >= 'A') && (*(p) <= 'F')))
94
+#define ISA_HEXDIG(p) \
95
+ (ISA_DIGIT(p) || ((*(p) >= 'a') && (*(p) <= 'f')) || \
96
+ ((*(p) >= 'A') && (*(p) <= 'F')))
97
98
/*
99
* sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
100
* / "*" / "+" / "," / ";" / "="
101
*/
102
-#define ISA_SUB_DELIM(p)                        \
103
- (((*(p) == '!')) || ((*(p) == '$')) || ((*(p) == '&')) ||        \
104
- ((*(p) == '(')) || ((*(p) == ')')) || ((*(p) == '*')) ||        \
105
- ((*(p) == '+')) || ((*(p) == ',')) || ((*(p) == ';')) ||        \
106
- ((*(p) == '=')) || ((*(p) == '\'')))
107
+#define ISA_SUB_DELIM(p) \
108
+ (((*(p) == '!')) || ((*(p) == '$')) || ((*(p) == '&')) || \
109
+ ((*(p) == '(')) || ((*(p) == ')')) || ((*(p) == '*')) || \
110
+ ((*(p) == '+')) || ((*(p) == ',')) || ((*(p) == ';')) || \
111
+ ((*(p) == '=')) || ((*(p) == '\'')))
112
113
/*
114
* gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
115
*/
116
-#define ISA_GEN_DELIM(p)                        \
117
- (((*(p) == ':')) || ((*(p) == '/')) || ((*(p) == '?')) || \
118
- ((*(p) == '#')) || ((*(p) == '[')) || ((*(p) == ']')) || \
119
- ((*(p) == '@')))
120
+#define ISA_GEN_DELIM(p) \
121
+ (((*(p) == ':')) || ((*(p) == '/')) || ((*(p) == '?')) || \
122
+ ((*(p) == '#')) || ((*(p) == '[')) || ((*(p) == ']')) || \
123
+ ((*(p) == '@')))
124
125
/*
126
* reserved = gen-delims / sub-delims
127
@@ -XXX,XX +XXX,XX @@ static void uri_clean(URI *uri);
128
/*
129
* unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
130
*/
131
-#define ISA_UNRESERVED(p)                        \
132
- ((ISA_ALPHA(p)) || (ISA_DIGIT(p)) || ((*(p) == '-')) ||        \
133
- ((*(p) == '.')) || ((*(p) == '_')) || ((*(p) == '~')))
134
+#define ISA_UNRESERVED(p) \
135
+ ((ISA_ALPHA(p)) || (ISA_DIGIT(p)) || ((*(p) == '-')) || \
136
+ ((*(p) == '.')) || ((*(p) == '_')) || ((*(p) == '~')))
137
138
/*
139
* pct-encoded = "%" HEXDIG HEXDIG
140
*/
141
-#define ISA_PCT_ENCODED(p)                        \
142
- ((*(p) == '%') && (ISA_HEXDIG(p + 1)) && (ISA_HEXDIG(p + 2)))
143
+#define ISA_PCT_ENCODED(p) \
144
+ ((*(p) == '%') && (ISA_HEXDIG(p + 1)) && (ISA_HEXDIG(p + 2)))
145
146
/*
147
* pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
148
*/
149
-#define ISA_PCHAR(p)                            \
150
- (ISA_UNRESERVED(p) || ISA_PCT_ENCODED(p) || ISA_SUB_DELIM(p) ||    \
151
- ((*(p) == ':')) || ((*(p) == '@')))
152
+#define ISA_PCHAR(p) \
153
+ (ISA_UNRESERVED(p) || ISA_PCT_ENCODED(p) || ISA_SUB_DELIM(p) || \
154
+ ((*(p) == ':')) || ((*(p) == '@')))
155
156
/**
157
* rfc3986_parse_scheme:
158
@@ -XXX,XX +XXX,XX @@ static void uri_clean(URI *uri);
159
*
160
* Returns 0 or the error code
161
*/
162
-static int
163
-rfc3986_parse_scheme(URI *uri, const char **str) {
164
+static int rfc3986_parse_scheme(URI *uri, const char **str)
165
+{
166
const char *cur;
167
168
if (str == NULL)
169
-    return(-1);
170
+ return (-1);
171
172
cur = *str;
173
if (!ISA_ALPHA(cur))
174
-    return(2);
175
+ return (2);
176
cur++;
177
- while (ISA_ALPHA(cur) || ISA_DIGIT(cur) ||
178
- (*cur == '+') || (*cur == '-') || (*cur == '.')) cur++;
179
+ while (ISA_ALPHA(cur) || ISA_DIGIT(cur) || (*cur == '+') || (*cur == '-') ||
180
+ (*cur == '.'))
181
+ cur++;
182
if (uri != NULL) {
183
g_free(uri->scheme);
184
-    uri->scheme = g_strndup(*str, cur - *str);
185
+ uri->scheme = g_strndup(*str, cur - *str);
186
}
187
*str = cur;
188
- return(0);
189
+ return (0);
190
}
191
192
/**
193
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_scheme(URI *uri, const char **str) {
194
*
195
* Returns 0 or the error code
196
*/
197
-static int
198
-rfc3986_parse_fragment(URI *uri, const char **str)
199
+static int rfc3986_parse_fragment(URI *uri, const char **str)
200
{
201
const char *cur;
202
203
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_fragment(URI *uri, const char **str)
204
NEXT(cur);
205
if (uri != NULL) {
206
g_free(uri->fragment);
207
-    if (uri->cleanup & 2)
208
-     uri->fragment = g_strndup(*str, cur - *str);
209
-    else
210
-     uri->fragment = uri_string_unescape(*str, cur - *str, NULL);
211
+ if (uri->cleanup & 2)
212
+ uri->fragment = g_strndup(*str, cur - *str);
213
+ else
214
+ uri->fragment = uri_string_unescape(*str, cur - *str, NULL);
215
}
216
*str = cur;
217
return (0);
218
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_fragment(URI *uri, const char **str)
219
*
220
* Returns 0 or the error code
221
*/
222
-static int
223
-rfc3986_parse_query(URI *uri, const char **str)
224
+static int rfc3986_parse_query(URI *uri, const char **str)
225
{
226
const char *cur;
227
228
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_query(URI *uri, const char **str)
229
NEXT(cur);
230
if (uri != NULL) {
231
g_free(uri->query);
232
-    uri->query = g_strndup (*str, cur - *str);
233
+ uri->query = g_strndup(*str, cur - *str);
234
}
235
*str = cur;
236
return (0);
237
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_query(URI *uri, const char **str)
238
*
239
* Returns 0 or the error code
240
*/
241
-static int
242
-rfc3986_parse_port(URI *uri, const char **str)
243
+static int rfc3986_parse_port(URI *uri, const char **str)
244
{
245
const char *cur = *str;
246
int port = 0;
247
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_port(URI *uri, const char **str)
248
*
249
* Returns 0 or the error code
250
*/
251
-static int
252
-rfc3986_parse_user_info(URI *uri, const char **str)
253
+static int rfc3986_parse_user_info(URI *uri, const char **str)
254
{
255
const char *cur;
256
257
cur = *str;
258
- while (ISA_UNRESERVED(cur) || ISA_PCT_ENCODED(cur) ||
259
- ISA_SUB_DELIM(cur) || (*cur == ':'))
260
-    NEXT(cur);
261
+ while (ISA_UNRESERVED(cur) || ISA_PCT_ENCODED(cur) || ISA_SUB_DELIM(cur) ||
262
+ (*cur == ':'))
263
+ NEXT(cur);
264
if (*cur == '@') {
265
-    if (uri != NULL) {
266
+ if (uri != NULL) {
267
g_free(uri->user);
268
-     if (uri->cleanup & 2)
269
-        uri->user = g_strndup(*str, cur - *str);
270
-     else
271
-        uri->user = uri_string_unescape(*str, cur - *str, NULL);
272
-    }
273
-    *str = cur;
274
-    return(0);
275
+ if (uri->cleanup & 2)
276
+ uri->user = g_strndup(*str, cur - *str);
277
+ else
278
+ uri->user = uri_string_unescape(*str, cur - *str, NULL);
279
+ }
280
+ *str = cur;
281
+ return (0);
282
}
283
- return(1);
284
+ return (1);
285
}
286
287
/**
288
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_user_info(URI *uri, const char **str)
289
*
290
* Returns 0 if found and skipped, 1 otherwise
291
*/
292
-static int
293
-rfc3986_parse_dec_octet(const char **str) {
294
+static int rfc3986_parse_dec_octet(const char **str)
295
+{
296
const char *cur = *str;
297
298
if (!(ISA_DIGIT(cur)))
299
- return(1);
300
- if (!ISA_DIGIT(cur+1))
301
-    cur++;
302
- else if ((*cur != '0') && (ISA_DIGIT(cur + 1)) && (!ISA_DIGIT(cur+2)))
303
-    cur += 2;
304
+ return (1);
305
+ if (!ISA_DIGIT(cur + 1))
306
+ cur++;
307
+ else if ((*cur != '0') && (ISA_DIGIT(cur + 1)) && (!ISA_DIGIT(cur + 2)))
308
+ cur += 2;
309
else if ((*cur == '1') && (ISA_DIGIT(cur + 1)) && (ISA_DIGIT(cur + 2)))
310
-    cur += 3;
311
- else if ((*cur == '2') && (*(cur + 1) >= '0') &&
312
-     (*(cur + 1) <= '4') && (ISA_DIGIT(cur + 2)))
313
-    cur += 3;
314
- else if ((*cur == '2') && (*(cur + 1) == '5') &&
315
-     (*(cur + 2) >= '0') && (*(cur + 1) <= '5'))
316
-    cur += 3;
317
+ cur += 3;
318
+ else if ((*cur == '2') && (*(cur + 1) >= '0') && (*(cur + 1) <= '4') &&
319
+ (ISA_DIGIT(cur + 2)))
320
+ cur += 3;
321
+ else if ((*cur == '2') && (*(cur + 1) == '5') && (*(cur + 2) >= '0') &&
322
+ (*(cur + 1) <= '5'))
323
+ cur += 3;
324
else
325
- return(1);
326
+ return (1);
327
*str = cur;
328
- return(0);
329
+ return (0);
330
}
331
/**
332
* rfc3986_parse_host:
333
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_dec_octet(const char **str) {
334
*
335
* Returns 0 or the error code
336
*/
337
-static int
338
-rfc3986_parse_host(URI *uri, const char **str)
339
+static int rfc3986_parse_host(URI *uri, const char **str)
340
{
341
const char *cur = *str;
342
const char *host;
343
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_host(URI *uri, const char **str)
344
*/
345
if (*cur == '[') {
346
cur++;
347
-    while ((*cur != ']') && (*cur != 0))
348
-     cur++;
349
-    if (*cur != ']')
350
-     return(1);
351
-    cur++;
352
-    goto found;
353
+ while ((*cur != ']') && (*cur != 0))
354
+ cur++;
355
+ if (*cur != ']')
356
+ return (1);
357
+ cur++;
358
+ goto found;
359
}
360
/*
361
* try to parse an IPv4
362
*/
363
if (ISA_DIGIT(cur)) {
364
if (rfc3986_parse_dec_octet(&cur) != 0)
365
-     goto not_ipv4;
366
-    if (*cur != '.')
367
-     goto not_ipv4;
368
-    cur++;
369
+ goto not_ipv4;
370
+ if (*cur != '.')
371
+ goto not_ipv4;
372
+ cur++;
373
if (rfc3986_parse_dec_octet(&cur) != 0)
374
-     goto not_ipv4;
375
-    if (*cur != '.')
376
-     goto not_ipv4;
377
+ goto not_ipv4;
378
+ if (*cur != '.')
379
+ goto not_ipv4;
380
if (rfc3986_parse_dec_octet(&cur) != 0)
381
-     goto not_ipv4;
382
-    if (*cur != '.')
383
-     goto not_ipv4;
384
+ goto not_ipv4;
385
+ if (*cur != '.')
386
+ goto not_ipv4;
387
if (rfc3986_parse_dec_octet(&cur) != 0)
388
-     goto not_ipv4;
389
-    goto found;
390
-not_ipv4:
391
+ goto not_ipv4;
392
+ goto found;
393
+ not_ipv4:
394
cur = *str;
395
}
396
/*
397
@@ -XXX,XX +XXX,XX @@ not_ipv4:
398
found:
399
if (uri != NULL) {
400
g_free(uri->authority);
401
-    uri->authority = NULL;
402
+ uri->authority = NULL;
403
g_free(uri->server);
404
-    if (cur != host) {
405
-     if (uri->cleanup & 2)
406
-        uri->server = g_strndup(host, cur - host);
407
-     else
408
-        uri->server = uri_string_unescape(host, cur - host, NULL);
409
-    } else
410
-     uri->server = NULL;
411
+ if (cur != host) {
412
+ if (uri->cleanup & 2)
413
+ uri->server = g_strndup(host, cur - host);
414
+ else
415
+ uri->server = uri_string_unescape(host, cur - host, NULL);
416
+ } else
417
+ uri->server = NULL;
418
}
419
*str = cur;
420
- return(0);
421
+ return (0);
422
}
423
424
/**
425
@@ -XXX,XX +XXX,XX @@ found:
426
*
427
* Returns 0 or the error code
428
*/
429
-static int
430
-rfc3986_parse_authority(URI *uri, const char **str)
431
+static int rfc3986_parse_authority(URI *uri, const char **str)
432
{
433
const char *cur;
434
int ret;
81
int ret;
435
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_authority(URI *uri, const char **str)
82
436
else
83
- if (!s->ioc) { /* Already closed */
437
cur++;
84
- return;
438
ret = rfc3986_parse_host(uri, &cur);
85
- }
439
- if (ret != 0) return(ret);
86
-
440
+ if (ret != 0)
87
- if (s->reply.handle == 0) {
441
+ return (ret);
88
- /* No reply already in flight. Fetch a header. It is possible
442
if (*cur == ':') {
89
- * that another thread has done the same thing in parallel, so
443
cur++;
90
- * the socket is not readable anymore.
444
ret = rfc3986_parse_port(uri, &cur);
91
- */
445
-    if (ret != 0) return(ret);
92
+ for (;;) {
446
+ if (ret != 0)
93
+ assert(s->reply.handle == 0);
447
+ return (ret);
94
ret = nbd_receive_reply(s->ioc, &s->reply);
448
}
95
- if (ret == -EAGAIN) {
449
*str = cur;
96
- return;
450
- return(0);
97
- }
451
+ return (0);
98
if (ret < 0) {
452
}
99
- s->reply.handle = 0;
453
100
- goto fail;
454
/**
455
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_authority(URI *uri, const char **str)
456
*
457
* Returns 0 or the error code
458
*/
459
-static int
460
-rfc3986_parse_segment(const char **str, char forbid, int empty)
461
+static int rfc3986_parse_segment(const char **str, char forbid, int empty)
462
{
463
const char *cur;
464
465
cur = *str;
466
if (!ISA_PCHAR(cur)) {
467
if (empty)
468
-     return(0);
469
-    return(1);
470
+ return (0);
471
+ return (1);
472
}
473
while (ISA_PCHAR(cur) && (*cur != forbid))
474
NEXT(cur);
475
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_segment(const char **str, char forbid, int empty)
476
*
477
* Returns 0 or the error code
478
*/
479
-static int
480
-rfc3986_parse_path_ab_empty(URI *uri, const char **str)
481
+static int rfc3986_parse_path_ab_empty(URI *uri, const char **str)
482
{
483
const char *cur;
484
int ret;
485
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_path_ab_empty(URI *uri, const char **str)
486
487
while (*cur == '/') {
488
cur++;
489
-    ret = rfc3986_parse_segment(&cur, 0, 1);
490
-    if (ret != 0) return(ret);
491
+ ret = rfc3986_parse_segment(&cur, 0, 1);
492
+ if (ret != 0)
493
+ return (ret);
494
}
495
if (uri != NULL) {
496
g_free(uri->path);
497
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_path_ab_empty(URI *uri, const char **str)
498
*
499
* Returns 0 or the error code
500
*/
501
-static int
502
-rfc3986_parse_path_absolute(URI *uri, const char **str)
503
+static int rfc3986_parse_path_absolute(URI *uri, const char **str)
504
{
505
const char *cur;
506
int ret;
507
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_path_absolute(URI *uri, const char **str)
508
cur = *str;
509
510
if (*cur != '/')
511
- return(1);
512
+ return (1);
513
cur++;
514
ret = rfc3986_parse_segment(&cur, 0, 0);
515
if (ret == 0) {
516
-    while (*cur == '/') {
517
-     cur++;
518
-     ret = rfc3986_parse_segment(&cur, 0, 1);
519
-     if (ret != 0) return(ret);
520
-    }
521
+ while (*cur == '/') {
522
+ cur++;
523
+ ret = rfc3986_parse_segment(&cur, 0, 1);
524
+ if (ret != 0)
525
+ return (ret);
526
+ }
527
}
528
if (uri != NULL) {
529
g_free(uri->path);
530
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_path_absolute(URI *uri, const char **str)
531
*
532
* Returns 0 or the error code
533
*/
534
-static int
535
-rfc3986_parse_path_rootless(URI *uri, const char **str)
536
+static int rfc3986_parse_path_rootless(URI *uri, const char **str)
537
{
538
const char *cur;
539
int ret;
540
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_path_rootless(URI *uri, const char **str)
541
cur = *str;
542
543
ret = rfc3986_parse_segment(&cur, 0, 0);
544
- if (ret != 0) return(ret);
545
+ if (ret != 0)
546
+ return (ret);
547
while (*cur == '/') {
548
cur++;
549
-    ret = rfc3986_parse_segment(&cur, 0, 1);
550
-    if (ret != 0) return(ret);
551
+ ret = rfc3986_parse_segment(&cur, 0, 1);
552
+ if (ret != 0)
553
+ return (ret);
554
}
555
if (uri != NULL) {
556
g_free(uri->path);
557
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_path_rootless(URI *uri, const char **str)
558
*
559
* Returns 0 or the error code
560
*/
561
-static int
562
-rfc3986_parse_path_no_scheme(URI *uri, const char **str)
563
+static int rfc3986_parse_path_no_scheme(URI *uri, const char **str)
564
{
565
const char *cur;
566
int ret;
567
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_path_no_scheme(URI *uri, const char **str)
568
cur = *str;
569
570
ret = rfc3986_parse_segment(&cur, ':', 0);
571
- if (ret != 0) return(ret);
572
+ if (ret != 0)
573
+ return (ret);
574
while (*cur == '/') {
575
cur++;
576
-    ret = rfc3986_parse_segment(&cur, 0, 1);
577
-    if (ret != 0) return(ret);
578
+ ret = rfc3986_parse_segment(&cur, 0, 1);
579
+ if (ret != 0)
580
+ return (ret);
581
}
582
if (uri != NULL) {
583
g_free(uri->path);
584
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_path_no_scheme(URI *uri, const char **str)
585
*
586
* Returns 0 or the error code
587
*/
588
-static int
589
-rfc3986_parse_hier_part(URI *uri, const char **str)
590
+static int rfc3986_parse_hier_part(URI *uri, const char **str)
591
{
592
const char *cur;
593
int ret;
594
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_hier_part(URI *uri, const char **str)
595
596
if ((*cur == '/') && (*(cur + 1) == '/')) {
597
cur += 2;
598
-    ret = rfc3986_parse_authority(uri, &cur);
599
-    if (ret != 0) return(ret);
600
-    ret = rfc3986_parse_path_ab_empty(uri, &cur);
601
-    if (ret != 0) return(ret);
602
-    *str = cur;
603
-    return(0);
604
+ ret = rfc3986_parse_authority(uri, &cur);
605
+ if (ret != 0)
606
+ return (ret);
607
+ ret = rfc3986_parse_path_ab_empty(uri, &cur);
608
+ if (ret != 0)
609
+ return (ret);
610
+ *str = cur;
611
+ return (0);
612
} else if (*cur == '/') {
613
ret = rfc3986_parse_path_absolute(uri, &cur);
614
-    if (ret != 0) return(ret);
615
+ if (ret != 0)
616
+ return (ret);
617
} else if (ISA_PCHAR(cur)) {
618
ret = rfc3986_parse_path_rootless(uri, &cur);
619
-    if (ret != 0) return(ret);
620
+ if (ret != 0)
621
+ return (ret);
622
} else {
623
-    /* path-empty is effectively empty */
624
-    if (uri != NULL) {
625
+ /* path-empty is effectively empty */
626
+ if (uri != NULL) {
627
g_free(uri->path);
628
-     uri->path = NULL;
629
-    }
630
+ uri->path = NULL;
631
+ }
632
}
633
*str = cur;
634
return (0);
635
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_hier_part(URI *uri, const char **str)
636
*
637
* Returns 0 or the error code
638
*/
639
-static int
640
-rfc3986_parse_relative_ref(URI *uri, const char *str) {
641
+static int rfc3986_parse_relative_ref(URI *uri, const char *str)
642
+{
643
int ret;
644
645
if ((*str == '/') && (*(str + 1) == '/')) {
646
str += 2;
647
-    ret = rfc3986_parse_authority(uri, &str);
648
-    if (ret != 0) return(ret);
649
-    ret = rfc3986_parse_path_ab_empty(uri, &str);
650
-    if (ret != 0) return(ret);
651
+ ret = rfc3986_parse_authority(uri, &str);
652
+ if (ret != 0)
653
+ return (ret);
654
+ ret = rfc3986_parse_path_ab_empty(uri, &str);
655
+ if (ret != 0)
656
+ return (ret);
657
} else if (*str == '/') {
658
-    ret = rfc3986_parse_path_absolute(uri, &str);
659
-    if (ret != 0) return(ret);
660
+ ret = rfc3986_parse_path_absolute(uri, &str);
661
+ if (ret != 0)
662
+ return (ret);
663
} else if (ISA_PCHAR(str)) {
664
ret = rfc3986_parse_path_no_scheme(uri, &str);
665
-    if (ret != 0) return(ret);
666
+ if (ret != 0)
667
+ return (ret);
668
} else {
669
-    /* path-empty is effectively empty */
670
-    if (uri != NULL) {
671
+ /* path-empty is effectively empty */
672
+ if (uri != NULL) {
673
g_free(uri->path);
674
-     uri->path = NULL;
675
-    }
676
+ uri->path = NULL;
677
+ }
678
}
679
680
if (*str == '?') {
681
-    str++;
682
-    ret = rfc3986_parse_query(uri, &str);
683
-    if (ret != 0) return(ret);
684
+ str++;
685
+ ret = rfc3986_parse_query(uri, &str);
686
+ if (ret != 0)
687
+ return (ret);
688
}
689
if (*str == '#') {
690
-    str++;
691
-    ret = rfc3986_parse_fragment(uri, &str);
692
-    if (ret != 0) return(ret);
693
+ str++;
694
+ ret = rfc3986_parse_fragment(uri, &str);
695
+ if (ret != 0)
696
+ return (ret);
697
}
698
if (*str != 0) {
699
-    uri_clean(uri);
700
-    return(1);
701
+ uri_clean(uri);
702
+ return (1);
703
}
704
- return(0);
705
+ return (0);
706
}
707
708
-
709
/**
710
* rfc3986_parse:
711
* @uri: pointer to an URI structure
712
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_relative_ref(URI *uri, const char *str) {
713
*
714
* Returns 0 or the error code
715
*/
716
-static int
717
-rfc3986_parse(URI *uri, const char *str) {
718
+static int rfc3986_parse(URI *uri, const char *str)
719
+{
720
int ret;
721
722
ret = rfc3986_parse_scheme(uri, &str);
723
- if (ret != 0) return(ret);
724
+ if (ret != 0)
725
+ return (ret);
726
if (*str != ':') {
727
-    return(1);
728
+ return (1);
729
}
730
str++;
731
ret = rfc3986_parse_hier_part(uri, &str);
732
- if (ret != 0) return(ret);
733
+ if (ret != 0)
734
+ return (ret);
735
if (*str == '?') {
736
-    str++;
737
-    ret = rfc3986_parse_query(uri, &str);
738
-    if (ret != 0) return(ret);
739
+ str++;
740
+ ret = rfc3986_parse_query(uri, &str);
741
+ if (ret != 0)
742
+ return (ret);
743
}
744
if (*str == '#') {
745
-    str++;
746
-    ret = rfc3986_parse_fragment(uri, &str);
747
-    if (ret != 0) return(ret);
748
+ str++;
749
+ ret = rfc3986_parse_fragment(uri, &str);
750
+ if (ret != 0)
751
+ return (ret);
752
}
753
if (*str != 0) {
754
-    uri_clean(uri);
755
-    return(1);
756
+ uri_clean(uri);
757
+ return (1);
758
}
759
- return(0);
760
+ return (0);
761
}
762
763
/**
764
@@ -XXX,XX +XXX,XX @@ rfc3986_parse(URI *uri, const char *str) {
765
*
766
* Returns 0 or the error code
767
*/
768
-static int
769
-rfc3986_parse_uri_reference(URI *uri, const char *str) {
770
+static int rfc3986_parse_uri_reference(URI *uri, const char *str)
771
+{
772
int ret;
773
774
if (str == NULL)
775
-    return(-1);
776
+ return (-1);
777
uri_clean(uri);
778
779
/*
780
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_uri_reference(URI *uri, const char *str) {
781
*/
782
ret = rfc3986_parse(uri, str);
783
if (ret != 0) {
784
-    uri_clean(uri);
785
+ uri_clean(uri);
786
ret = rfc3986_parse_relative_ref(uri, str);
787
-    if (ret != 0) {
788
-     uri_clean(uri);
789
-     return(ret);
790
-    }
791
+ if (ret != 0) {
792
+ uri_clean(uri);
793
+ return (ret);
794
+ }
795
}
796
- return(0);
797
+ return (0);
798
}
799
800
/**
801
@@ -XXX,XX +XXX,XX @@ rfc3986_parse_uri_reference(URI *uri, const char *str) {
802
*
803
* Returns a newly built URI or NULL in case of error
804
*/
805
-URI *
806
-uri_parse(const char *str) {
807
+URI *uri_parse(const char *str)
808
+{
809
URI *uri;
810
int ret;
811
812
if (str == NULL)
813
-    return(NULL);
814
+ return (NULL);
815
uri = uri_new();
816
ret = rfc3986_parse_uri_reference(uri, str);
817
if (ret) {
818
uri_free(uri);
819
- return(NULL);
820
+ return (NULL);
821
}
822
- return(uri);
823
+ return (uri);
824
}
825
826
/**
827
@@ -XXX,XX +XXX,XX @@ uri_parse(const char *str) {
828
*
829
* Returns 0 or the error code
830
*/
831
-int
832
-uri_parse_into(URI *uri, const char *str) {
833
- return(rfc3986_parse_uri_reference(uri, str));
834
+int uri_parse_into(URI *uri, const char *str)
835
+{
836
+ return (rfc3986_parse_uri_reference(uri, str));
837
}
838
839
/**
840
@@ -XXX,XX +XXX,XX @@ uri_parse_into(URI *uri, const char *str) {
841
*
842
* Returns a newly built URI or NULL in case of error
843
*/
844
-URI *
845
-uri_parse_raw(const char *str, int raw) {
846
+URI *uri_parse_raw(const char *str, int raw)
847
+{
848
URI *uri;
849
int ret;
850
851
if (str == NULL)
852
-    return(NULL);
853
+ return (NULL);
854
uri = uri_new();
855
if (raw) {
856
uri->cleanup |= 2;
857
@@ -XXX,XX +XXX,XX @@ uri_parse_raw(const char *str, int raw) {
858
ret = uri_parse_into(uri, str);
859
if (ret) {
860
uri_free(uri);
861
- return(NULL);
862
+ return (NULL);
863
}
864
- return(uri);
865
+ return (uri);
866
}
867
868
/************************************************************************
869
- *                                    *
870
- *            Generic URI structure functions            *
871
- *                                    *
872
+ * *
873
+ * Generic URI structure functions *
874
+ * *
875
************************************************************************/
876
877
/**
878
@@ -XXX,XX +XXX,XX @@ uri_parse_raw(const char *str, int raw) {
879
*
880
* Returns the new structure or NULL in case of error
881
*/
882
-URI *
883
-uri_new(void) {
884
+URI *uri_new(void)
885
+{
886
URI *ret;
887
888
ret = g_new0(URI, 1);
889
- return(ret);
890
+ return (ret);
891
}
892
893
/**
894
@@ -XXX,XX +XXX,XX @@ uri_new(void) {
895
* Function to handle properly a reallocation when saving an URI
896
* Also imposes some limit on the length of an URI string output
897
*/
898
-static char *
899
-realloc2n(char *ret, int *max) {
900
+static char *realloc2n(char *ret, int *max)
901
+{
902
char *temp;
903
int tmp;
904
905
tmp = *max * 2;
906
temp = g_realloc(ret, (tmp + 1));
907
*max = tmp;
908
- return(temp);
909
+ return (temp);
910
}
911
912
/**
913
@@ -XXX,XX +XXX,XX @@ realloc2n(char *ret, int *max) {
914
*
915
* Returns a new string (to be deallocated by caller)
916
*/
917
-char *
918
-uri_to_string(URI *uri) {
919
+char *uri_to_string(URI *uri)
920
+{
921
char *ret = NULL;
922
char *temp;
923
const char *p;
924
int len;
925
int max;
926
927
- if (uri == NULL) return(NULL);
928
-
929
+ if (uri == NULL)
930
+ return (NULL);
931
932
max = 80;
933
ret = g_malloc(max + 1);
934
len = 0;
935
936
if (uri->scheme != NULL) {
937
-    p = uri->scheme;
938
-    while (*p != 0) {
939
-     if (len >= max) {
940
+ p = uri->scheme;
941
+ while (*p != 0) {
942
+ if (len >= max) {
943
temp = realloc2n(ret, &max);
944
-        ret = temp;
945
-     }
946
-     ret[len++] = *p++;
947
-    }
948
-    if (len >= max) {
949
+ ret = temp;
950
+ }
951
+ ret[len++] = *p++;
952
+ }
953
+ if (len >= max) {
954
temp = realloc2n(ret, &max);
955
ret = temp;
956
-    }
957
-    ret[len++] = ':';
958
+ }
959
+ ret[len++] = ':';
960
}
961
if (uri->opaque != NULL) {
962
-    p = uri->opaque;
963
-    while (*p != 0) {
964
-     if (len + 3 >= max) {
965
+ p = uri->opaque;
966
+ while (*p != 0) {
967
+ if (len + 3 >= max) {
968
temp = realloc2n(ret, &max);
969
ret = temp;
970
-     }
971
-     if (IS_RESERVED(*(p)) || IS_UNRESERVED(*(p)))
972
-        ret[len++] = *p++;
973
-     else {
974
-        int val = *(unsigned char *)p++;
975
-        int hi = val / 0x10, lo = val % 0x10;
976
-        ret[len++] = '%';
977
-        ret[len++] = hi + (hi > 9? 'A'-10 : '0');
978
-        ret[len++] = lo + (lo > 9? 'A'-10 : '0');
979
-     }
980
-    }
981
+ }
982
+ if (IS_RESERVED(*(p)) || IS_UNRESERVED(*(p)))
983
+ ret[len++] = *p++;
984
+ else {
985
+ int val = *(unsigned char *)p++;
986
+ int hi = val / 0x10, lo = val % 0x10;
987
+ ret[len++] = '%';
988
+ ret[len++] = hi + (hi > 9 ? 'A' - 10 : '0');
989
+ ret[len++] = lo + (lo > 9 ? 'A' - 10 : '0');
990
+ }
991
+ }
992
} else {
993
-    if (uri->server != NULL) {
994
-     if (len + 3 >= max) {
995
+ if (uri->server != NULL) {
996
+ if (len + 3 >= max) {
997
temp = realloc2n(ret, &max);
998
ret = temp;
999
-     }
1000
-     ret[len++] = '/';
1001
-     ret[len++] = '/';
1002
-     if (uri->user != NULL) {
1003
-        p = uri->user;
1004
-        while (*p != 0) {
1005
-         if (len + 3 >= max) {
1006
+ }
1007
+ ret[len++] = '/';
1008
+ ret[len++] = '/';
1009
+ if (uri->user != NULL) {
1010
+ p = uri->user;
1011
+ while (*p != 0) {
1012
+ if (len + 3 >= max) {
1013
temp = realloc2n(ret, &max);
1014
ret = temp;
1015
-         }
1016
-         if ((IS_UNRESERVED(*(p))) ||
1017
-            ((*(p) == ';')) || ((*(p) == ':')) ||
1018
-            ((*(p) == '&')) || ((*(p) == '=')) ||
1019
-            ((*(p) == '+')) || ((*(p) == '$')) ||
1020
-            ((*(p) == ',')))
1021
-            ret[len++] = *p++;
1022
-         else {
1023
-            int val = *(unsigned char *)p++;
1024
-            int hi = val / 0x10, lo = val % 0x10;
1025
-            ret[len++] = '%';
1026
-            ret[len++] = hi + (hi > 9? 'A'-10 : '0');
1027
-            ret[len++] = lo + (lo > 9? 'A'-10 : '0');
1028
-         }
1029
-        }
1030
-        if (len + 3 >= max) {
1031
+ }
1032
+ if ((IS_UNRESERVED(*(p))) || ((*(p) == ';')) ||
1033
+ ((*(p) == ':')) || ((*(p) == '&')) || ((*(p) == '=')) ||
1034
+ ((*(p) == '+')) || ((*(p) == '$')) || ((*(p) == ',')))
1035
+ ret[len++] = *p++;
1036
+ else {
1037
+ int val = *(unsigned char *)p++;
1038
+ int hi = val / 0x10, lo = val % 0x10;
1039
+ ret[len++] = '%';
1040
+ ret[len++] = hi + (hi > 9 ? 'A' - 10 : '0');
1041
+ ret[len++] = lo + (lo > 9 ? 'A' - 10 : '0');
1042
+ }
1043
+ }
1044
+ if (len + 3 >= max) {
1045
temp = realloc2n(ret, &max);
1046
ret = temp;
1047
-        }
1048
-        ret[len++] = '@';
1049
-     }
1050
-     p = uri->server;
1051
-     while (*p != 0) {
1052
-        if (len >= max) {
1053
+ }
1054
+ ret[len++] = '@';
1055
+ }
1056
+ p = uri->server;
1057
+ while (*p != 0) {
1058
+ if (len >= max) {
1059
temp = realloc2n(ret, &max);
1060
ret = temp;
1061
-        }
1062
-        ret[len++] = *p++;
1063
-     }
1064
-     if (uri->port > 0) {
1065
-        if (len + 10 >= max) {
1066
+ }
1067
+ ret[len++] = *p++;
1068
+ }
1069
+ if (uri->port > 0) {
1070
+ if (len + 10 >= max) {
1071
temp = realloc2n(ret, &max);
1072
ret = temp;
1073
-        }
1074
-        len += snprintf(&ret[len], max - len, ":%d", uri->port);
1075
-     }
1076
-    } else if (uri->authority != NULL) {
1077
-     if (len + 3 >= max) {
1078
+ }
1079
+ len += snprintf(&ret[len], max - len, ":%d", uri->port);
1080
+ }
1081
+ } else if (uri->authority != NULL) {
1082
+ if (len + 3 >= max) {
1083
temp = realloc2n(ret, &max);
1084
ret = temp;
1085
-     }
1086
-     ret[len++] = '/';
1087
-     ret[len++] = '/';
1088
-     p = uri->authority;
1089
-     while (*p != 0) {
1090
-        if (len + 3 >= max) {
1091
+ }
1092
+ ret[len++] = '/';
1093
+ ret[len++] = '/';
1094
+ p = uri->authority;
1095
+ while (*p != 0) {
1096
+ if (len + 3 >= max) {
1097
temp = realloc2n(ret, &max);
1098
ret = temp;
1099
-        }
1100
-        if ((IS_UNRESERVED(*(p))) ||
1101
- ((*(p) == '$')) || ((*(p) == ',')) || ((*(p) == ';')) ||
1102
- ((*(p) == ':')) || ((*(p) == '@')) || ((*(p) == '&')) ||
1103
- ((*(p) == '=')) || ((*(p) == '+')))
1104
-         ret[len++] = *p++;
1105
-        else {
1106
-         int val = *(unsigned char *)p++;
1107
-         int hi = val / 0x10, lo = val % 0x10;
1108
-         ret[len++] = '%';
1109
-         ret[len++] = hi + (hi > 9? 'A'-10 : '0');
1110
-         ret[len++] = lo + (lo > 9? 'A'-10 : '0');
1111
-        }
1112
-     }
1113
-    } else if (uri->scheme != NULL) {
1114
-     if (len + 3 >= max) {
1115
+ }
1116
+ if ((IS_UNRESERVED(*(p))) || ((*(p) == '$')) ||
1117
+ ((*(p) == ',')) || ((*(p) == ';')) || ((*(p) == ':')) ||
1118
+ ((*(p) == '@')) || ((*(p) == '&')) || ((*(p) == '=')) ||
1119
+ ((*(p) == '+')))
1120
+ ret[len++] = *p++;
1121
+ else {
1122
+ int val = *(unsigned char *)p++;
1123
+ int hi = val / 0x10, lo = val % 0x10;
1124
+ ret[len++] = '%';
1125
+ ret[len++] = hi + (hi > 9 ? 'A' - 10 : '0');
1126
+ ret[len++] = lo + (lo > 9 ? 'A' - 10 : '0');
1127
+ }
1128
+ }
1129
+ } else if (uri->scheme != NULL) {
1130
+ if (len + 3 >= max) {
1131
temp = realloc2n(ret, &max);
1132
ret = temp;
1133
-     }
1134
-     ret[len++] = '/';
1135
-     ret[len++] = '/';
1136
-    }
1137
-    if (uri->path != NULL) {
1138
-     p = uri->path;
1139
-     /*
1140
-     * the colon in file:///d: should not be escaped or
1141
-     * Windows accesses fail later.
1142
-     */
1143
-     if ((uri->scheme != NULL) &&
1144
-        (p[0] == '/') &&
1145
-        (((p[1] >= 'a') && (p[1] <= 'z')) ||
1146
-         ((p[1] >= 'A') && (p[1] <= 'Z'))) &&
1147
-        (p[2] == ':') &&
1148
-     (!strcmp(uri->scheme, "file"))) {
1149
-        if (len + 3 >= max) {
1150
+ }
1151
+ ret[len++] = '/';
1152
+ ret[len++] = '/';
1153
+ }
1154
+ if (uri->path != NULL) {
1155
+ p = uri->path;
1156
+ /*
1157
+ * the colon in file:///d: should not be escaped or
1158
+ * Windows accesses fail later.
1159
+ */
1160
+ if ((uri->scheme != NULL) && (p[0] == '/') &&
1161
+ (((p[1] >= 'a') && (p[1] <= 'z')) ||
1162
+ ((p[1] >= 'A') && (p[1] <= 'Z'))) &&
1163
+ (p[2] == ':') && (!strcmp(uri->scheme, "file"))) {
1164
+ if (len + 3 >= max) {
1165
temp = realloc2n(ret, &max);
1166
ret = temp;
1167
-        }
1168
-        ret[len++] = *p++;
1169
-        ret[len++] = *p++;
1170
-        ret[len++] = *p++;
1171
-     }
1172
-     while (*p != 0) {
1173
-        if (len + 3 >= max) {
1174
+ }
1175
+ ret[len++] = *p++;
1176
+ ret[len++] = *p++;
1177
+ ret[len++] = *p++;
1178
+ }
1179
+ while (*p != 0) {
1180
+ if (len + 3 >= max) {
1181
temp = realloc2n(ret, &max);
1182
ret = temp;
1183
-        }
1184
-        if ((IS_UNRESERVED(*(p))) || ((*(p) == '/')) ||
1185
+ }
1186
+ if ((IS_UNRESERVED(*(p))) || ((*(p) == '/')) ||
1187
((*(p) == ';')) || ((*(p) == '@')) || ((*(p) == '&')) ||
1188
-     ((*(p) == '=')) || ((*(p) == '+')) || ((*(p) == '$')) ||
1189
-     ((*(p) == ',')))
1190
-         ret[len++] = *p++;
1191
-        else {
1192
-         int val = *(unsigned char *)p++;
1193
-         int hi = val / 0x10, lo = val % 0x10;
1194
-         ret[len++] = '%';
1195
-         ret[len++] = hi + (hi > 9? 'A'-10 : '0');
1196
-         ret[len++] = lo + (lo > 9? 'A'-10 : '0');
1197
-        }
1198
-     }
1199
-    }
1200
-    if (uri->query != NULL) {
1201
-     if (len + 1 >= max) {
1202
+ ((*(p) == '=')) || ((*(p) == '+')) || ((*(p) == '$')) ||
1203
+ ((*(p) == ',')))
1204
+ ret[len++] = *p++;
1205
+ else {
1206
+ int val = *(unsigned char *)p++;
1207
+ int hi = val / 0x10, lo = val % 0x10;
1208
+ ret[len++] = '%';
1209
+ ret[len++] = hi + (hi > 9 ? 'A' - 10 : '0');
1210
+ ret[len++] = lo + (lo > 9 ? 'A' - 10 : '0');
1211
+ }
1212
+ }
1213
+ }
1214
+ if (uri->query != NULL) {
1215
+ if (len + 1 >= max) {
1216
temp = realloc2n(ret, &max);
1217
ret = temp;
1218
-     }
1219
-     ret[len++] = '?';
1220
-     p = uri->query;
1221
-     while (*p != 0) {
1222
-        if (len + 1 >= max) {
1223
+ }
1224
+ ret[len++] = '?';
1225
+ p = uri->query;
1226
+ while (*p != 0) {
1227
+ if (len + 1 >= max) {
1228
temp = realloc2n(ret, &max);
1229
ret = temp;
1230
-        }
1231
-        ret[len++] = *p++;
1232
-     }
1233
-    }
1234
+ }
1235
+ ret[len++] = *p++;
1236
+ }
1237
+ }
1238
}
1239
if (uri->fragment != NULL) {
1240
-    if (len + 3 >= max) {
1241
+ if (len + 3 >= max) {
1242
temp = realloc2n(ret, &max);
1243
ret = temp;
1244
-    }
1245
-    ret[len++] = '#';
1246
-    p = uri->fragment;
1247
-    while (*p != 0) {
1248
-     if (len + 3 >= max) {
1249
+ }
1250
+ ret[len++] = '#';
1251
+ p = uri->fragment;
1252
+ while (*p != 0) {
1253
+ if (len + 3 >= max) {
1254
temp = realloc2n(ret, &max);
1255
ret = temp;
1256
-     }
1257
-     if ((IS_UNRESERVED(*(p))) || (IS_RESERVED(*(p))))
1258
-        ret[len++] = *p++;
1259
-     else {
1260
-        int val = *(unsigned char *)p++;
1261
-        int hi = val / 0x10, lo = val % 0x10;
1262
-        ret[len++] = '%';
1263
-        ret[len++] = hi + (hi > 9? 'A'-10 : '0');
1264
-        ret[len++] = lo + (lo > 9? 'A'-10 : '0');
1265
-     }
1266
-    }
1267
+ }
1268
+ if ((IS_UNRESERVED(*(p))) || (IS_RESERVED(*(p))))
1269
+ ret[len++] = *p++;
1270
+ else {
1271
+ int val = *(unsigned char *)p++;
1272
+ int hi = val / 0x10, lo = val % 0x10;
1273
+ ret[len++] = '%';
1274
+ ret[len++] = hi + (hi > 9 ? 'A' - 10 : '0');
1275
+ ret[len++] = lo + (lo > 9 ? 'A' - 10 : '0');
1276
+ }
1277
+ }
1278
}
1279
if (len >= max) {
1280
temp = realloc2n(ret, &max);
1281
ret = temp;
1282
}
1283
ret[len] = 0;
1284
- return(ret);
1285
+ return (ret);
1286
}
1287
1288
/**
1289
@@ -XXX,XX +XXX,XX @@ uri_to_string(URI *uri) {
1290
*
1291
* Make sure the URI struct is free of content
1292
*/
1293
-static void
1294
-uri_clean(URI *uri) {
1295
- if (uri == NULL) return;
1296
+static void uri_clean(URI *uri)
1297
+{
1298
+ if (uri == NULL)
1299
+ return;
1300
1301
g_free(uri->scheme);
1302
uri->scheme = NULL;
1303
@@ -XXX,XX +XXX,XX @@ uri_clean(URI *uri) {
1304
*
1305
* Free up the URI struct
1306
*/
1307
-void
1308
-uri_free(URI *uri) {
1309
+void uri_free(URI *uri)
1310
+{
1311
uri_clean(uri);
1312
g_free(uri);
1313
}
1314
1315
/************************************************************************
1316
- *                                    *
1317
- *            Helper functions                *
1318
- *                                    *
1319
+ * *
1320
+ * Helper functions *
1321
+ * *
1322
************************************************************************/
1323
1324
/**
1325
@@ -XXX,XX +XXX,XX @@ uri_free(URI *uri) {
1326
*
1327
* Returns 0 or an error code
1328
*/
1329
-static int
1330
-normalize_uri_path(char *path) {
1331
+static int normalize_uri_path(char *path)
1332
+{
1333
char *cur, *out;
1334
1335
if (path == NULL)
1336
-    return(-1);
1337
+ return (-1);
1338
1339
/* Skip all initial "/" chars. We want to get to the beginning of the
1340
* first non-empty segment.
1341
*/
1342
cur = path;
1343
while (cur[0] == '/')
1344
- ++cur;
1345
+ ++cur;
1346
if (cur[0] == '\0')
1347
- return(0);
1348
+ return (0);
1349
1350
/* Keep everything we've seen so far. */
1351
out = cur;
1352
@@ -XXX,XX +XXX,XX @@ normalize_uri_path(char *path) {
1353
* Analyze each segment in sequence for cases (c) and (d).
1354
*/
1355
while (cur[0] != '\0') {
1356
-    /*
1357
-     * c) All occurrences of "./", where "." is a complete path segment,
1358
-     * are removed from the buffer string.
1359
-     */
1360
-    if ((cur[0] == '.') && (cur[1] == '/')) {
1361
-     cur += 2;
1362
-     /* '//' normalization should be done at this point too */
1363
-     while (cur[0] == '/')
1364
-        cur++;
1365
-     continue;
1366
-    }
1367
+ /*
1368
+ * c) All occurrences of "./", where "." is a complete path segment,
1369
+ * are removed from the buffer string.
1370
+ */
1371
+ if ((cur[0] == '.') && (cur[1] == '/')) {
1372
+ cur += 2;
1373
+ /* '//' normalization should be done at this point too */
1374
+ while (cur[0] == '/')
1375
+ cur++;
1376
+ continue;
1377
+ }
1378
1379
-    /*
1380
-     * d) If the buffer string ends with "." as a complete path segment,
1381
-     * that "." is removed.
1382
-     */
1383
-    if ((cur[0] == '.') && (cur[1] == '\0'))
1384
-     break;
1385
+ /*
1386
+ * d) If the buffer string ends with "." as a complete path segment,
1387
+ * that "." is removed.
1388
+ */
1389
+ if ((cur[0] == '.') && (cur[1] == '\0'))
1390
+ break;
1391
1392
-    /* Otherwise keep the segment. */
1393
-    while (cur[0] != '/') {
1394
+ /* Otherwise keep the segment. */
1395
+ while (cur[0] != '/') {
1396
if (cur[0] == '\0')
1397
- goto done_cd;
1398
-     (out++)[0] = (cur++)[0];
1399
-    }
1400
-    /* nomalize // */
1401
-    while ((cur[0] == '/') && (cur[1] == '/'))
1402
-     cur++;
1403
+ goto done_cd;
1404
+ (out++)[0] = (cur++)[0];
1405
+ }
1406
+ /* nomalize // */
1407
+ while ((cur[0] == '/') && (cur[1] == '/'))
1408
+ cur++;
1409
1410
(out++)[0] = (cur++)[0];
1411
}
1412
- done_cd:
1413
+done_cd:
1414
out[0] = '\0';
1415
1416
/* Reset to the beginning of the first segment for the next sequence. */
1417
cur = path;
1418
while (cur[0] == '/')
1419
- ++cur;
1420
+ ++cur;
1421
if (cur[0] == '\0')
1422
-    return(0);
1423
+ return (0);
1424
1425
/*
1426
* Analyze each segment in sequence for cases (e) and (f).
1427
@@ -XXX,XX +XXX,XX @@ normalize_uri_path(char *path) {
1428
/* Find the end of the current segment. */
1429
segp = cur;
1430
while ((segp[0] != '/') && (segp[0] != '\0'))
1431
- ++segp;
1432
+ ++segp;
1433
1434
/* If this is the last segment, we're done (we need at least two
1435
* segments to meet the criteria for the (e) and (f) cases).
1436
*/
1437
if (segp[0] == '\0')
1438
- break;
1439
+ break;
1440
1441
/* If the first segment is "..", or if the next segment _isn't_ "..",
1442
* keep this segment and try the next one.
1443
*/
1444
++segp;
1445
- if (((cur[0] == '.') && (cur[1] == '.') && (segp == cur+3))
1446
- || ((segp[0] != '.') || (segp[1] != '.')
1447
- || ((segp[2] != '/') && (segp[2] != '\0')))) {
1448
- cur = segp;
1449
- continue;
1450
+ if (((cur[0] == '.') && (cur[1] == '.') && (segp == cur + 3)) ||
1451
+ ((segp[0] != '.') || (segp[1] != '.') ||
1452
+ ((segp[2] != '/') && (segp[2] != '\0')))) {
1453
+ cur = segp;
1454
+ continue;
1455
}
1456
1457
/* If we get here, remove this segment and the next one and back up
1458
@@ -XXX,XX +XXX,XX @@ normalize_uri_path(char *path) {
1459
1460
/* If this is the end of the buffer, we're done. */
1461
if (segp[2] == '\0') {
1462
- cur[0] = '\0';
1463
- break;
1464
+ cur[0] = '\0';
1465
+ break;
101
+ break;
1466
}
102
}
1467
/* Valgrind complained, strcpy(cur, segp + 3); */
103
- }
1468
/* string will overlap, do not use strcpy */
104
1469
tmp = cur;
105
- /* There's no need for a mutex on the receive side, because the
1470
segp += 3;
106
- * handler acts as a synchronization point and ensures that only
1471
while ((*tmp++ = *segp++) != 0)
107
- * one coroutine is called until the reply finishes. */
1472
- ;
108
- i = HANDLE_TO_INDEX(s, s->reply.handle);
1473
+ ;
109
- if (i >= MAX_NBD_REQUESTS) {
1474
110
- goto fail;
1475
/* If there are no previous segments, then keep going from here. */
111
- }
1476
segp = cur;
112
+ /* There's no need for a mutex on the receive side, because the
1477
while ((segp > path) && ((--segp)[0] == '/'))
113
+ * handler acts as a synchronization point and ensures that only
1478
- ;
114
+ * one coroutine is called until the reply finishes.
1479
+ ;
115
+ */
1480
if (segp == path)
116
+ i = HANDLE_TO_INDEX(s, s->reply.handle);
1481
- continue;
117
+ if (i >= MAX_NBD_REQUESTS || !s->recv_coroutine[i]) {
1482
+ continue;
118
+ break;
1483
119
+ }
1484
/* "segp" is pointing to the end of a previous segment; find it's
120
1485
* start. We need to back up to the previous segment and start
121
- if (s->recv_coroutine[i]) {
1486
@@ -XXX,XX +XXX,XX @@ normalize_uri_path(char *path) {
122
- qemu_coroutine_enter(s->recv_coroutine[i]);
123
- return;
124
+ /* We're woken up by the recv_coroutine itself. Note that there
125
+ * is no race between yielding and reentering read_reply_co. This
126
+ * is because:
127
+ *
128
+ * - if recv_coroutine[i] runs on the same AioContext, it is only
129
+ * entered after we yield
130
+ *
131
+ * - if recv_coroutine[i] runs on a different AioContext, reentering
132
+ * read_reply_co happens through a bottom half, which can only
133
+ * run after we yield.
134
+ */
135
+ aio_co_wake(s->recv_coroutine[i]);
136
+ qemu_coroutine_yield();
137
}
138
-
139
-fail:
140
- nbd_teardown_connection(bs);
141
-}
142
-
143
-static void nbd_restart_write(void *opaque)
144
-{
145
- BlockDriverState *bs = opaque;
146
-
147
- qemu_coroutine_enter(nbd_get_client_session(bs)->send_coroutine);
148
+ s->read_reply_co = NULL;
149
}
150
151
static int nbd_co_send_request(BlockDriverState *bs,
152
@@ -XXX,XX +XXX,XX @@ static int nbd_co_send_request(BlockDriverState *bs,
153
QEMUIOVector *qiov)
154
{
155
NBDClientSession *s = nbd_get_client_session(bs);
156
- AioContext *aio_context;
157
int rc, ret, i;
158
159
qemu_co_mutex_lock(&s->send_mutex);
160
@@ -XXX,XX +XXX,XX @@ static int nbd_co_send_request(BlockDriverState *bs,
161
return -EPIPE;
162
}
163
164
- s->send_coroutine = qemu_coroutine_self();
165
- aio_context = bdrv_get_aio_context(bs);
166
-
167
- aio_set_fd_handler(aio_context, s->sioc->fd, false,
168
- nbd_reply_ready, nbd_restart_write, NULL, bs);
169
if (qiov) {
170
qio_channel_set_cork(s->ioc, true);
171
rc = nbd_send_request(s->ioc, request);
172
@@ -XXX,XX +XXX,XX @@ static int nbd_co_send_request(BlockDriverState *bs,
173
} else {
174
rc = nbd_send_request(s->ioc, request);
175
}
176
- aio_set_fd_handler(aio_context, s->sioc->fd, false,
177
- nbd_reply_ready, NULL, NULL, bs);
178
- s->send_coroutine = NULL;
179
qemu_co_mutex_unlock(&s->send_mutex);
180
return rc;
181
}
182
@@ -XXX,XX +XXX,XX @@ static void nbd_co_receive_reply(NBDClientSession *s,
183
{
184
int ret;
185
186
- /* Wait until we're woken up by the read handler. TODO: perhaps
187
- * peek at the next reply and avoid yielding if it's ours? */
188
+ /* Wait until we're woken up by nbd_read_reply_entry. */
189
qemu_coroutine_yield();
190
*reply = s->reply;
191
if (reply->handle != request->handle ||
192
@@ -XXX,XX +XXX,XX @@ static void nbd_coroutine_start(NBDClientSession *s,
193
/* s->recv_coroutine[i] is set as soon as we get the send_lock. */
194
}
195
196
-static void nbd_coroutine_end(NBDClientSession *s,
197
+static void nbd_coroutine_end(BlockDriverState *bs,
198
NBDRequest *request)
199
{
200
+ NBDClientSession *s = nbd_get_client_session(bs);
201
int i = HANDLE_TO_INDEX(s, request->handle);
202
+
203
s->recv_coroutine[i] = NULL;
204
- if (s->in_flight-- == MAX_NBD_REQUESTS) {
205
- qemu_co_queue_next(&s->free_sema);
206
+ s->in_flight--;
207
+ qemu_co_queue_next(&s->free_sema);
208
+
209
+ /* Kick the read_reply_co to get the next reply. */
210
+ if (s->read_reply_co) {
211
+ aio_co_wake(s->read_reply_co);
212
}
213
}
214
215
@@ -XXX,XX +XXX,XX @@ int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset,
216
} else {
217
nbd_co_receive_reply(client, &request, &reply, qiov);
218
}
219
- nbd_coroutine_end(client, &request);
220
+ nbd_coroutine_end(bs, &request);
221
return -reply.error;
222
}
223
224
@@ -XXX,XX +XXX,XX @@ int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset,
225
} else {
226
nbd_co_receive_reply(client, &request, &reply, NULL);
227
}
228
- nbd_coroutine_end(client, &request);
229
+ nbd_coroutine_end(bs, &request);
230
return -reply.error;
231
}
232
233
@@ -XXX,XX +XXX,XX @@ int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
234
} else {
235
nbd_co_receive_reply(client, &request, &reply, NULL);
236
}
237
- nbd_coroutine_end(client, &request);
238
+ nbd_coroutine_end(bs, &request);
239
return -reply.error;
240
}
241
242
@@ -XXX,XX +XXX,XX @@ int nbd_client_co_flush(BlockDriverState *bs)
243
} else {
244
nbd_co_receive_reply(client, &request, &reply, NULL);
245
}
246
- nbd_coroutine_end(client, &request);
247
+ nbd_coroutine_end(bs, &request);
248
return -reply.error;
249
}
250
251
@@ -XXX,XX +XXX,XX @@ int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int count)
252
} else {
253
nbd_co_receive_reply(client, &request, &reply, NULL);
254
}
255
- nbd_coroutine_end(client, &request);
256
+ nbd_coroutine_end(bs, &request);
257
return -reply.error;
258
259
}
260
261
void nbd_client_detach_aio_context(BlockDriverState *bs)
262
{
263
- aio_set_fd_handler(bdrv_get_aio_context(bs),
264
- nbd_get_client_session(bs)->sioc->fd,
265
- false, NULL, NULL, NULL, NULL);
266
+ NBDClientSession *client = nbd_get_client_session(bs);
267
+ qio_channel_detach_aio_context(QIO_CHANNEL(client->sioc));
268
}
269
270
void nbd_client_attach_aio_context(BlockDriverState *bs,
271
AioContext *new_context)
272
{
273
- aio_set_fd_handler(new_context, nbd_get_client_session(bs)->sioc->fd,
274
- false, nbd_reply_ready, NULL, NULL, bs);
275
+ NBDClientSession *client = nbd_get_client_session(bs);
276
+ qio_channel_attach_aio_context(QIO_CHANNEL(client->sioc), new_context);
277
+ aio_co_schedule(new_context, client->read_reply_co);
278
}
279
280
void nbd_client_close(BlockDriverState *bs)
281
@@ -XXX,XX +XXX,XX @@ int nbd_client_init(BlockDriverState *bs,
282
/* Now that we're connected, set the socket to be non-blocking and
283
* kick the reply mechanism. */
284
qio_channel_set_blocking(QIO_CHANNEL(sioc), false, NULL);
285
-
286
+ client->read_reply_co = qemu_coroutine_create(nbd_read_reply_entry, client);
287
nbd_client_attach_aio_context(bs, bdrv_get_aio_context(bs));
288
289
logout("Established connection with NBD server\n");
290
diff --git a/nbd/client.c b/nbd/client.c
291
index XXXXXXX..XXXXXXX 100644
292
--- a/nbd/client.c
293
+++ b/nbd/client.c
294
@@ -XXX,XX +XXX,XX @@ ssize_t nbd_receive_reply(QIOChannel *ioc, NBDReply *reply)
295
ssize_t ret;
296
297
ret = read_sync(ioc, buf, sizeof(buf));
298
- if (ret < 0) {
299
+ if (ret <= 0) {
300
return ret;
301
}
302
303
diff --git a/nbd/common.c b/nbd/common.c
304
index XXXXXXX..XXXXXXX 100644
305
--- a/nbd/common.c
306
+++ b/nbd/common.c
307
@@ -XXX,XX +XXX,XX @@ ssize_t nbd_wr_syncv(QIOChannel *ioc,
308
}
309
if (len == QIO_CHANNEL_ERR_BLOCK) {
310
if (qemu_in_coroutine()) {
311
- /* XXX figure out if we can create a variant on
312
- * qio_channel_yield() that works with AIO contexts
313
- * and consider using that in this branch */
314
- qemu_coroutine_yield();
315
- } else if (done) {
316
- /* XXX this is needed by nbd_reply_ready. */
317
- qio_channel_wait(ioc,
318
- do_read ? G_IO_IN : G_IO_OUT);
319
+ qio_channel_yield(ioc, do_read ? G_IO_IN : G_IO_OUT);
320
} else {
321
return -EAGAIN;
322
}
323
diff --git a/nbd/server.c b/nbd/server.c
324
index XXXXXXX..XXXXXXX 100644
325
--- a/nbd/server.c
326
+++ b/nbd/server.c
327
@@ -XXX,XX +XXX,XX @@ struct NBDClient {
328
CoMutex send_lock;
329
Coroutine *send_coroutine;
330
331
- bool can_read;
332
-
333
QTAILQ_ENTRY(NBDClient) next;
334
int nb_requests;
335
bool closing;
336
@@ -XXX,XX +XXX,XX @@ struct NBDClient {
337
338
/* That's all folks */
339
340
-static void nbd_set_handlers(NBDClient *client);
341
-static void nbd_unset_handlers(NBDClient *client);
342
-static void nbd_update_can_read(NBDClient *client);
343
+static void nbd_client_receive_next_request(NBDClient *client);
344
345
static gboolean nbd_negotiate_continue(QIOChannel *ioc,
346
GIOCondition condition,
347
@@ -XXX,XX +XXX,XX @@ void nbd_client_put(NBDClient *client)
1487
*/
348
*/
1488
cur = segp;
349
assert(client->closing);
1489
while ((cur > path) && (cur[-1] != '/'))
350
1490
- --cur;
351
- nbd_unset_handlers(client);
1491
+ --cur;
352
+ qio_channel_detach_aio_context(client->ioc);
1492
}
353
object_unref(OBJECT(client->sioc));
1493
out[0] = '\0';
354
object_unref(OBJECT(client->ioc));
1494
355
if (client->tlscreds) {
1495
@@ -XXX,XX +XXX,XX @@ normalize_uri_path(char *path) {
356
@@ -XXX,XX +XXX,XX @@ static NBDRequestData *nbd_request_get(NBDClient *client)
1496
* We discard them from the final path.
357
1497
*/
358
assert(client->nb_requests <= MAX_NBD_REQUESTS - 1);
1498
if (path[0] == '/') {
359
client->nb_requests++;
1499
- cur = path;
360
- nbd_update_can_read(client);
1500
- while ((cur[0] == '/') && (cur[1] == '.') && (cur[2] == '.')
361
1501
- && ((cur[3] == '/') || (cur[3] == '\0')))
362
req = g_new0(NBDRequestData, 1);
1502
-    cur += 3;
363
nbd_client_get(client);
1503
+ cur = path;
364
@@ -XXX,XX +XXX,XX @@ static void nbd_request_put(NBDRequestData *req)
1504
+ while ((cur[0] == '/') && (cur[1] == '.') && (cur[2] == '.') &&
365
g_free(req);
1505
+ ((cur[3] == '/') || (cur[3] == '\0')))
366
1506
+ cur += 3;
367
client->nb_requests--;
1507
368
- nbd_update_can_read(client);
1508
- if (cur != path) {
369
+ nbd_client_receive_next_request(client);
1509
-    out = path;
370
+
1510
-    while (cur[0] != '\0')
371
nbd_client_put(client);
1511
- (out++)[0] = (cur++)[0];
372
}
1512
-    out[0] = 0;
373
1513
- }
374
@@ -XXX,XX +XXX,XX @@ static void blk_aio_attached(AioContext *ctx, void *opaque)
1514
+ if (cur != path) {
375
exp->ctx = ctx;
1515
+ out = path;
376
1516
+ while (cur[0] != '\0')
377
QTAILQ_FOREACH(client, &exp->clients, next) {
1517
+ (out++)[0] = (cur++)[0];
378
- nbd_set_handlers(client);
1518
+ out[0] = 0;
379
+ qio_channel_attach_aio_context(client->ioc, ctx);
380
+ if (client->recv_coroutine) {
381
+ aio_co_schedule(ctx, client->recv_coroutine);
1519
+ }
382
+ }
1520
}
383
+ if (client->send_coroutine) {
1521
384
+ aio_co_schedule(ctx, client->send_coroutine);
1522
- return(0);
1523
+ return (0);
1524
}
1525
1526
-static int is_hex(char c) {
1527
- if (((c >= '0') && (c <= '9')) ||
1528
- ((c >= 'a') && (c <= 'f')) ||
1529
+static int is_hex(char c)
1530
+{
1531
+ if (((c >= '0') && (c <= '9')) || ((c >= 'a') && (c <= 'f')) ||
1532
((c >= 'A') && (c <= 'F')))
1533
-    return(1);
1534
- return(0);
1535
+ return (1);
1536
+ return (0);
1537
}
1538
1539
-
1540
/**
1541
* uri_string_unescape:
1542
* @str: the string to unescape
1543
@@ -XXX,XX +XXX,XX @@ static int is_hex(char c) {
1544
* Returns a copy of the string, but unescaped, will return NULL only in case
1545
* of error
1546
*/
1547
-char *
1548
-uri_string_unescape(const char *str, int len, char *target) {
1549
+char *uri_string_unescape(const char *str, int len, char *target)
1550
+{
1551
char *ret, *out;
1552
const char *in;
1553
1554
if (str == NULL)
1555
-    return(NULL);
1556
- if (len <= 0) len = strlen(str);
1557
- if (len < 0) return(NULL);
1558
+ return (NULL);
1559
+ if (len <= 0)
1560
+ len = strlen(str);
1561
+ if (len < 0)
1562
+ return (NULL);
1563
1564
if (target == NULL) {
1565
-    ret = g_malloc(len + 1);
1566
+ ret = g_malloc(len + 1);
1567
} else
1568
-    ret = target;
1569
+ ret = target;
1570
in = str;
1571
out = ret;
1572
- while(len > 0) {
1573
-    if ((len > 2) && (*in == '%') && (is_hex(in[1])) && (is_hex(in[2]))) {
1574
-     in++;
1575
-     if ((*in >= '0') && (*in <= '9'))
1576
-     *out = (*in - '0');
1577
-     else if ((*in >= 'a') && (*in <= 'f'))
1578
-     *out = (*in - 'a') + 10;
1579
-     else if ((*in >= 'A') && (*in <= 'F'))
1580
-     *out = (*in - 'A') + 10;
1581
-     in++;
1582
-     if ((*in >= '0') && (*in <= '9'))
1583
-     *out = *out * 16 + (*in - '0');
1584
-     else if ((*in >= 'a') && (*in <= 'f'))
1585
-     *out = *out * 16 + (*in - 'a') + 10;
1586
-     else if ((*in >= 'A') && (*in <= 'F'))
1587
-     *out = *out * 16 + (*in - 'A') + 10;
1588
-     in++;
1589
-     len -= 3;
1590
-     out++;
1591
-    } else {
1592
-     *out++ = *in++;
1593
-     len--;
1594
-    }
1595
+ while (len > 0) {
1596
+ if ((len > 2) && (*in == '%') && (is_hex(in[1])) && (is_hex(in[2]))) {
1597
+ in++;
1598
+ if ((*in >= '0') && (*in <= '9'))
1599
+ *out = (*in - '0');
1600
+ else if ((*in >= 'a') && (*in <= 'f'))
1601
+ *out = (*in - 'a') + 10;
1602
+ else if ((*in >= 'A') && (*in <= 'F'))
1603
+ *out = (*in - 'A') + 10;
1604
+ in++;
1605
+ if ((*in >= '0') && (*in <= '9'))
1606
+ *out = *out * 16 + (*in - '0');
1607
+ else if ((*in >= 'a') && (*in <= 'f'))
1608
+ *out = *out * 16 + (*in - 'a') + 10;
1609
+ else if ((*in >= 'A') && (*in <= 'F'))
1610
+ *out = *out * 16 + (*in - 'A') + 10;
1611
+ in++;
1612
+ len -= 3;
1613
+ out++;
1614
+ } else {
1615
+ *out++ = *in++;
1616
+ len--;
1617
+ }
385
+ }
1618
}
386
}
1619
*out = 0;
387
}
1620
- return(ret);
388
1621
+ return (ret);
389
@@ -XXX,XX +XXX,XX @@ static void blk_aio_detach(void *opaque)
1622
}
390
TRACE("Export %s: Detaching clients from AIO context %p\n", exp->name, exp->ctx);
1623
391
1624
/**
392
QTAILQ_FOREACH(client, &exp->clients, next) {
1625
@@ -XXX,XX +XXX,XX @@ uri_string_unescape(const char *str, int len, char *target) {
393
- nbd_unset_handlers(client);
1626
*
394
+ qio_channel_detach_aio_context(client->ioc);
1627
* Returns a new escaped string or NULL in case of error.
395
}
1628
*/
396
1629
-char *
397
exp->ctx = NULL;
1630
-uri_string_escape(const char *str, const char *list) {
398
@@ -XXX,XX +XXX,XX @@ static ssize_t nbd_co_send_reply(NBDRequestData *req, NBDReply *reply,
1631
+char *uri_string_escape(const char *str, const char *list)
399
g_assert(qemu_in_coroutine());
1632
+{
400
qemu_co_mutex_lock(&client->send_lock);
1633
char *ret, ch;
401
client->send_coroutine = qemu_coroutine_self();
1634
char *temp;
402
- nbd_set_handlers(client);
1635
const char *in;
403
1636
int len, out;
404
if (!len) {
1637
405
rc = nbd_send_reply(client->ioc, reply);
1638
if (str == NULL)
406
@@ -XXX,XX +XXX,XX @@ static ssize_t nbd_co_send_reply(NBDRequestData *req, NBDReply *reply,
1639
-    return(NULL);
407
}
1640
+ return (NULL);
408
1641
if (str[0] == 0)
409
client->send_coroutine = NULL;
1642
-    return(g_strdup(str));
410
- nbd_set_handlers(client);
1643
+ return (g_strdup(str));
411
qemu_co_mutex_unlock(&client->send_lock);
1644
len = strlen(str);
412
return rc;
1645
- if (!(len > 0)) return(NULL);
413
}
1646
+ if (!(len > 0))
414
@@ -XXX,XX +XXX,XX @@ static ssize_t nbd_co_receive_request(NBDRequestData *req,
1647
+ return (NULL);
415
ssize_t rc;
1648
416
1649
len += 20;
417
g_assert(qemu_in_coroutine());
1650
ret = g_malloc(len);
418
- client->recv_coroutine = qemu_coroutine_self();
1651
in = str;
419
- nbd_update_can_read(client);
1652
out = 0;
420
-
1653
- while(*in != 0) {
421
+ assert(client->recv_coroutine == qemu_coroutine_self());
1654
-    if (len - out <= 3) {
422
rc = nbd_receive_request(client->ioc, request);
1655
+ while (*in != 0) {
423
if (rc < 0) {
1656
+ if (len - out <= 3) {
424
if (rc != -EAGAIN) {
1657
temp = realloc2n(ret, &len);
425
@@ -XXX,XX +XXX,XX @@ static ssize_t nbd_co_receive_request(NBDRequestData *req,
1658
-     ret = temp;
426
1659
-    }
427
out:
1660
+ ret = temp;
428
client->recv_coroutine = NULL;
1661
+ }
429
- nbd_update_can_read(client);
1662
430
+ nbd_client_receive_next_request(client);
1663
-    ch = *in;
431
1664
-
432
return rc;
1665
-    if ((ch != '@') && (!IS_UNRESERVED(ch)) && (!strchr(list, ch))) {
433
}
1666
-     unsigned char val;
434
1667
-     ret[out++] = '%';
435
-static void nbd_trip(void *opaque)
1668
-     val = ch >> 4;
436
+/* Owns a reference to the NBDClient passed as opaque. */
1669
-     if (val <= 9)
437
+static coroutine_fn void nbd_trip(void *opaque)
1670
-        ret[out++] = '0' + val;
438
{
1671
-     else
439
NBDClient *client = opaque;
1672
-        ret[out++] = 'A' + val - 0xA;
440
NBDExport *exp = client->exp;
1673
-     val = ch & 0xF;
441
NBDRequestData *req;
1674
-     if (val <= 9)
442
- NBDRequest request;
1675
-        ret[out++] = '0' + val;
443
+ NBDRequest request = { 0 }; /* GCC thinks it can be used uninitialized */
1676
-     else
444
NBDReply reply;
1677
-        ret[out++] = 'A' + val - 0xA;
445
ssize_t ret;
1678
-     in++;
446
int flags;
1679
-    } else {
447
1680
-     ret[out++] = *in++;
448
TRACE("Reading request.");
1681
-    }
449
if (client->closing) {
1682
+ ch = *in;
450
+ nbd_client_put(client);
1683
451
return;
1684
+ if ((ch != '@') && (!IS_UNRESERVED(ch)) && (!strchr(list, ch))) {
452
}
1685
+ unsigned char val;
453
1686
+ ret[out++] = '%';
454
@@ -XXX,XX +XXX,XX @@ static void nbd_trip(void *opaque)
1687
+ val = ch >> 4;
1688
+ if (val <= 9)
1689
+ ret[out++] = '0' + val;
1690
+ else
1691
+ ret[out++] = 'A' + val - 0xA;
1692
+ val = ch & 0xF;
1693
+ if (val <= 9)
1694
+ ret[out++] = '0' + val;
1695
+ else
1696
+ ret[out++] = 'A' + val - 0xA;
1697
+ in++;
1698
+ } else {
1699
+ ret[out++] = *in++;
1700
+ }
1701
}
1702
ret[out] = 0;
1703
- return(ret);
1704
+ return (ret);
1705
}
1706
1707
/************************************************************************
1708
- *                                    *
1709
- *            Public functions                *
1710
- *                                    *
1711
+ * *
1712
+ * Public functions *
1713
+ * *
1714
************************************************************************/
1715
1716
/**
1717
@@ -XXX,XX +XXX,XX @@ uri_string_escape(const char *str, const char *list) {
1718
* Returns a new URI string (to be freed by the caller) or NULL in case
1719
* of error.
1720
*/
1721
-char *
1722
-uri_resolve(const char *uri, const char *base) {
1723
+char *uri_resolve(const char *uri, const char *base)
1724
+{
1725
char *val = NULL;
1726
int ret, len, indx, cur, out;
1727
URI *ref = NULL;
1728
@@ -XXX,XX +XXX,XX @@ uri_resolve(const char *uri, const char *base) {
1729
* URI. Should we do that here?
1730
*/
1731
if (uri == NULL)
1732
-    ret = -1;
1733
+ ret = -1;
1734
else {
1735
-    if (*uri) {
1736
-     ref = uri_new();
1737
-     ret = uri_parse_into(ref, uri);
1738
-    }
1739
-    else
1740
-     ret = 0;
1741
+ if (*uri) {
1742
+ ref = uri_new();
1743
+ ret = uri_parse_into(ref, uri);
1744
+ } else
1745
+ ret = 0;
1746
}
1747
if (ret != 0)
1748
-    goto done;
1749
+ goto done;
1750
if ((ref != NULL) && (ref->scheme != NULL)) {
1751
-    /*
1752
-     * The URI is absolute don't modify.
1753
-     */
1754
-    val = g_strdup(uri);
1755
-    goto done;
1756
+ /*
1757
+ * The URI is absolute don't modify.
1758
+ */
1759
+ val = g_strdup(uri);
1760
+ goto done;
1761
}
1762
if (base == NULL)
1763
-    ret = -1;
1764
+ ret = -1;
1765
else {
1766
-    bas = uri_new();
1767
-    ret = uri_parse_into(bas, base);
1768
+ bas = uri_new();
1769
+ ret = uri_parse_into(bas, base);
1770
}
1771
if (ret != 0) {
1772
-    if (ref)
1773
-     val = uri_to_string(ref);
1774
-    goto done;
1775
+ if (ref)
1776
+ val = uri_to_string(ref);
1777
+ goto done;
1778
}
1779
if (ref == NULL) {
1780
-    /*
1781
-     * the base fragment must be ignored
1782
-     */
1783
+ /*
1784
+ * the base fragment must be ignored
1785
+ */
1786
g_free(bas->fragment);
1787
bas->fragment = NULL;
1788
-    val = uri_to_string(bas);
1789
-    goto done;
1790
+ val = uri_to_string(bas);
1791
+ goto done;
1792
}
1793
1794
/*
1795
@@ -XXX,XX +XXX,XX @@ uri_resolve(const char *uri, const char *base) {
1796
*/
1797
res = uri_new();
1798
if ((ref->scheme == NULL) && (ref->path == NULL) &&
1799
-    ((ref->authority == NULL) && (ref->server == NULL))) {
1800
+ ((ref->authority == NULL) && (ref->server == NULL))) {
1801
res->scheme = g_strdup(bas->scheme);
1802
-    if (bas->authority != NULL)
1803
-     res->authority = g_strdup(bas->authority);
1804
-    else if (bas->server != NULL) {
1805
+ if (bas->authority != NULL)
1806
+ res->authority = g_strdup(bas->authority);
1807
+ else if (bas->server != NULL) {
1808
res->server = g_strdup(bas->server);
1809
res->user = g_strdup(bas->user);
1810
res->port = bas->port;
1811
-    }
1812
+ }
1813
res->path = g_strdup(bas->path);
1814
if (ref->query != NULL) {
1815
-     res->query = g_strdup (ref->query);
1816
+ res->query = g_strdup(ref->query);
1817
} else {
1818
res->query = g_strdup(bas->query);
1819
}
1820
res->fragment = g_strdup(ref->fragment);
1821
-    goto step_7;
1822
+ goto step_7;
1823
}
1824
1825
/*
1826
@@ -XXX,XX +XXX,XX @@ uri_resolve(const char *uri, const char *base) {
1827
* scheme is inherited from the base URI's scheme component.
1828
*/
1829
if (ref->scheme != NULL) {
1830
-    val = uri_to_string(ref);
1831
-    goto done;
1832
+ val = uri_to_string(ref);
1833
+ goto done;
1834
}
1835
res->scheme = g_strdup(bas->scheme);
1836
1837
@@ -XXX,XX +XXX,XX @@ uri_resolve(const char *uri, const char *base) {
1838
* use an authority component.
1839
*/
1840
if ((ref->authority != NULL) || (ref->server != NULL)) {
1841
-    if (ref->authority != NULL)
1842
-     res->authority = g_strdup(ref->authority);
1843
-    else {
1844
-     res->server = g_strdup(ref->server);
1845
+ if (ref->authority != NULL)
1846
+ res->authority = g_strdup(ref->authority);
1847
+ else {
1848
+ res->server = g_strdup(ref->server);
1849
res->user = g_strdup(ref->user);
1850
res->port = ref->port;
1851
-    }
1852
+ }
1853
res->path = g_strdup(ref->path);
1854
-    goto step_7;
1855
+ goto step_7;
1856
}
1857
if (bas->authority != NULL)
1858
-    res->authority = g_strdup(bas->authority);
1859
+ res->authority = g_strdup(bas->authority);
1860
else if (bas->server != NULL) {
1861
res->server = g_strdup(bas->server);
1862
res->user = g_strdup(bas->user);
1863
-    res->port = bas->port;
1864
+ res->port = bas->port;
1865
}
1866
1867
/*
1868
@@ -XXX,XX +XXX,XX @@ uri_resolve(const char *uri, const char *base) {
1869
* the reference is an absolute-path and we skip to step 7.
1870
*/
1871
if ((ref->path != NULL) && (ref->path[0] == '/')) {
1872
-    res->path = g_strdup(ref->path);
1873
-    goto step_7;
1874
+ res->path = g_strdup(ref->path);
1875
+ goto step_7;
1876
}
1877
1878
-
1879
/*
1880
* 6) If this step is reached, then we are resolving a relative-path
1881
* reference. The relative path needs to be merged with the base
1882
@@ -XXX,XX +XXX,XX @@ uri_resolve(const char *uri, const char *base) {
1883
*/
1884
len = 2; /* extra / and 0 */
1885
if (ref->path != NULL)
1886
-    len += strlen(ref->path);
1887
+ len += strlen(ref->path);
1888
if (bas->path != NULL)
1889
-    len += strlen(bas->path);
1890
+ len += strlen(bas->path);
1891
res->path = g_malloc(len);
1892
res->path[0] = 0;
1893
1894
@@ -XXX,XX +XXX,XX @@ uri_resolve(const char *uri, const char *base) {
1895
cur = 0;
1896
out = 0;
1897
if (bas->path != NULL) {
1898
-    while (bas->path[cur] != 0) {
1899
-     while ((bas->path[cur] != 0) && (bas->path[cur] != '/'))
1900
-        cur++;
1901
-     if (bas->path[cur] == 0)
1902
-        break;
1903
+ while (bas->path[cur] != 0) {
1904
+ while ((bas->path[cur] != 0) && (bas->path[cur] != '/'))
1905
+ cur++;
1906
+ if (bas->path[cur] == 0)
1907
+ break;
1908
1909
-     cur++;
1910
-     while (out < cur) {
1911
-        res->path[out] = bas->path[out];
1912
-        out++;
1913
-     }
1914
-    }
1915
+ cur++;
1916
+ while (out < cur) {
1917
+ res->path[out] = bas->path[out];
1918
+ out++;
1919
+ }
1920
+ }
1921
}
1922
res->path[out] = 0;
1923
1924
@@ -XXX,XX +XXX,XX @@ uri_resolve(const char *uri, const char *base) {
1925
* string.
1926
*/
1927
if (ref->path != NULL && ref->path[0] != 0) {
1928
-    indx = 0;
1929
-    /*
1930
-     * Ensure the path includes a '/'
1931
-     */
1932
-    if ((out == 0) && (bas->server != NULL))
1933
-     res->path[out++] = '/';
1934
-    while (ref->path[indx] != 0) {
1935
-     res->path[out++] = ref->path[indx++];
1936
-    }
1937
+ indx = 0;
1938
+ /*
1939
+ * Ensure the path includes a '/'
1940
+ */
1941
+ if ((out == 0) && (bas->server != NULL))
1942
+ res->path[out++] = '/';
1943
+ while (ref->path[indx] != 0) {
1944
+ res->path[out++] = ref->path[indx++];
1945
+ }
1946
}
1947
res->path[out] = 0;
1948
1949
@@ -XXX,XX +XXX,XX @@ step_7:
1950
455
1951
done:
456
done:
1952
if (ref != NULL)
457
nbd_request_put(req);
1953
-    uri_free(ref);
458
+ nbd_client_put(client);
1954
+ uri_free(ref);
459
return;
1955
if (bas != NULL)
460
1956
-    uri_free(bas);
461
out:
1957
+ uri_free(bas);
462
nbd_request_put(req);
1958
if (res != NULL)
463
client_close(client);
1959
-    uri_free(res);
464
+ nbd_client_put(client);
1960
- return(val);
465
}
1961
+ uri_free(res);
466
1962
+ return (val);
467
-static void nbd_read(void *opaque)
1963
}
468
+static void nbd_client_receive_next_request(NBDClient *client)
1964
469
{
1965
/**
470
- NBDClient *client = opaque;
1966
@@ -XXX,XX +XXX,XX @@ done:
471
-
1967
* Returns a new URI string (to be freed by the caller) or NULL in case
472
- if (client->recv_coroutine) {
1968
* error.
473
- qemu_coroutine_enter(client->recv_coroutine);
1969
*/
474
- } else {
1970
-char *
475
- qemu_coroutine_enter(qemu_coroutine_create(nbd_trip, client));
1971
-uri_resolve_relative (const char *uri, const char * base)
476
- }
1972
+char *uri_resolve_relative(const char *uri, const char *base)
477
-}
1973
{
478
-
1974
char *val = NULL;
479
-static void nbd_restart_write(void *opaque)
1975
int ret;
480
-{
1976
@@ -XXX,XX +XXX,XX @@ uri_resolve_relative (const char *uri, const char * base)
481
- NBDClient *client = opaque;
1977
int remove_path = 0;
482
-
1978
483
- qemu_coroutine_enter(client->send_coroutine);
1979
if ((uri == NULL) || (*uri == 0))
484
-}
1980
-    return NULL;
485
-
1981
+ return NULL;
486
-static void nbd_set_handlers(NBDClient *client)
1982
487
-{
1983
/*
488
- if (client->exp && client->exp->ctx) {
1984
* First parse URI into a standard form
489
- aio_set_fd_handler(client->exp->ctx, client->sioc->fd, true,
1985
*/
490
- client->can_read ? nbd_read : NULL,
1986
- ref = uri_new ();
491
- client->send_coroutine ? nbd_restart_write : NULL,
1987
+ ref = uri_new();
492
- NULL, client);
1988
/* If URI not already in "relative" form */
493
- }
1989
if (uri[0] != '.') {
494
-}
1990
-    ret = uri_parse_into (ref, uri);
495
-
1991
-    if (ret != 0)
496
-static void nbd_unset_handlers(NBDClient *client)
1992
-     goto done;        /* Error in URI, return NULL */
497
-{
1993
+ ret = uri_parse_into(ref, uri);
498
- if (client->exp && client->exp->ctx) {
1994
+ if (ret != 0)
499
- aio_set_fd_handler(client->exp->ctx, client->sioc->fd, true, NULL,
1995
+ goto done; /* Error in URI, return NULL */
500
- NULL, NULL, NULL);
1996
} else
501
- }
1997
-    ref->path = g_strdup(uri);
502
-}
1998
+ ref->path = g_strdup(uri);
503
-
1999
504
-static void nbd_update_can_read(NBDClient *client)
2000
/*
505
-{
2001
* Next parse base into the same standard form
506
- bool can_read = client->recv_coroutine ||
2002
*/
507
- client->nb_requests < MAX_NBD_REQUESTS;
2003
if ((base == NULL) || (*base == 0)) {
508
-
2004
-    val = g_strdup (uri);
509
- if (can_read != client->can_read) {
2005
-    goto done;
510
- client->can_read = can_read;
2006
+ val = g_strdup(uri);
511
- nbd_set_handlers(client);
2007
+ goto done;
512
-
2008
}
513
- /* There is no need to invoke aio_notify(), since aio_set_fd_handler()
2009
- bas = uri_new ();
514
- * in nbd_set_handlers() will have taken care of that */
2010
+ bas = uri_new();
515
+ if (!client->recv_coroutine && client->nb_requests < MAX_NBD_REQUESTS) {
2011
if (base[0] != '.') {
516
+ nbd_client_get(client);
2012
-    ret = uri_parse_into (bas, base);
517
+ client->recv_coroutine = qemu_coroutine_create(nbd_trip, client);
2013
-    if (ret != 0)
518
+ aio_co_schedule(client->exp->ctx, client->recv_coroutine);
2014
-     goto done;        /* Error in base, return NULL */
519
}
2015
+ ret = uri_parse_into(bas, base);
520
}
2016
+ if (ret != 0)
521
2017
+ goto done; /* Error in base, return NULL */
522
@@ -XXX,XX +XXX,XX @@ static coroutine_fn void nbd_co_client_start(void *opaque)
2018
} else
523
goto out;
2019
-    bas->path = g_strdup(base);
524
}
2020
+ bas->path = g_strdup(base);
525
qemu_co_mutex_init(&client->send_lock);
2021
526
- nbd_set_handlers(client);
2022
/*
527
2023
* If the scheme / server on the URI differs from the base,
528
if (exp) {
2024
* just return the URI
529
QTAILQ_INSERT_TAIL(&exp->clients, client, next);
2025
*/
530
}
2026
if ((ref->scheme != NULL) &&
531
+
2027
-    ((bas->scheme == NULL) ||
532
+ nbd_client_receive_next_request(client);
2028
-     (strcmp (bas->scheme, ref->scheme)) ||
533
+
2029
-     (strcmp (bas->server, ref->server)))) {
534
out:
2030
-    val = g_strdup (uri);
535
g_free(data);
2031
-    goto done;
536
}
2032
+ ((bas->scheme == NULL) || (strcmp(bas->scheme, ref->scheme)) ||
537
@@ -XXX,XX +XXX,XX @@ void nbd_client_new(NBDExport *exp,
2033
+ (strcmp(bas->server, ref->server)))) {
538
object_ref(OBJECT(client->sioc));
2034
+ val = g_strdup(uri);
539
client->ioc = QIO_CHANNEL(sioc);
2035
+ goto done;
540
object_ref(OBJECT(client->ioc));
2036
}
541
- client->can_read = true;
2037
if (bas->path == ref->path ||
542
client->close = close_fn;
2038
(bas->path && ref->path && !strcmp(bas->path, ref->path))) {
543
2039
-    val = g_strdup("");
544
data->client = client;
2040
-    goto done;
2041
+ val = g_strdup("");
2042
+ goto done;
2043
}
2044
if (bas->path == NULL) {
2045
-    val = g_strdup(ref->path);
2046
-    goto done;
2047
+ val = g_strdup(ref->path);
2048
+ goto done;
2049
}
2050
if (ref->path == NULL) {
2051
- ref->path = (char *) "/";
2052
-    remove_path = 1;
2053
+ ref->path = (char *)"/";
2054
+ remove_path = 1;
2055
}
2056
2057
/*
2058
@@ -XXX,XX +XXX,XX @@ uri_resolve_relative (const char *uri, const char * base)
2059
* two path components may be missing (bug 316224)
2060
*/
2061
if (bas->path == NULL) {
2062
-    if (ref->path != NULL) {
2063
-     uptr = ref->path;
2064
-     if (*uptr == '/')
2065
-        uptr++;
2066
-     /* exception characters from uri_to_string */
2067
-     val = uri_string_escape(uptr, "/;&=+$,");
2068
-    }
2069
-    goto done;
2070
+ if (ref->path != NULL) {
2071
+ uptr = ref->path;
2072
+ if (*uptr == '/')
2073
+ uptr++;
2074
+ /* exception characters from uri_to_string */
2075
+ val = uri_string_escape(uptr, "/;&=+$,");
2076
+ }
2077
+ goto done;
2078
}
2079
bptr = bas->path;
2080
if (ref->path == NULL) {
2081
-    for (ix = 0; bptr[ix] != 0; ix++) {
2082
-     if (bptr[ix] == '/')
2083
-        nbslash++;
2084
-    }
2085
-    uptr = NULL;
2086
-    len = 1;    /* this is for a string terminator only */
2087
+ for (ix = 0; bptr[ix] != 0; ix++) {
2088
+ if (bptr[ix] == '/')
2089
+ nbslash++;
2090
+ }
2091
+ uptr = NULL;
2092
+ len = 1; /* this is for a string terminator only */
2093
} else {
2094
- /*
2095
- * Next we compare the two strings and find where they first differ
2096
- */
2097
-    if ((ref->path[pos] == '.') && (ref->path[pos+1] == '/'))
2098
+ /*
2099
+ * Next we compare the two strings and find where they first differ
2100
+ */
2101
+ if ((ref->path[pos] == '.') && (ref->path[pos + 1] == '/'))
2102
pos += 2;
2103
-    if ((*bptr == '.') && (bptr[1] == '/'))
2104
+ if ((*bptr == '.') && (bptr[1] == '/'))
2105
bptr += 2;
2106
-    else if ((*bptr == '/') && (ref->path[pos] != '/'))
2107
-     bptr++;
2108
-    while ((bptr[pos] == ref->path[pos]) && (bptr[pos] != 0))
2109
-     pos++;
2110
+ else if ((*bptr == '/') && (ref->path[pos] != '/'))
2111
+ bptr++;
2112
+ while ((bptr[pos] == ref->path[pos]) && (bptr[pos] != 0))
2113
+ pos++;
2114
2115
-    if (bptr[pos] == ref->path[pos]) {
2116
-     val = g_strdup("");
2117
-     goto done;        /* (I can't imagine why anyone would do this) */
2118
-    }
2119
+ if (bptr[pos] == ref->path[pos]) {
2120
+ val = g_strdup("");
2121
+ goto done; /* (I can't imagine why anyone would do this) */
2122
+ }
2123
2124
-    /*
2125
-     * In URI, "back up" to the last '/' encountered. This will be the
2126
-     * beginning of the "unique" suffix of URI
2127
-     */
2128
-    ix = pos;
2129
-    if ((ref->path[ix] == '/') && (ix > 0))
2130
-     ix--;
2131
-    else if ((ref->path[ix] == 0) && (ix > 1) && (ref->path[ix - 1] == '/'))
2132
-     ix -= 2;
2133
-    for (; ix > 0; ix--) {
2134
-     if (ref->path[ix] == '/')
2135
-        break;
2136
-    }
2137
-    if (ix == 0) {
2138
-     uptr = ref->path;
2139
-    } else {
2140
-     ix++;
2141
-     uptr = &ref->path[ix];
2142
-    }
2143
+ /*
2144
+ * In URI, "back up" to the last '/' encountered. This will be the
2145
+ * beginning of the "unique" suffix of URI
2146
+ */
2147
+ ix = pos;
2148
+ if ((ref->path[ix] == '/') && (ix > 0))
2149
+ ix--;
2150
+ else if ((ref->path[ix] == 0) && (ix > 1) && (ref->path[ix - 1] == '/'))
2151
+ ix -= 2;
2152
+ for (; ix > 0; ix--) {
2153
+ if (ref->path[ix] == '/')
2154
+ break;
2155
+ }
2156
+ if (ix == 0) {
2157
+ uptr = ref->path;
2158
+ } else {
2159
+ ix++;
2160
+ uptr = &ref->path[ix];
2161
+ }
2162
2163
-    /*
2164
-     * In base, count the number of '/' from the differing point
2165
-     */
2166
-    if (bptr[pos] != ref->path[pos]) {/* check for trivial URI == base */
2167
-     for (; bptr[ix] != 0; ix++) {
2168
-        if (bptr[ix] == '/')
2169
-         nbslash++;
2170
-     }
2171
-    }
2172
-    len = strlen (uptr) + 1;
2173
+ /*
2174
+ * In base, count the number of '/' from the differing point
2175
+ */
2176
+ if (bptr[pos] != ref->path[pos]) { /* check for trivial URI == base */
2177
+ for (; bptr[ix] != 0; ix++) {
2178
+ if (bptr[ix] == '/')
2179
+ nbslash++;
2180
+ }
2181
+ }
2182
+ len = strlen(uptr) + 1;
2183
}
2184
2185
if (nbslash == 0) {
2186
-    if (uptr != NULL)
2187
-     /* exception characters from uri_to_string */
2188
-     val = uri_string_escape(uptr, "/;&=+$,");
2189
-    goto done;
2190
+ if (uptr != NULL)
2191
+ /* exception characters from uri_to_string */
2192
+ val = uri_string_escape(uptr, "/;&=+$,");
2193
+ goto done;
2194
}
2195
2196
/*
2197
@@ -XXX,XX +XXX,XX @@ uri_resolve_relative (const char *uri, const char * base)
2198
* length of the remainder of the URI, plus enough space
2199
* for the "../" groups, plus one for the terminator
2200
*/
2201
- val = g_malloc (len + 3 * nbslash);
2202
+ val = g_malloc(len + 3 * nbslash);
2203
vptr = val;
2204
/*
2205
* Put in as many "../" as needed
2206
*/
2207
- for (; nbslash>0; nbslash--) {
2208
-    *vptr++ = '.';
2209
-    *vptr++ = '.';
2210
-    *vptr++ = '/';
2211
+ for (; nbslash > 0; nbslash--) {
2212
+ *vptr++ = '.';
2213
+ *vptr++ = '.';
2214
+ *vptr++ = '/';
2215
}
2216
/*
2217
* Finish up with the end of the URI
2218
*/
2219
if (uptr != NULL) {
2220
- if ((vptr > val) && (len > 0) &&
2221
-     (uptr[0] == '/') && (vptr[-1] == '/')) {
2222
-     memcpy (vptr, uptr + 1, len - 1);
2223
-     vptr[len - 2] = 0;
2224
-    } else {
2225
-     memcpy (vptr, uptr, len);
2226
-     vptr[len - 1] = 0;
2227
-    }
2228
+ if ((vptr > val) && (len > 0) && (uptr[0] == '/') &&
2229
+ (vptr[-1] == '/')) {
2230
+ memcpy(vptr, uptr + 1, len - 1);
2231
+ vptr[len - 2] = 0;
2232
+ } else {
2233
+ memcpy(vptr, uptr, len);
2234
+ vptr[len - 1] = 0;
2235
+ }
2236
} else {
2237
-    vptr[len - 1] = 0;
2238
+ vptr[len - 1] = 0;
2239
}
2240
2241
/* escape the freshly-built path */
2242
vptr = val;
2243
-    /* exception characters from uri_to_string */
2244
+ /* exception characters from uri_to_string */
2245
val = uri_string_escape(vptr, "/;&=+$,");
2246
g_free(vptr);
2247
2248
@@ -XXX,XX +XXX,XX @@ done:
2249
if (remove_path != 0)
2250
ref->path = NULL;
2251
if (ref != NULL)
2252
-    uri_free (ref);
2253
+ uri_free(ref);
2254
if (bas != NULL)
2255
-    uri_free (bas);
2256
+ uri_free(bas);
2257
2258
return val;
2259
}
2260
@@ -XXX,XX +XXX,XX @@ done:
2261
* Utility functions to help parse and assemble query strings.
2262
*/
2263
2264
-struct QueryParams *
2265
-query_params_new (int init_alloc)
2266
+struct QueryParams *query_params_new(int init_alloc)
2267
{
2268
struct QueryParams *ps;
2269
2270
- if (init_alloc <= 0) init_alloc = 1;
2271
+ if (init_alloc <= 0)
2272
+ init_alloc = 1;
2273
2274
ps = g_new(QueryParams, 1);
2275
ps->n = 0;
2276
@@ -XXX,XX +XXX,XX @@ query_params_new (int init_alloc)
2277
/* Ensure there is space to store at least one more parameter
2278
* at the end of the set.
2279
*/
2280
-static int
2281
-query_params_append (struct QueryParams *ps,
2282
- const char *name, const char *value)
2283
+static int query_params_append(struct QueryParams *ps, const char *name,
2284
+ const char *value)
2285
{
2286
if (ps->n >= ps->alloc) {
2287
ps->p = g_renew(QueryParam, ps->p, ps->alloc * 2);
2288
@@ -XXX,XX +XXX,XX @@ query_params_append (struct QueryParams *ps,
2289
return 0;
2290
}
2291
2292
-void
2293
-query_params_free (struct QueryParams *ps)
2294
+void query_params_free(struct QueryParams *ps)
2295
{
2296
int i;
2297
2298
for (i = 0; i < ps->n; ++i) {
2299
- g_free (ps->p[i].name);
2300
- g_free (ps->p[i].value);
2301
+ g_free(ps->p[i].name);
2302
+ g_free(ps->p[i].value);
2303
}
2304
- g_free (ps->p);
2305
- g_free (ps);
2306
+ g_free(ps->p);
2307
+ g_free(ps);
2308
}
2309
2310
-struct QueryParams *
2311
-query_params_parse (const char *query)
2312
+struct QueryParams *query_params_parse(const char *query)
2313
{
2314
struct QueryParams *ps;
2315
const char *end, *eq;
2316
2317
- ps = query_params_new (0);
2318
- if (!query || query[0] == '\0') return ps;
2319
+ ps = query_params_new(0);
2320
+ if (!query || query[0] == '\0')
2321
+ return ps;
2322
2323
while (*query) {
2324
char *name = NULL, *value = NULL;
2325
2326
/* Find the next separator, or end of the string. */
2327
- end = strchr (query, '&');
2328
+ end = strchr(query, '&');
2329
if (!end)
2330
- end = strchr (query, ';');
2331
+ end = strchr(query, ';');
2332
if (!end)
2333
- end = query + strlen (query);
2334
+ end = query + strlen(query);
2335
2336
/* Find the first '=' character between here and end. */
2337
- eq = strchr (query, '=');
2338
- if (eq && eq >= end) eq = NULL;
2339
+ eq = strchr(query, '=');
2340
+ if (eq && eq >= end)
2341
+ eq = NULL;
2342
2343
/* Empty section (eg. "&&"). */
2344
if (end == query)
2345
@@ -XXX,XX +XXX,XX @@ query_params_parse (const char *query)
2346
* and consistent with CGI.pm we assume value is "".
2347
*/
2348
else if (!eq) {
2349
- name = uri_string_unescape (query, end - query, NULL);
2350
+ name = uri_string_unescape(query, end - query, NULL);
2351
value = NULL;
2352
}
2353
/* Or if we have "name=" here (works around annoying
2354
* problem when calling uri_string_unescape with len = 0).
2355
*/
2356
- else if (eq+1 == end) {
2357
- name = uri_string_unescape (query, eq - query, NULL);
2358
+ else if (eq + 1 == end) {
2359
+ name = uri_string_unescape(query, eq - query, NULL);
2360
value = g_new0(char, 1);
2361
}
2362
/* If the '=' character is at the beginning then we have
2363
@@ -XXX,XX +XXX,XX @@ query_params_parse (const char *query)
2364
2365
/* Otherwise it's "name=value". */
2366
else {
2367
- name = uri_string_unescape (query, eq - query, NULL);
2368
- value = uri_string_unescape (eq+1, end - (eq+1), NULL);
2369
+ name = uri_string_unescape(query, eq - query, NULL);
2370
+ value = uri_string_unescape(eq + 1, end - (eq + 1), NULL);
2371
}
2372
2373
/* Append to the parameter set. */
2374
- query_params_append (ps, name, value);
2375
+ query_params_append(ps, name, value);
2376
g_free(name);
2377
g_free(value);
2378
2379
next:
2380
query = end;
2381
- if (*query) query ++; /* skip '&' separator */
2382
+ if (*query)
2383
+ query++; /* skip '&' separator */
2384
}
2385
2386
return ps;
2387
--
2.9.3

--
2.14.3
New patch
1
From: Paolo Bonzini <pbonzini@redhat.com>
1
2
3
As a small step towards the introduction of multiqueue, we want
4
coroutines to remain on the same AioContext that started them,
5
unless they are moved explicitly with e.g. aio_co_schedule. This patch
6
avoids that coroutines switch AioContext when they use a CoMutex.
7
For now it does not make much of a difference, because the CoMutex
8
is not thread-safe and the AioContext itself is used to protect the
9
CoMutex from concurrent access. However, this is going to change.
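For illustration only (not part of the patch), a minimal sketch of the
behaviour this enables, assuming the in-tree qemu/coroutine.h and block/aio.h
APIs: a coroutine that blocks on the CoMutex is re-entered through
aio_co_wake(), i.e. on the AioContext it was already running on.

    static CoMutex example_lock;

    /* The waiter may yield inside qemu_co_mutex_lock(); when the holder calls
     * qemu_co_mutex_unlock(), the waiter is restarted with aio_co_wake() and
     * therefore stays on the AioContext it started on. */
    static void coroutine_fn example_waiter(void *opaque)
    {
        qemu_co_mutex_lock(&example_lock);    /* may yield */
        /* ... critical section ... */
        qemu_co_mutex_unlock(&example_lock);  /* wakes the next waiter */
    }

    static void example_start(AioContext *ctx)
    {
        Coroutine *co;

        qemu_co_mutex_init(&example_lock);
        co = qemu_coroutine_create(example_waiter, NULL);
        aio_co_schedule(ctx, co);             /* explicit placement on ctx */
    }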
10
11
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
12
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
13
Reviewed-by: Fam Zheng <famz@redhat.com>
14
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
15
Message-id: 20170213135235.12274-9-pbonzini@redhat.com
16
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
17
---
18
util/qemu-coroutine-lock.c | 5 ++---
19
util/trace-events | 1 -
20
2 files changed, 2 insertions(+), 4 deletions(-)
21
22
diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
23
index XXXXXXX..XXXXXXX 100644
24
--- a/util/qemu-coroutine-lock.c
25
+++ b/util/qemu-coroutine-lock.c
26
@@ -XXX,XX +XXX,XX @@
27
#include "qemu/coroutine.h"
28
#include "qemu/coroutine_int.h"
29
#include "qemu/queue.h"
30
+#include "block/aio.h"
31
#include "trace.h"
32
33
void qemu_co_queue_init(CoQueue *queue)
34
@@ -XXX,XX +XXX,XX @@ void qemu_co_queue_run_restart(Coroutine *co)
35
36
static bool qemu_co_queue_do_restart(CoQueue *queue, bool single)
37
{
38
- Coroutine *self = qemu_coroutine_self();
39
Coroutine *next;
40
41
if (QSIMPLEQ_EMPTY(&queue->entries)) {
42
@@ -XXX,XX +XXX,XX @@ static bool qemu_co_queue_do_restart(CoQueue *queue, bool single)
43
44
while ((next = QSIMPLEQ_FIRST(&queue->entries)) != NULL) {
45
QSIMPLEQ_REMOVE_HEAD(&queue->entries, co_queue_next);
46
- QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, next, co_queue_next);
47
- trace_qemu_co_queue_next(next);
48
+ aio_co_wake(next);
49
if (single) {
50
break;
51
}
52
diff --git a/util/trace-events b/util/trace-events
53
index XXXXXXX..XXXXXXX 100644
54
--- a/util/trace-events
55
+++ b/util/trace-events
56
@@ -XXX,XX +XXX,XX @@ qemu_coroutine_terminate(void *co) "self %p"
57
58
# util/qemu-coroutine-lock.c
59
qemu_co_queue_run_restart(void *co) "co %p"
60
-qemu_co_queue_next(void *nxt) "next %p"
61
qemu_co_mutex_lock_entry(void *mutex, void *self) "mutex %p self %p"
62
qemu_co_mutex_lock_return(void *mutex, void *self) "mutex %p self %p"
63
qemu_co_mutex_unlock_entry(void *mutex, void *self) "mutex %p self %p"
64
--
65
2.9.3
66
67
New patch
1
From: Paolo Bonzini <pbonzini@redhat.com>
1
2
3
Keep the coroutine on the same AioContext. Without this change,
4
there would be a race between yielding the coroutine and reentering it.
5
While the race cannot happen now, because the code only runs from a single
6
AioContext, this will change with multiqueue support in the block layer.
7
8
While at it, replace the custom bottom half with aio_co_schedule.
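The resulting idiom can be written as a tiny helper; a sketch assuming the
block/aio.h and qemu/coroutine.h APIs, not code from the patch:

    /* Re-enter the current coroutine from its own AioContext's event loop,
     * i.e. delay it by one iteration without a hand-rolled bottom half. */
    static void coroutine_fn example_defer_one_iteration(void)
    {
        aio_co_schedule(qemu_get_current_aio_context(), qemu_coroutine_self());
        qemu_coroutine_yield();
    }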
9
10
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
11
Reviewed-by: Fam Zheng <famz@redhat.com>
12
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
13
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
14
Message-id: 20170213135235.12274-10-pbonzini@redhat.com
15
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
16
---
17
block/blkdebug.c | 9 +--------
18
1 file changed, 1 insertion(+), 8 deletions(-)
19
20
diff --git a/block/blkdebug.c b/block/blkdebug.c
21
index XXXXXXX..XXXXXXX 100644
22
--- a/block/blkdebug.c
23
+++ b/block/blkdebug.c
24
@@ -XXX,XX +XXX,XX @@ out:
25
return ret;
26
}
27
28
-static void error_callback_bh(void *opaque)
29
-{
30
- Coroutine *co = opaque;
31
- qemu_coroutine_enter(co);
32
-}
33
-
34
static int inject_error(BlockDriverState *bs, BlkdebugRule *rule)
35
{
36
BDRVBlkdebugState *s = bs->opaque;
37
@@ -XXX,XX +XXX,XX @@ static int inject_error(BlockDriverState *bs, BlkdebugRule *rule)
38
}
39
40
if (!immediately) {
41
- aio_bh_schedule_oneshot(bdrv_get_aio_context(bs), error_callback_bh,
42
- qemu_coroutine_self());
43
+ aio_co_schedule(qemu_get_current_aio_context(), qemu_coroutine_self());
44
qemu_coroutine_yield();
45
}
46
47
--
48
2.9.3
49
50
New patch
1
From: Paolo Bonzini <pbonzini@redhat.com>
1
2
3
qed_aio_start_io and qed_aio_next_io will not have to acquire/release
4
the AioContext, while qed_aio_next_io_cb will. Split the functionality
5
and gain a little type-safety in the process.
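In outline, the split looks as follows (a sketch of the hunks below, not
additional code):

    static void qed_aio_next_io(QEDAIOCB *acb, int ret);   /* typed core */

    /* Callers that already hold a QEDAIOCB use the typed entry point... */
    static void qed_aio_start_io(QEDAIOCB *acb)
    {
        qed_aio_next_io(acb, 0);
    }

    /* ...while completion-callback interfaces keep a thin void * adapter. */
    static void qed_aio_next_io_cb(void *opaque, int ret)
    {
        QEDAIOCB *acb = opaque;

        qed_aio_next_io(acb, ret);
    }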
6
7
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
8
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
9
Reviewed-by: Fam Zheng <famz@redhat.com>
10
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
11
Message-id: 20170213135235.12274-11-pbonzini@redhat.com
12
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
13
---
14
block/qed.c | 39 +++++++++++++++++++++++++--------------
15
1 file changed, 25 insertions(+), 14 deletions(-)
16
17
diff --git a/block/qed.c b/block/qed.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/block/qed.c
20
+++ b/block/qed.c
21
@@ -XXX,XX +XXX,XX @@ static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
22
return l2_table;
23
}
24
25
-static void qed_aio_next_io(void *opaque, int ret);
26
+static void qed_aio_next_io(QEDAIOCB *acb, int ret);
27
+
28
+static void qed_aio_start_io(QEDAIOCB *acb)
29
+{
30
+ qed_aio_next_io(acb, 0);
31
+}
32
+
33
+static void qed_aio_next_io_cb(void *opaque, int ret)
34
+{
35
+ QEDAIOCB *acb = opaque;
36
+
37
+ qed_aio_next_io(acb, ret);
38
+}
39
40
static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
41
{
42
@@ -XXX,XX +XXX,XX @@ static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
43
44
acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
45
if (acb) {
46
- qed_aio_next_io(acb, 0);
47
+ qed_aio_start_io(acb);
48
}
49
}
50
51
@@ -XXX,XX +XXX,XX @@ static void qed_aio_complete(QEDAIOCB *acb, int ret)
52
QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
53
acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
54
if (acb) {
55
- qed_aio_next_io(acb, 0);
56
+ qed_aio_start_io(acb);
57
} else if (s->header.features & QED_F_NEED_CHECK) {
58
qed_start_need_check_timer(s);
59
}
60
@@ -XXX,XX +XXX,XX @@ static void qed_commit_l2_update(void *opaque, int ret)
61
acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
62
assert(acb->request.l2_table != NULL);
63
64
- qed_aio_next_io(opaque, ret);
65
+ qed_aio_next_io(acb, ret);
66
}
67
68
/**
69
@@ -XXX,XX +XXX,XX @@ static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
70
if (need_alloc) {
71
/* Write out the whole new L2 table */
72
qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
73
- qed_aio_write_l1_update, acb);
74
+ qed_aio_write_l1_update, acb);
75
} else {
76
/* Write out only the updated part of the L2 table */
77
qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
78
- qed_aio_next_io, acb);
79
+ qed_aio_next_io_cb, acb);
80
}
81
return;
82
83
@@ -XXX,XX +XXX,XX @@ static void qed_aio_write_main(void *opaque, int ret)
84
}
85
86
if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
87
- next_fn = qed_aio_next_io;
88
+ next_fn = qed_aio_next_io_cb;
89
} else {
90
if (s->bs->backing) {
91
next_fn = qed_aio_write_flush_before_l2_update;
92
@@ -XXX,XX +XXX,XX @@ static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
93
if (acb->flags & QED_AIOCB_ZERO) {
94
/* Skip ahead if the clusters are already zero */
95
if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
96
- qed_aio_next_io(acb, 0);
97
+ qed_aio_start_io(acb);
98
return;
99
}
100
101
@@ -XXX,XX +XXX,XX @@ static void qed_aio_read_data(void *opaque, int ret,
102
/* Handle zero cluster and backing file reads */
103
if (ret == QED_CLUSTER_ZERO) {
104
qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
105
- qed_aio_next_io(acb, 0);
106
+ qed_aio_start_io(acb);
107
return;
108
} else if (ret != QED_CLUSTER_FOUND) {
109
qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
110
- &acb->backing_qiov, qed_aio_next_io, acb);
111
+ &acb->backing_qiov, qed_aio_next_io_cb, acb);
112
return;
113
}
114
115
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
116
bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE,
117
&acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
118
- qed_aio_next_io, acb);
119
+ qed_aio_next_io_cb, acb);
120
return;
121
122
err:
123
@@ -XXX,XX +XXX,XX @@ err:
124
/**
125
* Begin next I/O or complete the request
126
*/
127
-static void qed_aio_next_io(void *opaque, int ret)
128
+static void qed_aio_next_io(QEDAIOCB *acb, int ret)
129
{
130
- QEDAIOCB *acb = opaque;
131
BDRVQEDState *s = acb_to_s(acb);
132
QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ?
133
qed_aio_write_data : qed_aio_read_data;
134
@@ -XXX,XX +XXX,XX @@ static BlockAIOCB *qed_aio_setup(BlockDriverState *bs,
135
qemu_iovec_init(&acb->cur_qiov, qiov->niov);
136
137
/* Start request */
138
- qed_aio_next_io(acb, 0);
139
+ qed_aio_start_io(acb);
140
return &acb->common;
141
}
142
143
--
144
2.9.3
145
146
New patch
1
From: Paolo Bonzini <pbonzini@redhat.com>
1
2
3
The AioContext data structures are now protected by list_lock and/or
4
they are walked with FOREACH_RCU primitives. There is no longer any need
5
to acquire the AioContext for the entire duration of aio_dispatch.
6
Instead, just acquire and release it around each callback invocation.
7
The next step is then to push it further down.
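In outline, the lock moves from around the whole handler walk to around each
callback invocation; a simplified sketch (AioHandler fields as in
util/aio-posix.c):

    static bool example_dispatch_node(AioContext *ctx, AioHandler *node)
    {
        bool progress = false;

        if (node->io_read) {
            aio_context_acquire(ctx);      /* taken per callback ... */
            node->io_read(node->opaque);
            aio_context_release(ctx);      /* ... and dropped right after */
            progress = true;
        }
        return progress;
    }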
8
9
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
10
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
11
Reviewed-by: Fam Zheng <famz@redhat.com>
12
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
13
Message-id: 20170213135235.12274-12-pbonzini@redhat.com
14
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
15
---
16
util/aio-posix.c | 25 +++++++++++--------------
17
util/aio-win32.c | 15 +++++++--------
18
util/async.c | 2 ++
19
3 files changed, 20 insertions(+), 22 deletions(-)
20
21
diff --git a/util/aio-posix.c b/util/aio-posix.c
22
index XXXXXXX..XXXXXXX 100644
23
--- a/util/aio-posix.c
24
+++ b/util/aio-posix.c
25
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx)
26
(revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
27
aio_node_check(ctx, node->is_external) &&
28
node->io_read) {
29
+ aio_context_acquire(ctx);
30
node->io_read(node->opaque);
31
+ aio_context_release(ctx);
32
33
/* aio_notify() does not count as progress */
34
if (node->opaque != &ctx->notifier) {
35
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx)
36
(revents & (G_IO_OUT | G_IO_ERR)) &&
37
aio_node_check(ctx, node->is_external) &&
38
node->io_write) {
39
+ aio_context_acquire(ctx);
40
node->io_write(node->opaque);
41
+ aio_context_release(ctx);
42
progress = true;
43
}
44
45
@@ -XXX,XX +XXX,XX @@ bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
46
}
47
48
/* Run our timers */
49
+ aio_context_acquire(ctx);
50
progress |= timerlistgroup_run_timers(&ctx->tlg);
51
+ aio_context_release(ctx);
52
53
return progress;
54
}
55
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
56
int64_t timeout;
57
int64_t start = 0;
58
59
- aio_context_acquire(ctx);
60
- progress = false;
61
-
62
/* aio_notify can avoid the expensive event_notifier_set if
63
* everything (file descriptors, bottom halves, timers) will
64
* be re-evaluated before the next blocking poll(). This is
65
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
66
start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
67
}
68
69
- if (try_poll_mode(ctx, blocking)) {
70
- progress = true;
71
- } else {
72
+ aio_context_acquire(ctx);
73
+ progress = try_poll_mode(ctx, blocking);
74
+ aio_context_release(ctx);
75
+
76
+ if (!progress) {
77
assert(npfd == 0);
78
79
/* fill pollfds */
80
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
81
timeout = blocking ? aio_compute_timeout(ctx) : 0;
82
83
/* wait until next event */
84
- if (timeout) {
85
- aio_context_release(ctx);
86
- }
87
if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
88
AioHandler epoll_handler;
89
90
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
91
} else {
92
ret = qemu_poll_ns(pollfds, npfd, timeout);
93
}
94
- if (timeout) {
95
- aio_context_acquire(ctx);
96
- }
97
}
98
99
if (blocking) {
100
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
101
progress = true;
102
}
103
104
- aio_context_release(ctx);
105
-
106
return progress;
107
}
108
109
diff --git a/util/aio-win32.c b/util/aio-win32.c
110
index XXXXXXX..XXXXXXX 100644
111
--- a/util/aio-win32.c
112
+++ b/util/aio-win32.c
113
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
114
(revents || event_notifier_get_handle(node->e) == event) &&
115
node->io_notify) {
116
node->pfd.revents = 0;
117
+ aio_context_acquire(ctx);
118
node->io_notify(node->e);
119
+ aio_context_release(ctx);
120
121
/* aio_notify() does not count as progress */
122
if (node->e != &ctx->notifier) {
123
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
124
(node->io_read || node->io_write)) {
125
node->pfd.revents = 0;
126
if ((revents & G_IO_IN) && node->io_read) {
127
+ aio_context_acquire(ctx);
128
node->io_read(node->opaque);
129
+ aio_context_release(ctx);
130
progress = true;
131
}
132
if ((revents & G_IO_OUT) && node->io_write) {
133
+ aio_context_acquire(ctx);
134
node->io_write(node->opaque);
135
+ aio_context_release(ctx);
136
progress = true;
137
}
138
139
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
140
int count;
141
int timeout;
142
143
- aio_context_acquire(ctx);
144
progress = false;
145
146
/* aio_notify can avoid the expensive event_notifier_set if
147
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
148
149
timeout = blocking && !have_select_revents
150
? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
151
- if (timeout) {
152
- aio_context_release(ctx);
153
- }
154
ret = WaitForMultipleObjects(count, events, FALSE, timeout);
155
if (blocking) {
156
assert(first);
157
atomic_sub(&ctx->notify_me, 2);
158
}
159
- if (timeout) {
160
- aio_context_acquire(ctx);
161
- }
162
163
if (first) {
164
aio_notify_accept(ctx);
165
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
166
progress |= aio_dispatch_handlers(ctx, event);
167
} while (count > 0);
168
169
+ aio_context_acquire(ctx);
170
progress |= timerlistgroup_run_timers(&ctx->tlg);
171
-
172
aio_context_release(ctx);
173
return progress;
174
}
175
diff --git a/util/async.c b/util/async.c
176
index XXXXXXX..XXXXXXX 100644
177
--- a/util/async.c
178
+++ b/util/async.c
179
@@ -XXX,XX +XXX,XX @@ int aio_bh_poll(AioContext *ctx)
180
ret = 1;
181
}
182
bh->idle = 0;
183
+ aio_context_acquire(ctx);
184
aio_bh_call(bh);
185
+ aio_context_release(ctx);
186
}
187
if (bh->deleted) {
188
deleted = true;
189
--
190
2.9.3
191
192
New patch
1
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
3
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
4
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
5
Reviewed-by: Fam Zheng <famz@redhat.com>
6
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
7
Message-id: 20170213135235.12274-13-pbonzini@redhat.com
8
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
9
---
10
block/qed.h | 3 +++
11
block/curl.c | 2 ++
12
block/io.c | 5 +++++
13
block/iscsi.c | 8 ++++++--
14
block/null.c | 4 ++++
15
block/qed.c | 12 ++++++++++++
16
block/throttle-groups.c | 2 ++
17
util/aio-posix.c | 2 --
18
util/aio-win32.c | 2 --
19
util/qemu-coroutine-sleep.c | 2 +-
20
10 files changed, 35 insertions(+), 7 deletions(-)
21
22
diff --git a/block/qed.h b/block/qed.h
23
index XXXXXXX..XXXXXXX 100644
24
--- a/block/qed.h
25
+++ b/block/qed.h
26
@@ -XXX,XX +XXX,XX @@ enum {
27
*/
28
typedef void QEDFindClusterFunc(void *opaque, int ret, uint64_t offset, size_t len);
29
30
+void qed_acquire(BDRVQEDState *s);
31
+void qed_release(BDRVQEDState *s);
32
+
33
/**
34
* Generic callback for chaining async callbacks
35
*/
36
diff --git a/block/curl.c b/block/curl.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/block/curl.c
39
+++ b/block/curl.c
40
@@ -XXX,XX +XXX,XX @@ static void curl_multi_timeout_do(void *arg)
41
return;
42
}
43
44
+ aio_context_acquire(s->aio_context);
45
curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
46
47
curl_multi_check_completion(s);
48
+ aio_context_release(s->aio_context);
49
#else
50
abort();
51
#endif
52
diff --git a/block/io.c b/block/io.c
53
index XXXXXXX..XXXXXXX 100644
54
--- a/block/io.c
55
+++ b/block/io.c
56
@@ -XXX,XX +XXX,XX @@ void bdrv_aio_cancel(BlockAIOCB *acb)
57
if (acb->aiocb_info->get_aio_context) {
58
aio_poll(acb->aiocb_info->get_aio_context(acb), true);
59
} else if (acb->bs) {
60
+ /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
61
+ * assert that we're not using an I/O thread. Thread-safe
62
+ * code should use bdrv_aio_cancel_async exclusively.
63
+ */
64
+ assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
65
aio_poll(bdrv_get_aio_context(acb->bs), true);
66
} else {
67
abort();
68
diff --git a/block/iscsi.c b/block/iscsi.c
69
index XXXXXXX..XXXXXXX 100644
70
--- a/block/iscsi.c
71
+++ b/block/iscsi.c
72
@@ -XXX,XX +XXX,XX @@ static void iscsi_retry_timer_expired(void *opaque)
73
struct IscsiTask *iTask = opaque;
74
iTask->complete = 1;
75
if (iTask->co) {
76
- qemu_coroutine_enter(iTask->co);
77
+ aio_co_wake(iTask->co);
78
}
79
}
80
81
@@ -XXX,XX +XXX,XX @@ static void iscsi_nop_timed_event(void *opaque)
82
{
83
IscsiLun *iscsilun = opaque;
84
85
+ aio_context_acquire(iscsilun->aio_context);
86
if (iscsi_get_nops_in_flight(iscsilun->iscsi) >= MAX_NOP_FAILURES) {
87
error_report("iSCSI: NOP timeout. Reconnecting...");
88
iscsilun->request_timed_out = true;
89
} else if (iscsi_nop_out_async(iscsilun->iscsi, NULL, NULL, 0, NULL) != 0) {
90
error_report("iSCSI: failed to sent NOP-Out. Disabling NOP messages.");
91
- return;
92
+ goto out;
93
}
94
95
timer_mod(iscsilun->nop_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
96
iscsi_set_events(iscsilun);
97
+
98
+out:
99
+ aio_context_release(iscsilun->aio_context);
100
}
101
102
static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)
103
diff --git a/block/null.c b/block/null.c
104
index XXXXXXX..XXXXXXX 100644
105
--- a/block/null.c
106
+++ b/block/null.c
107
@@ -XXX,XX +XXX,XX @@ static void null_bh_cb(void *opaque)
108
static void null_timer_cb(void *opaque)
109
{
110
NullAIOCB *acb = opaque;
111
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
112
+
113
+ aio_context_acquire(ctx);
114
acb->common.cb(acb->common.opaque, 0);
115
+ aio_context_release(ctx);
116
timer_deinit(&acb->timer);
117
qemu_aio_unref(acb);
118
}
119
diff --git a/block/qed.c b/block/qed.c
120
index XXXXXXX..XXXXXXX 100644
121
--- a/block/qed.c
122
+++ b/block/qed.c
123
@@ -XXX,XX +XXX,XX @@ static void qed_need_check_timer_cb(void *opaque)
124
125
trace_qed_need_check_timer_cb(s);
126
127
+ qed_acquire(s);
128
qed_plug_allocating_write_reqs(s);
129
130
/* Ensure writes are on disk before clearing flag */
131
bdrv_aio_flush(s->bs->file->bs, qed_clear_need_check, s);
132
+ qed_release(s);
133
+}
134
+
135
+void qed_acquire(BDRVQEDState *s)
136
+{
137
+ aio_context_acquire(bdrv_get_aio_context(s->bs));
138
+}
139
+
140
+void qed_release(BDRVQEDState *s)
141
+{
142
+ aio_context_release(bdrv_get_aio_context(s->bs));
143
}
144
145
static void qed_start_need_check_timer(BDRVQEDState *s)
146
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
147
index XXXXXXX..XXXXXXX 100644
148
--- a/block/throttle-groups.c
149
+++ b/block/throttle-groups.c
150
@@ -XXX,XX +XXX,XX @@ static void timer_cb(BlockBackend *blk, bool is_write)
151
qemu_mutex_unlock(&tg->lock);
152
153
/* Run the request that was waiting for this timer */
154
+ aio_context_acquire(blk_get_aio_context(blk));
155
empty_queue = !qemu_co_enter_next(&blkp->throttled_reqs[is_write]);
156
+ aio_context_release(blk_get_aio_context(blk));
157
158
/* If the request queue was empty then we have to take care of
159
* scheduling the next one */
160
diff --git a/util/aio-posix.c b/util/aio-posix.c
161
index XXXXXXX..XXXXXXX 100644
162
--- a/util/aio-posix.c
163
+++ b/util/aio-posix.c
164
@@ -XXX,XX +XXX,XX @@ bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
165
}
166
167
/* Run our timers */
168
- aio_context_acquire(ctx);
169
progress |= timerlistgroup_run_timers(&ctx->tlg);
170
- aio_context_release(ctx);
171
172
return progress;
173
}
174
diff --git a/util/aio-win32.c b/util/aio-win32.c
175
index XXXXXXX..XXXXXXX 100644
176
--- a/util/aio-win32.c
177
+++ b/util/aio-win32.c
178
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
179
progress |= aio_dispatch_handlers(ctx, event);
180
} while (count > 0);
181
182
- aio_context_acquire(ctx);
183
progress |= timerlistgroup_run_timers(&ctx->tlg);
184
- aio_context_release(ctx);
185
return progress;
186
}
187
188
diff --git a/util/qemu-coroutine-sleep.c b/util/qemu-coroutine-sleep.c
189
index XXXXXXX..XXXXXXX 100644
190
--- a/util/qemu-coroutine-sleep.c
191
+++ b/util/qemu-coroutine-sleep.c
192
@@ -XXX,XX +XXX,XX @@ static void co_sleep_cb(void *opaque)
193
{
194
CoSleepCB *sleep_cb = opaque;
195
196
- qemu_coroutine_enter(sleep_cb->co);
197
+ aio_co_wake(sleep_cb->co);
198
}
199
200
void coroutine_fn co_aio_sleep_ns(AioContext *ctx, QEMUClockType type,
201
--
202
2.9.3
203
204
New patch
1
From: Paolo Bonzini <pbonzini@redhat.com>
1
2
3
This covers both file descriptor callbacks and polling callbacks,
4
since they execute related code.
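The pattern repeated across the drivers below, sketched with hypothetical
ExampleClient/example_service names (only aio_context_acquire()/release() are
existing APIs here):

    static void example_process_read(void *opaque)
    {
        ExampleClient *client = opaque;              /* hypothetical type */

        aio_context_acquire(client->aio_context);
        example_service(client, POLLIN);             /* hypothetical helper */
        aio_context_release(client->aio_context);
    }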
5
6
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
7
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
8
Reviewed-by: Fam Zheng <famz@redhat.com>
9
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
10
Message-id: 20170213135235.12274-14-pbonzini@redhat.com
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
13
block/curl.c | 16 +++++++++++++---
14
block/iscsi.c | 4 ++++
15
block/linux-aio.c | 4 ++++
16
block/nfs.c | 6 ++++++
17
block/sheepdog.c | 29 +++++++++++++++--------------
18
block/ssh.c | 29 +++++++++--------------------
19
block/win32-aio.c | 10 ++++++----
20
hw/block/virtio-blk.c | 5 ++++-
21
hw/scsi/virtio-scsi.c | 7 +++++++
22
util/aio-posix.c | 7 -------
23
util/aio-win32.c | 6 ------
24
11 files changed, 68 insertions(+), 55 deletions(-)
25
26
diff --git a/block/curl.c b/block/curl.c
27
index XXXXXXX..XXXXXXX 100644
28
--- a/block/curl.c
29
+++ b/block/curl.c
30
@@ -XXX,XX +XXX,XX @@ static void curl_multi_check_completion(BDRVCURLState *s)
31
}
32
}
33
34
-static void curl_multi_do(void *arg)
35
+static void curl_multi_do_locked(CURLState *s)
36
{
37
- CURLState *s = (CURLState *)arg;
38
CURLSocket *socket, *next_socket;
39
int running;
40
int r;
41
@@ -XXX,XX +XXX,XX @@ static void curl_multi_do(void *arg)
42
}
43
}
44
45
+static void curl_multi_do(void *arg)
46
+{
47
+ CURLState *s = (CURLState *)arg;
48
+
49
+ aio_context_acquire(s->s->aio_context);
50
+ curl_multi_do_locked(s);
51
+ aio_context_release(s->s->aio_context);
52
+}
53
+
54
static void curl_multi_read(void *arg)
55
{
56
CURLState *s = (CURLState *)arg;
57
58
- curl_multi_do(arg);
59
+ aio_context_acquire(s->s->aio_context);
60
+ curl_multi_do_locked(s);
61
curl_multi_check_completion(s->s);
62
+ aio_context_release(s->s->aio_context);
63
}
64
65
static void curl_multi_timeout_do(void *arg)
66
diff --git a/block/iscsi.c b/block/iscsi.c
67
index XXXXXXX..XXXXXXX 100644
68
--- a/block/iscsi.c
69
+++ b/block/iscsi.c
70
@@ -XXX,XX +XXX,XX @@ iscsi_process_read(void *arg)
71
IscsiLun *iscsilun = arg;
72
struct iscsi_context *iscsi = iscsilun->iscsi;
73
74
+ aio_context_acquire(iscsilun->aio_context);
75
iscsi_service(iscsi, POLLIN);
76
iscsi_set_events(iscsilun);
77
+ aio_context_release(iscsilun->aio_context);
78
}
79
80
static void
81
@@ -XXX,XX +XXX,XX @@ iscsi_process_write(void *arg)
82
IscsiLun *iscsilun = arg;
83
struct iscsi_context *iscsi = iscsilun->iscsi;
84
85
+ aio_context_acquire(iscsilun->aio_context);
86
iscsi_service(iscsi, POLLOUT);
87
iscsi_set_events(iscsilun);
88
+ aio_context_release(iscsilun->aio_context);
89
}
90
91
static int64_t sector_lun2qemu(int64_t sector, IscsiLun *iscsilun)
92
diff --git a/block/linux-aio.c b/block/linux-aio.c
93
index XXXXXXX..XXXXXXX 100644
94
--- a/block/linux-aio.c
95
+++ b/block/linux-aio.c
96
@@ -XXX,XX +XXX,XX @@ static void qemu_laio_completion_cb(EventNotifier *e)
97
LinuxAioState *s = container_of(e, LinuxAioState, e);
98
99
if (event_notifier_test_and_clear(&s->e)) {
100
+ aio_context_acquire(s->aio_context);
101
qemu_laio_process_completions_and_submit(s);
102
+ aio_context_release(s->aio_context);
103
}
104
}
105
106
@@ -XXX,XX +XXX,XX @@ static bool qemu_laio_poll_cb(void *opaque)
107
return false;
108
}
109
110
+ aio_context_acquire(s->aio_context);
111
qemu_laio_process_completions_and_submit(s);
112
+ aio_context_release(s->aio_context);
113
return true;
114
}
115
116
diff --git a/block/nfs.c b/block/nfs.c
117
index XXXXXXX..XXXXXXX 100644
118
--- a/block/nfs.c
119
+++ b/block/nfs.c
120
@@ -XXX,XX +XXX,XX @@ static void nfs_set_events(NFSClient *client)
121
static void nfs_process_read(void *arg)
122
{
123
NFSClient *client = arg;
124
+
125
+ aio_context_acquire(client->aio_context);
126
nfs_service(client->context, POLLIN);
127
nfs_set_events(client);
128
+ aio_context_release(client->aio_context);
129
}
130
131
static void nfs_process_write(void *arg)
132
{
133
NFSClient *client = arg;
134
+
135
+ aio_context_acquire(client->aio_context);
136
nfs_service(client->context, POLLOUT);
137
nfs_set_events(client);
138
+ aio_context_release(client->aio_context);
139
}
140
141
static void nfs_co_init_task(BlockDriverState *bs, NFSRPC *task)
142
diff --git a/block/sheepdog.c b/block/sheepdog.c
143
index XXXXXXX..XXXXXXX 100644
144
--- a/block/sheepdog.c
145
+++ b/block/sheepdog.c
146
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int send_co_req(int sockfd, SheepdogReq *hdr, void *data,
147
return ret;
148
}
149
150
-static void restart_co_req(void *opaque)
151
-{
152
- Coroutine *co = opaque;
153
-
154
- qemu_coroutine_enter(co);
155
-}
156
-
157
typedef struct SheepdogReqCo {
158
int sockfd;
159
BlockDriverState *bs;
160
@@ -XXX,XX +XXX,XX @@ typedef struct SheepdogReqCo {
161
unsigned int *rlen;
162
int ret;
163
bool finished;
164
+ Coroutine *co;
165
} SheepdogReqCo;
166
167
+static void restart_co_req(void *opaque)
168
+{
169
+ SheepdogReqCo *srco = opaque;
170
+
171
+ aio_co_wake(srco->co);
172
+}
173
+
174
static coroutine_fn void do_co_req(void *opaque)
175
{
176
int ret;
177
- Coroutine *co;
178
SheepdogReqCo *srco = opaque;
179
int sockfd = srco->sockfd;
180
SheepdogReq *hdr = srco->hdr;
181
@@ -XXX,XX +XXX,XX @@ static coroutine_fn void do_co_req(void *opaque)
182
unsigned int *wlen = srco->wlen;
183
unsigned int *rlen = srco->rlen;
184
185
- co = qemu_coroutine_self();
186
+ srco->co = qemu_coroutine_self();
187
aio_set_fd_handler(srco->aio_context, sockfd, false,
188
- NULL, restart_co_req, NULL, co);
189
+ NULL, restart_co_req, NULL, srco);
190
191
ret = send_co_req(sockfd, hdr, data, wlen);
192
if (ret < 0) {
193
@@ -XXX,XX +XXX,XX @@ static coroutine_fn void do_co_req(void *opaque)
194
}
195
196
aio_set_fd_handler(srco->aio_context, sockfd, false,
197
- restart_co_req, NULL, NULL, co);
198
+ restart_co_req, NULL, NULL, srco);
199
200
ret = qemu_co_recv(sockfd, hdr, sizeof(*hdr));
201
if (ret != sizeof(*hdr)) {
202
@@ -XXX,XX +XXX,XX @@ out:
203
aio_set_fd_handler(srco->aio_context, sockfd, false,
204
NULL, NULL, NULL, NULL);
205
206
+ srco->co = NULL;
207
srco->ret = ret;
208
srco->finished = true;
209
if (srco->bs) {
210
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn aio_read_response(void *opaque)
211
* We've finished all requests which belong to the AIOCB, so
212
* we can switch back to sd_co_readv/writev now.
213
*/
214
- qemu_coroutine_enter(acb->coroutine);
215
+ aio_co_wake(acb->coroutine);
216
}
217
218
return;
219
@@ -XXX,XX +XXX,XX @@ static void co_read_response(void *opaque)
220
s->co_recv = qemu_coroutine_create(aio_read_response, opaque);
221
}
222
223
- qemu_coroutine_enter(s->co_recv);
224
+ aio_co_wake(s->co_recv);
225
}
226
227
static void co_write_request(void *opaque)
228
{
229
BDRVSheepdogState *s = opaque;
230
231
- qemu_coroutine_enter(s->co_send);
232
+ aio_co_wake(s->co_send);
233
}
234
235
/*
236
diff --git a/block/ssh.c b/block/ssh.c
237
index XXXXXXX..XXXXXXX 100644
238
--- a/block/ssh.c
239
+++ b/block/ssh.c
240
@@ -XXX,XX +XXX,XX @@ static void restart_coroutine(void *opaque)
241
242
DPRINTF("co=%p", co);
243
244
- qemu_coroutine_enter(co);
245
+ aio_co_wake(co);
246
}
247
248
-static coroutine_fn void set_fd_handler(BDRVSSHState *s, BlockDriverState *bs)
249
+/* A non-blocking call returned EAGAIN, so yield, ensuring the
250
+ * handlers are set up so that we'll be rescheduled when there is an
251
+ * interesting event on the socket.
252
+ */
253
+static coroutine_fn void co_yield(BDRVSSHState *s, BlockDriverState *bs)
254
{
255
int r;
256
IOHandler *rd_handler = NULL, *wr_handler = NULL;
257
@@ -XXX,XX +XXX,XX @@ static coroutine_fn void set_fd_handler(BDRVSSHState *s, BlockDriverState *bs)
258
259
aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock,
260
false, rd_handler, wr_handler, NULL, co);
261
-}
262
-
263
-static coroutine_fn void clear_fd_handler(BDRVSSHState *s,
264
- BlockDriverState *bs)
265
-{
266
- DPRINTF("s->sock=%d", s->sock);
267
- aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock,
268
- false, NULL, NULL, NULL, NULL);
269
-}
270
-
271
-/* A non-blocking call returned EAGAIN, so yield, ensuring the
272
- * handlers are set up so that we'll be rescheduled when there is an
273
- * interesting event on the socket.
274
- */
275
-static coroutine_fn void co_yield(BDRVSSHState *s, BlockDriverState *bs)
276
-{
277
- set_fd_handler(s, bs);
278
qemu_coroutine_yield();
279
- clear_fd_handler(s, bs);
280
+ DPRINTF("s->sock=%d - back", s->sock);
281
+ aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock, false,
282
+ NULL, NULL, NULL, NULL);
283
}
284
285
/* SFTP has a function `libssh2_sftp_seek64' which seeks to a position
286
diff --git a/block/win32-aio.c b/block/win32-aio.c
287
index XXXXXXX..XXXXXXX 100644
288
--- a/block/win32-aio.c
289
+++ b/block/win32-aio.c
290
@@ -XXX,XX +XXX,XX @@ struct QEMUWin32AIOState {
291
HANDLE hIOCP;
292
EventNotifier e;
293
int count;
294
- bool is_aio_context_attached;
295
+ AioContext *aio_ctx;
296
};
297
298
typedef struct QEMUWin32AIOCB {
299
@@ -XXX,XX +XXX,XX @@ static void win32_aio_process_completion(QEMUWin32AIOState *s,
300
}
301
302
303
+ aio_context_acquire(s->aio_ctx);
304
waiocb->common.cb(waiocb->common.opaque, ret);
305
+ aio_context_release(s->aio_ctx);
306
qemu_aio_unref(waiocb);
307
}
308
309
@@ -XXX,XX +XXX,XX @@ void win32_aio_detach_aio_context(QEMUWin32AIOState *aio,
310
AioContext *old_context)
311
{
312
aio_set_event_notifier(old_context, &aio->e, false, NULL, NULL);
313
- aio->is_aio_context_attached = false;
314
+ aio->aio_ctx = NULL;
315
}
316
317
void win32_aio_attach_aio_context(QEMUWin32AIOState *aio,
318
AioContext *new_context)
319
{
320
- aio->is_aio_context_attached = true;
321
+ aio->aio_ctx = new_context;
322
aio_set_event_notifier(new_context, &aio->e, false,
323
win32_aio_completion_cb, NULL);
324
}
325
@@ -XXX,XX +XXX,XX @@ out_free_state:
326
327
void win32_aio_cleanup(QEMUWin32AIOState *aio)
328
{
329
- assert(!aio->is_aio_context_attached);
330
+ assert(!aio->aio_ctx);
331
CloseHandle(aio->hIOCP);
332
event_notifier_cleanup(&aio->e);
333
g_free(aio);
334
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
335
index XXXXXXX..XXXXXXX 100644
336
--- a/hw/block/virtio-blk.c
337
+++ b/hw/block/virtio-blk.c
338
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_ioctl_complete(void *opaque, int status)
339
{
340
VirtIOBlockIoctlReq *ioctl_req = opaque;
341
VirtIOBlockReq *req = ioctl_req->req;
342
- VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
343
+ VirtIOBlock *s = req->dev;
344
+ VirtIODevice *vdev = VIRTIO_DEVICE(s);
345
struct virtio_scsi_inhdr *scsi;
346
struct sg_io_hdr *hdr;
347
348
@@ -XXX,XX +XXX,XX @@ bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
349
MultiReqBuffer mrb = {};
350
bool progress = false;
351
352
+ aio_context_acquire(blk_get_aio_context(s->blk));
353
blk_io_plug(s->blk);
354
355
do {
356
@@ -XXX,XX +XXX,XX @@ bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
357
}
358
359
blk_io_unplug(s->blk);
360
+ aio_context_release(blk_get_aio_context(s->blk));
361
return progress;
362
}
363
364
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
365
index XXXXXXX..XXXXXXX 100644
366
--- a/hw/scsi/virtio-scsi.c
367
+++ b/hw/scsi/virtio-scsi.c
368
@@ -XXX,XX +XXX,XX @@ bool virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
369
VirtIOSCSIReq *req;
370
bool progress = false;
371
372
+ virtio_scsi_acquire(s);
373
while ((req = virtio_scsi_pop_req(s, vq))) {
374
progress = true;
375
virtio_scsi_handle_ctrl_req(s, req);
376
}
377
+ virtio_scsi_release(s);
378
return progress;
379
}
380
381
@@ -XXX,XX +XXX,XX @@ bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
382
383
QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
384
385
+ virtio_scsi_acquire(s);
386
do {
387
virtio_queue_set_notification(vq, 0);
388
389
@@ -XXX,XX +XXX,XX @@ bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
390
QTAILQ_FOREACH_SAFE(req, &reqs, next, next) {
391
virtio_scsi_handle_cmd_req_submit(s, req);
392
}
393
+ virtio_scsi_release(s);
394
return progress;
395
}
396
397
@@ -XXX,XX +XXX,XX @@ out:
398
399
bool virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
400
{
401
+ virtio_scsi_acquire(s);
402
if (s->events_dropped) {
403
virtio_scsi_push_event(s, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
404
+ virtio_scsi_release(s);
405
return true;
406
}
407
+ virtio_scsi_release(s);
408
return false;
409
}
410
411
diff --git a/util/aio-posix.c b/util/aio-posix.c
412
index XXXXXXX..XXXXXXX 100644
413
--- a/util/aio-posix.c
414
+++ b/util/aio-posix.c
415
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx)
416
(revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
417
aio_node_check(ctx, node->is_external) &&
418
node->io_read) {
419
- aio_context_acquire(ctx);
420
node->io_read(node->opaque);
421
- aio_context_release(ctx);
422
423
/* aio_notify() does not count as progress */
424
if (node->opaque != &ctx->notifier) {
425
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx)
426
(revents & (G_IO_OUT | G_IO_ERR)) &&
427
aio_node_check(ctx, node->is_external) &&
428
node->io_write) {
429
- aio_context_acquire(ctx);
430
node->io_write(node->opaque);
431
- aio_context_release(ctx);
432
progress = true;
433
}
434
435
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
436
start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
437
}
438
439
- aio_context_acquire(ctx);
440
progress = try_poll_mode(ctx, blocking);
441
- aio_context_release(ctx);
442
-
443
if (!progress) {
444
assert(npfd == 0);
445
446
diff --git a/util/aio-win32.c b/util/aio-win32.c
447
index XXXXXXX..XXXXXXX 100644
448
--- a/util/aio-win32.c
449
+++ b/util/aio-win32.c
450
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
451
(revents || event_notifier_get_handle(node->e) == event) &&
452
node->io_notify) {
453
node->pfd.revents = 0;
454
- aio_context_acquire(ctx);
455
node->io_notify(node->e);
456
- aio_context_release(ctx);
457
458
/* aio_notify() does not count as progress */
459
if (node->e != &ctx->notifier) {
460
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
461
(node->io_read || node->io_write)) {
462
node->pfd.revents = 0;
463
if ((revents & G_IO_IN) && node->io_read) {
464
- aio_context_acquire(ctx);
465
node->io_read(node->opaque);
466
- aio_context_release(ctx);
467
progress = true;
468
}
469
if ((revents & G_IO_OUT) && node->io_write) {
470
- aio_context_acquire(ctx);
471
node->io_write(node->opaque);
472
- aio_context_release(ctx);
473
progress = true;
474
}
475
476
--
477
2.9.3
478
479
diff view generated by jsdifflib
New patch
1
From: Paolo Bonzini <pbonzini@redhat.com>
1
2
3
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
4
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
5
Reviewed-by: Fam Zheng <famz@redhat.com>
6
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
7
Message-id: 20170213135235.12274-15-pbonzini@redhat.com
8
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
9
---
10
block/archipelago.c | 3 +++
11
block/blkreplay.c | 2 +-
12
block/block-backend.c | 6 ++++++
13
block/curl.c | 26 ++++++++++++++++++--------
14
block/gluster.c | 9 +--------
15
block/io.c | 6 +++++-
16
block/iscsi.c | 6 +++++-
17
block/linux-aio.c | 15 +++++++++------
18
block/nfs.c | 3 ++-
19
block/null.c | 4 ++++
20
block/qed.c | 3 +++
21
block/rbd.c | 4 ++++
22
dma-helpers.c | 2 ++
23
hw/block/virtio-blk.c | 2 ++
24
hw/scsi/scsi-bus.c | 2 ++
25
util/async.c | 4 ++--
26
util/thread-pool.c | 2 ++
27
17 files changed, 71 insertions(+), 28 deletions(-)
28
29
diff --git a/block/archipelago.c b/block/archipelago.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/block/archipelago.c
32
+++ b/block/archipelago.c
33
@@ -XXX,XX +XXX,XX @@ static void qemu_archipelago_complete_aio(void *opaque)
34
{
35
AIORequestData *reqdata = (AIORequestData *) opaque;
36
ArchipelagoAIOCB *aio_cb = (ArchipelagoAIOCB *) reqdata->aio_cb;
37
+ AioContext *ctx = bdrv_get_aio_context(aio_cb->common.bs);
38
39
+ aio_context_acquire(ctx);
40
aio_cb->common.cb(aio_cb->common.opaque, aio_cb->ret);
41
+ aio_context_release(ctx);
42
aio_cb->status = 0;
43
44
qemu_aio_unref(aio_cb);
45
diff --git a/block/blkreplay.c b/block/blkreplay.c
46
index XXXXXXX..XXXXXXX 100755
47
--- a/block/blkreplay.c
48
+++ b/block/blkreplay.c
49
@@ -XXX,XX +XXX,XX @@ static int64_t blkreplay_getlength(BlockDriverState *bs)
50
static void blkreplay_bh_cb(void *opaque)
51
{
52
Request *req = opaque;
53
- qemu_coroutine_enter(req->co);
54
+ aio_co_wake(req->co);
55
qemu_bh_delete(req->bh);
56
g_free(req);
57
}
58
diff --git a/block/block-backend.c b/block/block-backend.c
59
index XXXXXXX..XXXXXXX 100644
60
--- a/block/block-backend.c
61
+++ b/block/block-backend.c
62
@@ -XXX,XX +XXX,XX @@ int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
63
static void error_callback_bh(void *opaque)
64
{
65
struct BlockBackendAIOCB *acb = opaque;
66
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
67
68
bdrv_dec_in_flight(acb->common.bs);
69
+ aio_context_acquire(ctx);
70
acb->common.cb(acb->common.opaque, acb->ret);
71
+ aio_context_release(ctx);
72
qemu_aio_unref(acb);
73
}
74
75
@@ -XXX,XX +XXX,XX @@ static void blk_aio_complete(BlkAioEmAIOCB *acb)
76
static void blk_aio_complete_bh(void *opaque)
77
{
78
BlkAioEmAIOCB *acb = opaque;
79
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
80
81
assert(acb->has_returned);
82
+ aio_context_acquire(ctx);
83
blk_aio_complete(acb);
84
+ aio_context_release(ctx);
85
}
86
87
static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
88
diff --git a/block/curl.c b/block/curl.c
89
index XXXXXXX..XXXXXXX 100644
90
--- a/block/curl.c
91
+++ b/block/curl.c
92
@@ -XXX,XX +XXX,XX @@ static void curl_readv_bh_cb(void *p)
93
{
94
CURLState *state;
95
int running;
96
+ int ret = -EINPROGRESS;
97
98
CURLAIOCB *acb = p;
99
- BDRVCURLState *s = acb->common.bs->opaque;
100
+ BlockDriverState *bs = acb->common.bs;
101
+ BDRVCURLState *s = bs->opaque;
102
+ AioContext *ctx = bdrv_get_aio_context(bs);
103
104
size_t start = acb->sector_num * BDRV_SECTOR_SIZE;
105
size_t end;
106
107
+ aio_context_acquire(ctx);
108
+
109
// In case we have the requested data already (e.g. read-ahead),
110
// we can just call the callback and be done.
111
switch (curl_find_buf(s, start, acb->nb_sectors * BDRV_SECTOR_SIZE, acb)) {
112
@@ -XXX,XX +XXX,XX @@ static void curl_readv_bh_cb(void *p)
113
qemu_aio_unref(acb);
114
// fall through
115
case FIND_RET_WAIT:
116
- return;
117
+ goto out;
118
default:
119
break;
120
}
121
@@ -XXX,XX +XXX,XX @@ static void curl_readv_bh_cb(void *p)
122
// No cache found, so let's start a new request
123
state = curl_init_state(acb->common.bs, s);
124
if (!state) {
125
- acb->common.cb(acb->common.opaque, -EIO);
126
- qemu_aio_unref(acb);
127
- return;
128
+ ret = -EIO;
129
+ goto out;
130
}
131
132
acb->start = 0;
133
@@ -XXX,XX +XXX,XX @@ static void curl_readv_bh_cb(void *p)
134
state->orig_buf = g_try_malloc(state->buf_len);
135
if (state->buf_len && state->orig_buf == NULL) {
136
curl_clean_state(state);
137
- acb->common.cb(acb->common.opaque, -ENOMEM);
138
- qemu_aio_unref(acb);
139
- return;
140
+ ret = -ENOMEM;
141
+ goto out;
142
}
143
state->acb[0] = acb;
144
145
@@ -XXX,XX +XXX,XX @@ static void curl_readv_bh_cb(void *p)
146
147
/* Tell curl it needs to kick things off */
148
curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
149
+
150
+out:
151
+ if (ret != -EINPROGRESS) {
152
+ acb->common.cb(acb->common.opaque, ret);
153
+ qemu_aio_unref(acb);
154
+ }
155
+ aio_context_release(ctx);
156
}
157
158
static BlockAIOCB *curl_aio_readv(BlockDriverState *bs,
159
diff --git a/block/gluster.c b/block/gluster.c
160
index XXXXXXX..XXXXXXX 100644
161
--- a/block/gluster.c
162
+++ b/block/gluster.c
163
@@ -XXX,XX +XXX,XX @@ static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf,
164
return qemu_gluster_glfs_init(gconf, errp);
165
}
166
167
-static void qemu_gluster_complete_aio(void *opaque)
168
-{
169
- GlusterAIOCB *acb = (GlusterAIOCB *)opaque;
170
-
171
- qemu_coroutine_enter(acb->coroutine);
172
-}
173
-
174
/*
175
* AIO callback routine called from GlusterFS thread.
176
*/
177
@@ -XXX,XX +XXX,XX @@ static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
178
acb->ret = -EIO; /* Partial read/write - fail it */
179
}
180
181
- aio_bh_schedule_oneshot(acb->aio_context, qemu_gluster_complete_aio, acb);
182
+ aio_co_schedule(acb->aio_context, acb->coroutine);
183
}
184
185
static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
186
diff --git a/block/io.c b/block/io.c
187
index XXXXXXX..XXXXXXX 100644
188
--- a/block/io.c
189
+++ b/block/io.c
190
@@ -XXX,XX +XXX,XX @@ static void bdrv_co_drain_bh_cb(void *opaque)
191
bdrv_dec_in_flight(bs);
192
bdrv_drained_begin(bs);
193
data->done = true;
194
- qemu_coroutine_enter(co);
195
+ aio_co_wake(co);
196
}
197
198
static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
199
@@ -XXX,XX +XXX,XX @@ static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
200
static void bdrv_co_em_bh(void *opaque)
201
{
202
BlockAIOCBCoroutine *acb = opaque;
203
+ BlockDriverState *bs = acb->common.bs;
204
+ AioContext *ctx = bdrv_get_aio_context(bs);
205
206
assert(!acb->need_bh);
207
+ aio_context_acquire(ctx);
208
bdrv_co_complete(acb);
209
+ aio_context_release(ctx);
210
}
211
212
static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
213
diff --git a/block/iscsi.c b/block/iscsi.c
214
index XXXXXXX..XXXXXXX 100644
215
--- a/block/iscsi.c
216
+++ b/block/iscsi.c
217
@@ -XXX,XX +XXX,XX @@ static void
218
iscsi_bh_cb(void *p)
219
{
220
IscsiAIOCB *acb = p;
221
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
222
223
qemu_bh_delete(acb->bh);
224
225
g_free(acb->buf);
226
acb->buf = NULL;
227
228
+ aio_context_acquire(ctx);
229
acb->common.cb(acb->common.opaque, acb->status);
230
+ aio_context_release(ctx);
231
232
if (acb->task != NULL) {
233
scsi_free_scsi_task(acb->task);
234
@@ -XXX,XX +XXX,XX @@ iscsi_schedule_bh(IscsiAIOCB *acb)
235
static void iscsi_co_generic_bh_cb(void *opaque)
236
{
237
struct IscsiTask *iTask = opaque;
238
+
239
iTask->complete = 1;
240
- qemu_coroutine_enter(iTask->co);
241
+ aio_co_wake(iTask->co);
242
}
243
244
static void iscsi_retry_timer_expired(void *opaque)
245
diff --git a/block/linux-aio.c b/block/linux-aio.c
246
index XXXXXXX..XXXXXXX 100644
247
--- a/block/linux-aio.c
248
+++ b/block/linux-aio.c
249
@@ -XXX,XX +XXX,XX @@ struct LinuxAioState {
250
io_context_t ctx;
251
EventNotifier e;
252
253
- /* io queue for submit at batch */
254
+ /* io queue for submit at batch. Protected by AioContext lock. */
255
LaioQueue io_q;
256
257
- /* I/O completion processing */
258
+ /* I/O completion processing. Only runs in I/O thread. */
259
QEMUBH *completion_bh;
260
int event_idx;
261
int event_max;
262
@@ -XXX,XX +XXX,XX @@ static inline ssize_t io_event_ret(struct io_event *ev)
263
*/
264
static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
265
{
266
+ LinuxAioState *s = laiocb->ctx;
267
int ret;
268
269
ret = laiocb->ret;
270
@@ -XXX,XX +XXX,XX @@ static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
271
}
272
273
laiocb->ret = ret;
274
+ aio_context_acquire(s->aio_context);
275
if (laiocb->co) {
276
/* If the coroutine is already entered it must be in ioq_submit() and
277
* will notice laio->ret has been filled in when it eventually runs
278
@@ -XXX,XX +XXX,XX @@ static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
279
laiocb->common.cb(laiocb->common.opaque, ret);
280
qemu_aio_unref(laiocb);
281
}
282
+ aio_context_release(s->aio_context);
283
}
284
285
/**
286
@@ -XXX,XX +XXX,XX @@ static void qemu_laio_process_completions(LinuxAioState *s)
287
static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
288
{
289
qemu_laio_process_completions(s);
290
+
291
+ aio_context_acquire(s->aio_context);
292
if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
293
ioq_submit(s);
294
}
295
+ aio_context_release(s->aio_context);
296
}
297
298
static void qemu_laio_completion_bh(void *opaque)
299
@@ -XXX,XX +XXX,XX @@ static void qemu_laio_completion_cb(EventNotifier *e)
300
LinuxAioState *s = container_of(e, LinuxAioState, e);
301
302
if (event_notifier_test_and_clear(&s->e)) {
303
- aio_context_acquire(s->aio_context);
304
qemu_laio_process_completions_and_submit(s);
305
- aio_context_release(s->aio_context);
306
}
307
}
308
309
@@ -XXX,XX +XXX,XX @@ static bool qemu_laio_poll_cb(void *opaque)
310
return false;
311
}
312
313
- aio_context_acquire(s->aio_context);
314
qemu_laio_process_completions_and_submit(s);
315
- aio_context_release(s->aio_context);
316
return true;
317
}
318
319
@@ -XXX,XX +XXX,XX @@ void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
320
{
321
aio_set_event_notifier(old_context, &s->e, false, NULL, NULL);
322
qemu_bh_delete(s->completion_bh);
323
+ s->aio_context = NULL;
324
}
325
326
void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
327
diff --git a/block/nfs.c b/block/nfs.c
328
index XXXXXXX..XXXXXXX 100644
329
--- a/block/nfs.c
330
+++ b/block/nfs.c
331
@@ -XXX,XX +XXX,XX @@ static void nfs_co_init_task(BlockDriverState *bs, NFSRPC *task)
332
static void nfs_co_generic_bh_cb(void *opaque)
333
{
334
NFSRPC *task = opaque;
335
+
336
task->complete = 1;
337
- qemu_coroutine_enter(task->co);
338
+ aio_co_wake(task->co);
339
}
340
341
static void
342
diff --git a/block/null.c b/block/null.c
343
index XXXXXXX..XXXXXXX 100644
344
--- a/block/null.c
345
+++ b/block/null.c
346
@@ -XXX,XX +XXX,XX @@ static const AIOCBInfo null_aiocb_info = {
347
static void null_bh_cb(void *opaque)
348
{
349
NullAIOCB *acb = opaque;
350
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
351
+
352
+ aio_context_acquire(ctx);
353
acb->common.cb(acb->common.opaque, 0);
354
+ aio_context_release(ctx);
355
qemu_aio_unref(acb);
356
}
357
358
diff --git a/block/qed.c b/block/qed.c
359
index XXXXXXX..XXXXXXX 100644
360
--- a/block/qed.c
361
+++ b/block/qed.c
362
@@ -XXX,XX +XXX,XX @@ static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
363
static void qed_aio_complete_bh(void *opaque)
364
{
365
QEDAIOCB *acb = opaque;
366
+ BDRVQEDState *s = acb_to_s(acb);
367
BlockCompletionFunc *cb = acb->common.cb;
368
void *user_opaque = acb->common.opaque;
369
int ret = acb->bh_ret;
370
@@ -XXX,XX +XXX,XX @@ static void qed_aio_complete_bh(void *opaque)
371
qemu_aio_unref(acb);
372
373
/* Invoke callback */
374
+ qed_acquire(s);
375
cb(user_opaque, ret);
376
+ qed_release(s);
377
}
378
379
static void qed_aio_complete(QEDAIOCB *acb, int ret)
380
diff --git a/block/rbd.c b/block/rbd.c
381
index XXXXXXX..XXXXXXX 100644
382
--- a/block/rbd.c
383
+++ b/block/rbd.c
384
@@ -XXX,XX +XXX,XX @@ shutdown:
385
static void qemu_rbd_complete_aio(RADOSCB *rcb)
386
{
387
RBDAIOCB *acb = rcb->acb;
388
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
389
int64_t r;
390
391
r = rcb->ret;
392
@@ -XXX,XX +XXX,XX @@ static void qemu_rbd_complete_aio(RADOSCB *rcb)
393
qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
394
}
395
qemu_vfree(acb->bounce);
396
+
397
+ aio_context_acquire(ctx);
398
acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret));
399
+ aio_context_release(ctx);
400
401
qemu_aio_unref(acb);
402
}
403
diff --git a/dma-helpers.c b/dma-helpers.c
404
index XXXXXXX..XXXXXXX 100644
405
--- a/dma-helpers.c
406
+++ b/dma-helpers.c
407
@@ -XXX,XX +XXX,XX @@ static void dma_blk_cb(void *opaque, int ret)
408
QEMU_ALIGN_DOWN(dbs->iov.size, dbs->align));
409
}
410
411
+ aio_context_acquire(dbs->ctx);
412
dbs->acb = dbs->io_func(dbs->offset, &dbs->iov,
413
dma_blk_cb, dbs, dbs->io_func_opaque);
414
+ aio_context_release(dbs->ctx);
415
assert(dbs->acb);
416
}
417
418
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
419
index XXXXXXX..XXXXXXX 100644
420
--- a/hw/block/virtio-blk.c
421
+++ b/hw/block/virtio-blk.c
422
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_dma_restart_bh(void *opaque)
423
424
s->rq = NULL;
425
426
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
427
while (req) {
428
VirtIOBlockReq *next = req->next;
429
if (virtio_blk_handle_request(req, &mrb)) {
430
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_dma_restart_bh(void *opaque)
431
if (mrb.num_reqs) {
432
virtio_blk_submit_multireq(s->blk, &mrb);
433
}
434
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
435
}
436
437
static void virtio_blk_dma_restart_cb(void *opaque, int running,
438
diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
439
index XXXXXXX..XXXXXXX 100644
440
--- a/hw/scsi/scsi-bus.c
441
+++ b/hw/scsi/scsi-bus.c
442
@@ -XXX,XX +XXX,XX @@ static void scsi_dma_restart_bh(void *opaque)
443
qemu_bh_delete(s->bh);
444
s->bh = NULL;
445
446
+ aio_context_acquire(blk_get_aio_context(s->conf.blk));
447
QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
448
scsi_req_ref(req);
449
if (req->retry) {
450
@@ -XXX,XX +XXX,XX @@ static void scsi_dma_restart_bh(void *opaque)
451
}
452
scsi_req_unref(req);
453
}
454
+ aio_context_release(blk_get_aio_context(s->conf.blk));
455
}
456
457
void scsi_req_retry(SCSIRequest *req)
458
diff --git a/util/async.c b/util/async.c
459
index XXXXXXX..XXXXXXX 100644
460
--- a/util/async.c
461
+++ b/util/async.c
462
@@ -XXX,XX +XXX,XX @@ int aio_bh_poll(AioContext *ctx)
463
ret = 1;
464
}
465
bh->idle = 0;
466
- aio_context_acquire(ctx);
467
aio_bh_call(bh);
468
- aio_context_release(ctx);
469
}
470
if (bh->deleted) {
471
deleted = true;
472
@@ -XXX,XX +XXX,XX @@ static void co_schedule_bh_cb(void *opaque)
473
Coroutine *co = QSLIST_FIRST(&straight);
474
QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
475
trace_aio_co_schedule_bh_cb(ctx, co);
476
+ aio_context_acquire(ctx);
477
qemu_coroutine_enter(co);
478
+ aio_context_release(ctx);
479
}
480
}
481
482
diff --git a/util/thread-pool.c b/util/thread-pool.c
483
index XXXXXXX..XXXXXXX 100644
484
--- a/util/thread-pool.c
485
+++ b/util/thread-pool.c
486
@@ -XXX,XX +XXX,XX @@ static void thread_pool_completion_bh(void *opaque)
487
ThreadPool *pool = opaque;
488
ThreadPoolElement *elem, *next;
489
490
+ aio_context_acquire(pool->ctx);
491
restart:
492
QLIST_FOREACH_SAFE(elem, &pool->head, all, next) {
493
if (elem->state != THREAD_DONE) {
494
@@ -XXX,XX +XXX,XX @@ restart:
495
qemu_aio_unref(elem);
496
}
497
}
498
+ aio_context_release(pool->ctx);
499
}
500
501
static void thread_pool_cancel(BlockAIOCB *acb)
502
--
503
2.9.3
504
505
diff view generated by jsdifflib
New patch
1
From: Paolo Bonzini <pbonzini@redhat.com>
1
2
3
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
4
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
5
Reviewed-by: Fam Zheng <famz@redhat.com>
6
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
7
Message-id: 20170213135235.12274-16-pbonzini@redhat.com
8
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
9
---
10
block/archipelago.c | 3 ---
11
block/block-backend.c | 7 -------
12
block/curl.c | 2 +-
13
block/io.c | 6 +-----
14
block/iscsi.c | 3 ---
15
block/linux-aio.c | 5 +----
16
block/mirror.c | 12 +++++++++---
17
block/null.c | 8 --------
18
block/qed-cluster.c | 2 ++
19
block/qed-table.c | 12 ++++++++++--
20
block/qed.c | 4 ++--
21
block/rbd.c | 4 ----
22
block/win32-aio.c | 3 ---
23
hw/block/virtio-blk.c | 12 +++++++++++-
24
hw/scsi/scsi-disk.c | 15 +++++++++++++++
25
hw/scsi/scsi-generic.c | 20 +++++++++++++++++---
26
util/thread-pool.c | 4 +++-
27
17 files changed, 72 insertions(+), 50 deletions(-)
28
29
diff --git a/block/archipelago.c b/block/archipelago.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/block/archipelago.c
32
+++ b/block/archipelago.c
33
@@ -XXX,XX +XXX,XX @@ static void qemu_archipelago_complete_aio(void *opaque)
34
{
35
AIORequestData *reqdata = (AIORequestData *) opaque;
36
ArchipelagoAIOCB *aio_cb = (ArchipelagoAIOCB *) reqdata->aio_cb;
37
- AioContext *ctx = bdrv_get_aio_context(aio_cb->common.bs);
38
39
- aio_context_acquire(ctx);
40
aio_cb->common.cb(aio_cb->common.opaque, aio_cb->ret);
41
- aio_context_release(ctx);
42
aio_cb->status = 0;
43
44
qemu_aio_unref(aio_cb);
45
diff --git a/block/block-backend.c b/block/block-backend.c
46
index XXXXXXX..XXXXXXX 100644
47
--- a/block/block-backend.c
48
+++ b/block/block-backend.c
49
@@ -XXX,XX +XXX,XX @@ int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
50
static void error_callback_bh(void *opaque)
51
{
52
struct BlockBackendAIOCB *acb = opaque;
53
- AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
54
55
bdrv_dec_in_flight(acb->common.bs);
56
- aio_context_acquire(ctx);
57
acb->common.cb(acb->common.opaque, acb->ret);
58
- aio_context_release(ctx);
59
qemu_aio_unref(acb);
60
}
61
62
@@ -XXX,XX +XXX,XX @@ static void blk_aio_complete(BlkAioEmAIOCB *acb)
63
static void blk_aio_complete_bh(void *opaque)
64
{
65
BlkAioEmAIOCB *acb = opaque;
66
- AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
67
-
68
assert(acb->has_returned);
69
- aio_context_acquire(ctx);
70
blk_aio_complete(acb);
71
- aio_context_release(ctx);
72
}
73
74
static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
75
diff --git a/block/curl.c b/block/curl.c
76
index XXXXXXX..XXXXXXX 100644
77
--- a/block/curl.c
78
+++ b/block/curl.c
79
@@ -XXX,XX +XXX,XX @@ static void curl_readv_bh_cb(void *p)
80
curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
81
82
out:
83
+ aio_context_release(ctx);
84
if (ret != -EINPROGRESS) {
85
acb->common.cb(acb->common.opaque, ret);
86
qemu_aio_unref(acb);
87
}
88
- aio_context_release(ctx);
89
}
90
91
static BlockAIOCB *curl_aio_readv(BlockDriverState *bs,
92
diff --git a/block/io.c b/block/io.c
93
index XXXXXXX..XXXXXXX 100644
94
--- a/block/io.c
95
+++ b/block/io.c
96
@@ -XXX,XX +XXX,XX @@ static void bdrv_co_io_em_complete(void *opaque, int ret)
97
CoroutineIOCompletion *co = opaque;
98
99
co->ret = ret;
100
- qemu_coroutine_enter(co->coroutine);
101
+ aio_co_wake(co->coroutine);
102
}
103
104
static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
105
@@ -XXX,XX +XXX,XX @@ static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
106
static void bdrv_co_em_bh(void *opaque)
107
{
108
BlockAIOCBCoroutine *acb = opaque;
109
- BlockDriverState *bs = acb->common.bs;
110
- AioContext *ctx = bdrv_get_aio_context(bs);
111
112
assert(!acb->need_bh);
113
- aio_context_acquire(ctx);
114
bdrv_co_complete(acb);
115
- aio_context_release(ctx);
116
}
117
118
static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
119
diff --git a/block/iscsi.c b/block/iscsi.c
120
index XXXXXXX..XXXXXXX 100644
121
--- a/block/iscsi.c
122
+++ b/block/iscsi.c
123
@@ -XXX,XX +XXX,XX @@ static void
124
iscsi_bh_cb(void *p)
125
{
126
IscsiAIOCB *acb = p;
127
- AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
128
129
qemu_bh_delete(acb->bh);
130
131
g_free(acb->buf);
132
acb->buf = NULL;
133
134
- aio_context_acquire(ctx);
135
acb->common.cb(acb->common.opaque, acb->status);
136
- aio_context_release(ctx);
137
138
if (acb->task != NULL) {
139
scsi_free_scsi_task(acb->task);
140
diff --git a/block/linux-aio.c b/block/linux-aio.c
141
index XXXXXXX..XXXXXXX 100644
142
--- a/block/linux-aio.c
143
+++ b/block/linux-aio.c
144
@@ -XXX,XX +XXX,XX @@ static inline ssize_t io_event_ret(struct io_event *ev)
145
*/
146
static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
147
{
148
- LinuxAioState *s = laiocb->ctx;
149
int ret;
150
151
ret = laiocb->ret;
152
@@ -XXX,XX +XXX,XX @@ static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
153
}
154
155
laiocb->ret = ret;
156
- aio_context_acquire(s->aio_context);
157
if (laiocb->co) {
158
/* If the coroutine is already entered it must be in ioq_submit() and
159
* will notice laio->ret has been filled in when it eventually runs
160
@@ -XXX,XX +XXX,XX @@ static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
161
* that!
162
*/
163
if (!qemu_coroutine_entered(laiocb->co)) {
164
- qemu_coroutine_enter(laiocb->co);
165
+ aio_co_wake(laiocb->co);
166
}
167
} else {
168
laiocb->common.cb(laiocb->common.opaque, ret);
169
qemu_aio_unref(laiocb);
170
}
171
- aio_context_release(s->aio_context);
172
}
173
174
/**
175
diff --git a/block/mirror.c b/block/mirror.c
176
index XXXXXXX..XXXXXXX 100644
177
--- a/block/mirror.c
178
+++ b/block/mirror.c
179
@@ -XXX,XX +XXX,XX @@ static void mirror_write_complete(void *opaque, int ret)
180
{
181
MirrorOp *op = opaque;
182
MirrorBlockJob *s = op->s;
183
+
184
+ aio_context_acquire(blk_get_aio_context(s->common.blk));
185
if (ret < 0) {
186
BlockErrorAction action;
187
188
@@ -XXX,XX +XXX,XX @@ static void mirror_write_complete(void *opaque, int ret)
189
}
190
}
191
mirror_iteration_done(op, ret);
192
+ aio_context_release(blk_get_aio_context(s->common.blk));
193
}
194
195
static void mirror_read_complete(void *opaque, int ret)
196
{
197
MirrorOp *op = opaque;
198
MirrorBlockJob *s = op->s;
199
+
200
+ aio_context_acquire(blk_get_aio_context(s->common.blk));
201
if (ret < 0) {
202
BlockErrorAction action;
203
204
@@ -XXX,XX +XXX,XX @@ static void mirror_read_complete(void *opaque, int ret)
205
}
206
207
mirror_iteration_done(op, ret);
208
- return;
209
+ } else {
210
+ blk_aio_pwritev(s->target, op->sector_num * BDRV_SECTOR_SIZE, &op->qiov,
211
+ 0, mirror_write_complete, op);
212
}
213
- blk_aio_pwritev(s->target, op->sector_num * BDRV_SECTOR_SIZE, &op->qiov,
214
- 0, mirror_write_complete, op);
215
+ aio_context_release(blk_get_aio_context(s->common.blk));
216
}
217
218
static inline void mirror_clip_sectors(MirrorBlockJob *s,
219
diff --git a/block/null.c b/block/null.c
220
index XXXXXXX..XXXXXXX 100644
221
--- a/block/null.c
222
+++ b/block/null.c
223
@@ -XXX,XX +XXX,XX @@ static const AIOCBInfo null_aiocb_info = {
224
static void null_bh_cb(void *opaque)
225
{
226
NullAIOCB *acb = opaque;
227
- AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
228
-
229
- aio_context_acquire(ctx);
230
acb->common.cb(acb->common.opaque, 0);
231
- aio_context_release(ctx);
232
qemu_aio_unref(acb);
233
}
234
235
static void null_timer_cb(void *opaque)
236
{
237
NullAIOCB *acb = opaque;
238
- AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
239
-
240
- aio_context_acquire(ctx);
241
acb->common.cb(acb->common.opaque, 0);
242
- aio_context_release(ctx);
243
timer_deinit(&acb->timer);
244
qemu_aio_unref(acb);
245
}
246
diff --git a/block/qed-cluster.c b/block/qed-cluster.c
247
index XXXXXXX..XXXXXXX 100644
248
--- a/block/qed-cluster.c
249
+++ b/block/qed-cluster.c
250
@@ -XXX,XX +XXX,XX @@ static void qed_find_cluster_cb(void *opaque, int ret)
251
unsigned int index;
252
unsigned int n;
253
254
+ qed_acquire(s);
255
if (ret) {
256
goto out;
257
}
258
@@ -XXX,XX +XXX,XX @@ static void qed_find_cluster_cb(void *opaque, int ret)
259
260
out:
261
find_cluster_cb->cb(find_cluster_cb->opaque, ret, offset, len);
262
+ qed_release(s);
263
g_free(find_cluster_cb);
264
}
265
266
diff --git a/block/qed-table.c b/block/qed-table.c
267
index XXXXXXX..XXXXXXX 100644
268
--- a/block/qed-table.c
269
+++ b/block/qed-table.c
270
@@ -XXX,XX +XXX,XX @@ static void qed_read_table_cb(void *opaque, int ret)
271
{
272
QEDReadTableCB *read_table_cb = opaque;
273
QEDTable *table = read_table_cb->table;
274
+ BDRVQEDState *s = read_table_cb->s;
275
int noffsets = read_table_cb->qiov.size / sizeof(uint64_t);
276
int i;
277
278
@@ -XXX,XX +XXX,XX @@ static void qed_read_table_cb(void *opaque, int ret)
279
}
280
281
/* Byteswap offsets */
282
+ qed_acquire(s);
283
for (i = 0; i < noffsets; i++) {
284
table->offsets[i] = le64_to_cpu(table->offsets[i]);
285
}
286
+ qed_release(s);
287
288
out:
289
/* Completion */
290
- trace_qed_read_table_cb(read_table_cb->s, read_table_cb->table, ret);
291
+ trace_qed_read_table_cb(s, read_table_cb->table, ret);
292
gencb_complete(&read_table_cb->gencb, ret);
293
}
294
295
@@ -XXX,XX +XXX,XX @@ typedef struct {
296
static void qed_write_table_cb(void *opaque, int ret)
297
{
298
QEDWriteTableCB *write_table_cb = opaque;
299
+ BDRVQEDState *s = write_table_cb->s;
300
301
- trace_qed_write_table_cb(write_table_cb->s,
302
+ trace_qed_write_table_cb(s,
303
write_table_cb->orig_table,
304
write_table_cb->flush,
305
ret);
306
@@ -XXX,XX +XXX,XX @@ static void qed_write_table_cb(void *opaque, int ret)
307
if (write_table_cb->flush) {
308
/* We still need to flush first */
309
write_table_cb->flush = false;
310
+ qed_acquire(s);
311
bdrv_aio_flush(write_table_cb->s->bs, qed_write_table_cb,
312
write_table_cb);
313
+ qed_release(s);
314
return;
315
}
316
317
@@ -XXX,XX +XXX,XX @@ static void qed_read_l2_table_cb(void *opaque, int ret)
318
CachedL2Table *l2_table = request->l2_table;
319
uint64_t l2_offset = read_l2_table_cb->l2_offset;
320
321
+ qed_acquire(s);
322
if (ret) {
323
/* can't trust loaded L2 table anymore */
324
qed_unref_l2_cache_entry(l2_table);
325
@@ -XXX,XX +XXX,XX @@ static void qed_read_l2_table_cb(void *opaque, int ret)
326
request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
327
assert(request->l2_table != NULL);
328
}
329
+ qed_release(s);
330
331
gencb_complete(&read_l2_table_cb->gencb, ret);
332
}
333
diff --git a/block/qed.c b/block/qed.c
334
index XXXXXXX..XXXXXXX 100644
335
--- a/block/qed.c
336
+++ b/block/qed.c
337
@@ -XXX,XX +XXX,XX @@ static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t l
338
}
339
340
if (cb->co) {
341
- qemu_coroutine_enter(cb->co);
342
+ aio_co_wake(cb->co);
343
}
344
}
345
346
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn qed_co_pwrite_zeroes_cb(void *opaque, int ret)
347
cb->done = true;
348
cb->ret = ret;
349
if (cb->co) {
350
- qemu_coroutine_enter(cb->co);
351
+ aio_co_wake(cb->co);
352
}
353
}
354
355
diff --git a/block/rbd.c b/block/rbd.c
356
index XXXXXXX..XXXXXXX 100644
357
--- a/block/rbd.c
358
+++ b/block/rbd.c
359
@@ -XXX,XX +XXX,XX @@ shutdown:
360
static void qemu_rbd_complete_aio(RADOSCB *rcb)
361
{
362
RBDAIOCB *acb = rcb->acb;
363
- AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
364
int64_t r;
365
366
r = rcb->ret;
367
@@ -XXX,XX +XXX,XX @@ static void qemu_rbd_complete_aio(RADOSCB *rcb)
368
qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
369
}
370
qemu_vfree(acb->bounce);
371
-
372
- aio_context_acquire(ctx);
373
acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret));
374
- aio_context_release(ctx);
375
376
qemu_aio_unref(acb);
377
}
378
diff --git a/block/win32-aio.c b/block/win32-aio.c
379
index XXXXXXX..XXXXXXX 100644
380
--- a/block/win32-aio.c
381
+++ b/block/win32-aio.c
382
@@ -XXX,XX +XXX,XX @@ static void win32_aio_process_completion(QEMUWin32AIOState *s,
383
qemu_vfree(waiocb->buf);
384
}
385
386
-
387
- aio_context_acquire(s->aio_ctx);
388
waiocb->common.cb(waiocb->common.opaque, ret);
389
- aio_context_release(s->aio_ctx);
390
qemu_aio_unref(waiocb);
391
}
392
393
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
394
index XXXXXXX..XXXXXXX 100644
395
--- a/hw/block/virtio-blk.c
396
+++ b/hw/block/virtio-blk.c
397
@@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
398
static void virtio_blk_rw_complete(void *opaque, int ret)
399
{
400
VirtIOBlockReq *next = opaque;
401
+ VirtIOBlock *s = next->dev;
402
403
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
404
while (next) {
405
VirtIOBlockReq *req = next;
406
next = req->mr_next;
407
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_rw_complete(void *opaque, int ret)
408
block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
409
virtio_blk_free_request(req);
410
}
411
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
412
}
413
414
static void virtio_blk_flush_complete(void *opaque, int ret)
415
{
416
VirtIOBlockReq *req = opaque;
417
+ VirtIOBlock *s = req->dev;
418
419
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
420
if (ret) {
421
if (virtio_blk_handle_rw_error(req, -ret, 0)) {
422
- return;
423
+ goto out;
424
}
425
}
426
427
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
428
block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
429
virtio_blk_free_request(req);
430
+
431
+out:
432
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
433
}
434
435
#ifdef __linux__
436
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_ioctl_complete(void *opaque, int status)
437
virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len);
438
439
out:
440
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
441
virtio_blk_req_complete(req, status);
442
virtio_blk_free_request(req);
443
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
444
g_free(ioctl_req);
445
}
446
447
diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
448
index XXXXXXX..XXXXXXX 100644
449
--- a/hw/scsi/scsi-disk.c
450
+++ b/hw/scsi/scsi-disk.c
451
@@ -XXX,XX +XXX,XX @@ static void scsi_aio_complete(void *opaque, int ret)
452
453
assert(r->req.aiocb != NULL);
454
r->req.aiocb = NULL;
455
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
456
if (scsi_disk_req_check_error(r, ret, true)) {
457
goto done;
458
}
459
@@ -XXX,XX +XXX,XX @@ static void scsi_aio_complete(void *opaque, int ret)
460
scsi_req_complete(&r->req, GOOD);
461
462
done:
463
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
464
scsi_req_unref(&r->req);
465
}
466
467
@@ -XXX,XX +XXX,XX @@ static void scsi_dma_complete(void *opaque, int ret)
468
assert(r->req.aiocb != NULL);
469
r->req.aiocb = NULL;
470
471
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
472
if (ret < 0) {
473
block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
474
} else {
475
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
476
}
477
scsi_dma_complete_noio(r, ret);
478
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
479
}
480
481
static void scsi_read_complete(void * opaque, int ret)
482
@@ -XXX,XX +XXX,XX @@ static void scsi_read_complete(void * opaque, int ret)
483
484
assert(r->req.aiocb != NULL);
485
r->req.aiocb = NULL;
486
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
487
if (scsi_disk_req_check_error(r, ret, true)) {
488
goto done;
489
}
490
@@ -XXX,XX +XXX,XX @@ static void scsi_read_complete(void * opaque, int ret)
491
492
done:
493
scsi_req_unref(&r->req);
494
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
495
}
496
497
/* Actually issue a read to the block device. */
498
@@ -XXX,XX +XXX,XX @@ static void scsi_do_read_cb(void *opaque, int ret)
499
assert (r->req.aiocb != NULL);
500
r->req.aiocb = NULL;
501
502
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
503
if (ret < 0) {
504
block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
505
} else {
506
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
507
}
508
scsi_do_read(opaque, ret);
509
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
510
}
511
512
/* Read more data from scsi device into buffer. */
513
@@ -XXX,XX +XXX,XX @@ static void scsi_write_complete(void * opaque, int ret)
514
assert (r->req.aiocb != NULL);
515
r->req.aiocb = NULL;
516
517
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
518
if (ret < 0) {
519
block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
520
} else {
521
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
522
}
523
scsi_write_complete_noio(r, ret);
524
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
525
}
526
527
static void scsi_write_data(SCSIRequest *req)
528
@@ -XXX,XX +XXX,XX @@ static void scsi_unmap_complete(void *opaque, int ret)
529
{
530
UnmapCBData *data = opaque;
531
SCSIDiskReq *r = data->r;
532
+ SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
533
534
assert(r->req.aiocb != NULL);
535
r->req.aiocb = NULL;
536
537
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
538
scsi_unmap_complete_noio(data, ret);
539
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
540
}
541
542
static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
543
@@ -XXX,XX +XXX,XX @@ static void scsi_write_same_complete(void *opaque, int ret)
544
545
assert(r->req.aiocb != NULL);
546
r->req.aiocb = NULL;
547
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
548
if (scsi_disk_req_check_error(r, ret, true)) {
549
goto done;
550
}
551
@@ -XXX,XX +XXX,XX @@ done:
552
scsi_req_unref(&r->req);
553
qemu_vfree(data->iov.iov_base);
554
g_free(data);
555
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
556
}
557
558
static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
559
diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c
560
index XXXXXXX..XXXXXXX 100644
561
--- a/hw/scsi/scsi-generic.c
562
+++ b/hw/scsi/scsi-generic.c
563
@@ -XXX,XX +XXX,XX @@ done:
564
static void scsi_command_complete(void *opaque, int ret)
565
{
566
SCSIGenericReq *r = (SCSIGenericReq *)opaque;
567
+ SCSIDevice *s = r->req.dev;
568
569
assert(r->req.aiocb != NULL);
570
r->req.aiocb = NULL;
571
+
572
+ aio_context_acquire(blk_get_aio_context(s->conf.blk));
573
scsi_command_complete_noio(r, ret);
574
+ aio_context_release(blk_get_aio_context(s->conf.blk));
575
}
576
577
static int execute_command(BlockBackend *blk,
578
@@ -XXX,XX +XXX,XX @@ static void scsi_read_complete(void * opaque, int ret)
579
assert(r->req.aiocb != NULL);
580
r->req.aiocb = NULL;
581
582
+ aio_context_acquire(blk_get_aio_context(s->conf.blk));
583
+
584
if (ret || r->req.io_canceled) {
585
scsi_command_complete_noio(r, ret);
586
- return;
587
+ goto done;
588
}
589
590
len = r->io_header.dxfer_len - r->io_header.resid;
591
@@ -XXX,XX +XXX,XX @@ static void scsi_read_complete(void * opaque, int ret)
592
r->len = -1;
593
if (len == 0) {
594
scsi_command_complete_noio(r, 0);
595
- return;
596
+ goto done;
597
}
598
599
/* Snoop READ CAPACITY output to set the blocksize. */
600
@@ -XXX,XX +XXX,XX @@ static void scsi_read_complete(void * opaque, int ret)
601
}
602
scsi_req_data(&r->req, len);
603
scsi_req_unref(&r->req);
604
+
605
+done:
606
+ aio_context_release(blk_get_aio_context(s->conf.blk));
607
}
608
609
/* Read more data from scsi device into buffer. */
610
@@ -XXX,XX +XXX,XX @@ static void scsi_write_complete(void * opaque, int ret)
611
assert(r->req.aiocb != NULL);
612
r->req.aiocb = NULL;
613
614
+ aio_context_acquire(blk_get_aio_context(s->conf.blk));
615
+
616
if (ret || r->req.io_canceled) {
617
scsi_command_complete_noio(r, ret);
618
- return;
619
+ goto done;
620
}
621
622
if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
623
@@ -XXX,XX +XXX,XX @@ static void scsi_write_complete(void * opaque, int ret)
624
}
625
626
scsi_command_complete_noio(r, ret);
627
+
628
+done:
629
+ aio_context_release(blk_get_aio_context(s->conf.blk));
630
}
631
632
/* Write data to a scsi device. Returns nonzero on failure.
633
diff --git a/util/thread-pool.c b/util/thread-pool.c
634
index XXXXXXX..XXXXXXX 100644
635
--- a/util/thread-pool.c
636
+++ b/util/thread-pool.c
637
@@ -XXX,XX +XXX,XX @@ restart:
638
*/
639
qemu_bh_schedule(pool->completion_bh);
640
641
+ aio_context_release(pool->ctx);
642
elem->common.cb(elem->common.opaque, elem->ret);
643
+ aio_context_acquire(pool->ctx);
644
qemu_aio_unref(elem);
645
goto restart;
646
} else {
647
@@ -XXX,XX +XXX,XX @@ static void thread_pool_co_cb(void *opaque, int ret)
648
ThreadPoolCo *co = opaque;
649
650
co->ret = ret;
651
- qemu_coroutine_enter(co->co);
652
+ aio_co_wake(co->co);
653
}
654
655
int coroutine_fn thread_pool_submit_co(ThreadPool *pool, ThreadPoolFunc *func,
656
--
657
2.9.3
658
659
diff view generated by jsdifflib
1
From: Su Hang <suhang16@mails.ucas.ac.cn>
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
only remove brackets that wrap `return` statements' content.
3
This patch prepares for the removal of unnecessary lockcnt inc/dec pairs.
4
Extract the dispatching loop for file descriptor handlers into a new
5
function aio_dispatch_handlers, and then inline aio_dispatch into
6
aio_poll.
4
7
5
use `perl -pi -e "s/return \((.*?)\);/return \1;/g" util/uri.c`
8
aio_dispatch can now become void.
6
to remove patterns like this: "return (1);"
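To make the aio_dispatch reshaping easier to follow before reading the scattered
hunks below, the function roughly ends up as (condensed from the util/aio-posix.c
hunk in this patch, not a verbatim copy):

void aio_dispatch(AioContext *ctx)
{
    /* bottom halves first, then fd handlers under list_lock, then timers;
     * the bool return value is gone because aio_poll() no longer calls this. */
    aio_bh_poll(ctx);

    qemu_lockcnt_inc(&ctx->list_lock);
    aio_dispatch_handlers(ctx);
    qemu_lockcnt_dec(&ctx->list_lock);

    timerlistgroup_run_timers(&ctx->tlg);
}

aio_poll() gets the equivalent sequence inlined, which is what prepares for removing
the unnecessary lockcnt inc/dec pairs mentioned above.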
7
9
8
Signed-off-by: Su Hang <suhang16@mails.ucas.ac.cn>
10
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
9
Reviewed-by: Thomas Huth <thuth@redhat.com>
11
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
10
Message-id: 1519533358-13759-3-git-send-email-suhang16@mails.ucas.ac.cn
12
Reviewed-by: Fam Zheng <famz@redhat.com>
13
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
14
Message-id: 20170213135235.12274-17-pbonzini@redhat.com
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
15
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
16
---
13
util/uri.c | 160 ++++++++++++++++++++++++++++++-------------------------------
17
include/block/aio.h | 6 +-----
14
1 file changed, 80 insertions(+), 80 deletions(-)
18
util/aio-posix.c | 44 ++++++++++++++------------------------------
19
util/aio-win32.c | 13 ++++---------
20
util/async.c | 2 +-
21
4 files changed, 20 insertions(+), 45 deletions(-)
15
22
16
diff --git a/util/uri.c b/util/uri.c
23
diff --git a/include/block/aio.h b/include/block/aio.h
17
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
18
--- a/util/uri.c
25
--- a/include/block/aio.h
19
+++ b/util/uri.c
26
+++ b/include/block/aio.h
20
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_scheme(URI *uri, const char **str)
27
@@ -XXX,XX +XXX,XX @@ bool aio_pending(AioContext *ctx);
21
const char *cur;
28
/* Dispatch any pending callbacks from the GSource attached to the AioContext.
22
29
*
23
if (str == NULL)
30
* This is used internally in the implementation of the GSource.
24
- return (-1);
31
- *
25
+ return -1;
32
- * @dispatch_fds: true to process fds, false to skip them
26
33
- * (can be used as an optimization by callers that know there
27
cur = *str;
34
- * are no fds ready)
28
if (!ISA_ALPHA(cur))
35
*/
29
- return (2);
36
-bool aio_dispatch(AioContext *ctx, bool dispatch_fds);
30
+ return 2;
37
+void aio_dispatch(AioContext *ctx);
31
cur++;
38
32
while (ISA_ALPHA(cur) || ISA_DIGIT(cur) || (*cur == '+') || (*cur == '-') ||
39
/* Progress in completing AIO work to occur. This can issue new pending
33
(*cur == '.'))
40
* aio as a result of executing I/O completion or bh callbacks.
34
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_scheme(URI *uri, const char **str)
41
diff --git a/util/aio-posix.c b/util/aio-posix.c
35
uri->scheme = g_strndup(*str, cur - *str);
42
index XXXXXXX..XXXXXXX 100644
36
}
43
--- a/util/aio-posix.c
37
*str = cur;
44
+++ b/util/aio-posix.c
38
- return (0);
45
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx)
39
+ return 0;
46
AioHandler *node, *tmp;
40
}
47
bool progress = false;
41
48
42
/**
49
- /*
43
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_fragment(URI *uri, const char **str)
50
- * We have to walk very carefully in case aio_set_fd_handler is
44
const char *cur;
51
- * called while we're walking.
45
52
- */
46
if (str == NULL)
53
- qemu_lockcnt_inc(&ctx->list_lock);
47
- return (-1);
54
-
48
+ return -1;
55
QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
49
56
int revents;
50
cur = *str;
57
51
58
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx)
52
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_fragment(URI *uri, const char **str)
53
uri->fragment = uri_string_unescape(*str, cur - *str, NULL);
54
}
55
*str = cur;
56
- return (0);
57
+ return 0;
58
}
59
60
/**
61
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_query(URI *uri, const char **str)
62
const char *cur;
63
64
if (str == NULL)
65
- return (-1);
66
+ return -1;
67
68
cur = *str;
69
70
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_query(URI *uri, const char **str)
71
uri->query = g_strndup(*str, cur - *str);
72
}
73
*str = cur;
74
- return (0);
75
+ return 0;
76
}
77
78
/**
79
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_user_info(URI *uri, const char **str)
80
uri->user = uri_string_unescape(*str, cur - *str, NULL);
81
}
82
*str = cur;
83
- return (0);
84
+ return 0;
85
}
86
- return (1);
87
+ return 1;
88
}
89
90
/**
91
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_dec_octet(const char **str)
92
const char *cur = *str;
93
94
if (!(ISA_DIGIT(cur)))
95
- return (1);
96
+ return 1;
97
if (!ISA_DIGIT(cur + 1))
98
cur++;
99
else if ((*cur != '0') && (ISA_DIGIT(cur + 1)) && (!ISA_DIGIT(cur + 2)))
100
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_dec_octet(const char **str)
101
(*(cur + 1) <= '5'))
102
cur += 3;
103
else
104
- return (1);
105
+ return 1;
106
*str = cur;
107
- return (0);
108
+ return 0;
109
}
110
/**
111
* rfc3986_parse_host:
112
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_host(URI *uri, const char **str)
113
while ((*cur != ']') && (*cur != 0))
114
cur++;
115
if (*cur != ']')
116
- return (1);
117
+ return 1;
118
cur++;
119
goto found;
120
}
121
@@ -XXX,XX +XXX,XX @@ found:
122
uri->server = NULL;
123
}
124
*str = cur;
125
- return (0);
126
+ return 0;
127
}
128
129
/**
130
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_authority(URI *uri, const char **str)
131
cur++;
132
ret = rfc3986_parse_host(uri, &cur);
133
if (ret != 0)
134
- return (ret);
135
+ return ret;
136
if (*cur == ':') {
137
cur++;
138
ret = rfc3986_parse_port(uri, &cur);
139
if (ret != 0)
140
- return (ret);
141
+ return ret;
142
}
143
*str = cur;
144
- return (0);
145
+ return 0;
146
}
147
148
/**
149
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_segment(const char **str, char forbid, int empty)
150
cur = *str;
151
if (!ISA_PCHAR(cur)) {
152
if (empty)
153
- return (0);
154
- return (1);
155
+ return 0;
156
+ return 1;
157
}
158
while (ISA_PCHAR(cur) && (*cur != forbid))
159
NEXT(cur);
160
*str = cur;
161
- return (0);
162
+ return 0;
163
}
164
165
/**
166
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_path_ab_empty(URI *uri, const char **str)
167
cur++;
168
ret = rfc3986_parse_segment(&cur, 0, 1);
169
if (ret != 0)
170
- return (ret);
171
+ return ret;
172
}
173
if (uri != NULL) {
174
g_free(uri->path);
175
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_path_ab_empty(URI *uri, const char **str)
176
}
59
}
177
}
60
}
178
*str = cur;
61
179
- return (0);
62
- qemu_lockcnt_dec(&ctx->list_lock);
180
+ return 0;
63
return progress;
181
}
64
}
182
65
183
/**
66
-/*
184
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_path_absolute(URI *uri, const char **str)
67
- * Note that dispatch_fds == false has the side-effect of post-poning the
185
cur = *str;
68
- * freeing of deleted handlers.
186
69
- */
187
if (*cur != '/')
70
-bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
188
- return (1);
71
+void aio_dispatch(AioContext *ctx)
189
+ return 1;
72
{
190
cur++;
73
- bool progress;
191
ret = rfc3986_parse_segment(&cur, 0, 0);
74
+ aio_bh_poll(ctx);
192
if (ret == 0) {
75
193
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_path_absolute(URI *uri, const char **str)
76
- /*
194
cur++;
77
- * If there are callbacks left that have been queued, we need to call them.
195
ret = rfc3986_parse_segment(&cur, 0, 1);
78
- * Do not call select in this case, because it is possible that the caller
196
if (ret != 0)
79
- * does not need a complete flush (as is the case for aio_poll loops).
197
- return (ret);
80
- */
198
+ return ret;
81
- progress = aio_bh_poll(ctx);
199
}
82
+ qemu_lockcnt_inc(&ctx->list_lock);
83
+ aio_dispatch_handlers(ctx);
84
+ qemu_lockcnt_dec(&ctx->list_lock);
85
86
- if (dispatch_fds) {
87
- progress |= aio_dispatch_handlers(ctx);
88
- }
89
-
90
- /* Run our timers */
91
- progress |= timerlistgroup_run_timers(&ctx->tlg);
92
-
93
- return progress;
94
+ timerlistgroup_run_timers(&ctx->tlg);
95
}
96
97
/* These thread-local variables are used only in a small part of aio_poll
98
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
99
npfd = 0;
100
qemu_lockcnt_dec(&ctx->list_lock);
101
102
- /* Run dispatch even if there were no readable fds to run timers */
103
- if (aio_dispatch(ctx, ret > 0)) {
104
- progress = true;
105
+ progress |= aio_bh_poll(ctx);
106
+
107
+ if (ret > 0) {
108
+ qemu_lockcnt_inc(&ctx->list_lock);
109
+ progress |= aio_dispatch_handlers(ctx);
110
+ qemu_lockcnt_dec(&ctx->list_lock);
200
}
111
}
201
if (uri != NULL) {
112
202
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_path_absolute(URI *uri, const char **str)
113
+ progress |= timerlistgroup_run_timers(&ctx->tlg);
203
}
114
+
204
}
115
return progress;
205
*str = cur;
206
- return (0);
207
+ return 0;
208
}
116
}
209
117
210
/**
118
diff --git a/util/aio-win32.c b/util/aio-win32.c
211
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_path_rootless(URI *uri, const char **str)
119
index XXXXXXX..XXXXXXX 100644
212
120
--- a/util/aio-win32.c
213
ret = rfc3986_parse_segment(&cur, 0, 0);
121
+++ b/util/aio-win32.c
214
if (ret != 0)
122
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
215
- return (ret);
123
return progress;
216
+ return ret;
217
while (*cur == '/') {
218
cur++;
219
ret = rfc3986_parse_segment(&cur, 0, 1);
220
if (ret != 0)
221
- return (ret);
222
+ return ret;
223
}
224
if (uri != NULL) {
225
g_free(uri->path);
226
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_path_rootless(URI *uri, const char **str)
227
}
228
}
229
*str = cur;
230
- return (0);
231
+ return 0;
232
}
124
}
233
125
234
/**
126
-bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
235
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_path_no_scheme(URI *uri, const char **str)
127
+void aio_dispatch(AioContext *ctx)
236
128
{
237
ret = rfc3986_parse_segment(&cur, ':', 0);
129
- bool progress;
238
if (ret != 0)
130
-
239
- return (ret);
131
- progress = aio_bh_poll(ctx);
240
+ return ret;
132
- if (dispatch_fds) {
241
while (*cur == '/') {
133
- progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
242
cur++;
134
- }
243
ret = rfc3986_parse_segment(&cur, 0, 1);
135
- progress |= timerlistgroup_run_timers(&ctx->tlg);
244
if (ret != 0)
136
- return progress;
245
- return (ret);
137
+ aio_bh_poll(ctx);
246
+ return ret;
138
+ aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
247
}
139
+ timerlistgroup_run_timers(&ctx->tlg);
248
if (uri != NULL) {
249
g_free(uri->path);
250
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_path_no_scheme(URI *uri, const char **str)
251
}
252
}
253
*str = cur;
254
- return (0);
255
+ return 0;
256
}
140
}
257
141
258
/**
142
bool aio_poll(AioContext *ctx, bool blocking)
259
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_hier_part(URI *uri, const char **str)
143
diff --git a/util/async.c b/util/async.c
260
cur += 2;
144
index XXXXXXX..XXXXXXX 100644
261
ret = rfc3986_parse_authority(uri, &cur);
145
--- a/util/async.c
262
if (ret != 0)
146
+++ b/util/async.c
263
- return (ret);
147
@@ -XXX,XX +XXX,XX @@ aio_ctx_dispatch(GSource *source,
264
+ return ret;
148
AioContext *ctx = (AioContext *) source;
265
ret = rfc3986_parse_path_ab_empty(uri, &cur);
149
266
if (ret != 0)
150
assert(callback == NULL);
267
- return (ret);
151
- aio_dispatch(ctx, true);
268
+ return ret;
152
+ aio_dispatch(ctx);
269
*str = cur;
153
return true;
270
- return (0);
271
+ return 0;
272
} else if (*cur == '/') {
273
ret = rfc3986_parse_path_absolute(uri, &cur);
274
if (ret != 0)
275
- return (ret);
276
+ return ret;
277
} else if (ISA_PCHAR(cur)) {
278
ret = rfc3986_parse_path_rootless(uri, &cur);
279
if (ret != 0)
280
- return (ret);
281
+ return ret;
282
} else {
283
/* path-empty is effectively empty */
284
if (uri != NULL) {
285
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_hier_part(URI *uri, const char **str)
286
}
287
}
288
*str = cur;
289
- return (0);
290
+ return 0;
291
}
154
}
292
155
293
/**
294
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_relative_ref(URI *uri, const char *str)
295
str += 2;
296
ret = rfc3986_parse_authority(uri, &str);
297
if (ret != 0)
298
- return (ret);
299
+ return ret;
300
ret = rfc3986_parse_path_ab_empty(uri, &str);
301
if (ret != 0)
302
- return (ret);
303
+ return ret;
304
} else if (*str == '/') {
305
ret = rfc3986_parse_path_absolute(uri, &str);
306
if (ret != 0)
307
- return (ret);
308
+ return ret;
309
} else if (ISA_PCHAR(str)) {
310
ret = rfc3986_parse_path_no_scheme(uri, &str);
311
if (ret != 0)
312
- return (ret);
313
+ return ret;
314
} else {
315
/* path-empty is effectively empty */
316
if (uri != NULL) {
317
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_relative_ref(URI *uri, const char *str)
318
str++;
319
ret = rfc3986_parse_query(uri, &str);
320
if (ret != 0)
321
- return (ret);
322
+ return ret;
323
}
324
if (*str == '#') {
325
str++;
326
ret = rfc3986_parse_fragment(uri, &str);
327
if (ret != 0)
328
- return (ret);
329
+ return ret;
330
}
331
if (*str != 0) {
332
uri_clean(uri);
333
- return (1);
334
+ return 1;
335
}
336
- return (0);
337
+ return 0;
338
}
339
340
/**
341
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse(URI *uri, const char *str)
342
343
ret = rfc3986_parse_scheme(uri, &str);
344
if (ret != 0)
345
- return (ret);
346
+ return ret;
347
if (*str != ':') {
348
- return (1);
349
+ return 1;
350
}
351
str++;
352
ret = rfc3986_parse_hier_part(uri, &str);
353
if (ret != 0)
354
- return (ret);
355
+ return ret;
356
if (*str == '?') {
357
str++;
358
ret = rfc3986_parse_query(uri, &str);
359
if (ret != 0)
360
- return (ret);
361
+ return ret;
362
}
363
if (*str == '#') {
364
str++;
365
ret = rfc3986_parse_fragment(uri, &str);
366
if (ret != 0)
367
- return (ret);
368
+ return ret;
369
}
370
if (*str != 0) {
371
uri_clean(uri);
372
- return (1);
373
+ return 1;
374
}
375
- return (0);
376
+ return 0;
377
}
378
379
/**
380
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_uri_reference(URI *uri, const char *str)
381
int ret;
382
383
if (str == NULL)
384
- return (-1);
385
+ return -1;
386
uri_clean(uri);
387
388
/*
389
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_uri_reference(URI *uri, const char *str)
390
ret = rfc3986_parse_relative_ref(uri, str);
391
if (ret != 0) {
392
uri_clean(uri);
393
- return (ret);
394
+ return ret;
395
}
396
}
397
- return (0);
398
+ return 0;
399
}
400
401
/**
402
@@ -XXX,XX +XXX,XX @@ URI *uri_parse(const char *str)
403
int ret;
404
405
if (str == NULL)
406
- return (NULL);
407
+ return NULL;
408
uri = uri_new();
409
ret = rfc3986_parse_uri_reference(uri, str);
410
if (ret) {
411
uri_free(uri);
412
- return (NULL);
413
+ return NULL;
414
}
415
- return (uri);
416
+ return uri;
417
}
418
419
/**
420
@@ -XXX,XX +XXX,XX @@ URI *uri_parse(const char *str)
421
*/
422
int uri_parse_into(URI *uri, const char *str)
423
{
424
- return (rfc3986_parse_uri_reference(uri, str));
425
+ return rfc3986_parse_uri_reference(uri, str);
426
}
427
428
/**
429
@@ -XXX,XX +XXX,XX @@ URI *uri_parse_raw(const char *str, int raw)
430
int ret;
431
432
if (str == NULL)
433
- return (NULL);
434
+ return NULL;
435
uri = uri_new();
436
if (raw) {
437
uri->cleanup |= 2;
438
@@ -XXX,XX +XXX,XX @@ URI *uri_parse_raw(const char *str, int raw)
439
ret = uri_parse_into(uri, str);
440
if (ret) {
441
uri_free(uri);
442
- return (NULL);
443
+ return NULL;
444
}
445
- return (uri);
446
+ return uri;
447
}
448
449
/************************************************************************
450
@@ -XXX,XX +XXX,XX @@ URI *uri_new(void)
451
URI *ret;
452
453
ret = g_new0(URI, 1);
454
- return (ret);
455
+ return ret;
456
}
457
458
/**
459
@@ -XXX,XX +XXX,XX @@ static char *realloc2n(char *ret, int *max)
460
tmp = *max * 2;
461
temp = g_realloc(ret, (tmp + 1));
462
*max = tmp;
463
- return (temp);
464
+ return temp;
465
}
466
467
/**
468
@@ -XXX,XX +XXX,XX @@ char *uri_to_string(URI *uri)
469
int max;
470
471
if (uri == NULL)
472
- return (NULL);
473
+ return NULL;
474
475
max = 80;
476
ret = g_malloc(max + 1);
477
@@ -XXX,XX +XXX,XX @@ char *uri_to_string(URI *uri)
478
ret = temp;
479
}
480
ret[len] = 0;
481
- return (ret);
482
+ return ret;
483
}
484
485
/**
486
@@ -XXX,XX +XXX,XX @@ static int normalize_uri_path(char *path)
487
char *cur, *out;
488
489
if (path == NULL)
490
- return (-1);
491
+ return -1;
492
493
/* Skip all initial "/" chars. We want to get to the beginning of the
494
* first non-empty segment.
495
@@ -XXX,XX +XXX,XX @@ static int normalize_uri_path(char *path)
496
while (cur[0] == '/')
497
++cur;
498
if (cur[0] == '\0')
499
- return (0);
500
+ return 0;
501
502
/* Keep everything we've seen so far. */
503
out = cur;
504
@@ -XXX,XX +XXX,XX @@ done_cd:
505
while (cur[0] == '/')
506
++cur;
507
if (cur[0] == '\0')
508
- return (0);
509
+ return 0;
510
511
/*
512
* Analyze each segment in sequence for cases (e) and (f).
513
@@ -XXX,XX +XXX,XX @@ done_cd:
514
}
515
}
516
517
- return (0);
518
+ return 0;
519
}
520
521
static int is_hex(char c)
522
{
523
if (((c >= '0') && (c <= '9')) || ((c >= 'a') && (c <= 'f')) ||
524
((c >= 'A') && (c <= 'F')))
525
- return (1);
526
- return (0);
527
+ return 1;
528
+ return 0;
529
}
530
531
/**
532
@@ -XXX,XX +XXX,XX @@ char *uri_string_unescape(const char *str, int len, char *target)
533
const char *in;
534
535
if (str == NULL)
536
- return (NULL);
537
+ return NULL;
538
if (len <= 0)
539
len = strlen(str);
540
if (len < 0)
541
- return (NULL);
542
+ return NULL;
543
544
if (target == NULL) {
545
ret = g_malloc(len + 1);
546
@@ -XXX,XX +XXX,XX @@ char *uri_string_unescape(const char *str, int len, char *target)
547
}
548
}
549
*out = 0;
550
- return (ret);
551
+ return ret;
552
}
553
554
/**
555
@@ -XXX,XX +XXX,XX @@ char *uri_string_escape(const char *str, const char *list)
556
int len, out;
557
558
if (str == NULL)
559
- return (NULL);
560
+ return NULL;
561
if (str[0] == 0)
562
- return (g_strdup(str));
563
+ return g_strdup(str);
564
len = strlen(str);
565
if (!(len > 0))
566
- return (NULL);
567
+ return NULL;
568
569
len += 20;
570
ret = g_malloc(len);
571
@@ -XXX,XX +XXX,XX @@ char *uri_string_escape(const char *str, const char *list)
572
}
573
}
574
ret[out] = 0;
575
- return (ret);
576
+ return ret;
577
}
578
579
/************************************************************************
580
@@ -XXX,XX +XXX,XX @@ done:
581
uri_free(bas);
582
if (res != NULL)
583
uri_free(res);
584
- return (val);
585
+ return val;
586
}
587
588
/**
589
--
2.14.3
1
From: Su Hang <suhang16@mails.ucas.ac.cn>
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
This patch uses curly braces to wrap `if`, `while`, and `else` statements
3
Pull the increment/decrement pair out of aio_bh_poll and into the
4
that hold only a single statement. For example:
4
callers.
5
'''
6
if (cond)
7
statement;
8
'''
9
to
10
'''
11
if (cond) {
12
statement;
13
}
14
'''
15
5
16
The disassemblies were compared before and after the
6
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
17
code changes, to make sure the logic isn't changed:
7
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
18
'''
8
Reviewed-by: Fam Zheng <famz@redhat.com>
19
git checkout master
9
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
20
make util/uri.o
10
Message-id: 20170213135235.12274-18-pbonzini@redhat.com
21
strip util/uri.o
22
objdump -Drx util/uri.o > /tmp/uri-master.txt
23
git checkout cleanupbranch
24
make util/uri.o
25
strip util/uri.o
26
objdump -Drx util/uri.o > /tmp/uri-cleanup.txt
27
28
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
29
---
12
---
30
util/uri.c | 463 +++++++++++++++++++++++++++++++++++++++----------------------
13
util/aio-posix.c | 8 +++-----
31
1 file changed, 294 insertions(+), 169 deletions(-)
14
util/aio-win32.c | 8 ++++----
15
util/async.c | 12 ++++++------
16
3 files changed, 13 insertions(+), 15 deletions(-)
32
17
33
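As a rough illustration of the aio_bh_poll change described in the second
message above (a sketch only, not code from either patch, assuming an
AioContext *ctx), a caller now holds the list_lock count itself around the
bottom-half poll and the handler dispatch:

    qemu_lockcnt_inc(&ctx->list_lock);
    aio_bh_poll(ctx);
    aio_dispatch_handlers(ctx);
    qemu_lockcnt_dec(&ctx->list_lock);

    timerlistgroup_run_timers(&ctx->tlg);
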
diff --git a/util/uri.c b/util/uri.c
18
diff --git a/util/aio-posix.c b/util/aio-posix.c
34
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
35
--- a/util/uri.c
20
--- a/util/aio-posix.c
36
+++ b/util/uri.c
21
+++ b/util/aio-posix.c
37
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_scheme(URI *uri, const char **str)
22
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx)
23
24
void aio_dispatch(AioContext *ctx)
38
{
25
{
39
const char *cur;
26
+ qemu_lockcnt_inc(&ctx->list_lock);
40
27
aio_bh_poll(ctx);
41
- if (str == NULL)
28
-
42
+ if (str == NULL) {
29
- qemu_lockcnt_inc(&ctx->list_lock);
43
return -1;
30
aio_dispatch_handlers(ctx);
44
+ }
31
qemu_lockcnt_dec(&ctx->list_lock);
45
32
46
cur = *str;
33
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
47
- if (!ISA_ALPHA(cur))
48
+ if (!ISA_ALPHA(cur)) {
49
return 2;
50
+ }
51
cur++;
52
while (ISA_ALPHA(cur) || ISA_DIGIT(cur) || (*cur == '+') || (*cur == '-') ||
53
- (*cur == '.'))
54
+ (*cur == '.')) {
55
cur++;
56
+ }
57
if (uri != NULL) {
58
g_free(uri->scheme);
59
uri->scheme = g_strndup(*str, cur - *str);
60
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_fragment(URI *uri, const char **str)
61
{
62
const char *cur;
63
64
- if (str == NULL)
65
+ if (str == NULL) {
66
return -1;
67
+ }
68
69
cur = *str;
70
71
while ((ISA_PCHAR(cur)) || (*cur == '/') || (*cur == '?') ||
72
(*cur == '[') || (*cur == ']') ||
73
- ((uri != NULL) && (uri->cleanup & 1) && (IS_UNWISE(cur))))
74
+ ((uri != NULL) && (uri->cleanup & 1) && (IS_UNWISE(cur)))) {
75
NEXT(cur);
76
+ }
77
if (uri != NULL) {
78
g_free(uri->fragment);
79
- if (uri->cleanup & 2)
80
+ if (uri->cleanup & 2) {
81
uri->fragment = g_strndup(*str, cur - *str);
82
- else
83
+ } else {
84
uri->fragment = uri_string_unescape(*str, cur - *str, NULL);
85
+ }
86
}
34
}
87
*str = cur;
35
88
return 0;
36
npfd = 0;
89
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_query(URI *uri, const char **str)
37
- qemu_lockcnt_dec(&ctx->list_lock);
90
{
38
91
const char *cur;
39
progress |= aio_bh_poll(ctx);
92
40
93
- if (str == NULL)
41
if (ret > 0) {
94
+ if (str == NULL) {
42
- qemu_lockcnt_inc(&ctx->list_lock);
95
return -1;
43
progress |= aio_dispatch_handlers(ctx);
96
+ }
44
- qemu_lockcnt_dec(&ctx->list_lock);
97
98
cur = *str;
99
100
while ((ISA_PCHAR(cur)) || (*cur == '/') || (*cur == '?') ||
101
- ((uri != NULL) && (uri->cleanup & 1) && (IS_UNWISE(cur))))
102
+ ((uri != NULL) && (uri->cleanup & 1) && (IS_UNWISE(cur)))) {
103
NEXT(cur);
104
+ }
105
if (uri != NULL) {
106
g_free(uri->query);
107
uri->query = g_strndup(*str, cur - *str);
108
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_user_info(URI *uri, const char **str)
109
110
cur = *str;
111
while (ISA_UNRESERVED(cur) || ISA_PCT_ENCODED(cur) || ISA_SUB_DELIM(cur) ||
112
- (*cur == ':'))
113
+ (*cur == ':')) {
114
NEXT(cur);
115
+ }
116
if (*cur == '@') {
117
if (uri != NULL) {
118
g_free(uri->user);
119
- if (uri->cleanup & 2)
120
+ if (uri->cleanup & 2) {
121
uri->user = g_strndup(*str, cur - *str);
122
- else
123
+ } else {
124
uri->user = uri_string_unescape(*str, cur - *str, NULL);
125
+ }
126
}
127
*str = cur;
128
return 0;
129
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_dec_octet(const char **str)
130
{
131
const char *cur = *str;
132
133
- if (!(ISA_DIGIT(cur)))
134
+ if (!(ISA_DIGIT(cur))) {
135
return 1;
136
- if (!ISA_DIGIT(cur + 1))
137
+ }
138
+ if (!ISA_DIGIT(cur + 1)) {
139
cur++;
140
- else if ((*cur != '0') && (ISA_DIGIT(cur + 1)) && (!ISA_DIGIT(cur + 2)))
141
+ } else if ((*cur != '0') && (ISA_DIGIT(cur + 1)) && (!ISA_DIGIT(cur + 2))) {
142
cur += 2;
143
- else if ((*cur == '1') && (ISA_DIGIT(cur + 1)) && (ISA_DIGIT(cur + 2)))
144
+ } else if ((*cur == '1') && (ISA_DIGIT(cur + 1)) && (ISA_DIGIT(cur + 2))) {
145
cur += 3;
146
- else if ((*cur == '2') && (*(cur + 1) >= '0') && (*(cur + 1) <= '4') &&
147
- (ISA_DIGIT(cur + 2)))
148
+ } else if ((*cur == '2') && (*(cur + 1) >= '0') && (*(cur + 1) <= '4') &&
149
+ (ISA_DIGIT(cur + 2))) {
150
cur += 3;
151
- else if ((*cur == '2') && (*(cur + 1) == '5') && (*(cur + 2) >= '0') &&
152
- (*(cur + 1) <= '5'))
153
+ } else if ((*cur == '2') && (*(cur + 1) == '5') && (*(cur + 2) >= '0') &&
154
+ (*(cur + 1) <= '5')) {
155
cur += 3;
156
- else
157
+ } else {
158
return 1;
159
+ }
160
*str = cur;
161
return 0;
162
}
163
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_host(URI *uri, const char **str)
164
*/
165
if (*cur == '[') {
166
cur++;
167
- while ((*cur != ']') && (*cur != 0))
168
+ while ((*cur != ']') && (*cur != 0)) {
169
cur++;
170
- if (*cur != ']')
171
+ }
172
+ if (*cur != ']') {
173
return 1;
174
+ }
175
cur++;
176
goto found;
177
}
45
}
178
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_host(URI *uri, const char **str)
46
179
* try to parse an IPv4
47
+ qemu_lockcnt_dec(&ctx->list_lock);
180
*/
48
+
181
if (ISA_DIGIT(cur)) {
49
progress |= timerlistgroup_run_timers(&ctx->tlg);
182
- if (rfc3986_parse_dec_octet(&cur) != 0)
50
183
+ if (rfc3986_parse_dec_octet(&cur) != 0) {
51
return progress;
184
goto not_ipv4;
52
diff --git a/util/aio-win32.c b/util/aio-win32.c
185
- if (*cur != '.')
53
index XXXXXXX..XXXXXXX 100644
186
+ }
54
--- a/util/aio-win32.c
187
+ if (*cur != '.') {
55
+++ b/util/aio-win32.c
188
goto not_ipv4;
56
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
189
+ }
57
bool progress = false;
190
cur++;
58
AioHandler *tmp;
191
- if (rfc3986_parse_dec_octet(&cur) != 0)
59
192
+ if (rfc3986_parse_dec_octet(&cur) != 0) {
60
- qemu_lockcnt_inc(&ctx->list_lock);
193
goto not_ipv4;
61
-
194
- if (*cur != '.')
195
+ }
196
+ if (*cur != '.') {
197
goto not_ipv4;
198
- if (rfc3986_parse_dec_octet(&cur) != 0)
199
+ }
200
+ if (rfc3986_parse_dec_octet(&cur) != 0) {
201
goto not_ipv4;
202
- if (*cur != '.')
203
+ }
204
+ if (*cur != '.') {
205
goto not_ipv4;
206
- if (rfc3986_parse_dec_octet(&cur) != 0)
207
+ }
208
+ if (rfc3986_parse_dec_octet(&cur) != 0) {
209
goto not_ipv4;
210
+ }
211
goto found;
212
not_ipv4:
213
cur = *str;
214
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_host(URI *uri, const char **str)
215
/*
62
/*
216
* then this should be a hostname which can be empty
63
* We have to walk very carefully in case aio_set_fd_handler is
217
*/
64
* called while we're walking.
218
- while (ISA_UNRESERVED(cur) || ISA_PCT_ENCODED(cur) || ISA_SUB_DELIM(cur))
65
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
219
+ while (ISA_UNRESERVED(cur) || ISA_PCT_ENCODED(cur) || ISA_SUB_DELIM(cur)) {
220
NEXT(cur);
221
+ }
222
found:
223
if (uri != NULL) {
224
g_free(uri->authority);
225
uri->authority = NULL;
226
g_free(uri->server);
227
if (cur != host) {
228
- if (uri->cleanup & 2)
229
+ if (uri->cleanup & 2) {
230
uri->server = g_strndup(host, cur - host);
231
- else
232
+ } else {
233
uri->server = uri_string_unescape(host, cur - host, NULL);
234
- } else
235
+ }
236
+ } else {
237
uri->server = NULL;
238
+ }
239
}
240
*str = cur;
241
return 0;
242
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_authority(URI *uri, const char **str)
243
* try to parse a userinfo and check for the trailing @
244
*/
245
ret = rfc3986_parse_user_info(uri, &cur);
246
- if ((ret != 0) || (*cur != '@'))
247
+ if ((ret != 0) || (*cur != '@')) {
248
cur = *str;
249
- else
250
+ } else {
251
cur++;
252
+ }
253
ret = rfc3986_parse_host(uri, &cur);
254
- if (ret != 0)
255
+ if (ret != 0) {
256
return ret;
257
+ }
258
if (*cur == ':') {
259
cur++;
260
ret = rfc3986_parse_port(uri, &cur);
261
- if (ret != 0)
262
+ if (ret != 0) {
263
return ret;
264
+ }
265
}
266
*str = cur;
267
return 0;
268
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_segment(const char **str, char forbid, int empty)
269
270
cur = *str;
271
if (!ISA_PCHAR(cur)) {
272
- if (empty)
273
+ if (empty) {
274
return 0;
275
+ }
276
return 1;
277
}
278
- while (ISA_PCHAR(cur) && (*cur != forbid))
279
+ while (ISA_PCHAR(cur) && (*cur != forbid)) {
280
NEXT(cur);
281
+ }
282
*str = cur;
283
return 0;
284
}
285
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_path_ab_empty(URI *uri, const char **str)
286
while (*cur == '/') {
287
cur++;
288
ret = rfc3986_parse_segment(&cur, 0, 1);
289
- if (ret != 0)
290
+ if (ret != 0) {
291
return ret;
292
+ }
293
}
294
if (uri != NULL) {
295
g_free(uri->path);
296
if (*str != cur) {
297
- if (uri->cleanup & 2)
298
+ if (uri->cleanup & 2) {
299
uri->path = g_strndup(*str, cur - *str);
300
- else
301
+ } else {
302
uri->path = uri_string_unescape(*str, cur - *str, NULL);
303
+ }
304
} else {
305
uri->path = NULL;
306
}
307
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_path_absolute(URI *uri, const char **str)
308
309
cur = *str;
310
311
- if (*cur != '/')
312
+ if (*cur != '/') {
313
return 1;
314
+ }
315
cur++;
316
ret = rfc3986_parse_segment(&cur, 0, 0);
317
if (ret == 0) {
318
while (*cur == '/') {
319
cur++;
320
ret = rfc3986_parse_segment(&cur, 0, 1);
321
- if (ret != 0)
322
+ if (ret != 0) {
323
return ret;
324
+ }
325
}
66
}
326
}
67
}
327
if (uri != NULL) {
68
328
g_free(uri->path);
69
- qemu_lockcnt_dec(&ctx->list_lock);
329
if (cur != *str) {
70
return progress;
330
- if (uri->cleanup & 2)
71
}
331
+ if (uri->cleanup & 2) {
72
332
uri->path = g_strndup(*str, cur - *str);
73
void aio_dispatch(AioContext *ctx)
333
- else
334
+ } else {
335
uri->path = uri_string_unescape(*str, cur - *str, NULL);
336
+ }
337
} else {
338
uri->path = NULL;
339
}
340
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_path_rootless(URI *uri, const char **str)
341
cur = *str;
342
343
ret = rfc3986_parse_segment(&cur, 0, 0);
344
- if (ret != 0)
345
+ if (ret != 0) {
346
return ret;
347
+ }
348
while (*cur == '/') {
349
cur++;
350
ret = rfc3986_parse_segment(&cur, 0, 1);
351
- if (ret != 0)
352
+ if (ret != 0) {
353
return ret;
354
+ }
355
}
356
if (uri != NULL) {
357
g_free(uri->path);
358
if (cur != *str) {
359
- if (uri->cleanup & 2)
360
+ if (uri->cleanup & 2) {
361
uri->path = g_strndup(*str, cur - *str);
362
- else
363
+ } else {
364
uri->path = uri_string_unescape(*str, cur - *str, NULL);
365
+ }
366
} else {
367
uri->path = NULL;
368
}
369
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_path_no_scheme(URI *uri, const char **str)
370
cur = *str;
371
372
ret = rfc3986_parse_segment(&cur, ':', 0);
373
- if (ret != 0)
374
+ if (ret != 0) {
375
return ret;
376
+ }
377
while (*cur == '/') {
378
cur++;
379
ret = rfc3986_parse_segment(&cur, 0, 1);
380
- if (ret != 0)
381
+ if (ret != 0) {
382
return ret;
383
+ }
384
}
385
if (uri != NULL) {
386
g_free(uri->path);
387
if (cur != *str) {
388
- if (uri->cleanup & 2)
389
+ if (uri->cleanup & 2) {
390
uri->path = g_strndup(*str, cur - *str);
391
- else
392
+ } else {
393
uri->path = uri_string_unescape(*str, cur - *str, NULL);
394
+ }
395
} else {
396
uri->path = NULL;
397
}
398
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_hier_part(URI *uri, const char **str)
399
if ((*cur == '/') && (*(cur + 1) == '/')) {
400
cur += 2;
401
ret = rfc3986_parse_authority(uri, &cur);
402
- if (ret != 0)
403
+ if (ret != 0) {
404
return ret;
405
+ }
406
ret = rfc3986_parse_path_ab_empty(uri, &cur);
407
- if (ret != 0)
408
+ if (ret != 0) {
409
return ret;
410
+ }
411
*str = cur;
412
return 0;
413
} else if (*cur == '/') {
414
ret = rfc3986_parse_path_absolute(uri, &cur);
415
- if (ret != 0)
416
+ if (ret != 0) {
417
return ret;
418
+ }
419
} else if (ISA_PCHAR(cur)) {
420
ret = rfc3986_parse_path_rootless(uri, &cur);
421
- if (ret != 0)
422
+ if (ret != 0) {
423
return ret;
424
+ }
425
} else {
426
/* path-empty is effectively empty */
427
if (uri != NULL) {
428
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_relative_ref(URI *uri, const char *str)
429
if ((*str == '/') && (*(str + 1) == '/')) {
430
str += 2;
431
ret = rfc3986_parse_authority(uri, &str);
432
- if (ret != 0)
433
+ if (ret != 0) {
434
return ret;
435
+ }
436
ret = rfc3986_parse_path_ab_empty(uri, &str);
437
- if (ret != 0)
438
+ if (ret != 0) {
439
return ret;
440
+ }
441
} else if (*str == '/') {
442
ret = rfc3986_parse_path_absolute(uri, &str);
443
- if (ret != 0)
444
+ if (ret != 0) {
445
return ret;
446
+ }
447
} else if (ISA_PCHAR(str)) {
448
ret = rfc3986_parse_path_no_scheme(uri, &str);
449
- if (ret != 0)
450
+ if (ret != 0) {
451
return ret;
452
+ }
453
} else {
454
/* path-empty is effectively empty */
455
if (uri != NULL) {
456
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_relative_ref(URI *uri, const char *str)
457
if (*str == '?') {
458
str++;
459
ret = rfc3986_parse_query(uri, &str);
460
- if (ret != 0)
461
+ if (ret != 0) {
462
return ret;
463
+ }
464
}
465
if (*str == '#') {
466
str++;
467
ret = rfc3986_parse_fragment(uri, &str);
468
- if (ret != 0)
469
+ if (ret != 0) {
470
return ret;
471
+ }
472
}
473
if (*str != 0) {
474
uri_clean(uri);
475
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse(URI *uri, const char *str)
476
int ret;
477
478
ret = rfc3986_parse_scheme(uri, &str);
479
- if (ret != 0)
480
+ if (ret != 0) {
481
return ret;
482
+ }
483
if (*str != ':') {
484
return 1;
485
}
486
str++;
487
ret = rfc3986_parse_hier_part(uri, &str);
488
- if (ret != 0)
489
+ if (ret != 0) {
490
return ret;
491
+ }
492
if (*str == '?') {
493
str++;
494
ret = rfc3986_parse_query(uri, &str);
495
- if (ret != 0)
496
+ if (ret != 0) {
497
return ret;
498
+ }
499
}
500
if (*str == '#') {
501
str++;
502
ret = rfc3986_parse_fragment(uri, &str);
503
- if (ret != 0)
504
+ if (ret != 0) {
505
return ret;
506
+ }
507
}
508
if (*str != 0) {
509
uri_clean(uri);
510
@@ -XXX,XX +XXX,XX @@ static int rfc3986_parse_uri_reference(URI *uri, const char *str)
511
{
74
{
512
int ret;
75
+ qemu_lockcnt_inc(&ctx->list_lock);
513
76
aio_bh_poll(ctx);
514
- if (str == NULL)
77
aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
515
+ if (str == NULL) {
78
+ qemu_lockcnt_dec(&ctx->list_lock);
516
return -1;
79
timerlistgroup_run_timers(&ctx->tlg);
517
+ }
80
}
518
uri_clean(uri);
81
519
82
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
520
/*
521
@@ -XXX,XX +XXX,XX @@ URI *uri_parse(const char *str)
522
URI *uri;
523
int ret;
524
525
- if (str == NULL)
526
+ if (str == NULL) {
527
return NULL;
528
+ }
529
uri = uri_new();
530
ret = rfc3986_parse_uri_reference(uri, str);
531
if (ret) {
532
@@ -XXX,XX +XXX,XX @@ URI *uri_parse_raw(const char *str, int raw)
533
URI *uri;
534
int ret;
535
536
- if (str == NULL)
537
+ if (str == NULL) {
538
return NULL;
539
+ }
540
uri = uri_new();
541
if (raw) {
542
uri->cleanup |= 2;
543
@@ -XXX,XX +XXX,XX @@ char *uri_to_string(URI *uri)
544
int len;
545
int max;
546
547
- if (uri == NULL)
548
+ if (uri == NULL) {
549
return NULL;
550
+ }
551
552
max = 80;
553
ret = g_malloc(max + 1);
554
@@ -XXX,XX +XXX,XX @@ char *uri_to_string(URI *uri)
555
temp = realloc2n(ret, &max);
556
ret = temp;
557
}
558
- if (IS_RESERVED(*(p)) || IS_UNRESERVED(*(p)))
559
+ if (IS_RESERVED(*(p)) || IS_UNRESERVED(*(p))) {
560
ret[len++] = *p++;
561
- else {
562
+ } else {
563
int val = *(unsigned char *)p++;
564
int hi = val / 0x10, lo = val % 0x10;
565
ret[len++] = '%';
566
@@ -XXX,XX +XXX,XX @@ char *uri_to_string(URI *uri)
567
}
568
if ((IS_UNRESERVED(*(p))) || ((*(p) == ';')) ||
569
((*(p) == ':')) || ((*(p) == '&')) || ((*(p) == '=')) ||
570
- ((*(p) == '+')) || ((*(p) == '$')) || ((*(p) == ',')))
571
+ ((*(p) == '+')) || ((*(p) == '$')) || ((*(p) == ','))) {
572
ret[len++] = *p++;
573
- else {
574
+ } else {
575
int val = *(unsigned char *)p++;
576
int hi = val / 0x10, lo = val % 0x10;
577
ret[len++] = '%';
578
@@ -XXX,XX +XXX,XX @@ char *uri_to_string(URI *uri)
579
if ((IS_UNRESERVED(*(p))) || ((*(p) == '$')) ||
580
((*(p) == ',')) || ((*(p) == ';')) || ((*(p) == ':')) ||
581
((*(p) == '@')) || ((*(p) == '&')) || ((*(p) == '=')) ||
582
- ((*(p) == '+')))
583
+ ((*(p) == '+'))) {
584
ret[len++] = *p++;
585
- else {
586
+ } else {
587
int val = *(unsigned char *)p++;
588
int hi = val / 0x10, lo = val % 0x10;
589
ret[len++] = '%';
590
@@ -XXX,XX +XXX,XX @@ char *uri_to_string(URI *uri)
591
if ((IS_UNRESERVED(*(p))) || ((*(p) == '/')) ||
592
((*(p) == ';')) || ((*(p) == '@')) || ((*(p) == '&')) ||
593
((*(p) == '=')) || ((*(p) == '+')) || ((*(p) == '$')) ||
594
- ((*(p) == ',')))
595
+ ((*(p) == ','))) {
596
ret[len++] = *p++;
597
- else {
598
+ } else {
599
int val = *(unsigned char *)p++;
600
int hi = val / 0x10, lo = val % 0x10;
601
ret[len++] = '%';
602
@@ -XXX,XX +XXX,XX @@ char *uri_to_string(URI *uri)
603
temp = realloc2n(ret, &max);
604
ret = temp;
605
}
606
- if ((IS_UNRESERVED(*(p))) || (IS_RESERVED(*(p))))
607
+ if ((IS_UNRESERVED(*(p))) || (IS_RESERVED(*(p)))) {
608
ret[len++] = *p++;
609
- else {
610
+ } else {
611
int val = *(unsigned char *)p++;
612
int hi = val / 0x10, lo = val % 0x10;
613
ret[len++] = '%';
614
@@ -XXX,XX +XXX,XX @@ char *uri_to_string(URI *uri)
615
*/
616
static void uri_clean(URI *uri)
617
{
618
- if (uri == NULL)
619
+ if (uri == NULL) {
620
return;
621
+ }
622
623
g_free(uri->scheme);
624
uri->scheme = NULL;
625
@@ -XXX,XX +XXX,XX @@ static int normalize_uri_path(char *path)
626
{
627
char *cur, *out;
628
629
- if (path == NULL)
630
+ if (path == NULL) {
631
return -1;
632
+ }
633
634
/* Skip all initial "/" chars. We want to get to the beginning of the
635
* first non-empty segment.
636
*/
637
cur = path;
638
- while (cur[0] == '/')
639
+ while (cur[0] == '/') {
640
++cur;
641
- if (cur[0] == '\0')
642
+ }
643
+ if (cur[0] == '\0') {
644
return 0;
645
+ }
646
647
/* Keep everything we've seen so far. */
648
out = cur;
649
@@ -XXX,XX +XXX,XX @@ static int normalize_uri_path(char *path)
650
if ((cur[0] == '.') && (cur[1] == '/')) {
651
cur += 2;
652
/* '//' normalization should be done at this point too */
653
- while (cur[0] == '/')
654
+ while (cur[0] == '/') {
655
cur++;
656
+ }
657
continue;
658
}
659
660
@@ -XXX,XX +XXX,XX @@ static int normalize_uri_path(char *path)
661
* d) If the buffer string ends with "." as a complete path segment,
662
* that "." is removed.
663
*/
664
- if ((cur[0] == '.') && (cur[1] == '\0'))
665
+ if ((cur[0] == '.') && (cur[1] == '\0')) {
666
break;
667
+ }
668
669
/* Otherwise keep the segment. */
670
while (cur[0] != '/') {
671
- if (cur[0] == '\0')
672
+ if (cur[0] == '\0') {
673
goto done_cd;
674
+ }
675
(out++)[0] = (cur++)[0];
676
}
677
/* nomalize // */
678
- while ((cur[0] == '/') && (cur[1] == '/'))
679
+ while ((cur[0] == '/') && (cur[1] == '/')) {
680
cur++;
681
+ }
682
683
(out++)[0] = (cur++)[0];
684
}
685
@@ -XXX,XX +XXX,XX @@ done_cd:
686
687
/* Reset to the beginning of the first segment for the next sequence. */
688
cur = path;
689
- while (cur[0] == '/')
690
+ while (cur[0] == '/') {
691
++cur;
692
- if (cur[0] == '\0')
693
+ }
694
+ if (cur[0] == '\0') {
695
return 0;
696
+ }
697
698
/*
699
* Analyze each segment in sequence for cases (e) and (f).
700
@@ -XXX,XX +XXX,XX @@ done_cd:
701
702
/* Find the end of the current segment. */
703
segp = cur;
704
- while ((segp[0] != '/') && (segp[0] != '\0'))
705
+ while ((segp[0] != '/') && (segp[0] != '\0')) {
706
++segp;
707
+ }
708
709
/* If this is the last segment, we're done (we need at least two
710
* segments to meet the criteria for the (e) and (f) cases).
711
*/
712
- if (segp[0] == '\0')
713
+ if (segp[0] == '\0') {
714
break;
715
+ }
716
717
/* If the first segment is "..", or if the next segment _isn't_ "..",
718
* keep this segment and try the next one.
719
@@ -XXX,XX +XXX,XX @@ done_cd:
720
/* string will overlap, do not use strcpy */
721
tmp = cur;
722
segp += 3;
723
- while ((*tmp++ = *segp++) != 0)
724
- ;
725
+ while ((*tmp++ = *segp++) != 0) {
726
+ /* No further work */
727
+ }
728
729
/* If there are no previous segments, then keep going from here. */
730
segp = cur;
731
- while ((segp > path) && ((--segp)[0] == '/'))
732
- ;
733
- if (segp == path)
734
+ while ((segp > path) && ((--segp)[0] == '/')) {
735
+ /* No further work */
736
+ }
737
+ if (segp == path) {
738
continue;
739
+ }
740
741
/* "segp" is pointing to the end of a previous segment; find it's
742
* start. We need to back up to the previous segment and start
743
@@ -XXX,XX +XXX,XX @@ done_cd:
744
* remove the "foo/..".
745
*/
746
cur = segp;
747
- while ((cur > path) && (cur[-1] != '/'))
748
+ while ((cur > path) && (cur[-1] != '/')) {
749
--cur;
750
+ }
751
}
752
out[0] = '\0';
753
754
@@ -XXX,XX +XXX,XX @@ done_cd:
755
if (path[0] == '/') {
756
cur = path;
757
while ((cur[0] == '/') && (cur[1] == '.') && (cur[2] == '.') &&
758
- ((cur[3] == '/') || (cur[3] == '\0')))
759
+ ((cur[3] == '/') || (cur[3] == '\0'))) {
760
cur += 3;
761
+ }
762
763
if (cur != path) {
764
out = path;
765
- while (cur[0] != '\0')
766
+ while (cur[0] != '\0') {
767
(out++)[0] = (cur++)[0];
768
+ }
769
out[0] = 0;
770
}
83
}
771
}
84
}
772
@@ -XXX,XX +XXX,XX @@ done_cd:
85
773
static int is_hex(char c)
86
- qemu_lockcnt_dec(&ctx->list_lock);
87
first = true;
88
89
/* ctx->notifier is always registered. */
90
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
91
progress |= aio_dispatch_handlers(ctx, event);
92
} while (count > 0);
93
94
+ qemu_lockcnt_dec(&ctx->list_lock);
95
+
96
progress |= timerlistgroup_run_timers(&ctx->tlg);
97
return progress;
98
}
99
diff --git a/util/async.c b/util/async.c
100
index XXXXXXX..XXXXXXX 100644
101
--- a/util/async.c
102
+++ b/util/async.c
103
@@ -XXX,XX +XXX,XX @@ void aio_bh_call(QEMUBH *bh)
104
bh->cb(bh->opaque);
105
}
106
107
-/* Multiple occurrences of aio_bh_poll cannot be called concurrently */
108
+/* Multiple occurrences of aio_bh_poll cannot be called concurrently.
109
+ * The count in ctx->list_lock is incremented before the call, and is
110
+ * not affected by the call.
111
+ */
112
int aio_bh_poll(AioContext *ctx)
774
{
113
{
775
if (((c >= '0') && (c <= '9')) || ((c >= 'a') && (c <= 'f')) ||
114
QEMUBH *bh, **bhp, *next;
776
- ((c >= 'A') && (c <= 'F')))
115
int ret;
777
+ ((c >= 'A') && (c <= 'F'))) {
116
bool deleted = false;
778
return 1;
117
779
+ }
118
- qemu_lockcnt_inc(&ctx->list_lock);
780
return 0;
119
-
781
}
120
ret = 0;
782
121
for (bh = atomic_rcu_read(&ctx->first_bh); bh; bh = next) {
783
@@ -XXX,XX +XXX,XX @@ char *uri_string_unescape(const char *str, int len, char *target)
122
next = atomic_rcu_read(&bh->next);
784
char *ret, *out;
123
@@ -XXX,XX +XXX,XX @@ int aio_bh_poll(AioContext *ctx)
785
const char *in;
124
786
125
/* remove deleted bhs */
787
- if (str == NULL)
126
if (!deleted) {
788
+ if (str == NULL) {
127
- qemu_lockcnt_dec(&ctx->list_lock);
789
return NULL;
128
return ret;
790
- if (len <= 0)
791
+ }
792
+ if (len <= 0) {
793
len = strlen(str);
794
- if (len < 0)
795
+ }
796
+ if (len < 0) {
797
return NULL;
798
+ }
799
800
if (target == NULL) {
801
ret = g_malloc(len + 1);
802
- } else
803
+ } else {
804
ret = target;
805
+ }
806
in = str;
807
out = ret;
808
while (len > 0) {
809
if ((len > 2) && (*in == '%') && (is_hex(in[1])) && (is_hex(in[2]))) {
810
in++;
811
- if ((*in >= '0') && (*in <= '9'))
812
+ if ((*in >= '0') && (*in <= '9')) {
813
*out = (*in - '0');
814
- else if ((*in >= 'a') && (*in <= 'f'))
815
+ } else if ((*in >= 'a') && (*in <= 'f')) {
816
*out = (*in - 'a') + 10;
817
- else if ((*in >= 'A') && (*in <= 'F'))
818
+ } else if ((*in >= 'A') && (*in <= 'F')) {
819
*out = (*in - 'A') + 10;
820
+ }
821
in++;
822
- if ((*in >= '0') && (*in <= '9'))
823
+ if ((*in >= '0') && (*in <= '9')) {
824
*out = *out * 16 + (*in - '0');
825
- else if ((*in >= 'a') && (*in <= 'f'))
826
+ } else if ((*in >= 'a') && (*in <= 'f')) {
827
*out = *out * 16 + (*in - 'a') + 10;
828
- else if ((*in >= 'A') && (*in <= 'F'))
829
+ } else if ((*in >= 'A') && (*in <= 'F')) {
830
*out = *out * 16 + (*in - 'A') + 10;
831
+ }
832
in++;
833
len -= 3;
834
out++;
835
@@ -XXX,XX +XXX,XX @@ char *uri_string_escape(const char *str, const char *list)
836
const char *in;
837
int len, out;
838
839
- if (str == NULL)
840
+ if (str == NULL) {
841
return NULL;
842
- if (str[0] == 0)
843
+ }
844
+ if (str[0] == 0) {
845
return g_strdup(str);
846
+ }
847
len = strlen(str);
848
- if (!(len > 0))
849
+ if (!(len > 0)) {
850
return NULL;
851
+ }
852
853
len += 20;
854
ret = g_malloc(len);
855
@@ -XXX,XX +XXX,XX @@ char *uri_string_escape(const char *str, const char *list)
856
unsigned char val;
857
ret[out++] = '%';
858
val = ch >> 4;
859
- if (val <= 9)
860
+ if (val <= 9) {
861
ret[out++] = '0' + val;
862
- else
863
+ } else {
864
ret[out++] = 'A' + val - 0xA;
865
+ }
866
val = ch & 0xF;
867
- if (val <= 9)
868
+ if (val <= 9) {
869
ret[out++] = '0' + val;
870
- else
871
+ } else {
872
ret[out++] = 'A' + val - 0xA;
873
+ }
874
in++;
875
} else {
876
ret[out++] = *in++;
877
@@ -XXX,XX +XXX,XX @@ char *uri_resolve(const char *uri, const char *base)
878
* as a reference to "." rather than as a synonym for the current
879
* URI. Should we do that here?
880
*/
881
- if (uri == NULL)
882
+ if (uri == NULL) {
883
ret = -1;
884
- else {
885
+ } else {
886
if (*uri) {
887
ref = uri_new();
888
ret = uri_parse_into(ref, uri);
889
- } else
890
+ } else {
891
ret = 0;
892
+ }
893
}
129
}
894
- if (ret != 0)
130
895
+ if (ret != 0) {
131
- if (qemu_lockcnt_dec_and_lock(&ctx->list_lock)) {
896
goto done;
132
+ if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
897
+ }
133
bhp = &ctx->first_bh;
898
if ((ref != NULL) && (ref->scheme != NULL)) {
134
while (*bhp) {
899
/*
135
bh = *bhp;
900
* The URI is absolute don't modify.
136
@@ -XXX,XX +XXX,XX @@ int aio_bh_poll(AioContext *ctx)
901
@@ -XXX,XX +XXX,XX @@ char *uri_resolve(const char *uri, const char *base)
137
bhp = &bh->next;
902
val = g_strdup(uri);
903
goto done;
904
}
905
- if (base == NULL)
906
+ if (base == NULL) {
907
ret = -1;
908
- else {
909
+ } else {
910
bas = uri_new();
911
ret = uri_parse_into(bas, base);
912
}
913
if (ret != 0) {
914
- if (ref)
915
+ if (ref) {
916
val = uri_to_string(ref);
917
+ }
918
goto done;
919
}
920
if (ref == NULL) {
921
@@ -XXX,XX +XXX,XX @@ char *uri_resolve(const char *uri, const char *base)
922
if ((ref->scheme == NULL) && (ref->path == NULL) &&
923
((ref->authority == NULL) && (ref->server == NULL))) {
924
res->scheme = g_strdup(bas->scheme);
925
- if (bas->authority != NULL)
926
+ if (bas->authority != NULL) {
927
res->authority = g_strdup(bas->authority);
928
- else if (bas->server != NULL) {
929
+ } else if (bas->server != NULL) {
930
res->server = g_strdup(bas->server);
931
res->user = g_strdup(bas->user);
932
res->port = bas->port;
933
@@ -XXX,XX +XXX,XX @@ char *uri_resolve(const char *uri, const char *base)
934
* use an authority component.
935
*/
936
if ((ref->authority != NULL) || (ref->server != NULL)) {
937
- if (ref->authority != NULL)
938
+ if (ref->authority != NULL) {
939
res->authority = g_strdup(ref->authority);
940
- else {
941
+ } else {
942
res->server = g_strdup(ref->server);
943
res->user = g_strdup(ref->user);
944
res->port = ref->port;
945
@@ -XXX,XX +XXX,XX @@ char *uri_resolve(const char *uri, const char *base)
946
res->path = g_strdup(ref->path);
947
goto step_7;
948
}
949
- if (bas->authority != NULL)
950
+ if (bas->authority != NULL) {
951
res->authority = g_strdup(bas->authority);
952
- else if (bas->server != NULL) {
953
+ } else if (bas->server != NULL) {
954
res->server = g_strdup(bas->server);
955
res->user = g_strdup(bas->user);
956
res->port = bas->port;
957
@@ -XXX,XX +XXX,XX @@ char *uri_resolve(const char *uri, const char *base)
958
* Allocate a buffer large enough for the result string.
959
*/
960
len = 2; /* extra / and 0 */
961
- if (ref->path != NULL)
962
+ if (ref->path != NULL) {
963
len += strlen(ref->path);
964
- if (bas->path != NULL)
965
+ }
966
+ if (bas->path != NULL) {
967
len += strlen(bas->path);
968
+ }
969
res->path = g_malloc(len);
970
res->path[0] = 0;
971
972
@@ -XXX,XX +XXX,XX @@ char *uri_resolve(const char *uri, const char *base)
973
out = 0;
974
if (bas->path != NULL) {
975
while (bas->path[cur] != 0) {
976
- while ((bas->path[cur] != 0) && (bas->path[cur] != '/'))
977
+ while ((bas->path[cur] != 0) && (bas->path[cur] != '/')) {
978
cur++;
979
- if (bas->path[cur] == 0)
980
+ }
981
+ if (bas->path[cur] == 0) {
982
break;
983
+ }
984
985
cur++;
986
while (out < cur) {
987
@@ -XXX,XX +XXX,XX @@ char *uri_resolve(const char *uri, const char *base)
988
/*
989
* Ensure the path includes a '/'
990
*/
991
- if ((out == 0) && (bas->server != NULL))
992
+ if ((out == 0) && (bas->server != NULL)) {
993
res->path[out++] = '/';
994
+ }
995
while (ref->path[indx] != 0) {
996
res->path[out++] = ref->path[indx++];
997
}
998
@@ -XXX,XX +XXX,XX @@ step_7:
999
val = uri_to_string(res);
1000
1001
done:
1002
- if (ref != NULL)
1003
+ if (ref != NULL) {
1004
uri_free(ref);
1005
- if (bas != NULL)
1006
+ }
1007
+ if (bas != NULL) {
1008
uri_free(bas);
1009
- if (res != NULL)
1010
+ }
1011
+ if (res != NULL) {
1012
uri_free(res);
1013
+ }
1014
return val;
1015
}
1016
1017
@@ -XXX,XX +XXX,XX @@ char *uri_resolve_relative(const char *uri, const char *base)
1018
char *bptr, *uptr, *vptr;
1019
int remove_path = 0;
1020
1021
- if ((uri == NULL) || (*uri == 0))
1022
+ if ((uri == NULL) || (*uri == 0)) {
1023
return NULL;
1024
+ }
1025
1026
/*
1027
* First parse URI into a standard form
1028
@@ -XXX,XX +XXX,XX @@ char *uri_resolve_relative(const char *uri, const char *base)
1029
/* If URI not already in "relative" form */
1030
if (uri[0] != '.') {
1031
ret = uri_parse_into(ref, uri);
1032
- if (ret != 0)
1033
+ if (ret != 0) {
1034
goto done; /* Error in URI, return NULL */
1035
- } else
1036
+ }
1037
+ } else {
1038
ref->path = g_strdup(uri);
1039
+ }
1040
1041
/*
1042
* Next parse base into the same standard form
1043
@@ -XXX,XX +XXX,XX @@ char *uri_resolve_relative(const char *uri, const char *base)
1044
bas = uri_new();
1045
if (base[0] != '.') {
1046
ret = uri_parse_into(bas, base);
1047
- if (ret != 0)
1048
+ if (ret != 0) {
1049
goto done; /* Error in base, return NULL */
1050
- } else
1051
+ }
1052
+ } else {
1053
bas->path = g_strdup(base);
1054
+ }
1055
1056
/*
1057
* If the scheme / server on the URI differs from the base,
1058
@@ -XXX,XX +XXX,XX @@ char *uri_resolve_relative(const char *uri, const char *base)
1059
if (bas->path == NULL) {
1060
if (ref->path != NULL) {
1061
uptr = ref->path;
1062
- if (*uptr == '/')
1063
+ if (*uptr == '/') {
1064
uptr++;
1065
+ }
1066
/* exception characters from uri_to_string */
1067
val = uri_string_escape(uptr, "/;&=+$,");
1068
}
1069
@@ -XXX,XX +XXX,XX @@ char *uri_resolve_relative(const char *uri, const char *base)
1070
bptr = bas->path;
1071
if (ref->path == NULL) {
1072
for (ix = 0; bptr[ix] != 0; ix++) {
1073
- if (bptr[ix] == '/')
1074
+ if (bptr[ix] == '/') {
1075
nbslash++;
1076
+ }
1077
}
1078
uptr = NULL;
1079
len = 1; /* this is for a string terminator only */
1080
@@ -XXX,XX +XXX,XX @@ char *uri_resolve_relative(const char *uri, const char *base)
1081
/*
1082
* Next we compare the two strings and find where they first differ
1083
*/
1084
- if ((ref->path[pos] == '.') && (ref->path[pos + 1] == '/'))
1085
+ if ((ref->path[pos] == '.') && (ref->path[pos + 1] == '/')) {
1086
pos += 2;
1087
- if ((*bptr == '.') && (bptr[1] == '/'))
1088
+ }
1089
+ if ((*bptr == '.') && (bptr[1] == '/')) {
1090
bptr += 2;
1091
- else if ((*bptr == '/') && (ref->path[pos] != '/'))
1092
+ } else if ((*bptr == '/') && (ref->path[pos] != '/')) {
1093
bptr++;
1094
- while ((bptr[pos] == ref->path[pos]) && (bptr[pos] != 0))
1095
+ }
1096
+ while ((bptr[pos] == ref->path[pos]) && (bptr[pos] != 0)) {
1097
pos++;
1098
+ }
1099
1100
if (bptr[pos] == ref->path[pos]) {
1101
val = g_strdup("");
1102
@@ -XXX,XX +XXX,XX @@ char *uri_resolve_relative(const char *uri, const char *base)
1103
* beginning of the "unique" suffix of URI
1104
*/
1105
ix = pos;
1106
- if ((ref->path[ix] == '/') && (ix > 0))
1107
+ if ((ref->path[ix] == '/') && (ix > 0)) {
1108
ix--;
1109
- else if ((ref->path[ix] == 0) && (ix > 1) && (ref->path[ix - 1] == '/'))
1110
+ } else if ((ref->path[ix] == 0) && (ix > 1)
1111
+ && (ref->path[ix - 1] == '/')) {
1112
ix -= 2;
1113
+ }
1114
for (; ix > 0; ix--) {
1115
- if (ref->path[ix] == '/')
1116
+ if (ref->path[ix] == '/') {
1117
break;
1118
+ }
1119
}
1120
if (ix == 0) {
1121
uptr = ref->path;
1122
@@ -XXX,XX +XXX,XX @@ char *uri_resolve_relative(const char *uri, const char *base)
1123
*/
1124
if (bptr[pos] != ref->path[pos]) { /* check for trivial URI == base */
1125
for (; bptr[ix] != 0; ix++) {
1126
- if (bptr[ix] == '/')
1127
+ if (bptr[ix] == '/') {
1128
nbslash++;
1129
+ }
1130
}
138
}
1131
}
139
}
1132
len = strlen(uptr) + 1;
140
- qemu_lockcnt_unlock(&ctx->list_lock);
141
+ qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
1133
}
142
}
1134
143
return ret;
1135
if (nbslash == 0) {
1136
- if (uptr != NULL)
1137
+ if (uptr != NULL) {
1138
/* exception characters from uri_to_string */
1139
val = uri_string_escape(uptr, "/;&=+$,");
1140
+ }
1141
goto done;
1142
}
1143
1144
@@ -XXX,XX +XXX,XX @@ done:
1145
/*
1146
* Free the working variables
1147
*/
1148
- if (remove_path != 0)
1149
+ if (remove_path != 0) {
1150
ref->path = NULL;
1151
- if (ref != NULL)
1152
+ }
1153
+ if (ref != NULL) {
1154
uri_free(ref);
1155
- if (bas != NULL)
1156
+ }
1157
+ if (bas != NULL) {
1158
uri_free(bas);
1159
+ }
1160
1161
return val;
1162
}
144
}
1163
@@ -XXX,XX +XXX,XX @@ struct QueryParams *query_params_new(int init_alloc)
1164
{
1165
struct QueryParams *ps;
1166
1167
- if (init_alloc <= 0)
1168
+ if (init_alloc <= 0) {
1169
init_alloc = 1;
1170
+ }
1171
1172
ps = g_new(QueryParams, 1);
1173
ps->n = 0;
1174
@@ -XXX,XX +XXX,XX @@ struct QueryParams *query_params_parse(const char *query)
1175
const char *end, *eq;
1176
1177
ps = query_params_new(0);
1178
- if (!query || query[0] == '\0')
1179
+ if (!query || query[0] == '\0') {
1180
return ps;
1181
+ }
1182
1183
while (*query) {
1184
char *name = NULL, *value = NULL;
1185
1186
/* Find the next separator, or end of the string. */
1187
end = strchr(query, '&');
1188
- if (!end)
1189
+ if (!end) {
1190
end = strchr(query, ';');
1191
- if (!end)
1192
+ }
1193
+ if (!end) {
1194
end = query + strlen(query);
1195
+ }
1196
1197
/* Find the first '=' character between here and end. */
1198
eq = strchr(query, '=');
1199
- if (eq && eq >= end)
1200
+ if (eq && eq >= end) {
1201
eq = NULL;
1202
+ }
1203
1204
/* Empty section (eg. "&&"). */
1205
- if (end == query)
1206
+ if (end == query) {
1207
goto next;
1208
+ }
1209
1210
/* If there is no '=' character, then we have just "name"
1211
* and consistent with CGI.pm we assume value is "".
1212
@@ -XXX,XX +XXX,XX @@ struct QueryParams *query_params_parse(const char *query)
1213
/* If the '=' character is at the beginning then we have
1214
* "=value" and consistent with CGI.pm we _ignore_ this.
1215
*/
1216
- else if (query == eq)
1217
+ else if (query == eq) {
1218
goto next;
1219
+ }
1220
1221
/* Otherwise it's "name=value". */
1222
else {
1223
@@ -XXX,XX +XXX,XX @@ struct QueryParams *query_params_parse(const char *query)
1224
1225
next:
1226
query = end;
1227
- if (*query)
1228
+ if (*query) {
1229
query++; /* skip '&' separator */
1230
+ }
1231
}
1232
1233
return ps;
1234
--
2.14.3
From: Paolo Bonzini <pbonzini@redhat.com>

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
Message-id: 20170213135235.12274-19-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/block/block_int.h      | 64 +++++++++++++++++++++++++-----------------
 include/sysemu/block-backend.h | 14 ++++++---
 2 files changed, 49 insertions(+), 29 deletions(-)

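As context for the field movement below (an illustrative sketch, not part of
the patch), code that touches the members the new comments mark as
"Protected by AioContext lock" is expected to run with that lock held,
assuming a BlockDriverState *bs obtained elsewhere:

    AioContext *ctx = bdrv_get_aio_context(bs);

    aio_context_acquire(ctx);
    bs->copy_on_read++;    /* one of the fields listed under the AioContext lock */
    aio_context_release(ctx);
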
diff --git a/include/block/block_int.h b/include/block/block_int.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/include/block/block_int.h
17
+++ b/include/block/block_int.h
18
@@ -XXX,XX +XXX,XX @@ struct BdrvChild {
19
* copied as well.
20
*/
21
struct BlockDriverState {
22
- int64_t total_sectors; /* if we are reading a disk image, give its
23
- size in sectors */
24
+ /* Protected by big QEMU lock or read-only after opening. No special
25
+ * locking needed during I/O...
26
+ */
27
int open_flags; /* flags used to open the file, re-used for re-open */
28
bool read_only; /* if true, the media is read only */
29
bool encrypted; /* if true, the media is encrypted */
30
@@ -XXX,XX +XXX,XX @@ struct BlockDriverState {
31
bool sg; /* if true, the device is a /dev/sg* */
32
bool probed; /* if true, format was probed rather than specified */
33
34
- int copy_on_read; /* if nonzero, copy read backing sectors into image.
35
- note this is a reference count */
36
-
37
- CoQueue flush_queue; /* Serializing flush queue */
38
- bool active_flush_req; /* Flush request in flight? */
39
- unsigned int write_gen; /* Current data generation */
40
- unsigned int flushed_gen; /* Flushed write generation */
41
-
42
BlockDriver *drv; /* NULL means no media */
43
void *opaque;
44
45
@@ -XXX,XX +XXX,XX @@ struct BlockDriverState {
46
BdrvChild *backing;
47
BdrvChild *file;
48
49
- /* Callback before write request is processed */
50
- NotifierWithReturnList before_write_notifiers;
51
-
52
- /* number of in-flight requests; overall and serialising */
53
- unsigned int in_flight;
54
- unsigned int serialising_in_flight;
55
-
56
- bool wakeup;
57
-
58
- /* Offset after the highest byte written to */
59
- uint64_t wr_highest_offset;
60
-
61
/* I/O Limits */
62
BlockLimits bl;
63
64
@@ -XXX,XX +XXX,XX @@ struct BlockDriverState {
65
QTAILQ_ENTRY(BlockDriverState) bs_list;
66
/* element of the list of monitor-owned BDS */
67
QTAILQ_ENTRY(BlockDriverState) monitor_list;
68
- QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps;
69
int refcnt;
70
71
- QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;
72
-
73
/* operation blockers */
74
QLIST_HEAD(, BdrvOpBlocker) op_blockers[BLOCK_OP_TYPE_MAX];
75
76
@@ -XXX,XX +XXX,XX @@ struct BlockDriverState {
77
/* The error object in use for blocking operations on backing_hd */
78
Error *backing_blocker;
79
80
+ /* Protected by AioContext lock */
81
+
82
+ /* If true, copy read backing sectors into image. Can be >1 if more
83
+ * than one client has requested copy-on-read.
84
+ */
85
+ int copy_on_read;
86
+
87
+ /* If we are reading a disk image, give its size in sectors.
88
+ * Generally read-only; it is written to by load_vmstate and save_vmstate,
89
+ * but the block layer is quiescent during those.
90
+ */
91
+ int64_t total_sectors;
92
+
93
+ /* Callback before write request is processed */
94
+ NotifierWithReturnList before_write_notifiers;
95
+
96
+ /* number of in-flight requests; overall and serialising */
97
+ unsigned int in_flight;
98
+ unsigned int serialising_in_flight;
99
+
100
+ bool wakeup;
101
+
102
+ /* Offset after the highest byte written to */
103
+ uint64_t wr_highest_offset;
104
+
105
/* threshold limit for writes, in bytes. "High water mark". */
106
uint64_t write_threshold_offset;
107
NotifierWithReturn write_threshold_notifier;
108
@@ -XXX,XX +XXX,XX @@ struct BlockDriverState {
109
/* counter for nested bdrv_io_plug */
110
unsigned io_plugged;
111
112
+ QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;
113
+ CoQueue flush_queue; /* Serializing flush queue */
114
+ bool active_flush_req; /* Flush request in flight? */
115
+ unsigned int write_gen; /* Current data generation */
116
+ unsigned int flushed_gen; /* Flushed write generation */
117
+
118
+ QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps;
119
+
120
+ /* do we need to tell the quest if we have a volatile write cache? */
121
+ int enable_write_cache;
122
+
123
int quiesce_counter;
124
};
125
126
diff --git a/include/sysemu/block-backend.h b/include/sysemu/block-backend.h
127
index XXXXXXX..XXXXXXX 100644
128
--- a/include/sysemu/block-backend.h
129
+++ b/include/sysemu/block-backend.h
130
@@ -XXX,XX +XXX,XX @@ typedef struct BlockDevOps {
131
* fields that must be public. This is in particular for QLIST_ENTRY() and
132
* friends so that BlockBackends can be kept in lists outside block-backend.c */
133
typedef struct BlockBackendPublic {
134
- /* I/O throttling.
135
- * throttle_state tells us if this BlockBackend has I/O limits configured.
136
- * io_limits_disabled tells us if they are currently being enforced */
137
+ /* I/O throttling has its own locking, but also some fields are
138
+ * protected by the AioContext lock.
139
+ */
140
+
141
+ /* Protected by AioContext lock. */
142
CoQueue throttled_reqs[2];
143
+
144
+ /* Nonzero if the I/O limits are currently being ignored; generally
145
+ * it is zero. */
146
unsigned int io_limits_disabled;
147
148
/* The following fields are protected by the ThrottleGroup lock.
149
- * See the ThrottleGroup documentation for details. */
150
+ * See the ThrottleGroup documentation for details.
151
+ * throttle_state tells us if I/O limits are configured. */
152
ThrottleState *throttle_state;
153
ThrottleTimers throttle_timers;
154
unsigned pending_reqs[2];
155
--
156
2.9.3
157
158
From: Paolo Bonzini <pbonzini@redhat.com>

This uses the lock-free mutex described in the paper '"Blocking without
Locking", or LFTHREADS: A lock-free thread library' by Gidenstam and
Papatriantafilou. The same technique is used in OSv, and in fact
the code is essentially a conversion to C of OSv's code.

[Added missing coroutine_fn in tests/test-aio-multithread.c.
--Stefan]

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Message-id: 20170213181244.16297-2-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/qemu/coroutine.h     |  17 ++++-
 tests/test-aio-multithread.c |  86 ++++++++++++++++++++++++
 util/qemu-coroutine-lock.c   | 155 ++++++++++++++++++++++++++++++++++++++++---
 util/trace-events            |   1 +
 4 files changed, 246 insertions(+), 13 deletions(-)

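To make the scope of the change concrete, a minimal, illustrative usage sketch
of the CoMutex API whose implementation becomes thread-safe here (not taken
from the patch; it mirrors the new test case below, and the worker name is
made up):

    static CoMutex comutex;
    static int counter;

    static void coroutine_fn worker(void *opaque)
    {
        qemu_co_mutex_lock(&comutex);   /* may yield; the fast path is lock-free */
        counter++;
        qemu_co_mutex_unlock(&comutex);
    }

    /* once, during setup */
    qemu_co_mutex_init(&comutex);
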
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
23
index XXXXXXX..XXXXXXX 100644
24
--- a/include/qemu/coroutine.h
25
+++ b/include/qemu/coroutine.h
26
@@ -XXX,XX +XXX,XX @@ bool qemu_co_queue_empty(CoQueue *queue);
27
/**
28
* Provides a mutex that can be used to synchronise coroutines
29
*/
30
+struct CoWaitRecord;
31
typedef struct CoMutex {
32
- bool locked;
33
+ /* Count of pending lockers; 0 for a free mutex, 1 for an
34
+ * uncontended mutex.
35
+ */
36
+ unsigned locked;
37
+
38
+ /* A queue of waiters. Elements are added atomically in front of
39
+ * from_push. to_pop is only populated, and popped from, by whoever
40
+ * is in charge of the next wakeup. This can be an unlocker or,
41
+ * through the handoff protocol, a locker that is about to go to sleep.
42
+ */
43
+ QSLIST_HEAD(, CoWaitRecord) from_push, to_pop;
44
+
45
+ unsigned handoff, sequence;
46
+
47
Coroutine *holder;
48
- CoQueue queue;
49
} CoMutex;
50
51
/**
52
diff --git a/tests/test-aio-multithread.c b/tests/test-aio-multithread.c
53
index XXXXXXX..XXXXXXX 100644
54
--- a/tests/test-aio-multithread.c
55
+++ b/tests/test-aio-multithread.c
56
@@ -XXX,XX +XXX,XX @@ static void test_multi_co_schedule_10(void)
57
test_multi_co_schedule(10);
58
}
59
60
+/* CoMutex thread-safety. */
61
+
62
+static uint32_t atomic_counter;
63
+static uint32_t running;
64
+static uint32_t counter;
65
+static CoMutex comutex;
66
+
67
+static void coroutine_fn test_multi_co_mutex_entry(void *opaque)
68
+{
69
+ while (!atomic_mb_read(&now_stopping)) {
70
+ qemu_co_mutex_lock(&comutex);
71
+ counter++;
72
+ qemu_co_mutex_unlock(&comutex);
73
+
74
+ /* Increase atomic_counter *after* releasing the mutex. Otherwise
75
+ * there is a chance (it happens about 1 in 3 runs) that the iothread
76
+ * exits before the coroutine is woken up, causing a spurious
77
+ * assertion failure.
78
+ */
79
+ atomic_inc(&atomic_counter);
80
+ }
81
+ atomic_dec(&running);
82
+}
83
+
84
+static void test_multi_co_mutex(int threads, int seconds)
85
+{
86
+ int i;
87
+
88
+ qemu_co_mutex_init(&comutex);
89
+ counter = 0;
90
+ atomic_counter = 0;
91
+ now_stopping = false;
92
+
93
+ create_aio_contexts();
94
+ assert(threads <= NUM_CONTEXTS);
95
+ running = threads;
96
+ for (i = 0; i < threads; i++) {
97
+ Coroutine *co1 = qemu_coroutine_create(test_multi_co_mutex_entry, NULL);
98
+ aio_co_schedule(ctx[i], co1);
99
+ }
100
+
101
+ g_usleep(seconds * 1000000);
102
+
103
+ atomic_mb_set(&now_stopping, true);
104
+ while (running > 0) {
105
+ g_usleep(100000);
106
+ }
107
+
108
+ join_aio_contexts();
109
+ g_test_message("%d iterations/second\n", counter / seconds);
110
+ g_assert_cmpint(counter, ==, atomic_counter);
111
+}
112
+
113
+/* Testing with NUM_CONTEXTS threads focuses on the queue. The mutex however
114
+ * is too contended (and the threads spend too much time in aio_poll)
115
+ * to actually stress the handoff protocol.
116
+ */
117
+static void test_multi_co_mutex_1(void)
118
+{
119
+ test_multi_co_mutex(NUM_CONTEXTS, 1);
120
+}
121
+
122
+static void test_multi_co_mutex_10(void)
123
+{
124
+ test_multi_co_mutex(NUM_CONTEXTS, 10);
125
+}
126
+
127
+/* Testing with fewer threads stresses the handoff protocol too. Still, the
128
+ * case where the locker _can_ pick up a handoff is very rare, happening
129
+ * about 10 times in 1 million, so increase the runtime a bit compared to
130
+ * other "quick" testcases that only run for 1 second.
131
+ */
132
+static void test_multi_co_mutex_2_3(void)
133
+{
134
+ test_multi_co_mutex(2, 3);
135
+}
136
+
137
+static void test_multi_co_mutex_2_30(void)
138
+{
139
+ test_multi_co_mutex(2, 30);
140
+}
141
+
142
/* End of tests. */
143
144
int main(int argc, char **argv)
145
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
146
g_test_add_func("/aio/multi/lifecycle", test_lifecycle);
147
if (g_test_quick()) {
148
g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_1);
149
+ g_test_add_func("/aio/multi/mutex/contended", test_multi_co_mutex_1);
150
+ g_test_add_func("/aio/multi/mutex/handoff", test_multi_co_mutex_2_3);
151
} else {
152
g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_10);
153
+ g_test_add_func("/aio/multi/mutex/contended", test_multi_co_mutex_10);
154
+ g_test_add_func("/aio/multi/mutex/handoff", test_multi_co_mutex_2_30);
155
}
156
return g_test_run();
157
}
158
diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
159
index XXXXXXX..XXXXXXX 100644
160
--- a/util/qemu-coroutine-lock.c
161
+++ b/util/qemu-coroutine-lock.c
162
@@ -XXX,XX +XXX,XX @@
163
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
164
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
165
* THE SOFTWARE.
166
+ *
167
+ * The lock-free mutex implementation is based on OSv
168
+ * (core/lfmutex.cc, include/lockfree/mutex.hh).
169
+ * Copyright (C) 2013 Cloudius Systems, Ltd.
170
*/
171
172
#include "qemu/osdep.h"
173
@@ -XXX,XX +XXX,XX @@ bool qemu_co_queue_empty(CoQueue *queue)
174
return QSIMPLEQ_FIRST(&queue->entries) == NULL;
175
}
176
177
+/* The wait records are handled with a multiple-producer, single-consumer
178
+ * lock-free queue. There cannot be two concurrent pop_waiter() calls
179
+ * because pop_waiter() can only be called while mutex->handoff is zero.
180
+ * This can happen in three cases:
181
+ * - in qemu_co_mutex_unlock, before the hand-off protocol has started.
182
+ * In this case, qemu_co_mutex_lock will see mutex->handoff == 0 and
183
+ * not take part in the handoff.
184
+ * - in qemu_co_mutex_lock, if it steals the hand-off responsibility from
185
+ * qemu_co_mutex_unlock. In this case, qemu_co_mutex_unlock will fail
186
+ * the cmpxchg (it will see either 0 or the next sequence value) and
187
+ * exit. The next hand-off cannot begin until qemu_co_mutex_lock has
188
+ * woken up someone.
189
+ * - in qemu_co_mutex_unlock, if it takes the hand-off token itself.
190
+ * In this case another iteration starts with mutex->handoff == 0;
191
+ * a concurrent qemu_co_mutex_lock will fail the cmpxchg, and
192
+ * qemu_co_mutex_unlock will go back to case (1).
193
+ *
194
+ * The following functions manage this queue.
195
+ */
196
+typedef struct CoWaitRecord {
197
+ Coroutine *co;
198
+ QSLIST_ENTRY(CoWaitRecord) next;
199
+} CoWaitRecord;
200
+
201
+static void push_waiter(CoMutex *mutex, CoWaitRecord *w)
202
+{
203
+ w->co = qemu_coroutine_self();
204
+ QSLIST_INSERT_HEAD_ATOMIC(&mutex->from_push, w, next);
205
+}
206
+
207
+static void move_waiters(CoMutex *mutex)
208
+{
209
+ QSLIST_HEAD(, CoWaitRecord) reversed;
210
+ QSLIST_MOVE_ATOMIC(&reversed, &mutex->from_push);
211
+ while (!QSLIST_EMPTY(&reversed)) {
212
+ CoWaitRecord *w = QSLIST_FIRST(&reversed);
213
+ QSLIST_REMOVE_HEAD(&reversed, next);
214
+ QSLIST_INSERT_HEAD(&mutex->to_pop, w, next);
215
+ }
216
+}
217
+
218
+static CoWaitRecord *pop_waiter(CoMutex *mutex)
219
+{
220
+ CoWaitRecord *w;
221
+
222
+ if (QSLIST_EMPTY(&mutex->to_pop)) {
223
+ move_waiters(mutex);
224
+ if (QSLIST_EMPTY(&mutex->to_pop)) {
225
+ return NULL;
226
+ }
227
+ }
228
+ w = QSLIST_FIRST(&mutex->to_pop);
229
+ QSLIST_REMOVE_HEAD(&mutex->to_pop, next);
230
+ return w;
231
+}
232
+
233
+static bool has_waiters(CoMutex *mutex)
234
+{
235
+ return QSLIST_EMPTY(&mutex->to_pop) || QSLIST_EMPTY(&mutex->from_push);
236
+}
237
+
238
void qemu_co_mutex_init(CoMutex *mutex)
239
{
240
memset(mutex, 0, sizeof(*mutex));
241
- qemu_co_queue_init(&mutex->queue);
242
}
243
244
-void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
245
+static void coroutine_fn qemu_co_mutex_lock_slowpath(CoMutex *mutex)
246
{
247
Coroutine *self = qemu_coroutine_self();
248
+ CoWaitRecord w;
249
+ unsigned old_handoff;
250
251
trace_qemu_co_mutex_lock_entry(mutex, self);
252
+ w.co = self;
253
+ push_waiter(mutex, &w);
254
255
- while (mutex->locked) {
256
- qemu_co_queue_wait(&mutex->queue);
257
+ /* This is the "Responsibility Hand-Off" protocol; a lock() picks from
258
+ * a concurrent unlock() the responsibility of waking somebody up.
259
+ */
260
+ old_handoff = atomic_mb_read(&mutex->handoff);
261
+ if (old_handoff &&
262
+ has_waiters(mutex) &&
263
+ atomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) {
264
+ /* There can be no concurrent pops, because there can be only
265
+ * one active handoff at a time.
266
+ */
267
+ CoWaitRecord *to_wake = pop_waiter(mutex);
268
+ Coroutine *co = to_wake->co;
269
+ if (co == self) {
270
+ /* We got the lock ourselves! */
271
+ assert(to_wake == &w);
272
+ return;
273
+ }
274
+
275
+ aio_co_wake(co);
276
}
277
278
- mutex->locked = true;
279
- mutex->holder = self;
280
- self->locks_held++;
281
-
282
+ qemu_coroutine_yield();
283
trace_qemu_co_mutex_lock_return(mutex, self);
284
}
285
286
+void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
287
+{
288
+ Coroutine *self = qemu_coroutine_self();
289
+
290
+ if (atomic_fetch_inc(&mutex->locked) == 0) {
291
+ /* Uncontended. */
292
+ trace_qemu_co_mutex_lock_uncontended(mutex, self);
293
+ } else {
294
+ qemu_co_mutex_lock_slowpath(mutex);
295
+ }
296
+ mutex->holder = self;
297
+ self->locks_held++;
298
+}
299
+
300
void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
301
{
302
Coroutine *self = qemu_coroutine_self();
303
304
trace_qemu_co_mutex_unlock_entry(mutex, self);
305
306
- assert(mutex->locked == true);
307
+ assert(mutex->locked);
308
assert(mutex->holder == self);
309
assert(qemu_in_coroutine());
310
311
- mutex->locked = false;
312
mutex->holder = NULL;
313
self->locks_held--;
314
- qemu_co_queue_next(&mutex->queue);
315
+ if (atomic_fetch_dec(&mutex->locked) == 1) {
316
+ /* No waiting qemu_co_mutex_lock(). Pfew, that was easy! */
317
+ return;
318
+ }
319
+
320
+ for (;;) {
321
+ CoWaitRecord *to_wake = pop_waiter(mutex);
322
+ unsigned our_handoff;
323
+
324
+ if (to_wake) {
325
+ Coroutine *co = to_wake->co;
326
+ aio_co_wake(co);
327
+ break;
328
+ }
329
+
330
+ /* Some concurrent lock() is in progress (we know this because
331
+ * mutex->locked was >1) but it hasn't yet put itself on the wait
332
+ * queue. Pick a sequence number for the handoff protocol (not 0).
333
+ */
334
+ if (++mutex->sequence == 0) {
335
+ mutex->sequence = 1;
336
+ }
337
+
338
+ our_handoff = mutex->sequence;
339
+ atomic_mb_set(&mutex->handoff, our_handoff);
340
+ if (!has_waiters(mutex)) {
341
+ /* The concurrent lock has not added itself yet, so it
342
+ * will be able to pick our handoff.
343
+ */
344
+ break;
345
+ }
346
+
347
+ /* Try to do the handoff protocol ourselves; if somebody else has
348
+ * already taken it, however, we're done and they're responsible.
349
+ */
350
+ if (atomic_cmpxchg(&mutex->handoff, our_handoff, 0) != our_handoff) {
351
+ break;
352
+ }
353
+ }
354
355
trace_qemu_co_mutex_unlock_return(mutex, self);
356
}
357
diff --git a/util/trace-events b/util/trace-events
358
index XXXXXXX..XXXXXXX 100644
359
--- a/util/trace-events
360
+++ b/util/trace-events
361
@@ -XXX,XX +XXX,XX @@ qemu_coroutine_terminate(void *co) "self %p"
362
363
# util/qemu-coroutine-lock.c
364
qemu_co_queue_run_restart(void *co) "co %p"
365
+qemu_co_mutex_lock_uncontended(void *mutex, void *self) "mutex %p self %p"
366
qemu_co_mutex_lock_entry(void *mutex, void *self) "mutex %p self %p"
367
qemu_co_mutex_lock_return(void *mutex, void *self) "mutex %p self %p"
368
qemu_co_mutex_unlock_entry(void *mutex, void *self) "mutex %p self %p"
369
--
370
2.9.3
371
372
diff view generated by jsdifflib
1
From: Fam Zheng <famz@redhat.com>
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
Signed-off-by: Fam Zheng <famz@redhat.com>
3
Running a very small critical section on pthread_mutex_t and CoMutex
4
Message-id: 20180226030326.20219-3-famz@redhat.com
4
shows that pthread_mutex_t is much faster because it doesn't actually
5
go to sleep. What happens is that the critical section is shorter
6
than the latency of entering the kernel and thus FUTEX_WAIT always
7
fails. With CoMutex there is no such latency but you still want to
8
avoid wait and wakeup. So introduce it artificially.
9
10
This only works with one waiter; because CoMutex is fair, it will
11
always have more waits and wakeups than a pthread_mutex_t.
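Condensed from the diff below, the heuristic is a bounded spin before falling back to the usual wait/wakeup; the mutex->ctx check is what keeps a coroutine from spinning on a holder that runs in its own AioContext (and therefore cannot release the lock while we busy-wait):

    /* Sketch of the added fast path (simplified from the patch): */
    i = 0;
retry_fast_path:
    waiters = atomic_cmpxchg(&mutex->locked, 0, 1);
    if (waiters != 0) {
        while (waiters == 1 && ++i < 1000) {
            if (atomic_read(&mutex->ctx) == ctx) {
                break;                    /* holder shares our AioContext: don't spin */
            }
            if (atomic_read(&mutex->locked) == 0) {
                goto retry_fast_path;     /* released while we were spinning */
            }
            cpu_relax();
        }
        waiters = atomic_fetch_inc(&mutex->locked);
    }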
12
13
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
14
Reviewed-by: Fam Zheng <famz@redhat.com>
15
Message-id: 20170213181244.16297-3-pbonzini@redhat.com
5
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
16
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
6
---
17
---
7
README | 31 ++++++++++++++++++++++++++++++-
18
include/qemu/coroutine.h | 5 +++++
8
1 file changed, 30 insertions(+), 1 deletion(-)
19
util/qemu-coroutine-lock.c | 51 ++++++++++++++++++++++++++++++++++++++++------
20
util/qemu-coroutine.c | 2 +-
21
3 files changed, 51 insertions(+), 7 deletions(-)
9
22
10
diff --git a/README b/README
23
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
11
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
12
--- a/README
25
--- a/include/qemu/coroutine.h
13
+++ b/README
26
+++ b/include/qemu/coroutine.h
14
@@ -XXX,XX +XXX,XX @@ The QEMU source code is maintained under the GIT version control system.
27
@@ -XXX,XX +XXX,XX @@ typedef struct CoMutex {
15
28
*/
16
git clone git://git.qemu.org/qemu.git
29
unsigned locked;
17
30
18
-When submitting patches, the preferred approach is to use 'git
31
+ /* Context that is holding the lock. Useful to avoid spinning
19
+When submitting patches, one common approach is to use 'git
32
+ * when two coroutines on the same AioContext try to get the lock. :)
20
format-patch' and/or 'git send-email' to format & send the mail to the
33
+ */
21
qemu-devel@nongnu.org mailing list. All patches submitted must contain
34
+ AioContext *ctx;
22
a 'Signed-off-by' line from the author. Patches should follow the
23
@@ -XXX,XX +XXX,XX @@ The QEMU website is also maintained under source control.
24
git clone git://git.qemu.org/qemu-web.git
25
https://www.qemu.org/2017/02/04/the-new-qemu-website-is-up/
26
27
+A 'git-publish' utility was created to make the above process less
28
+cumbersome, and is highly recommended for making regular contributions,
29
+or even just for sending consecutive patch series revisions. It also
30
+requires a working 'git send-email' setup, and by default doesn't
31
+automate everything, so you may want to go through the above steps
32
+manually once.
33
+
35
+
34
+For installation instructions, please go to
36
/* A queue of waiters. Elements are added atomically in front of
37
* from_push. to_pop is only populated, and popped from, by whoever
38
* is in charge of the next wakeup. This can be an unlocker or,
39
diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
40
index XXXXXXX..XXXXXXX 100644
41
--- a/util/qemu-coroutine-lock.c
42
+++ b/util/qemu-coroutine-lock.c
43
@@ -XXX,XX +XXX,XX @@
44
#include "qemu-common.h"
45
#include "qemu/coroutine.h"
46
#include "qemu/coroutine_int.h"
47
+#include "qemu/processor.h"
48
#include "qemu/queue.h"
49
#include "block/aio.h"
50
#include "trace.h"
51
@@ -XXX,XX +XXX,XX @@ void qemu_co_mutex_init(CoMutex *mutex)
52
memset(mutex, 0, sizeof(*mutex));
53
}
54
55
-static void coroutine_fn qemu_co_mutex_lock_slowpath(CoMutex *mutex)
56
+static void coroutine_fn qemu_co_mutex_wake(CoMutex *mutex, Coroutine *co)
57
+{
58
+ /* Read co before co->ctx; pairs with smp_wmb() in
59
+ * qemu_coroutine_enter().
60
+ */
61
+ smp_read_barrier_depends();
62
+ mutex->ctx = co->ctx;
63
+ aio_co_wake(co);
64
+}
35
+
65
+
36
+ https://github.com/stefanha/git-publish
66
+static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx,
67
+ CoMutex *mutex)
68
{
69
Coroutine *self = qemu_coroutine_self();
70
CoWaitRecord w;
71
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn qemu_co_mutex_lock_slowpath(CoMutex *mutex)
72
if (co == self) {
73
/* We got the lock ourselves! */
74
assert(to_wake == &w);
75
+ mutex->ctx = ctx;
76
return;
77
}
78
79
- aio_co_wake(co);
80
+ qemu_co_mutex_wake(mutex, co);
81
}
82
83
qemu_coroutine_yield();
84
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn qemu_co_mutex_lock_slowpath(CoMutex *mutex)
85
86
void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
87
{
88
+ AioContext *ctx = qemu_get_current_aio_context();
89
Coroutine *self = qemu_coroutine_self();
90
+ int waiters, i;
91
92
- if (atomic_fetch_inc(&mutex->locked) == 0) {
93
+ /* Running a very small critical section on pthread_mutex_t and CoMutex
94
+ * shows that pthread_mutex_t is much faster because it doesn't actually
95
+ * go to sleep. What happens is that the critical section is shorter
96
+ * than the latency of entering the kernel and thus FUTEX_WAIT always
97
+ * fails. With CoMutex there is no such latency but you still want to
98
+ * avoid wait and wakeup. So introduce it artificially.
99
+ */
100
+ i = 0;
101
+retry_fast_path:
102
+ waiters = atomic_cmpxchg(&mutex->locked, 0, 1);
103
+ if (waiters != 0) {
104
+ while (waiters == 1 && ++i < 1000) {
105
+ if (atomic_read(&mutex->ctx) == ctx) {
106
+ break;
107
+ }
108
+ if (atomic_read(&mutex->locked) == 0) {
109
+ goto retry_fast_path;
110
+ }
111
+ cpu_relax();
112
+ }
113
+ waiters = atomic_fetch_inc(&mutex->locked);
114
+ }
37
+
115
+
38
+The workflow with 'git-publish' is:
116
+ if (waiters == 0) {
39
+
117
/* Uncontended. */
40
+ $ git checkout master -b my-feature
118
trace_qemu_co_mutex_lock_uncontended(mutex, self);
41
+ $ # work on new commits, add your 'Signed-off-by' lines to each
119
+ mutex->ctx = ctx;
42
+ $ git publish
120
} else {
43
+
121
- qemu_co_mutex_lock_slowpath(mutex);
44
+Your patch series will be sent and tagged as my-feature-v1, in case you need to refer
122
+ qemu_co_mutex_lock_slowpath(ctx, mutex);
45
+back to it in the future.
123
}
46
+
124
mutex->holder = self;
47
+Sending v2:
125
self->locks_held++;
48
+
126
@@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
49
+ $ git checkout my-feature # same topic branch
127
assert(mutex->holder == self);
50
+ $ # making changes to the commits (using 'git rebase', for example)
128
assert(qemu_in_coroutine());
51
+ $ git publish
129
52
+
130
+ mutex->ctx = NULL;
53
+Your patch series will be sent with a 'v2' tag in the subject and the git tip
131
mutex->holder = NULL;
54
+will be tagged as my-feature-v2.
132
self->locks_held--;
55
+
133
if (atomic_fetch_dec(&mutex->locked) == 1) {
56
Bug reporting
134
@@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
57
=============
135
unsigned our_handoff;
136
137
if (to_wake) {
138
- Coroutine *co = to_wake->co;
139
- aio_co_wake(co);
140
+ qemu_co_mutex_wake(mutex, to_wake->co);
141
break;
142
}
143
144
diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c
145
index XXXXXXX..XXXXXXX 100644
146
--- a/util/qemu-coroutine.c
147
+++ b/util/qemu-coroutine.c
148
@@ -XXX,XX +XXX,XX @@ void qemu_coroutine_enter(Coroutine *co)
149
co->ctx = qemu_get_current_aio_context();
150
151
/* Store co->ctx before anything that stores co. Matches
152
- * barrier in aio_co_wake.
153
+ * barrier in aio_co_wake and qemu_co_mutex_wake.
154
*/
155
smp_wmb();
58
156
59
--
157
--
60
2.14.3
158
2.9.3
61
159
62
160
diff view generated by jsdifflib
New patch
1
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
3
Add two implementations of the same benchmark as the previous patch,
4
but using pthreads. One uses a normal QemuMutex; the other is Linux-only
5
and implements a fair mutex based on MCS locks and futexes.
6
This shows that the slower performance of the 5-thread case is due to
7
the fairness of CoMutex, rather than to coroutines. If fairness does
8
not matter, as is the case with two threads, CoMutex can actually be
9
faster than pthreads.
10
11
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
12
Reviewed-by: Fam Zheng <famz@redhat.com>
13
Message-id: 20170213181244.16297-4-pbonzini@redhat.com
14
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
15
---
16
tests/test-aio-multithread.c | 164 +++++++++++++++++++++++++++++++++++++++++++
17
1 file changed, 164 insertions(+)
18
19
diff --git a/tests/test-aio-multithread.c b/tests/test-aio-multithread.c
20
index XXXXXXX..XXXXXXX 100644
21
--- a/tests/test-aio-multithread.c
22
+++ b/tests/test-aio-multithread.c
23
@@ -XXX,XX +XXX,XX @@ static void test_multi_co_mutex_2_30(void)
24
test_multi_co_mutex(2, 30);
25
}
26
27
+/* Same test with fair mutexes, for performance comparison. */
28
+
29
+#ifdef CONFIG_LINUX
30
+#include "qemu/futex.h"
31
+
32
+/* The nodes for the mutex reside in this structure (on which we try to avoid
33
+ * false sharing). The head of the mutex is in the "mutex_head" variable.
34
+ */
35
+static struct {
36
+ int next, locked;
37
+ int padding[14];
38
+} nodes[NUM_CONTEXTS] __attribute__((__aligned__(64)));
39
+
40
+static int mutex_head = -1;
41
+
42
+static void mcs_mutex_lock(void)
43
+{
44
+ int prev;
45
+
46
+ nodes[id].next = -1;
47
+ nodes[id].locked = 1;
48
+ prev = atomic_xchg(&mutex_head, id);
49
+ if (prev != -1) {
50
+ atomic_set(&nodes[prev].next, id);
51
+ qemu_futex_wait(&nodes[id].locked, 1);
52
+ }
53
+}
54
+
55
+static void mcs_mutex_unlock(void)
56
+{
57
+ int next;
58
+ if (nodes[id].next == -1) {
59
+ if (atomic_read(&mutex_head) == id &&
60
+ atomic_cmpxchg(&mutex_head, id, -1) == id) {
61
+ /* Last item in the list, exit. */
62
+ return;
63
+ }
64
+ while (atomic_read(&nodes[id].next) == -1) {
65
+ /* mcs_mutex_lock did the xchg, but has not updated
66
+ * nodes[prev].next yet.
67
+ */
68
+ }
69
+ }
70
+
71
+ /* Wake up the next in line. */
72
+ next = nodes[id].next;
73
+ nodes[next].locked = 0;
74
+ qemu_futex_wake(&nodes[next].locked, 1);
75
+}
76
+
77
+static void test_multi_fair_mutex_entry(void *opaque)
78
+{
79
+ while (!atomic_mb_read(&now_stopping)) {
80
+ mcs_mutex_lock();
81
+ counter++;
82
+ mcs_mutex_unlock();
83
+ atomic_inc(&atomic_counter);
84
+ }
85
+ atomic_dec(&running);
86
+}
87
+
88
+static void test_multi_fair_mutex(int threads, int seconds)
89
+{
90
+ int i;
91
+
92
+ assert(mutex_head == -1);
93
+ counter = 0;
94
+ atomic_counter = 0;
95
+ now_stopping = false;
96
+
97
+ create_aio_contexts();
98
+ assert(threads <= NUM_CONTEXTS);
99
+ running = threads;
100
+ for (i = 0; i < threads; i++) {
101
+ Coroutine *co1 = qemu_coroutine_create(test_multi_fair_mutex_entry, NULL);
102
+ aio_co_schedule(ctx[i], co1);
103
+ }
104
+
105
+ g_usleep(seconds * 1000000);
106
+
107
+ atomic_mb_set(&now_stopping, true);
108
+ while (running > 0) {
109
+ g_usleep(100000);
110
+ }
111
+
112
+ join_aio_contexts();
113
+ g_test_message("%d iterations/second\n", counter / seconds);
114
+ g_assert_cmpint(counter, ==, atomic_counter);
115
+}
116
+
117
+static void test_multi_fair_mutex_1(void)
118
+{
119
+ test_multi_fair_mutex(NUM_CONTEXTS, 1);
120
+}
121
+
122
+static void test_multi_fair_mutex_10(void)
123
+{
124
+ test_multi_fair_mutex(NUM_CONTEXTS, 10);
125
+}
126
+#endif
127
+
128
+/* Same test with pthread mutexes, for performance comparison and
129
+ * portability. */
130
+
131
+static QemuMutex mutex;
132
+
133
+static void test_multi_mutex_entry(void *opaque)
134
+{
135
+ while (!atomic_mb_read(&now_stopping)) {
136
+ qemu_mutex_lock(&mutex);
137
+ counter++;
138
+ qemu_mutex_unlock(&mutex);
139
+ atomic_inc(&atomic_counter);
140
+ }
141
+ atomic_dec(&running);
142
+}
143
+
144
+static void test_multi_mutex(int threads, int seconds)
145
+{
146
+ int i;
147
+
148
+ qemu_mutex_init(&mutex);
149
+ counter = 0;
150
+ atomic_counter = 0;
151
+ now_stopping = false;
152
+
153
+ create_aio_contexts();
154
+ assert(threads <= NUM_CONTEXTS);
155
+ running = threads;
156
+ for (i = 0; i < threads; i++) {
157
+ Coroutine *co1 = qemu_coroutine_create(test_multi_mutex_entry, NULL);
158
+ aio_co_schedule(ctx[i], co1);
159
+ }
160
+
161
+ g_usleep(seconds * 1000000);
162
+
163
+ atomic_mb_set(&now_stopping, true);
164
+ while (running > 0) {
165
+ g_usleep(100000);
166
+ }
167
+
168
+ join_aio_contexts();
169
+ g_test_message("%d iterations/second\n", counter / seconds);
170
+ g_assert_cmpint(counter, ==, atomic_counter);
171
+}
172
+
173
+static void test_multi_mutex_1(void)
174
+{
175
+ test_multi_mutex(NUM_CONTEXTS, 1);
176
+}
177
+
178
+static void test_multi_mutex_10(void)
179
+{
180
+ test_multi_mutex(NUM_CONTEXTS, 10);
181
+}
182
+
183
/* End of tests. */
184
185
int main(int argc, char **argv)
186
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
187
g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_1);
188
g_test_add_func("/aio/multi/mutex/contended", test_multi_co_mutex_1);
189
g_test_add_func("/aio/multi/mutex/handoff", test_multi_co_mutex_2_3);
190
+#ifdef CONFIG_LINUX
191
+ g_test_add_func("/aio/multi/mutex/mcs", test_multi_fair_mutex_1);
192
+#endif
193
+ g_test_add_func("/aio/multi/mutex/pthread", test_multi_mutex_1);
194
} else {
195
g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_10);
196
g_test_add_func("/aio/multi/mutex/contended", test_multi_co_mutex_10);
197
g_test_add_func("/aio/multi/mutex/handoff", test_multi_co_mutex_2_30);
198
+#ifdef CONFIG_LINUX
199
+ g_test_add_func("/aio/multi/mutex/mcs", test_multi_fair_mutex_10);
200
+#endif
201
+ g_test_add_func("/aio/multi/mutex/pthread", test_multi_mutex_10);
202
}
203
return g_test_run();
204
}
205
--
206
2.9.3
207
208
diff view generated by jsdifflib
New patch
1
From: Paolo Bonzini <pbonzini@redhat.com>
1
2
3
This will avoid forward references in the next patch. It is also
4
more logical because CoQueue is no longer the basic primitive.
5
6
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
7
Reviewed-by: Fam Zheng <famz@redhat.com>
8
Message-id: 20170213181244.16297-5-pbonzini@redhat.com
9
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
10
---
11
include/qemu/coroutine.h | 89 ++++++++++++++++++++++++------------------------
12
1 file changed, 44 insertions(+), 45 deletions(-)
13
14
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/include/qemu/coroutine.h
17
+++ b/include/qemu/coroutine.h
18
@@ -XXX,XX +XXX,XX @@ bool qemu_in_coroutine(void);
19
*/
20
bool qemu_coroutine_entered(Coroutine *co);
21
22
-
23
-/**
24
- * CoQueues are a mechanism to queue coroutines in order to continue executing
25
- * them later. They provide the fundamental primitives on which coroutine locks
26
- * are built.
27
- */
28
-typedef struct CoQueue {
29
- QSIMPLEQ_HEAD(, Coroutine) entries;
30
-} CoQueue;
31
-
32
-/**
33
- * Initialise a CoQueue. This must be called before any other operation is used
34
- * on the CoQueue.
35
- */
36
-void qemu_co_queue_init(CoQueue *queue);
37
-
38
-/**
39
- * Adds the current coroutine to the CoQueue and transfers control to the
40
- * caller of the coroutine.
41
- */
42
-void coroutine_fn qemu_co_queue_wait(CoQueue *queue);
43
-
44
-/**
45
- * Restarts the next coroutine in the CoQueue and removes it from the queue.
46
- *
47
- * Returns true if a coroutine was restarted, false if the queue is empty.
48
- */
49
-bool coroutine_fn qemu_co_queue_next(CoQueue *queue);
50
-
51
-/**
52
- * Restarts all coroutines in the CoQueue and leaves the queue empty.
53
- */
54
-void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue);
55
-
56
-/**
57
- * Enter the next coroutine in the queue
58
- */
59
-bool qemu_co_enter_next(CoQueue *queue);
60
-
61
-/**
62
- * Checks if the CoQueue is empty.
63
- */
64
-bool qemu_co_queue_empty(CoQueue *queue);
65
-
66
-
67
/**
68
* Provides a mutex that can be used to synchronise coroutines
69
*/
70
@@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex);
71
*/
72
void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex);
73
74
+
75
+/**
76
+ * CoQueues are a mechanism to queue coroutines in order to continue executing
77
+ * them later.
78
+ */
79
+typedef struct CoQueue {
80
+ QSIMPLEQ_HEAD(, Coroutine) entries;
81
+} CoQueue;
82
+
83
+/**
84
+ * Initialise a CoQueue. This must be called before any other operation is used
85
+ * on the CoQueue.
86
+ */
87
+void qemu_co_queue_init(CoQueue *queue);
88
+
89
+/**
90
+ * Adds the current coroutine to the CoQueue and transfers control to the
91
+ * caller of the coroutine.
92
+ */
93
+void coroutine_fn qemu_co_queue_wait(CoQueue *queue);
94
+
95
+/**
96
+ * Restarts the next coroutine in the CoQueue and removes it from the queue.
97
+ *
98
+ * Returns true if a coroutine was restarted, false if the queue is empty.
99
+ */
100
+bool coroutine_fn qemu_co_queue_next(CoQueue *queue);
101
+
102
+/**
103
+ * Restarts all coroutines in the CoQueue and leaves the queue empty.
104
+ */
105
+void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue);
106
+
107
+/**
108
+ * Enter the next coroutine in the queue
109
+ */
110
+bool qemu_co_enter_next(CoQueue *queue);
111
+
112
+/**
113
+ * Checks if the CoQueue is empty.
114
+ */
115
+bool qemu_co_queue_empty(CoQueue *queue);
116
+
117
+
118
typedef struct CoRwlock {
119
bool writer;
120
int reader;
121
--
122
2.9.3
123
124
diff view generated by jsdifflib
New patch
1
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
3
All that CoQueue needs in order to become thread-safe is help
4
from an external mutex. Add this to the API.
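A minimal usage sketch of the new contract (hypothetical names; compare the qcow2 conversion below, which passes the image lock instead of NULL): the CoMutex protects the condition, and qemu_co_queue_wait() releases it while the coroutine sleeps and re-acquires it on wakeup, like a condition variable.

    /* Hypothetical example; 'lock', 'waiters' and 'resource_busy' are for
     * illustration only and must be set up with qemu_co_mutex_init() and
     * qemu_co_queue_init() before use. */
    static CoMutex lock;
    static CoQueue waiters;
    static bool resource_busy;

    static void coroutine_fn acquire_resource(void)
    {
        qemu_co_mutex_lock(&lock);
        while (resource_busy) {
            qemu_co_queue_wait(&waiters, &lock);    /* unlock, yield, relock */
        }
        resource_busy = true;
        qemu_co_mutex_unlock(&lock);
    }

    static void coroutine_fn release_resource(void)
    {
        qemu_co_mutex_lock(&lock);
        resource_busy = false;
        qemu_co_queue_restart_all(&waiters);
        qemu_co_mutex_unlock(&lock);
    }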
5
6
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
7
Reviewed-by: Fam Zheng <famz@redhat.com>
8
Message-id: 20170213181244.16297-6-pbonzini@redhat.com
9
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
10
---
11
include/qemu/coroutine.h | 8 +++++---
12
block/backup.c | 2 +-
13
block/io.c | 4 ++--
14
block/nbd-client.c | 2 +-
15
block/qcow2-cluster.c | 4 +---
16
block/sheepdog.c | 2 +-
17
block/throttle-groups.c | 2 +-
18
hw/9pfs/9p.c | 2 +-
19
util/qemu-coroutine-lock.c | 24 +++++++++++++++++++++---
20
9 files changed, 34 insertions(+), 16 deletions(-)
21
22
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
23
index XXXXXXX..XXXXXXX 100644
24
--- a/include/qemu/coroutine.h
25
+++ b/include/qemu/coroutine.h
26
@@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex);
27
28
/**
29
* CoQueues are a mechanism to queue coroutines in order to continue executing
30
- * them later.
31
+ * them later. They are similar to condition variables, but they need help
32
+ * from an external mutex in order to maintain thread-safety.
33
*/
34
typedef struct CoQueue {
35
QSIMPLEQ_HEAD(, Coroutine) entries;
36
@@ -XXX,XX +XXX,XX @@ void qemu_co_queue_init(CoQueue *queue);
37
38
/**
39
* Adds the current coroutine to the CoQueue and transfers control to the
40
- * caller of the coroutine.
41
+ * caller of the coroutine. The mutex is unlocked during the wait and
42
+ * locked again afterwards.
43
*/
44
-void coroutine_fn qemu_co_queue_wait(CoQueue *queue);
45
+void coroutine_fn qemu_co_queue_wait(CoQueue *queue, CoMutex *mutex);
46
47
/**
48
* Restarts the next coroutine in the CoQueue and removes it from the queue.
49
diff --git a/block/backup.c b/block/backup.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/block/backup.c
52
+++ b/block/backup.c
53
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
54
retry = false;
55
QLIST_FOREACH(req, &job->inflight_reqs, list) {
56
if (end > req->start && start < req->end) {
57
- qemu_co_queue_wait(&req->wait_queue);
58
+ qemu_co_queue_wait(&req->wait_queue, NULL);
59
retry = true;
60
break;
61
}
62
diff --git a/block/io.c b/block/io.c
63
index XXXXXXX..XXXXXXX 100644
64
--- a/block/io.c
65
+++ b/block/io.c
66
@@ -XXX,XX +XXX,XX @@ static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
67
* (instead of producing a deadlock in the former case). */
68
if (!req->waiting_for) {
69
self->waiting_for = req;
70
- qemu_co_queue_wait(&req->wait_queue);
71
+ qemu_co_queue_wait(&req->wait_queue, NULL);
72
self->waiting_for = NULL;
73
retry = true;
74
waited = true;
75
@@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
76
77
/* Wait until any previous flushes are completed */
78
while (bs->active_flush_req) {
79
- qemu_co_queue_wait(&bs->flush_queue);
80
+ qemu_co_queue_wait(&bs->flush_queue, NULL);
81
}
82
83
bs->active_flush_req = true;
84
diff --git a/block/nbd-client.c b/block/nbd-client.c
85
index XXXXXXX..XXXXXXX 100644
86
--- a/block/nbd-client.c
87
+++ b/block/nbd-client.c
88
@@ -XXX,XX +XXX,XX @@ static void nbd_coroutine_start(NBDClientSession *s,
89
/* Poor man semaphore. The free_sema is locked when no other request
90
* can be accepted, and unlocked after receiving one reply. */
91
if (s->in_flight == MAX_NBD_REQUESTS) {
92
- qemu_co_queue_wait(&s->free_sema);
93
+ qemu_co_queue_wait(&s->free_sema, NULL);
94
assert(s->in_flight < MAX_NBD_REQUESTS);
95
}
96
s->in_flight++;
97
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
98
index XXXXXXX..XXXXXXX 100644
99
--- a/block/qcow2-cluster.c
100
+++ b/block/qcow2-cluster.c
101
@@ -XXX,XX +XXX,XX @@ static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
102
if (bytes == 0) {
103
/* Wait for the dependency to complete. We need to recheck
104
* the free/allocated clusters when we continue. */
105
- qemu_co_mutex_unlock(&s->lock);
106
- qemu_co_queue_wait(&old_alloc->dependent_requests);
107
- qemu_co_mutex_lock(&s->lock);
108
+ qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
109
return -EAGAIN;
110
}
111
}
112
diff --git a/block/sheepdog.c b/block/sheepdog.c
113
index XXXXXXX..XXXXXXX 100644
114
--- a/block/sheepdog.c
115
+++ b/block/sheepdog.c
116
@@ -XXX,XX +XXX,XX @@ static void wait_for_overlapping_aiocb(BDRVSheepdogState *s, SheepdogAIOCB *acb)
117
retry:
118
QLIST_FOREACH(cb, &s->inflight_aiocb_head, aiocb_siblings) {
119
if (AIOCBOverlapping(acb, cb)) {
120
- qemu_co_queue_wait(&s->overlapping_queue);
121
+ qemu_co_queue_wait(&s->overlapping_queue, NULL);
122
goto retry;
123
}
124
}
125
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
126
index XXXXXXX..XXXXXXX 100644
127
--- a/block/throttle-groups.c
128
+++ b/block/throttle-groups.c
129
@@ -XXX,XX +XXX,XX @@ void coroutine_fn throttle_group_co_io_limits_intercept(BlockBackend *blk,
130
if (must_wait || blkp->pending_reqs[is_write]) {
131
blkp->pending_reqs[is_write]++;
132
qemu_mutex_unlock(&tg->lock);
133
- qemu_co_queue_wait(&blkp->throttled_reqs[is_write]);
134
+ qemu_co_queue_wait(&blkp->throttled_reqs[is_write], NULL);
135
qemu_mutex_lock(&tg->lock);
136
blkp->pending_reqs[is_write]--;
137
}
138
diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
139
index XXXXXXX..XXXXXXX 100644
140
--- a/hw/9pfs/9p.c
141
+++ b/hw/9pfs/9p.c
142
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn v9fs_flush(void *opaque)
143
/*
144
* Wait for pdu to complete.
145
*/
146
- qemu_co_queue_wait(&cancel_pdu->complete);
147
+ qemu_co_queue_wait(&cancel_pdu->complete, NULL);
148
cancel_pdu->cancelled = 0;
149
pdu_free(cancel_pdu);
150
}
151
diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
152
index XXXXXXX..XXXXXXX 100644
153
--- a/util/qemu-coroutine-lock.c
154
+++ b/util/qemu-coroutine-lock.c
155
@@ -XXX,XX +XXX,XX @@ void qemu_co_queue_init(CoQueue *queue)
156
QSIMPLEQ_INIT(&queue->entries);
157
}
158
159
-void coroutine_fn qemu_co_queue_wait(CoQueue *queue)
160
+void coroutine_fn qemu_co_queue_wait(CoQueue *queue, CoMutex *mutex)
161
{
162
Coroutine *self = qemu_coroutine_self();
163
QSIMPLEQ_INSERT_TAIL(&queue->entries, self, co_queue_next);
164
+
165
+ if (mutex) {
166
+ qemu_co_mutex_unlock(mutex);
167
+ }
168
+
169
+ /* There is no race condition here. Other threads will call
170
+ * aio_co_schedule on our AioContext, which can reenter this
171
+ * coroutine but only after this yield and after the main loop
172
+ * has gone through the next iteration.
173
+ */
174
qemu_coroutine_yield();
175
assert(qemu_in_coroutine());
176
+
177
+ /* TODO: OSv implements wait morphing here, where the wakeup
178
+ * primitive automatically places the woken coroutine on the
179
+ * mutex's queue. This avoids the thundering herd effect.
180
+ */
181
+ if (mutex) {
182
+ qemu_co_mutex_lock(mutex);
183
+ }
184
}
185
186
/**
187
@@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_rdlock(CoRwlock *lock)
188
Coroutine *self = qemu_coroutine_self();
189
190
while (lock->writer) {
191
- qemu_co_queue_wait(&lock->queue);
192
+ qemu_co_queue_wait(&lock->queue, NULL);
193
}
194
lock->reader++;
195
self->locks_held++;
196
@@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_wrlock(CoRwlock *lock)
197
Coroutine *self = qemu_coroutine_self();
198
199
while (lock->writer || lock->reader) {
200
- qemu_co_queue_wait(&lock->queue);
201
+ qemu_co_queue_wait(&lock->queue, NULL);
202
}
203
lock->writer = true;
204
self->locks_held++;
205
--
206
2.9.3
207
208
diff view generated by jsdifflib
1
From: Fam Zheng <famz@redhat.com>
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
2
3
git-publish [1] is a convenient tool to send patches and has been
3
This adds a CoMutex around the existing CoQueue. Because the write-side
4
popular among QEMU developers. Recently it has been made available in
4
can just take CoMutex, the old "writer" field is not necessary anymore.
5
the official Fedora/Debian repositories.
5
Instead of removing it altogether, count the number of pending writers
6
during a read-side critical section and forbid further readers from
7
entering.
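For reference, a minimal usage sketch of the CoRwlock API whose implementation is rewritten below (hypothetical callers; the lock must first be set up with qemu_co_rwlock_init()):

    static CoRwlock table_lock;    /* illustration only */

    static void coroutine_fn reader(void)
    {
        qemu_co_rwlock_rdlock(&table_lock);    /* readers may run concurrently */
        /* ... read-only access ... */
        qemu_co_rwlock_unlock(&table_lock);
    }

    static void coroutine_fn writer(void)
    {
        qemu_co_rwlock_wrlock(&table_lock);    /* excludes readers and writers */
        /* ... exclusive access; with this patch, new readers also wait
         * behind a pending writer, making the lock fair ... */
        qemu_co_rwlock_unlock(&table_lock);
    }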
6
8
7
One nice feature of the tool is a per-project configuration with
9
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
8
profiles; in particular, the cccmd option is a handy way to
10
Reviewed-by: Fam Zheng <famz@redhat.com>
9
create the Cc list.
11
Message-id: 20170213181244.16297-7-pbonzini@redhat.com
10
11
[1]: https://github.com/stefanha/git-publish
12
13
Signed-off-by: Fam Zheng <famz@redhat.com>
14
Message-id: 20180226030326.20219-2-famz@redhat.com
15
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
16
---
13
---
17
.gitpublish | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++
14
include/qemu/coroutine.h | 3 ++-
18
1 file changed, 51 insertions(+)
15
util/qemu-coroutine-lock.c | 35 ++++++++++++++++++++++++-----------
19
create mode 100644 .gitpublish
16
2 files changed, 26 insertions(+), 12 deletions(-)
20
17
21
diff --git a/.gitpublish b/.gitpublish
18
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
22
new file mode 100644
19
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX
20
--- a/include/qemu/coroutine.h
24
--- /dev/null
21
+++ b/include/qemu/coroutine.h
25
+++ b/.gitpublish
22
@@ -XXX,XX +XXX,XX @@ bool qemu_co_queue_empty(CoQueue *queue);
26
@@ -XXX,XX +XXX,XX @@
23
27
+#
24
28
+# Common git-publish profiles that can be used to send patches to QEMU upstream.
25
typedef struct CoRwlock {
29
+#
26
- bool writer;
30
+# See https://github.com/stefanha/git-publish for more information
27
+ int pending_writer;
31
+#
28
int reader;
32
+[gitpublishprofile "default"]
29
+ CoMutex mutex;
33
+base = master
30
CoQueue queue;
34
+to = qemu-devel@nongnu.org
31
} CoRwlock;
35
+cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null
32
33
diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/util/qemu-coroutine-lock.c
36
+++ b/util/qemu-coroutine-lock.c
37
@@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_init(CoRwlock *lock)
38
{
39
memset(lock, 0, sizeof(*lock));
40
qemu_co_queue_init(&lock->queue);
41
+ qemu_co_mutex_init(&lock->mutex);
42
}
43
44
void qemu_co_rwlock_rdlock(CoRwlock *lock)
45
{
46
Coroutine *self = qemu_coroutine_self();
47
48
- while (lock->writer) {
49
- qemu_co_queue_wait(&lock->queue, NULL);
50
+ qemu_co_mutex_lock(&lock->mutex);
51
+ /* For fairness, wait if a writer is in line. */
52
+ while (lock->pending_writer) {
53
+ qemu_co_queue_wait(&lock->queue, &lock->mutex);
54
}
55
lock->reader++;
56
+ qemu_co_mutex_unlock(&lock->mutex);
36
+
57
+
37
+[gitpublishprofile "rfc"]
58
+ /* The rest of the read-side critical section is run without the mutex. */
38
+base = master
59
self->locks_held++;
39
+prefix = RFC PATCH
60
}
40
+to = qemu-devel@nongnu.org
61
41
+cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null
62
@@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_unlock(CoRwlock *lock)
63
Coroutine *self = qemu_coroutine_self();
64
65
assert(qemu_in_coroutine());
66
- if (lock->writer) {
67
- lock->writer = false;
68
+ if (!lock->reader) {
69
+ /* The critical section started in qemu_co_rwlock_wrlock. */
70
qemu_co_queue_restart_all(&lock->queue);
71
} else {
72
+ self->locks_held--;
42
+
73
+
43
+[gitpublishprofile "stable"]
74
+ qemu_co_mutex_lock(&lock->mutex);
44
+base = master
75
lock->reader--;
45
+to = qemu-devel@nongnu.org
76
assert(lock->reader >= 0);
46
+cc = qemu-stable@nongnu.org
77
/* Wakeup only one waiting writer */
47
+cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null
78
@@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_unlock(CoRwlock *lock)
79
qemu_co_queue_next(&lock->queue);
80
}
81
}
82
- self->locks_held--;
83
+ qemu_co_mutex_unlock(&lock->mutex);
84
}
85
86
void qemu_co_rwlock_wrlock(CoRwlock *lock)
87
{
88
- Coroutine *self = qemu_coroutine_self();
89
-
90
- while (lock->writer || lock->reader) {
91
- qemu_co_queue_wait(&lock->queue, NULL);
92
+ qemu_co_mutex_lock(&lock->mutex);
93
+ lock->pending_writer++;
94
+ while (lock->reader) {
95
+ qemu_co_queue_wait(&lock->queue, &lock->mutex);
96
}
97
- lock->writer = true;
98
- self->locks_held++;
99
+ lock->pending_writer--;
48
+
100
+
49
+[gitpublishprofile "trivial"]
101
+ /* The rest of the write-side critical section is run with
50
+base = master
102
+ * the mutex taken, so that lock->reader remains zero.
51
+to = qemu-devel@nongnu.org
103
+ * There is no need to update self->locks_held.
52
+cc = qemu-trivial@nongnu.org
104
+ */
53
+cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null
105
}
54
+
55
+[gitpublishprofile "block"]
56
+base = master
57
+to = qemu-devel@nongnu.org
58
+cc = qemu-block@nongnu.org
59
+cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null
60
+
61
+[gitpublishprofile "arm"]
62
+base = master
63
+to = qemu-devel@nongnu.org
64
+cc = qemu-arm@nongnu.org
65
+cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null
66
+
67
+[gitpublishprofile "s390"]
68
+base = master
69
+to = qemu-devel@nongnu.org
70
+cc = qemu-s390@nongnu.org
71
+cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null
72
+
73
+[gitpublishprofile "ppc"]
74
+base = master
75
+to = qemu-devel@nongnu.org
76
+cc = qemu-ppc@nongnu.org
77
+cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null
78
--
106
--
79
2.14.3
107
2.9.3
80
108
81
109
diff view generated by jsdifflib