The following changes since commit 4100a344eb3d50d88f9da85cae334afc47aee134:

  Merge remote-tracking branch 'remotes/sstabellini/tags/xen-20170202' into staging (2017-02-03 12:31:40 +0000)

are available in the git repository at:

  git://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to cdd7abfdba9287a289c404dfdcb02316f9ffee7d:

  iothread: enable AioContext polling by default (2017-02-03 14:23:38 +0000)

----------------------------------------------------------------
Pull request

----------------------------------------------------------------

Stefan Hajnoczi (1):
  iothread: enable AioContext polling by default

 iothread.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

--
2.9.3

The following changes since commit 98b2e3c9ab3abfe476a2b02f8f51813edb90e72d:

  Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging (2019-10-08 16:08:35 +0100)

are available in the Git repository at:

  https://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 69de48445a0d6169f1e2a6c5bfab994e1c810e33:

  test-bdrv-drain: fix iothread_join() hang (2019-10-14 09:48:01 +0100)

----------------------------------------------------------------
Pull request

----------------------------------------------------------------

Stefan Hajnoczi (1):
  test-bdrv-drain: fix iothread_join() hang

 tests/iothread.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

--
2.21.0

IOThread AioContexts are likely to consist only of event sources like
virtqueue ioeventfds and LinuxAIO completion eventfds that are pollable
from userspace (without system calls).

We recently merged the AioContext polling feature but didn't enable it
by default yet. I have gone back over the performance data on the
mailing list and picked a default polling value that gave good results.

Let's enable AioContext polling by default so users don't have another
switch they need to set manually. If performance regressions are found
we can still disable this for the QEMU 2.9 release.

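For reference, the switch in question is the IOThread object's poll-max-ns
property; setting the same limit by hand would look roughly like this on the
command line (the iothread id is only an example):

  -object iothread,id=iothread0,poll-max-ns=32768
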
12
8
13
Cc: Paolo Bonzini <pbonzini@redhat.com>
9
void iothread_join(IOThread *iothread)
14
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
10
{
15
Cc: Karl Rister <krister@redhat.com>
11
iothread->stopping = true;
16
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
aio_notify(iothread->ctx);
17
Message-id: 20170126170119.27876-1-stefanha@redhat.com
13
qemu_thread_join(&iothread->thread);
14
15
If iothread_run() checks iothread->stopping before the iothread_join()
16
thread sets stopping to true, then aio_notify() may be optimized away
17
and iothread_run() hangs forever in aio_poll().
18
19
The correct way to change iothread->stopping is from a BH that executes
20
within iothread_run(). This ensures that iothread->stopping is checked
21
after we set it to true.
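As a rough standalone illustration of that pattern (plain pthreads and a
pipe rather than QEMU's AioContext, with made-up names), the joining thread
queues a stop request for the loop to act on instead of writing the flag
itself:

  /* Hypothetical example, not QEMU code: 'stopping' is only written by the
   * worker thread itself, after it dequeues the stop request, so the loop's
   * check can never miss the store.
   */
  #include <pthread.h>
  #include <stdbool.h>
  #include <unistd.h>

  static int req_pipe[2];   /* the worker blocks here instead of aio_poll() */
  static bool stopping;     /* written only from the worker's own loop */

  static void *worker_run(void *opaque)
  {
      while (!stopping) {
          char cmd;

          if (read(req_pipe[0], &cmd, 1) == 1 && cmd == 'q') {
              stopping = true;   /* the "BH": runs inside the loop */
          }
      }
      return NULL;
  }

  static void worker_join(pthread_t tid)
  {
      ssize_t ret = write(req_pipe[1], "q", 1);   /* queue the stop request */

      (void)ret;
      pthread_join(tid, NULL);
  }

  int main(void)
  {
      pthread_t tid;

      if (pipe(req_pipe) != 0) {
          return 1;
      }
      pthread_create(&tid, NULL, worker_run, NULL);
      worker_join(tid);
      return 0;
  }
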
This was already fixed for ./iothread.c (note this is a different source
file!) by commit 2362a28ea11c145e1a13ae79342d76dc118a72a6 ("iothread:
fix iothread_stop() race condition"), but not for tests/iothread.c.

Fixes: 0c330a734b51c177ab8488932ac3b0c4d63a718a
       ("aio: introduce aio_co_schedule and aio_co_wake")
Reported-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20191003100103.331-1-stefanha@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 tests/iothread.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/tests/iothread.c b/tests/iothread.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/iothread.c
+++ b/tests/iothread.c
@@ -XXX,XX +XXX,XX @@ static void *iothread_run(void *opaque)
     return NULL;
 }
 
-void iothread_join(IOThread *iothread)
+static void iothread_stop_bh(void *opaque)
 {
+    IOThread *iothread = opaque;
+
     iothread->stopping = true;
-    aio_notify(iothread->ctx);
+}
+
+void iothread_join(IOThread *iothread)
+{
+    aio_bh_schedule_oneshot(iothread->ctx, iothread_stop_bh, iothread);
     qemu_thread_join(&iothread->thread);
     qemu_cond_destroy(&iothread->init_done_cond);
     qemu_mutex_destroy(&iothread->init_done_lock);
--
2.21.0