1
The following changes since commit 61c265f0660ee476985808c8aa7915617c44fd53:
1
The following changes since commit d0ed6a69d399ae193959225cdeaa9382746c91cc:
2
2
3
Merge remote-tracking branch 'remotes/dgilbert/tags/pull-migration-20200313a' into staging (2020-03-13 10:33:04 +0000)
3
Update version for v5.1.0 release (2020-08-11 17:07:03 +0100)
4
4
5
are available in the Git repository at:
5
are available in the Git repository at:
6
6
7
https://github.com/stefanha/qemu.git tags/block-pull-request
7
https://github.com/stefanha/qemu.git tags/block-pull-request
8
8
9
for you to fetch changes up to 4ab78b19189a81038e744728ed949d09aa477550:
9
for you to fetch changes up to 44277bf914471962c9e88e09c859aae65ae109c4:
10
10
11
block/io: fix bdrv_co_do_copy_on_readv (2020-03-16 11:46:11 +0000)
11
aio-posix: keep aio_notify_me disabled during polling (2020-08-13 13:34:14 +0100)
12
12
13
13
----------------------------------------------------------------
14
----------------------------------------------------------------
14
Pull request
15
Pull request
15
16
16
----------------------------------------------------------------
17
----------------------------------------------------------------
17
18
18
Vladimir Sementsov-Ogievskiy (1):
19
Stefan Hajnoczi (3):
19
block/io: fix bdrv_co_do_copy_on_readv
20
async: rename event_notifier_dummy_cb/poll()
21
async: always set ctx->notified in aio_notify()
22
aio-posix: keep aio_notify_me disabled during polling
20
23
21
block/io.c | 2 +-
24
util/aio-posix.c | 47 +++++++++++++++++++++++++----------------------
22
1 file changed, 1 insertion(+), 1 deletion(-)
25
util/async.c | 36 +++++++++++++++++++++++-------------
26
2 files changed, 48 insertions(+), 35 deletions(-)
23
27
24
--
28
-- 
25
2.24.1
29
2.26.2
26
30
diff view generated by jsdifflib
New patch
1
The event_notifier_*() prefix can be confused with the EventNotifier
2
APIs that are also called event_notifier_*().
1
3
4
Rename the functions to aio_context_notifier_*() to make it clear that
5
they relate to the AioContext::notifier field.
6
7
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
8
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
9
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
10
Message-id: 20200806131802.569478-2-stefanha@redhat.com
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
13
util/async.c | 8 ++++----
14
1 file changed, 4 insertions(+), 4 deletions(-)
15
16
diff --git a/util/async.c b/util/async.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/util/async.c
19
+++ b/util/async.c
20
@@ -XXX,XX +XXX,XX @@ static void aio_timerlist_notify(void *opaque, QEMUClockType type)
21
aio_notify(opaque);
22
}
23
24
-static void event_notifier_dummy_cb(EventNotifier *e)
25
+static void aio_context_notifier_dummy_cb(EventNotifier *e)
26
{
27
}
28
29
/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
30
-static bool event_notifier_poll(void *opaque)
31
+static bool aio_context_notifier_poll(void *opaque)
32
{
33
EventNotifier *e = opaque;
34
AioContext *ctx = container_of(e, AioContext, notifier);
35
@@ -XXX,XX +XXX,XX @@ AioContext *aio_context_new(Error **errp)
36
37
aio_set_event_notifier(ctx, &ctx->notifier,
38
false,
39
- event_notifier_dummy_cb,
40
- event_notifier_poll);
41
+ aio_context_notifier_dummy_cb,
42
+ aio_context_notifier_poll);
43
#ifdef CONFIG_LINUX_AIO
44
ctx->linux_aio = NULL;
45
#endif
46
--
47
2.26.2
48
diff view generated by jsdifflib
New patch
1
aio_notify() does not set ctx->notified when called with
2
ctx->aio_notify_me disabled. Therefore aio_notify_me needs to be enabled
3
during polling.
1
4
5
This is suboptimal since expensive event_notifier_set(&ctx->notifier)
6
and event_notifier_test_and_clear(&ctx->notifier) calls are required
7
when ctx->aio_notify_me is enabled.
8
9
Change aio_notify() so that aio->notified is always set, regardless of
10
ctx->aio_notify_me. This will make polling cheaper since
11
ctx->aio_notify_me can remain disabled. Move the
12
event_notifier_test_and_clear() to the fd handler function (which is now
13
no longer an empty function so "dummy" has been dropped from its name).
14
15
The next patch takes advantage of this by optimizing polling in
16
util/aio-posix.c.
17
18
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
19
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
20
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
21
Message-id: 20200806131802.569478-3-stefanha@redhat.com
22
23
[Paolo Bonzini pointed out that the smp_wmb() in aio_notify_accept()
24
should be smp_mb() but the comment should be smp_wmb() instead of
25
smp_mb(). Fixed.
26
--Stefan]
27
28
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
29
---
30
util/async.c | 32 +++++++++++++++++++++-----------
31
1 file changed, 21 insertions(+), 11 deletions(-)
32
33
diff --git a/util/async.c b/util/async.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/util/async.c
36
+++ b/util/async.c
37
@@ -XXX,XX +XXX,XX @@ LuringState *aio_get_linux_io_uring(AioContext *ctx)
38
39
void aio_notify(AioContext *ctx)
40
{
41
- /* Write e.g. bh->scheduled before reading ctx->notify_me. Pairs
42
+ /*
43
+ * Write e.g. bh->flags before writing ctx->notified. Pairs with smp_mb in
44
+ * aio_notify_accept.
45
+ */
46
+ smp_wmb();
47
+ atomic_set(&ctx->notified, true);
48
+
49
+ /*
50
+ * Write ctx->notified before reading ctx->notify_me. Pairs
51
* with smp_mb in aio_ctx_prepare or aio_poll.
52
*/
53
smp_mb();
54
if (atomic_read(&ctx->notify_me)) {
55
event_notifier_set(&ctx->notifier);
56
- atomic_mb_set(&ctx->notified, true);
57
}
58
}
59
60
void aio_notify_accept(AioContext *ctx)
61
{
62
- if (atomic_xchg(&ctx->notified, false)
63
-#ifdef WIN32
64
- || true
65
-#endif
66
- ) {
67
- event_notifier_test_and_clear(&ctx->notifier);
68
- }
69
+ atomic_set(&ctx->notified, false);
70
+
71
+ /*
72
+ * Write ctx->notified before reading e.g. bh->flags. Pairs with smp_wmb
73
+ * in aio_notify.
74
+ */
75
+ smp_mb();
76
}
77
78
static void aio_timerlist_notify(void *opaque, QEMUClockType type)
79
@@ -XXX,XX +XXX,XX @@ static void aio_timerlist_notify(void *opaque, QEMUClockType type)
80
aio_notify(opaque);
81
}
82
83
-static void aio_context_notifier_dummy_cb(EventNotifier *e)
84
+static void aio_context_notifier_cb(EventNotifier *e)
85
{
86
+ AioContext *ctx = container_of(e, AioContext, notifier);
87
+
88
+ event_notifier_test_and_clear(&ctx->notifier);
89
}
90
91
/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
92
@@ -XXX,XX +XXX,XX @@ AioContext *aio_context_new(Error **errp)
93
94
aio_set_event_notifier(ctx, &ctx->notifier,
95
false,
96
- aio_context_notifier_dummy_cb,
97
+ aio_context_notifier_cb,
98
aio_context_notifier_poll);
99
#ifdef CONFIG_LINUX_AIO
100
ctx->linux_aio = NULL;
101
--
102
2.26.2
103
diff view generated by jsdifflib
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
Polling only monitors the ctx->notified field and does not need the
2
ctx->notifier EventNotifier to be signalled. Keep ctx->aio_notify_me
3
disabled while polling to avoid unnecessary EventNotifier syscalls.
2
4
3
Prior to 1143ec5ebf4 it was OK to qemu_iovec_from_buf() from aligned-up
5
This optimization improves virtio-blk 4KB random read performance by
4
buffer to original qiov, as qemu_iovec_from_buf() will stop at qiov end
6
18%. The following results are with an IOThread and the null-co block
5
anyway.
7
driver:
6
8
7
But after 1143ec5ebf4 we assume that bdrv_co_do_copy_on_readv works on
9
Test IOPS Error
8
part of original qiov, defined by qiov_offset and bytes. So we must not
10
Before 244518.62 ± 1.20%
9
touch qiov behind qiov_offset+bytes bound. Fix it.
11
After 290706.11 ± 0.44%
10
12
11
Cc: qemu-stable@nongnu.org # v4.2
13
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
Fixes: 1143ec5ebf4
14
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
13
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
15
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
14
Reviewed-by: John Snow <jsnow@redhat.com>
16
Message-id: 20200806131802.569478-4-stefanha@redhat.com
15
Message-id: 20200312081949.5350-1-vsementsov@virtuozzo.com
16
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
17
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
17
---
18
---
18
block/io.c | 2 +-
19
util/aio-posix.c | 47 +++++++++++++++++++++++++----------------------
19
1 file changed, 1 insertion(+), 1 deletion(-)
20
1 file changed, 25 insertions(+), 22 deletions(-)
20
21
21
diff --git a/block/io.c b/block/io.c
22
diff --git a/util/aio-posix.c b/util/aio-posix.c
22
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
23
--- a/block/io.c
24
--- a/util/aio-posix.c
24
+++ b/block/io.c
25
+++ b/util/aio-posix.c
25
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
26
@@ -XXX,XX +XXX,XX @@ static bool remove_idle_poll_handlers(AioContext *ctx, int64_t now)
26
if (!(flags & BDRV_REQ_PREFETCH)) {
27
*
27
qemu_iovec_from_buf(qiov, qiov_offset + progress,
28
* Polls for a given time.
28
bounce_buffer + skip_bytes,
29
*
29
- pnum - skip_bytes);
30
- * Note that ctx->notify_me must be non-zero so this function can detect
30
+ MIN(pnum - skip_bytes, bytes - progress));
31
- * aio_notify().
31
}
32
- *
32
} else if (!(flags & BDRV_REQ_PREFETCH)) {
33
* Note that the caller must have incremented ctx->list_lock.
33
/* Read directly into the destination */
34
*
35
* Returns: true if progress was made, false otherwise
36
@@ -XXX,XX +XXX,XX @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)
37
bool progress;
38
int64_t start_time, elapsed_time;
39
40
- assert(ctx->notify_me);
41
assert(qemu_lockcnt_count(&ctx->list_lock) > 0);
42
43
trace_run_poll_handlers_begin(ctx, max_ns, *timeout);
44
@@ -XXX,XX +XXX,XX @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)
45
* @timeout: timeout for blocking wait, computed by the caller and updated if
46
* polling succeeds.
47
*
48
- * ctx->notify_me must be non-zero so this function can detect aio_notify().
49
- *
50
* Note that the caller must have incremented ctx->list_lock.
51
*
52
* Returns: true if progress was made, false otherwise
53
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
54
AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list);
55
int ret = 0;
56
bool progress;
57
+ bool use_notify_me;
58
int64_t timeout;
59
int64_t start = 0;
60
61
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
62
*/
63
assert(in_aio_context_home_thread(ctx));
64
65
- /* aio_notify can avoid the expensive event_notifier_set if
66
+ qemu_lockcnt_inc(&ctx->list_lock);
67
+
68
+ if (ctx->poll_max_ns) {
69
+ start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
70
+ }
71
+
72
+ timeout = blocking ? aio_compute_timeout(ctx) : 0;
73
+ progress = try_poll_mode(ctx, &timeout);
74
+ assert(!(timeout && progress));
75
+
76
+ /*
77
+ * aio_notify can avoid the expensive event_notifier_set if
78
* everything (file descriptors, bottom halves, timers) will
79
* be re-evaluated before the next blocking poll(). This is
80
* already true when aio_poll is called with blocking == false;
81
* if blocking == true, it is only true after poll() returns,
82
* so disable the optimization now.
83
*/
84
- if (blocking) {
85
+ use_notify_me = timeout != 0;
86
+ if (use_notify_me) {
87
atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) + 2);
88
/*
89
- * Write ctx->notify_me before computing the timeout
90
- * (reading bottom half flags, etc.). Pairs with
91
+ * Write ctx->notify_me before reading ctx->notified. Pairs with
92
* smp_mb in aio_notify().
93
*/
94
smp_mb();
95
- }
96
-
97
- qemu_lockcnt_inc(&ctx->list_lock);
98
99
- if (ctx->poll_max_ns) {
100
- start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
101
+ /* Don't block if aio_notify() was called */
102
+ if (atomic_read(&ctx->notified)) {
103
+ timeout = 0;
104
+ }
105
}
106
107
- timeout = blocking ? aio_compute_timeout(ctx) : 0;
108
- progress = try_poll_mode(ctx, &timeout);
109
- assert(!(timeout && progress));
110
-
111
/* If polling is allowed, non-blocking aio_poll does not need the
112
* system call---a single round of run_poll_handlers_once suffices.
113
*/
114
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
115
ret = ctx->fdmon_ops->wait(ctx, &ready_list, timeout);
116
}
117
118
- if (blocking) {
119
+ if (use_notify_me) {
120
/* Finish the poll before clearing the flag. */
121
- atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) - 2);
122
- aio_notify_accept(ctx);
123
+ atomic_store_release(&ctx->notify_me,
124
+ atomic_read(&ctx->notify_me) - 2);
125
}
126
127
+ aio_notify_accept(ctx);
128
+
129
/* Adjust polling time */
130
if (ctx->poll_max_ns) {
131
int64_t block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;
34
--
132
--
35
2.24.1
133
2.26.2
36
134
diff view generated by jsdifflib