The following changes since commit 8bac3ba57eecc466b7e73dabf7d19328a59f684e:

  Merge remote-tracking branch 'remotes/rth/tags/pull-rx-20200408' into staging (2020-04-09 13:23:30 +0100)

are available in the Git repository at:

  https://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 5710a3e09f9b85801e5ce70797a4a511e5fc9e2c:

  async: use explicit memory barriers (2020-04-09 16:17:14 +0100)

----------------------------------------------------------------
Pull request

Fixes for QEMU on aarch64 ARM hosts and fdmon-io_uring.

----------------------------------------------------------------

Paolo Bonzini (2):
  aio-wait: delegate polling of main AioContext if BQL not held
  async: use explicit memory barriers

Stefan Hajnoczi (1):
  aio-posix: signal-proof fdmon-io_uring

 include/block/aio-wait.h | 22 ++++++++++++++++++++++
 include/block/aio.h      | 29 ++++++++++-------------------
 util/aio-posix.c         | 16 ++++++++++++++--
 util/aio-win32.c         | 17 ++++++++++++++---
 util/async.c             | 16 ++++++++++++----
 util/fdmon-io_uring.c    | 10 ++++++++--
 6 files changed, 80 insertions(+), 30 deletions(-)

--
2.25.1
----------------------------------------------------------------
aio-posix: signal-proof fdmon-io_uring

The io_uring_enter(2) syscall returns with errno=EINTR when interrupted
by a signal. Retry the syscall in this case.

It's essential to do this in the io_uring_submit_and_wait() case. My
interpretation of the Linux v5.5 io_uring_enter(2) code is that it
shouldn't affect the io_uring_submit() case, but there is no guarantee
this will always be the case. Let's check for -EINTR around both APIs.

Note that the liburing APIs have -errno return values.
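
As a stand-alone illustration of the retry pattern, here is a minimal
sketch (the ring setup, queue depth, and file name are hypothetical and
not part of the patch; build with "cc retry.c -luring"):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <liburing.h>

    int main(void)
    {
        struct io_uring ring;
        int ret;

        /* A queue depth of 8 is an arbitrary choice for this sketch */
        ret = io_uring_queue_init(8, &ring, 0);
        if (ret < 0) {
            fprintf(stderr, "io_uring_queue_init: %s\n", strerror(-ret));
            return 1;
        }

        /*
         * liburing returns negative errno values, so a signal arriving
         * during io_uring_enter(2) shows up as -EINTR; simply retry.
         */
        do {
            ret = io_uring_submit(&ring);
        } while (ret == -EINTR);

        io_uring_queue_exit(&ring);
        return 0;
    }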

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20200408091139.273851-1-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 util/fdmon-io_uring.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/util/fdmon-io_uring.c b/util/fdmon-io_uring.c
index XXXXXXX..XXXXXXX 100644
--- a/util/fdmon-io_uring.c
+++ b/util/fdmon-io_uring.c
@@ -XXX,XX +XXX,XX @@ static struct io_uring_sqe *get_sqe(AioContext *ctx)
     }

     /* No free sqes left, submit pending sqes first */
-    ret = io_uring_submit(ring);
+    do {
+        ret = io_uring_submit(ring);
+    } while (ret == -EINTR);
+
     assert(ret > 1);
     sqe = io_uring_get_sqe(ring);
     assert(sqe);
@@ -XXX,XX +XXX,XX @@ static int fdmon_io_uring_wait(AioContext *ctx, AioHandlerList *ready_list,

     fill_sq_ring(ctx);

-    ret = io_uring_submit_and_wait(&ctx->fdmon_io_uring, wait_nr);
+    do {
+        ret = io_uring_submit_and_wait(&ctx->fdmon_io_uring, wait_nr);
+    } while (ret == -EINTR);
+
     assert(ret >= 0);

     return process_cq_ring(ctx, ready_list);
--
2.25.1
----------------------------------------------------------------
aio-wait: delegate polling of main AioContext if BQL not held

From: Paolo Bonzini <pbonzini@redhat.com>

Any thread that is not an iothread returns NULL for
qemu_get_current_aio_context(). As a result, it would also return true
for in_aio_context_home_thread(qemu_get_aio_context()), causing
AIO_WAIT_WHILE to invoke aio_poll() directly. This is incorrect
if the BQL is not held, because aio_poll() does not expect to
run concurrently from multiple threads, and it can actually
happen when savevm writes to the vmstate file from the
migration thread.

Therefore, restrict in_aio_context_home_thread to return true
for the main AioContext only if the BQL is held.

The function is moved to aio-wait.h because it is mostly used
there and to avoid a circular reference between main-loop.h
and block/aio.h.
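
For reference, the new check reads as follows when lifted out of the
diff below into a stand-alone sketch (the extern declarations are mock
stand-ins so the snippet compiles in isolation; the real prototypes
live in QEMU's headers):

    #include <stdbool.h>

    typedef struct AioContext AioContext;

    /* Mock declarations standing in for QEMU's real prototypes */
    extern AioContext *qemu_get_aio_context(void);          /* main loop context */
    extern AioContext *qemu_get_current_aio_context(void);  /* this thread's context */
    extern bool qemu_mutex_iothread_locked(void);           /* is the BQL held? */

    static inline bool in_aio_context_home_thread(AioContext *ctx)
    {
        /* An iothread is always home for its own AioContext */
        if (ctx == qemu_get_current_aio_context()) {
            return true;
        }

        /* The main AioContext counts as home only while the BQL is held */
        if (ctx == qemu_get_aio_context()) {
            return qemu_mutex_iothread_locked();
        } else {
            return false;
        }
    }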

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20200407140746.8041-5-pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/block/aio-wait.h | 22 ++++++++++++++++++++++
 include/block/aio.h      | 29 ++++++++++-------------------
 2 files changed, 32 insertions(+), 19 deletions(-)

diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -XXX,XX +XXX,XX @@
 #define QEMU_AIO_WAIT_H

 #include "block/aio.h"
+#include "qemu/main-loop.h"

 /**
  * AioWait:
@@ -XXX,XX +XXX,XX @@ void aio_wait_kick(void);
  */
 void aio_wait_bh_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

+/**
+ * in_aio_context_home_thread:
+ * @ctx: the aio context
+ *
+ * Return whether we are running in the thread that normally runs @ctx. Note
+ * that acquiring/releasing ctx does not affect the outcome, each AioContext
+ * still only has one home thread that is responsible for running it.
+ */
+static inline bool in_aio_context_home_thread(AioContext *ctx)
+{
+    if (ctx == qemu_get_current_aio_context()) {
+        return true;
+    }
+
+    if (ctx == qemu_get_aio_context()) {
+        return qemu_mutex_iothread_locked();
+    } else {
+        return false;
+    }
+}
+
 #endif /* QEMU_AIO_WAIT_H */
diff --git a/include/block/aio.h b/include/block/aio.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -XXX,XX +XXX,XX @@ struct AioContext {
     AioHandlerList deleted_aio_handlers;

     /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
-     * accessed with atomic primitives. If this field is 0, everything
-     * (file descriptors, bottom halves, timers) will be re-evaluated
-     * before the next blocking poll(), thus the event_notifier_set call
-     * can be skipped. If it is non-zero, you may need to wake up a
-     * concurrent aio_poll or the glib main event loop, making
-     * event_notifier_set necessary.
+     * only written from the AioContext home thread, or under the BQL in
+     * the case of the main AioContext. However, it is read from any
+     * thread so it is still accessed with atomic primitives.
+     *
+     * If this field is 0, everything (file descriptors, bottom halves,
+     * timers) will be re-evaluated before the next blocking poll() or
+     * io_uring wait; therefore, the event_notifier_set call can be
+     * skipped. If it is non-zero, you may need to wake up a concurrent
+     * aio_poll or the glib main event loop, making event_notifier_set
+     * necessary.
      *
      * Bit 0 is reserved for GSource usage of the AioContext, and is 1
      * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
@@ -XXX,XX +XXX,XX @@ void aio_co_enter(AioContext *ctx, struct Coroutine *co);
  */
 AioContext *qemu_get_current_aio_context(void);

-/**
- * in_aio_context_home_thread:
- * @ctx: the aio context
- *
- * Return whether we are running in the thread that normally runs @ctx. Note
- * that acquiring/releasing ctx does not affect the outcome, each AioContext
- * still only has one home thread that is responsible for running it.
- */
-static inline bool in_aio_context_home_thread(AioContext *ctx)
-{
-    return ctx == qemu_get_current_aio_context();
-}
-
 /**
  * aio_context_setup:
  * @ctx: the aio context
--
2.25.1
----------------------------------------------------------------
async: use explicit memory barriers

From: Paolo Bonzini <pbonzini@redhat.com>

When using C11 atomics, non-seqcst reads and writes do not participate
in the total order of seqcst operations. In util/async.c and
util/aio-posix.c, in particular, the pattern that we use

          write ctx->notify_me                 write bh->scheduled
          read bh->scheduled                   read ctx->notify_me
          if !bh->scheduled, sleep             if ctx->notify_me, notify

needs to use seqcst operations for both the write and the read. In
general this is something that we do not want, because there can be
many sources that are polled in addition to bottom halves. The
alternative is to place a seqcst memory barrier between the write and
the read. This also comes with a disadvantage: even on strongly-ordered
architectures, where most of the ordering is already implicit, the
explicit barrier instruction still wastes a few dozen clock cycles.

Fortunately, ctx->notify_me is never written concurrently by two
threads, so we can assert that and relax the writes to ctx->notify_me.
The resulting solution works and performs well on both aarch64 and x86.
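
As a stand-alone model of the barrier placement, here is a minimal C11
sketch with hypothetical names (QEMU's atomic_*() and smp_mb() wrappers
differ in spelling, not in intent):

    #include <stdatomic.h>
    #include <stdbool.h>

    atomic_int  notify_me;   /* stands in for ctx->notify_me */
    atomic_bool scheduled;   /* stands in for bh->scheduled  */

    /* Poller: advertise that we may sleep, then look for pending work */
    bool poller_may_sleep(void)
    {
        /* Relaxed store suffices: only this thread writes notify_me */
        atomic_store_explicit(&notify_me,
            atomic_load_explicit(&notify_me, memory_order_relaxed) + 2,
            memory_order_relaxed);
        /* Write notify_me before reading scheduled; pairs with the
         * fence in notifier_must_wake() */
        atomic_thread_fence(memory_order_seq_cst);
        return !atomic_load_explicit(&scheduled, memory_order_relaxed);
    }

    /* Notifier: publish work, then decide whether a wakeup is needed */
    bool notifier_must_wake(void)
    {
        atomic_store_explicit(&scheduled, true, memory_order_relaxed);
        /* Write scheduled before reading notify_me; pairs with the
         * fence in poller_may_sleep() */
        atomic_thread_fence(memory_order_seq_cst);
        return atomic_load_explicit(&notify_me, memory_order_relaxed) != 0;
    }

With a seqcst fence between each store and the following load, at least
one of the two threads is guaranteed to observe the other's store, so
the lost-wakeup case (poller sleeps while the notifier skips the
notify) cannot happen.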

Note that the atomic_set/atomic_read combination is not an atomic
read-modify-write, and therefore it is even weaker than C11
ATOMIC_RELAXED; on x86, ATOMIC_RELAXED compiles to a locked operation.
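
To make that distinction concrete, a short sketch (hypothetical
function names) contrasting the two update styles:

    #include <stdatomic.h>

    atomic_int notify_me;

    /* Load-then-store: two separate atomic accesses, not an RMW; safe
     * here only because a single thread ever writes notify_me */
    void single_writer_update(void)
    {
        atomic_store_explicit(&notify_me,
            atomic_load_explicit(&notify_me, memory_order_relaxed) + 2,
            memory_order_relaxed);
    }

    /* A true read-modify-write: even with memory_order_relaxed, x86
     * compiles this to a locked add, which is what the patch avoids */
    void rmw_update(void)
    {
        atomic_fetch_add_explicit(&notify_me, 2, memory_order_relaxed);
    }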

Analyzed-by: Ying Fang <fangying1@huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Tested-by: Ying Fang <fangying1@huawei.com>
Message-Id: <20200407140746.8041-6-pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 util/aio-posix.c | 16 ++++++++++++++--
 util/aio-win32.c | 17 ++++++++++++++---
 util/async.c     | 16 ++++++++++++----
 3 files changed, 40 insertions(+), 9 deletions(-)

diff --git a/util/aio-posix.c b/util/aio-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
     int64_t timeout;
     int64_t start = 0;

+    /*
+     * There cannot be two concurrent aio_poll calls for the same AioContext (or
+     * an aio_poll concurrent with a GSource prepare/check/dispatch callback).
+     * We rely on this below to avoid slow locked accesses to ctx->notify_me.
+     */
     assert(in_aio_context_home_thread(ctx));

     /* aio_notify can avoid the expensive event_notifier_set if
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
      * so disable the optimization now.
      */
     if (blocking) {
-        atomic_add(&ctx->notify_me, 2);
+        atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) + 2);
+        /*
+         * Write ctx->notify_me before computing the timeout
+         * (reading bottom half flags, etc.). Pairs with
+         * smp_mb in aio_notify().
+         */
+        smp_mb();
     }

     qemu_lockcnt_inc(&ctx->list_lock);
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
     }

     if (blocking) {
-        atomic_sub(&ctx->notify_me, 2);
+        /* Finish the poll before clearing the flag. */
+        atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) - 2);
         aio_notify_accept(ctx);
     }

diff --git a/util/aio-win32.c b/util/aio-win32.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-win32.c
+++ b/util/aio-win32.c
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
     int count;
     int timeout;

+    /*
+     * There cannot be two concurrent aio_poll calls for the same AioContext (or
+     * an aio_poll concurrent with a GSource prepare/check/dispatch callback).
+     * We rely on this below to avoid slow locked accesses to ctx->notify_me.
+     */
+    assert(in_aio_context_home_thread(ctx));
     progress = false;

     /* aio_notify can avoid the expensive event_notifier_set if
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
      * so disable the optimization now.
      */
     if (blocking) {
-        atomic_add(&ctx->notify_me, 2);
+        atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) + 2);
+        /*
+         * Write ctx->notify_me before computing the timeout
+         * (reading bottom half flags, etc.). Pairs with
+         * smp_mb in aio_notify().
+         */
+        smp_mb();
     }

     qemu_lockcnt_inc(&ctx->list_lock);
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
         ret = WaitForMultipleObjects(count, events, FALSE, timeout);
         if (blocking) {
             assert(first);
-            assert(in_aio_context_home_thread(ctx));
-            atomic_sub(&ctx->notify_me, 2);
+            atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) - 2);
             aio_notify_accept(ctx);
         }

diff --git a/util/async.c b/util/async.c
index XXXXXXX..XXXXXXX 100644
--- a/util/async.c
+++ b/util/async.c
@@ -XXX,XX +XXX,XX @@ aio_ctx_prepare(GSource *source, gint *timeout)
 {
     AioContext *ctx = (AioContext *) source;

-    atomic_or(&ctx->notify_me, 1);
+    atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) | 1);
+
+    /*
+     * Write ctx->notify_me before computing the timeout
+     * (reading bottom half flags, etc.). Pairs with
+     * smp_mb in aio_notify().
+     */
+    smp_mb();

     /* We assume there is no timeout already supplied */
     *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));
@@ -XXX,XX +XXX,XX @@ aio_ctx_check(GSource *source)
     QEMUBH *bh;
     BHListSlice *s;

-    atomic_and(&ctx->notify_me, ~1);
+    /* Finish computing the timeout before clearing the flag. */
+    atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) & ~1);
     aio_notify_accept(ctx);

     QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
@@ -XXX,XX +XXX,XX @@ LuringState *aio_get_linux_io_uring(AioContext *ctx)
 void aio_notify(AioContext *ctx)
 {
     /* Write e.g. bh->scheduled before reading ctx->notify_me. Pairs
-     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
+     * with smp_mb in aio_ctx_prepare or aio_poll.
      */
     smp_mb();
-    if (ctx->notify_me) {
+    if (atomic_read(&ctx->notify_me)) {
         event_notifier_set(&ctx->notifier);
         atomic_mb_set(&ctx->notified, true);
     }
--
2.25.1