The following changes since commit 6c769690ac845fa62642a5f93b4e4bd906adab95:

  Merge remote-tracking branch 'remotes/vsementsov/tags/pull-simplebench-2021-05-04' into staging (2021-05-21 12:02:34 +0100)

are available in the Git repository at:

  https://gitlab.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 0a6f0c76a030710780ce10d6347a70f098024d21:

  coroutine-sleep: introduce qemu_co_sleep (2021-05-21 18:22:33 +0100)

----------------------------------------------------------------
Pull request

(Resent due to an email preparation mistake.)

----------------------------------------------------------------

Paolo Bonzini (6):
  coroutine-sleep: use a stack-allocated timer
  coroutine-sleep: disallow NULL QemuCoSleepState** argument
  coroutine-sleep: allow qemu_co_sleep_wake that wakes nothing
  coroutine-sleep: move timer out of QemuCoSleepState
  coroutine-sleep: replace QemuCoSleepState pointer with struct in the
    API
  coroutine-sleep: introduce qemu_co_sleep

Philippe Mathieu-Daudé (1):
  bitops.h: Improve find_xxx_bit() documentation

Zenghui Yu (1):
  multi-process: Initialize variables declared with g_auto*

 include/qemu/bitops.h       | 15 ++++++--
 include/qemu/coroutine.h    | 27 ++++++++-----
 block/block-copy.c          | 10 ++---
 block/nbd.c                 | 14 +++----
 hw/remote/memory.c          |  5 +--
 hw/remote/proxy.c           |  3 +-
 util/qemu-coroutine-sleep.c | 75 +++++++++++++++++++------------------
 7 files changed, 79 insertions(+), 70 deletions(-)

--
2.31.1
From: Zenghui Yu <yuzenghui@huawei.com>

Quote docs/devel/style.rst (section "Automatic memory deallocation"):

* Variables declared with g_auto* MUST always be initialized,
  otherwise the cleanup function will use uninitialized stack memory

Initialize @name properly to get rid of the compilation error (using
gcc-7.3.0 on CentOS):

../hw/remote/proxy.c: In function 'pci_proxy_dev_realize':
/usr/include/glib-2.0/glib/glib-autocleanups.h:28:3: error: 'name' may be used uninitialized in this function [-Werror=maybe-uninitialized]
   g_free (*pp);
   ^~~~~~~~~~~~
../hw/remote/proxy.c:350:30: note: 'name' was declared here
   g_autofree char *name;
                    ^~~~

Signed-off-by: Zenghui Yu <yuzenghui@huawei.com>
Reviewed-by: Jagannathan Raman <jag.raman@oracle.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Miroslav Rezanina <mrezanin@redhat.com>
Message-id: 20210312112143.1369-1-yuzenghui@huawei.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
hw/remote/memory.c | 5 ++---
hw/remote/proxy.c | 3 +--
2 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/hw/remote/memory.c b/hw/remote/memory.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/remote/memory.c
+++ b/hw/remote/memory.c
@@ -XXX,XX +XXX,XX @@ void remote_sysmem_reconfig(MPQemuMsg *msg, Error **errp)

remote_sysmem_reset();

-    for (region = 0; region < msg->num_fds; region++) {
-        g_autofree char *name;
+    for (region = 0; region < msg->num_fds; region++, suffix++) {
+        g_autofree char *name = g_strdup_printf("remote-mem-%u", suffix);
subregion = g_new(MemoryRegion, 1);
-        name = g_strdup_printf("remote-mem-%u", suffix++);
memory_region_init_ram_from_fd(subregion, NULL,
name, sysmem_info->sizes[region],
true, msg->fds[region],
diff --git a/hw/remote/proxy.c b/hw/remote/proxy.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/remote/proxy.c
+++ b/hw/remote/proxy.c
@@ -XXX,XX +XXX,XX @@ static void probe_pci_info(PCIDevice *dev, Error **errp)
PCI_BASE_ADDRESS_SPACE_IO : PCI_BASE_ADDRESS_SPACE_MEMORY;

if (size) {
-            g_autofree char *name;
+            g_autofree char *name = g_strdup_printf("bar-region-%d", i);
pdev->region[i].dev = pdev;
pdev->region[i].present = true;
if (type == PCI_BASE_ADDRESS_SPACE_MEMORY) {
pdev->region[i].memory = true;
}
-            name = g_strdup_printf("bar-region-%d", i);
memory_region_init_io(&pdev->region[i].mr, OBJECT(pdev),
&proxy_mr_ops, &pdev->region[i],
name, size);
--
2.31.1
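
A minimal standalone illustration of the g_auto* rule quoted above (not part of this patch; the helper and its name are made up):

#include <glib.h>

/*
 * Illustration only: with g_autofree, the declaration and the
 * g_strdup_printf() assignment should be a single statement.  If the
 * variable were left uninitialized and an early return were later added
 * before the assignment, the cleanup handler would call g_free() on
 * uninitialized stack memory -- the warning gcc 7.3 raises above.
 */
static void print_region_name(unsigned suffix)
{
    g_autofree char *name = g_strdup_printf("remote-mem-%u", suffix);

    g_print("%s\n", name);
    /* no explicit g_free(): "name" is freed when it goes out of scope */
}

int main(void)
{
    print_region_name(0);
    return 0;
}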
From: Philippe Mathieu-Daudé <philmd@redhat.com>

Document the following functions return the bitmap size
if no matching bit is found:

- find_first_bit
- find_next_bit
- find_last_bit
- find_first_zero_bit
- find_next_zero_bit

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20210510200758.2623154-2-philmd@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
include/qemu/bitops.h | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/include/qemu/bitops.h b/include/qemu/bitops.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/bitops.h
+++ b/include/qemu/bitops.h
@@ -XXX,XX +XXX,XX @@ static inline int test_bit(long nr, const unsigned long *addr)
* @addr: The address to start the search at
* @size: The maximum size to search
*
- * Returns the bit number of the first set bit, or size.
+ * Returns the bit number of the last set bit,
+ * or @size if there is no set bit in the bitmap.
*/
unsigned long find_last_bit(const unsigned long *addr,
unsigned long size);
@@ -XXX,XX +XXX,XX @@ unsigned long find_last_bit(const unsigned long *addr,
* @addr: The address to base the search on
* @offset: The bitnumber to start searching at
* @size: The bitmap size in bits
+ *
+ * Returns the bit number of the next set bit,
+ * or @size if there are no further set bits in the bitmap.
*/
unsigned long find_next_bit(const unsigned long *addr,
unsigned long size,
@@ -XXX,XX +XXX,XX @@ unsigned long find_next_bit(const unsigned long *addr,
* @addr: The address to base the search on
* @offset: The bitnumber to start searching at
* @size: The bitmap size in bits
+ *
+ * Returns the bit number of the next cleared bit,
+ * or @size if there are no further clear bits in the bitmap.
*/

unsigned long find_next_zero_bit(const unsigned long *addr,
@@ -XXX,XX +XXX,XX @@ unsigned long find_next_zero_bit(const unsigned long *addr,
* @addr: The address to start the search at
* @size: The maximum size to search
*
- * Returns the bit number of the first set bit.
+ * Returns the bit number of the first set bit,
+ * or @size if there is no set bit in the bitmap.
*/
static inline unsigned long find_first_bit(const unsigned long *addr,
unsigned long size)
@@ -XXX,XX +XXX,XX @@ static inline unsigned long find_first_bit(const unsigned long *addr,
* @addr: The address to start the search at
* @size: The maximum size to search
*
- * Returns the bit number of the first cleared bit.
+ * Returns the bit number of the first cleared bit,
+ * or @size if there is no clear bit in the bitmap.
*/
static inline unsigned long find_first_zero_bit(const unsigned long *addr,
unsigned long size)
--
2.31.1
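
As a usage sketch of the convention documented above (QEMU headers assumed; walk_set_bits() is a made-up helper, not part of this patch):

#include "qemu/osdep.h"
#include "qemu/bitops.h"

/*
 * Walk every set bit in @map.  find_first_bit() and find_next_bit()
 * return @size when no (further) bit is set, which is what terminates
 * the loop -- exactly the behaviour the new comments spell out.
 */
static void walk_set_bits(const unsigned long *map, unsigned long size)
{
    unsigned long bit;

    for (bit = find_first_bit(map, size);
         bit < size;
         bit = find_next_bit(map, size, bit + 1)) {
        printf("bit %lu is set\n", bit);
    }
}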
From: Paolo Bonzini <pbonzini@redhat.com>

The lifetime of the timer is well-known (it cannot outlive
qemu_co_sleep_ns_wakeable, because it's deleted by the time the
coroutine resumes), so it is not necessary to place it on the heap.

Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20210517100548.28806-2-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
util/qemu-coroutine-sleep.c | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/util/qemu-coroutine-sleep.c b/util/qemu-coroutine-sleep.c
index XXXXXXX..XXXXXXX 100644
--- a/util/qemu-coroutine-sleep.c
+++ b/util/qemu-coroutine-sleep.c
@@ -XXX,XX +XXX,XX @@ static const char *qemu_co_sleep_ns__scheduled = "qemu_co_sleep_ns";

struct QemuCoSleepState {
Coroutine *co;
-    QEMUTimer *ts;
+    QEMUTimer ts;
QemuCoSleepState **user_state_pointer;
};

@@ -XXX,XX +XXX,XX @@ void qemu_co_sleep_wake(QemuCoSleepState *sleep_state)
if (sleep_state->user_state_pointer) {
*sleep_state->user_state_pointer = NULL;
}
-    timer_del(sleep_state->ts);
+    timer_del(&sleep_state->ts);
aio_co_wake(sleep_state->co);
}

@@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_sleep_ns_wakeable(QEMUClockType type, int64_t ns,
AioContext *ctx = qemu_get_current_aio_context();
QemuCoSleepState state = {
.co = qemu_coroutine_self(),
-        .ts = aio_timer_new(ctx, type, SCALE_NS, co_sleep_cb, &state),
.user_state_pointer = sleep_state,
};

@@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_sleep_ns_wakeable(QEMUClockType type, int64_t ns,
abort();
}

+    aio_timer_init(ctx, &state.ts, type, SCALE_NS, co_sleep_cb, &state);
if (sleep_state) {
*sleep_state = &state;
}
-    timer_mod(state.ts, qemu_clock_get_ns(type) + ns);
+    timer_mod(&state.ts, qemu_clock_get_ns(type) + ns);
qemu_coroutine_yield();
if (sleep_state) {
/*
@@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_sleep_ns_wakeable(QEMUClockType type, int64_t ns,
*/
assert(*sleep_state == NULL);
}
-    timer_free(state.ts);
}
--
2.31.1
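
The pattern the patch relies on, reduced to an illustrative sketch (sleep_with_stack_timer() and its callback parameter are invented names, not part of this series):

#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "qemu/timer.h"
#include "block/aio.h"

/*
 * The timer is initialized, armed and deleted entirely within this
 * stack frame, so a QEMUTimer by value is enough -- no aio_timer_new()
 * heap allocation and no timer_free() are needed.
 */
static void coroutine_fn sleep_with_stack_timer(QEMUClockType type, int64_t ns,
                                                QEMUTimerCB *cb, void *opaque)
{
    QEMUTimer ts;

    aio_timer_init(qemu_get_current_aio_context(), &ts, type, SCALE_NS,
                   cb, opaque);
    timer_mod(&ts, qemu_clock_get_ns(type) + ns);
    qemu_coroutine_yield();    /* cb() is expected to wake this coroutine */
    timer_del(&ts);            /* the timer never outlives this frame */
}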
From: Paolo Bonzini <pbonzini@redhat.com>

Simplify the code by removing conditionals. qemu_co_sleep_ns
can simply point the argument to an on-stack temporary.

Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20210517100548.28806-3-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
include/qemu/coroutine.h | 5 +++--
util/qemu-coroutine-sleep.c | 18 +++++-------------
2 files changed, 8 insertions(+), 15 deletions(-)

diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/coroutine.h
+++ b/include/qemu/coroutine.h
@@ -XXX,XX +XXX,XX @@ typedef struct QemuCoSleepState QemuCoSleepState;

/**
* Yield the coroutine for a given duration. During this yield, @sleep_state
- * (if not NULL) is set to an opaque pointer, which may be used for
+ * is set to an opaque pointer, which may be used for
* qemu_co_sleep_wake(). Be careful, the pointer is set back to zero when the
* timer fires. Don't save the obtained value to other variables and don't call
* qemu_co_sleep_wake from another aio context.
@@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_sleep_ns_wakeable(QEMUClockType type, int64_t ns,
QemuCoSleepState **sleep_state);
static inline void coroutine_fn qemu_co_sleep_ns(QEMUClockType type, int64_t ns)
{
-    qemu_co_sleep_ns_wakeable(type, ns, NULL);
+    QemuCoSleepState *unused = NULL;
+    qemu_co_sleep_ns_wakeable(type, ns, &unused);
}

/**
diff --git a/util/qemu-coroutine-sleep.c b/util/qemu-coroutine-sleep.c
index XXXXXXX..XXXXXXX 100644
--- a/util/qemu-coroutine-sleep.c
+++ b/util/qemu-coroutine-sleep.c
@@ -XXX,XX +XXX,XX @@ void qemu_co_sleep_wake(QemuCoSleepState *sleep_state)
qemu_co_sleep_ns__scheduled, NULL);

assert(scheduled == qemu_co_sleep_ns__scheduled);
-    if (sleep_state->user_state_pointer) {
-        *sleep_state->user_state_pointer = NULL;
-    }
+    *sleep_state->user_state_pointer = NULL;
timer_del(&sleep_state->ts);
aio_co_wake(sleep_state->co);
}
@@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_sleep_ns_wakeable(QEMUClockType type, int64_t ns,
}

aio_timer_init(ctx, &state.ts, type, SCALE_NS, co_sleep_cb, &state);
-    if (sleep_state) {
-        *sleep_state = &state;
-    }
+    *sleep_state = &state;
timer_mod(&state.ts, qemu_clock_get_ns(type) + ns);
qemu_coroutine_yield();
-    if (sleep_state) {
-        /*
-         * Note that *sleep_state is cleared during qemu_co_sleep_wake
-         * before resuming this coroutine.
-         */
-        assert(*sleep_state == NULL);
-    }
+
+    /* qemu_co_sleep_wake clears *sleep_state before resuming this coroutine. */
+    assert(*sleep_state == NULL);
}
--
2.31.1
From: Paolo Bonzini <pbonzini@redhat.com>

All callers of qemu_co_sleep_wake are checking whether they are passing
a NULL argument inside the pointer-to-pointer: do the check in
qemu_co_sleep_wake itself.

As a side effect, qemu_co_sleep_wake can be called more than once and
it will only wake the coroutine once; after the first time, the argument
will be set to NULL via *sleep_state->user_state_pointer. However, this
would not be safe unless co_sleep_cb keeps using the QemuCoSleepState*
directly, so make it go through the pointer-to-pointer instead.

Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20210517100548.28806-4-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
block/block-copy.c | 4 +---
block/nbd.c | 8 ++------
util/qemu-coroutine-sleep.c | 21 ++++++++++++---------
3 files changed, 15 insertions(+), 18 deletions(-)

diff --git a/block/block-copy.c b/block/block-copy.c
index XXXXXXX..XXXXXXX 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -XXX,XX +XXX,XX @@ out:

void block_copy_kick(BlockCopyCallState *call_state)
{
-    if (call_state->sleep_state) {
-        qemu_co_sleep_wake(call_state->sleep_state);
-    }
+    qemu_co_sleep_wake(call_state->sleep_state);
}

/*
diff --git a/block/nbd.c b/block/nbd.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn nbd_client_co_drain_begin(BlockDriverState *bs)
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

s->drained = true;
-    if (s->connection_co_sleep_ns_state) {
-        qemu_co_sleep_wake(s->connection_co_sleep_ns_state);
-    }
+    qemu_co_sleep_wake(s->connection_co_sleep_ns_state);

nbd_co_establish_connection_cancel(bs, false);

@@ -XXX,XX +XXX,XX @@ static void nbd_teardown_connection(BlockDriverState *bs)

s->state = NBD_CLIENT_QUIT;
if (s->connection_co) {
-        if (s->connection_co_sleep_ns_state) {
-            qemu_co_sleep_wake(s->connection_co_sleep_ns_state);
-        }
+        qemu_co_sleep_wake(s->connection_co_sleep_ns_state);
nbd_co_establish_connection_cancel(bs, true);
}
if (qemu_in_coroutine()) {
diff --git a/util/qemu-coroutine-sleep.c b/util/qemu-coroutine-sleep.c
index XXXXXXX..XXXXXXX 100644
--- a/util/qemu-coroutine-sleep.c
+++ b/util/qemu-coroutine-sleep.c
@@ -XXX,XX +XXX,XX @@ struct QemuCoSleepState {

void qemu_co_sleep_wake(QemuCoSleepState *sleep_state)
{
-    /* Write of schedule protected by barrier write in aio_co_schedule */
-    const char *scheduled = qatomic_cmpxchg(&sleep_state->co->scheduled,
-                                            qemu_co_sleep_ns__scheduled, NULL);
+    if (sleep_state) {
+        /* Write of schedule protected by barrier write in aio_co_schedule */
+        const char *scheduled = qatomic_cmpxchg(&sleep_state->co->scheduled,
+                                                qemu_co_sleep_ns__scheduled, NULL);

-    assert(scheduled == qemu_co_sleep_ns__scheduled);
-    *sleep_state->user_state_pointer = NULL;
-    timer_del(&sleep_state->ts);
-    aio_co_wake(sleep_state->co);
+        assert(scheduled == qemu_co_sleep_ns__scheduled);
+        *sleep_state->user_state_pointer = NULL;
+        timer_del(&sleep_state->ts);
+        aio_co_wake(sleep_state->co);
+    }
}

static void co_sleep_cb(void *opaque)
{
-    qemu_co_sleep_wake(opaque);
+    QemuCoSleepState **sleep_state = opaque;
+    qemu_co_sleep_wake(*sleep_state);
}

void coroutine_fn qemu_co_sleep_ns_wakeable(QEMUClockType type, int64_t ns,
@@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_sleep_ns_wakeable(QEMUClockType type, int64_t ns,
abort();
}

-    aio_timer_init(ctx, &state.ts, type, SCALE_NS, co_sleep_cb, &state);
+    aio_timer_init(ctx, &state.ts, type, SCALE_NS, co_sleep_cb, sleep_state);
*sleep_state = &state;
timer_mod(&state.ts, qemu_clock_get_ns(type) + ns);
qemu_coroutine_yield();
--
2.31.1
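
A sketch of the caller-side simplification this enables (the MyJob type and my_job_kick() are invented for illustration, not part of this series):

#include "qemu/osdep.h"
#include "qemu/coroutine.h"

typedef struct MyJob {
    QemuCoSleepState *sleep_state;   /* NULL whenever the job is not sleeping */
} MyJob;

static void my_job_kick(MyJob *job)
{
    /*
     * Before this patch the caller had to test for NULL itself:
     *     if (job->sleep_state) { qemu_co_sleep_wake(job->sleep_state); }
     * Now qemu_co_sleep_wake() accepts NULL and simply wakes nothing.
     */
    qemu_co_sleep_wake(job->sleep_state);
}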
From: Paolo Bonzini <pbonzini@redhat.com>

This simplification is enabled by the previous patch. Now aio_co_wake
will only be called once, therefore we do not care about a spurious
firing of the timer after a qemu_co_sleep_wake.

Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20210517100548.28806-5-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
util/qemu-coroutine-sleep.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/util/qemu-coroutine-sleep.c b/util/qemu-coroutine-sleep.c
index XXXXXXX..XXXXXXX 100644
--- a/util/qemu-coroutine-sleep.c
+++ b/util/qemu-coroutine-sleep.c
@@ -XXX,XX +XXX,XX @@ static const char *qemu_co_sleep_ns__scheduled = "qemu_co_sleep_ns";

struct QemuCoSleepState {
Coroutine *co;
-    QEMUTimer ts;
QemuCoSleepState **user_state_pointer;
};

@@ -XXX,XX +XXX,XX @@ void qemu_co_sleep_wake(QemuCoSleepState *sleep_state)

assert(scheduled == qemu_co_sleep_ns__scheduled);
*sleep_state->user_state_pointer = NULL;
-        timer_del(&sleep_state->ts);
aio_co_wake(sleep_state->co);
}
@@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_sleep_ns_wakeable(QEMUClockType type, int64_t ns,
QemuCoSleepState **sleep_state)
{
AioContext *ctx = qemu_get_current_aio_context();
+    QEMUTimer ts;
QemuCoSleepState state = {
.co = qemu_coroutine_self(),
.user_state_pointer = sleep_state,
@@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_sleep_ns_wakeable(QEMUClockType type, int64_t ns,
abort();
}

-    aio_timer_init(ctx, &state.ts, type, SCALE_NS, co_sleep_cb, sleep_state);
+    aio_timer_init(ctx, &ts, type, SCALE_NS, co_sleep_cb, sleep_state);
*sleep_state = &state;
-    timer_mod(&state.ts, qemu_clock_get_ns(type) + ns);
+    timer_mod(&ts, qemu_clock_get_ns(type) + ns);
qemu_coroutine_yield();
+    timer_del(&ts);

/* qemu_co_sleep_wake clears *sleep_state before resuming this coroutine. */
assert(*sleep_state == NULL);
}
--
2.31.1
From: Paolo Bonzini <pbonzini@redhat.com>

Right now, users of qemu_co_sleep_ns_wakeable are simply passing
a pointer to QemuCoSleepState by reference to the function. But
QemuCoSleepState really is just a Coroutine*; making the
content of the struct public is just as efficient and lets us
skip the user_state_pointer indirection.

Since the usage is changed, take the occasion to rename the
struct to QemuCoSleep.

Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20210517100548.28806-6-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
include/qemu/coroutine.h | 23 +++++++++++----------
block/block-copy.c | 8 ++++----
block/nbd.c | 10 ++++-----
util/qemu-coroutine-sleep.c | 41 ++++++++++++++++---------------------
4 files changed, 39 insertions(+), 43 deletions(-)

diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/coroutine.h
+++ b/include/qemu/coroutine.h
@@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_wrlock(CoRwlock *lock);
*/
void qemu_co_rwlock_unlock(CoRwlock *lock);

-typedef struct QemuCoSleepState QemuCoSleepState;
+typedef struct QemuCoSleep {
+    Coroutine *to_wake;
+} QemuCoSleep;

/**
- * Yield the coroutine for a given duration. During this yield, @sleep_state
- * is set to an opaque pointer, which may be used for
- * qemu_co_sleep_wake(). Be careful, the pointer is set back to zero when the
- * timer fires. Don't save the obtained value to other variables and don't call
- * qemu_co_sleep_wake from another aio context.
+ * Yield the coroutine for a given duration. Initializes @w so that,
+ * during this yield, it can be passed to qemu_co_sleep_wake() to
+ * terminate the sleep.
*/
-void coroutine_fn qemu_co_sleep_ns_wakeable(QEMUClockType type, int64_t ns,
-                                            QemuCoSleepState **sleep_state);
+void coroutine_fn qemu_co_sleep_ns_wakeable(QemuCoSleep *w,
+                                            QEMUClockType type, int64_t ns);
+
static inline void coroutine_fn qemu_co_sleep_ns(QEMUClockType type, int64_t ns)
{
-    QemuCoSleepState *unused = NULL;
-    qemu_co_sleep_ns_wakeable(type, ns, &unused);
+    QemuCoSleep w = { 0 };
+    qemu_co_sleep_ns_wakeable(&w, type, ns);
}

/**
@@ -XXX,XX +XXX,XX @@ static inline void coroutine_fn qemu_co_sleep_ns(QEMUClockType type, int64_t ns)
* qemu_co_sleep_ns() and should be checked to be non-NULL before calling
* qemu_co_sleep_wake().
*/
-void qemu_co_sleep_wake(QemuCoSleepState *sleep_state);
+void qemu_co_sleep_wake(QemuCoSleep *w);

/**
* Yield until a file descriptor becomes readable
diff --git a/block/block-copy.c b/block/block-copy.c
index XXXXXXX..XXXXXXX 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -XXX,XX +XXX,XX @@ typedef struct BlockCopyCallState {
/* State */
int ret;
bool finished;
-    QemuCoSleepState *sleep_state;
+    QemuCoSleep sleep;
bool cancelled;

/* OUT parameters */
@@ -XXX,XX +XXX,XX @@ block_copy_dirty_clusters(BlockCopyCallState *call_state)
if (ns > 0) {
block_copy_task_end(task, -EAGAIN);
g_free(task);
-                qemu_co_sleep_ns_wakeable(QEMU_CLOCK_REALTIME, ns,
-                                          &call_state->sleep_state);
+                qemu_co_sleep_ns_wakeable(&call_state->sleep,
+                                          QEMU_CLOCK_REALTIME, ns);
continue;
}
}
@@ -XXX,XX +XXX,XX @@ out:

void block_copy_kick(BlockCopyCallState *call_state)
{
-    qemu_co_sleep_wake(call_state->sleep_state);
+    qemu_co_sleep_wake(&call_state->sleep);
}

/*
diff --git a/block/nbd.c b/block/nbd.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -XXX,XX +XXX,XX @@ typedef struct BDRVNBDState {
CoQueue free_sema;
Coroutine *connection_co;
Coroutine *teardown_co;
-    QemuCoSleepState *connection_co_sleep_ns_state;
+    QemuCoSleep reconnect_sleep;
bool drained;
bool wait_drained_end;
int in_flight;
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn nbd_client_co_drain_begin(BlockDriverState *bs)
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

s->drained = true;
-    qemu_co_sleep_wake(s->connection_co_sleep_ns_state);
+    qemu_co_sleep_wake(&s->reconnect_sleep);

nbd_co_establish_connection_cancel(bs, false);

@@ -XXX,XX +XXX,XX @@ static void nbd_teardown_connection(BlockDriverState *bs)

s->state = NBD_CLIENT_QUIT;
if (s->connection_co) {
-        qemu_co_sleep_wake(s->connection_co_sleep_ns_state);
+        qemu_co_sleep_wake(&s->reconnect_sleep);
nbd_co_establish_connection_cancel(bs, true);
}
if (qemu_in_coroutine()) {
@@ -XXX,XX +XXX,XX @@ static coroutine_fn void nbd_co_reconnect_loop(BDRVNBDState *s)
}
bdrv_inc_in_flight(s->bs);
} else {
-            qemu_co_sleep_ns_wakeable(QEMU_CLOCK_REALTIME, timeout,
-                                      &s->connection_co_sleep_ns_state);
+            qemu_co_sleep_ns_wakeable(&s->reconnect_sleep,
+                                      QEMU_CLOCK_REALTIME, timeout);
if (s->drained) {
continue;
}
diff --git a/util/qemu-coroutine-sleep.c b/util/qemu-coroutine-sleep.c
index XXXXXXX..XXXXXXX 100644
--- a/util/qemu-coroutine-sleep.c
+++ b/util/qemu-coroutine-sleep.c
@@ -XXX,XX +XXX,XX @@

static const char *qemu_co_sleep_ns__scheduled = "qemu_co_sleep_ns";

-struct QemuCoSleepState {
+void qemu_co_sleep_wake(QemuCoSleep *w)
+{
Coroutine *co;
-    QemuCoSleepState **user_state_pointer;
-};

-void qemu_co_sleep_wake(QemuCoSleepState *sleep_state)
-{
-    if (sleep_state) {
+    co = w->to_wake;
+    w->to_wake = NULL;
+    if (co) {
/* Write of schedule protected by barrier write in aio_co_schedule */
-        const char *scheduled = qatomic_cmpxchg(&sleep_state->co->scheduled,
+        const char *scheduled = qatomic_cmpxchg(&co->scheduled,
qemu_co_sleep_ns__scheduled, NULL);

assert(scheduled == qemu_co_sleep_ns__scheduled);
-        *sleep_state->user_state_pointer = NULL;
-        aio_co_wake(sleep_state->co);
+        aio_co_wake(co);
}
}

static void co_sleep_cb(void *opaque)
{
-    QemuCoSleepState **sleep_state = opaque;
-    qemu_co_sleep_wake(*sleep_state);
+    QemuCoSleep *w = opaque;
+    qemu_co_sleep_wake(w);
}

-void coroutine_fn qemu_co_sleep_ns_wakeable(QEMUClockType type, int64_t ns,
-                                            QemuCoSleepState **sleep_state)
+void coroutine_fn qemu_co_sleep_ns_wakeable(QemuCoSleep *w,
+                                            QEMUClockType type, int64_t ns)
{
+    Coroutine *co = qemu_coroutine_self();
AioContext *ctx = qemu_get_current_aio_context();
QEMUTimer ts;
-    QemuCoSleepState state = {
-        .co = qemu_coroutine_self(),
-        .user_state_pointer = sleep_state,
-    };

-    const char *scheduled = qatomic_cmpxchg(&state.co->scheduled, NULL,
-                                            qemu_co_sleep_ns__scheduled);
+    const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL,
+                                            qemu_co_sleep_ns__scheduled);
if (scheduled) {
fprintf(stderr,
"%s: Co-routine was already scheduled in '%s'\n",
@@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_sleep_ns_wakeable(QEMUClockType type, int64_t ns,
abort();
}

-    aio_timer_init(ctx, &ts, type, SCALE_NS, co_sleep_cb, sleep_state);
-    *sleep_state = &state;
+    w->to_wake = co;
+    aio_timer_init(ctx, &ts, type, SCALE_NS, co_sleep_cb, w),
timer_mod(&ts, qemu_clock_get_ns(type) + ns);
qemu_coroutine_yield();
timer_del(&ts);

-    /* qemu_co_sleep_wake clears *sleep_state before resuming this coroutine. */
-    assert(*sleep_state == NULL);
+    /* w->to_wake is cleared before resuming this coroutine. */
+    assert(w->to_wake == NULL);
}
--
2.31.1
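
A usage sketch of the reworked API (MyState, my_state_wait() and my_state_kick() are invented names; both sides are assumed to run in the same AioContext):

#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "qemu/timer.h"

typedef struct MyState {
    QemuCoSleep sleep;               /* embedded by value, no indirection */
} MyState;

static void coroutine_fn my_state_wait(MyState *s)
{
    /* Sleep for up to 100 ms unless my_state_kick() runs first. */
    qemu_co_sleep_ns_wakeable(&s->sleep, QEMU_CLOCK_REALTIME, 100 * SCALE_MS);
}

static void my_state_kick(MyState *s)
{
    /* Safe even if the coroutine is not currently sleeping. */
    qemu_co_sleep_wake(&s->sleep);
}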
From: Paolo Bonzini <pbonzini@redhat.com>

Allow using QemuCoSleep to sleep forever until woken by qemu_co_sleep_wake.
This makes the logic of qemu_co_sleep_ns_wakeable easy to understand.

In the future we will introduce an API that can work even if the
sleep and wake happen from different threads. For now, initializing
w->to_wake after timer_mod is fine because the timer can only fire in
the same AioContext.

Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20210517100548.28806-7-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
include/qemu/coroutine.h | 5 +++++
util/qemu-coroutine-sleep.c | 26 +++++++++++++++++++-------
2 files changed, 24 insertions(+), 7 deletions(-)

diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/coroutine.h
+++ b/include/qemu/coroutine.h
@@ -XXX,XX +XXX,XX @@ typedef struct QemuCoSleep {
void coroutine_fn qemu_co_sleep_ns_wakeable(QemuCoSleep *w,
QEMUClockType type, int64_t ns);

+/**
+ * Yield the coroutine until the next call to qemu_co_sleep_wake.
+ */
+void coroutine_fn qemu_co_sleep(QemuCoSleep *w);
+
static inline void coroutine_fn qemu_co_sleep_ns(QEMUClockType type, int64_t ns)
{
QemuCoSleep w = { 0 };
diff --git a/util/qemu-coroutine-sleep.c b/util/qemu-coroutine-sleep.c
index XXXXXXX..XXXXXXX 100644
--- a/util/qemu-coroutine-sleep.c
+++ b/util/qemu-coroutine-sleep.c
@@ -XXX,XX +XXX,XX @@ static void co_sleep_cb(void *opaque)
qemu_co_sleep_wake(w);
}

-void coroutine_fn qemu_co_sleep_ns_wakeable(QemuCoSleep *w,
-                                            QEMUClockType type, int64_t ns)
+void coroutine_fn qemu_co_sleep(QemuCoSleep *w)
{
Coroutine *co = qemu_coroutine_self();
-    AioContext *ctx = qemu_get_current_aio_context();
-    QEMUTimer ts;

const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL,
qemu_co_sleep_ns__scheduled);
@@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_sleep_ns_wakeable(QemuCoSleep *w,
}

w->to_wake = co;
-    aio_timer_init(ctx, &ts, type, SCALE_NS, co_sleep_cb, w),
-    timer_mod(&ts, qemu_clock_get_ns(type) + ns);
qemu_coroutine_yield();
-    timer_del(&ts);

/* w->to_wake is cleared before resuming this coroutine. */
assert(w->to_wake == NULL);
}
+
+void coroutine_fn qemu_co_sleep_ns_wakeable(QemuCoSleep *w,
+                                            QEMUClockType type, int64_t ns)
+{
+    AioContext *ctx = qemu_get_current_aio_context();
+    QEMUTimer ts;
+
+    aio_timer_init(ctx, &ts, type, SCALE_NS, co_sleep_cb, w);
+    timer_mod(&ts, qemu_clock_get_ns(type) + ns);
+
+    /*
+     * The timer will fire in the current AiOContext, so the callback
+     * must happen after qemu_co_sleep yields and there is no race
+     * between timer_mod and qemu_co_sleep.
+     */
+    qemu_co_sleep(w);
+    timer_del(&ts);
+}
--
2.31.1