Old version (cover letter):

The following changes since commit 8bac3ba57eecc466b7e73dabf7d19328a59f684e:

  Merge remote-tracking branch 'remotes/rth/tags/pull-rx-20200408' into staging (2020-04-09 13:23:30 +0100)

are available in the Git repository at:

  https://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 5710a3e09f9b85801e5ce70797a4a511e5fc9e2c:

  async: use explicit memory barriers (2020-04-09 16:17:14 +0100)

----------------------------------------------------------------
Pull request

Fixes for QEMU on aarch64 ARM hosts and fdmon-io_uring.

----------------------------------------------------------------

Paolo Bonzini (2):
  aio-wait: delegate polling of main AioContext if BQL not held
  async: use explicit memory barriers

Stefan Hajnoczi (1):
  aio-posix: signal-proof fdmon-io_uring

 include/block/aio-wait.h | 22 ++++++++++++++++++++++
 include/block/aio.h      | 29 ++++++++++-------------------
 util/aio-posix.c         | 16 ++++++++++++++--
 util/aio-win32.c         | 17 ++++++++++++++---
 util/async.c             | 16 ++++++++++++----
 util/fdmon-io_uring.c    | 10 ++++++++--
 6 files changed, 80 insertions(+), 30 deletions(-)

--
2.25.1

New version (cover letter):

The following changes since commit 15ef89d2a1a7b93845a6b09c2ee8e1979f6eb30b:

  Update version for v7.0.0-rc1 release (2022-03-22 22:58:44 +0000)

are available in the Git repository at:

  https://gitlab.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 2539eade4f689eda7e9fe45486f18334bfbafaf0:

  hw: Fix misleading hexadecimal format (2022-03-24 10:38:42 +0000)

----------------------------------------------------------------
Pull request

Philippe found cases where the 0x%d format string was used, leading to
misleading output. The patches look harmless and could save people time, so I
think it's worth including them in 7.0.

----------------------------------------------------------------

Philippe Mathieu-Daudé (2):
  block: Fix misleading hexadecimal format
  hw: Fix misleading hexadecimal format

 block/parallels-ext.c | 2 +-
 hw/i386/sgx.c         | 2 +-
 hw/i386/trace-events  | 6 +++---
 hw/misc/trace-events  | 4 ++--
 hw/scsi/trace-events  | 4 ++--
 5 files changed, 9 insertions(+), 9 deletions(-)

--
2.35.1
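
For background on the bug class the new series fixes, here is a minimal
standalone example (not part of either series) of how a decimal conversion
behind a "0x" prefix misleads the reader:

    #include <stdio.h>
    #include <inttypes.h>

    int main(void)
    {
        uint32_t reg = 0x1234;

        /* Decimal conversion behind a hex prefix: prints "0x4660", which
         * a reader will parse as hex 0x4660 (= 18016), not decimal 4660. */
        printf("misleading: 0x%" PRIu32 "\n", reg);

        /* Hex conversion matches the prefix: prints "0x1234". */
        printf("correct:    0x%" PRIx32 "\n", reg);
        return 0;
    }

The git grep pattern quoted in the commit messages below finds exactly these
prefix/conversion mismatches.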
Old version ("async: use explicit memory barriers"):

From: Paolo Bonzini <pbonzini@redhat.com>

When using C11 atomics, non-seqcst reads and writes do not participate
in the total order of seqcst operations. In util/async.c and util/aio-posix.c,
in particular, the pattern that we use

          write ctx->notify_me                 write bh->scheduled
          read bh->scheduled                   read ctx->notify_me
          if !bh->scheduled, sleep             if ctx->notify_me, notify

needs to use seqcst operations for both the write and the read. In
general this is something that we do not want, because there can be
many sources that are polled in addition to bottom halves. The
alternative is to place a seqcst memory barrier between the write
and the read. This also comes with a disadvantage, in that the
memory barrier is implicit on strongly-ordered architectures and
it wastes a few dozen clock cycles.

Fortunately, ctx->notify_me is never written concurrently by two
threads, so we can assert that and relax the writes to ctx->notify_me.
The resulting solution works and performs well on both aarch64 and x86.

Note that the atomic_set/atomic_read combination is not an atomic
read-modify-write, and therefore it is even weaker than C11 ATOMIC_RELAXED;
on x86, ATOMIC_RELAXED compiles to a locked operation.

Analyzed-by: Ying Fang <fangying1@huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Tested-by: Ying Fang <fangying1@huawei.com>
Message-Id: <20200407140746.8041-6-pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 util/aio-posix.c | 16 ++++++++++++++--
 util/aio-win32.c | 17 ++++++++++++++---
 util/async.c     | 16 ++++++++++++----
 3 files changed, 40 insertions(+), 9 deletions(-)

diff --git a/util/aio-posix.c b/util/aio-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
     int64_t timeout;
     int64_t start = 0;
 
+    /*
+     * There cannot be two concurrent aio_poll calls for the same AioContext (or
+     * an aio_poll concurrent with a GSource prepare/check/dispatch callback).
+     * We rely on this below to avoid slow locked accesses to ctx->notify_me.
+     */
     assert(in_aio_context_home_thread(ctx));
 
     /* aio_notify can avoid the expensive event_notifier_set if
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
      * so disable the optimization now.
      */
     if (blocking) {
-        atomic_add(&ctx->notify_me, 2);
+        atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) + 2);
+        /*
+         * Write ctx->notify_me before computing the timeout
+         * (reading bottom half flags, etc.). Pairs with
+         * smp_mb in aio_notify().
+         */
+        smp_mb();
     }
 
     qemu_lockcnt_inc(&ctx->list_lock);
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
     }
 
     if (blocking) {
-        atomic_sub(&ctx->notify_me, 2);
+        /* Finish the poll before clearing the flag. */
+        atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) - 2);
         aio_notify_accept(ctx);
     }
 
diff --git a/util/aio-win32.c b/util/aio-win32.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-win32.c
+++ b/util/aio-win32.c
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
     int count;
     int timeout;
 
+    /*
+     * There cannot be two concurrent aio_poll calls for the same AioContext (or
+     * an aio_poll concurrent with a GSource prepare/check/dispatch callback).
+     * We rely on this below to avoid slow locked accesses to ctx->notify_me.
+     */
+    assert(in_aio_context_home_thread(ctx));
     progress = false;
 
     /* aio_notify can avoid the expensive event_notifier_set if
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
      * so disable the optimization now.
      */
     if (blocking) {
-        atomic_add(&ctx->notify_me, 2);
+        atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) + 2);
+        /*
+         * Write ctx->notify_me before computing the timeout
+         * (reading bottom half flags, etc.). Pairs with
+         * smp_mb in aio_notify().
+         */
+        smp_mb();
     }
 
     qemu_lockcnt_inc(&ctx->list_lock);
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
         ret = WaitForMultipleObjects(count, events, FALSE, timeout);
         if (blocking) {
             assert(first);
-            assert(in_aio_context_home_thread(ctx));
-            atomic_sub(&ctx->notify_me, 2);
+            atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) - 2);
             aio_notify_accept(ctx);
         }
 
diff --git a/util/async.c b/util/async.c
index XXXXXXX..XXXXXXX 100644
--- a/util/async.c
+++ b/util/async.c
@@ -XXX,XX +XXX,XX @@ aio_ctx_prepare(GSource *source, gint *timeout)
 {
     AioContext *ctx = (AioContext *) source;
 
-    atomic_or(&ctx->notify_me, 1);
+    atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) | 1);
+
+    /*
+     * Write ctx->notify_me before computing the timeout
+     * (reading bottom half flags, etc.). Pairs with
+     * smp_mb in aio_notify().
+     */
+    smp_mb();
 
     /* We assume there is no timeout already supplied */
     *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));
@@ -XXX,XX +XXX,XX @@ aio_ctx_check(GSource *source)
     QEMUBH *bh;
     BHListSlice *s;
 
-    atomic_and(&ctx->notify_me, ~1);
+    /* Finish computing the timeout before clearing the flag. */
+    atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) & ~1);
     aio_notify_accept(ctx);
 
     QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
@@ -XXX,XX +XXX,XX @@ LuringState *aio_get_linux_io_uring(AioContext *ctx)
 void aio_notify(AioContext *ctx)
 {
     /* Write e.g. bh->scheduled before reading ctx->notify_me. Pairs
-     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
+     * with smp_mb in aio_ctx_prepare or aio_poll.
      */
     smp_mb();
-    if (ctx->notify_me) {
+    if (atomic_read(&ctx->notify_me)) {
         event_notifier_set(&ctx->notifier);
         atomic_mb_set(&ctx->notified, true);
     }
--
2.25.1
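
The write/read diagram in the commit message above is the classic
store-buffering pattern. As a hedged illustration, here is a standalone
sketch of the protocol in plain C11 atomics rather than QEMU's atomic_*
wrappers (the names are illustrative, not QEMU code):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int notify_me;    /* stands in for ctx->notify_me */
    static atomic_bool scheduled;   /* stands in for bh->scheduled */

    /* Poller side: publish notify_me, then check for pending work.
     * The seq_cst fence plays the role of smp_mb() in aio_poll(). */
    static bool poller_may_sleep(void)
    {
        int old = atomic_load_explicit(&notify_me, memory_order_relaxed);
        atomic_store_explicit(&notify_me, old + 2, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
        return !atomic_load_explicit(&scheduled, memory_order_relaxed);
    }

    /* Notifier side: publish scheduled, then check whether the poller
     * may be about to sleep. Pairs with the fence above, as in
     * aio_notify(). */
    static bool notifier_must_wake(void)
    {
        atomic_store_explicit(&scheduled, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
        return atomic_load_explicit(&notify_me, memory_order_relaxed) != 0;
    }

With both fences in place, at least one side is guaranteed to observe the
other's write, so the bad outcome (the poller sleeps while a bottom half is
pending and nobody notifies it) cannot happen.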
New version ("block: Fix misleading hexadecimal format"):

From: Philippe Mathieu-Daudé <f4bug@amsat.org>

The "0x%u" format is very misleading; replace it with "0x%x".

Found running:

  $ git grep -E '0x%[0-9]*([lL]*|" ?PRI)[dDuU]' block/

Inspired-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Hanna Reitz <hreitz@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Reviewed-by: Denis V. Lunev <den@openvz.org>
Message-id: 20220323114718.58714-2-philippe.mathieu.daude@gmail.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/parallels-ext.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/parallels-ext.c b/block/parallels-ext.c
index XXXXXXX..XXXXXXX 100644
--- a/block/parallels-ext.c
+++ b/block/parallels-ext.c
@@ -XXX,XX +XXX,XX @@ static int parallels_parse_format_extension(BlockDriverState *bs,
             break;
 
         default:
-            error_setg(errp, "Unknown feature: 0x%" PRIu64, fh.magic);
+            error_setg(errp, "Unknown feature: 0x%" PRIx64, fh.magic);
             goto fail;
         }
 
--
2.35.1
Old version ("aio-posix: signal-proof fdmon-io_uring"):

The io_uring_enter(2) syscall returns with errno=EINTR when interrupted
by a signal. Retry the syscall in this case.

It's essential to do this in the io_uring_submit_and_wait() case. My
interpretation of the Linux v5.5 io_uring_enter(2) code is that it
shouldn't affect the io_uring_submit() case, but there is no guarantee
this will always be the case. Let's check for -EINTR around both APIs.

Note that the liburing APIs have -errno return values.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20200408091139.273851-1-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 util/fdmon-io_uring.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/util/fdmon-io_uring.c b/util/fdmon-io_uring.c
index XXXXXXX..XXXXXXX 100644
--- a/util/fdmon-io_uring.c
+++ b/util/fdmon-io_uring.c
@@ -XXX,XX +XXX,XX @@ static struct io_uring_sqe *get_sqe(AioContext *ctx)
     }
 
     /* No free sqes left, submit pending sqes first */
-    ret = io_uring_submit(ring);
+    do {
+        ret = io_uring_submit(ring);
+    } while (ret == -EINTR);
+
     assert(ret > 1);
     sqe = io_uring_get_sqe(ring);
     assert(sqe);
@@ -XXX,XX +XXX,XX @@ static int fdmon_io_uring_wait(AioContext *ctx, AioHandlerList *ready_list,
 
     fill_sq_ring(ctx);
 
-    ret = io_uring_submit_and_wait(&ctx->fdmon_io_uring, wait_nr);
+    do {
+        ret = io_uring_submit_and_wait(&ctx->fdmon_io_uring, wait_nr);
+    } while (ret == -EINTR);
+
     assert(ret >= 0);
 
     return process_cq_ring(ctx, ready_list);
--
2.25.1
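
Because liburing reports failure as a negative errno value rather than by
setting errno, the interrupted case is detected by comparing the return value
with -EINTR. The same idiom as a standalone helper (an illustrative sketch,
not a QEMU or liburing API):

    #include <errno.h>
    #include <liburing.h>

    /* Retry a submission that was interrupted by a signal. liburing
     * returns -errno directly, so EINTR shows up as a -EINTR return
     * value, not through the errno variable. */
    static int io_uring_submit_retry(struct io_uring *ring)
    {
        int ret;

        do {
            ret = io_uring_submit(ring);
        } while (ret == -EINTR);

        return ret;
    }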
New version ("hw: Fix misleading hexadecimal format"):

From: Philippe Mathieu-Daudé <f4bug@amsat.org>

The "0x%u" format is very misleading; replace it with "0x%x".

Found running:

  $ git grep -E '0x%[0-9]*([lL]*|" ?PRI)[dDuU]' hw/

Inspired-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Message-id: 20220323114718.58714-3-philippe.mathieu.daude@gmail.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 hw/i386/sgx.c        | 2 +-
 hw/i386/trace-events | 6 +++---
 hw/misc/trace-events | 4 ++--
 hw/scsi/trace-events | 4 ++--
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/hw/i386/sgx.c b/hw/i386/sgx.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/i386/sgx.c
+++ b/hw/i386/sgx.c
@@ -XXX,XX +XXX,XX @@ void pc_machine_init_sgx_epc(PCMachineState *pcms)
     }
 
     if ((sgx_epc->base + sgx_epc->size) < sgx_epc->base) {
-        error_report("Size of all 'sgx-epc' =0x%"PRIu64" causes EPC to wrap",
+        error_report("Size of all 'sgx-epc' =0x%"PRIx64" causes EPC to wrap",
                      sgx_epc->size);
         exit(EXIT_FAILURE);
     }
diff --git a/hw/i386/trace-events b/hw/i386/trace-events
index XXXXXXX..XXXXXXX 100644
--- a/hw/i386/trace-events
+++ b/hw/i386/trace-events
@@ -XXX,XX +XXX,XX @@ vtd_fault_disabled(void) "Fault processing disabled for context entry"
 vtd_replay_ce_valid(const char *mode, uint8_t bus, uint8_t dev, uint8_t fn, uint16_t domain, uint64_t hi, uint64_t lo) "%s: replay valid context device %02"PRIx8":%02"PRIx8".%02"PRIx8" domain 0x%"PRIx16" hi 0x%"PRIx64" lo 0x%"PRIx64
 vtd_replay_ce_invalid(uint8_t bus, uint8_t dev, uint8_t fn) "replay invalid context device %02"PRIx8":%02"PRIx8".%02"PRIx8
 vtd_page_walk_level(uint64_t addr, uint32_t level, uint64_t start, uint64_t end) "walk (base=0x%"PRIx64", level=%"PRIu32") iova range 0x%"PRIx64" - 0x%"PRIx64
-vtd_page_walk_one(uint16_t domain, uint64_t iova, uint64_t gpa, uint64_t mask, int perm) "domain 0x%"PRIu16" iova 0x%"PRIx64" -> gpa 0x%"PRIx64" mask 0x%"PRIx64" perm %d"
+vtd_page_walk_one(uint16_t domain, uint64_t iova, uint64_t gpa, uint64_t mask, int perm) "domain 0x%"PRIx16" iova 0x%"PRIx64" -> gpa 0x%"PRIx64" mask 0x%"PRIx64" perm %d"
 vtd_page_walk_one_skip_map(uint64_t iova, uint64_t mask, uint64_t translated) "iova 0x%"PRIx64" mask 0x%"PRIx64" translated 0x%"PRIx64
 vtd_page_walk_one_skip_unmap(uint64_t iova, uint64_t mask) "iova 0x%"PRIx64" mask 0x%"PRIx64
 vtd_page_walk_skip_read(uint64_t iova, uint64_t next) "Page walk skip iova 0x%"PRIx64" - 0x%"PRIx64" due to unable to read"
 vtd_page_walk_skip_reserve(uint64_t iova, uint64_t next) "Page walk skip iova 0x%"PRIx64" - 0x%"PRIx64" due to rsrv set"
 vtd_switch_address_space(uint8_t bus, uint8_t slot, uint8_t fn, bool on) "Device %02x:%02x.%x switching address space (iommu enabled=%d)"
 vtd_as_unmap_whole(uint8_t bus, uint8_t slot, uint8_t fn, uint64_t iova, uint64_t size) "Device %02x:%02x.%x start 0x%"PRIx64" size 0x%"PRIx64
-vtd_translate_pt(uint16_t sid, uint64_t addr) "source id 0x%"PRIu16", iova 0x%"PRIx64
-vtd_pt_enable_fast_path(uint16_t sid, bool success) "sid 0x%"PRIu16" %d"
+vtd_translate_pt(uint16_t sid, uint64_t addr) "source id 0x%"PRIx16", iova 0x%"PRIx64
+vtd_pt_enable_fast_path(uint16_t sid, bool success) "sid 0x%"PRIx16" %d"
 vtd_irq_generate(uint64_t addr, uint64_t data) "addr 0x%"PRIx64" data 0x%"PRIx64
 vtd_reg_read(uint64_t addr, uint64_t size) "addr 0x%"PRIx64" size 0x%"PRIx64
 vtd_reg_write(uint64_t addr, uint64_t size, uint64_t val) "addr 0x%"PRIx64" size 0x%"PRIx64" value 0x%"PRIx64
diff --git a/hw/misc/trace-events b/hw/misc/trace-events
index XXXXXXX..XXXXXXX 100644
--- a/hw/misc/trace-events
+++ b/hw/misc/trace-events
@@ -XXX,XX +XXX,XX @@
 # See docs/devel/tracing.rst for syntax documentation.
 
 # allwinner-cpucfg.c
-allwinner_cpucfg_cpu_reset(uint8_t cpu_id, uint32_t reset_addr) "id %u, reset_addr 0x%" PRIu32
+allwinner_cpucfg_cpu_reset(uint8_t cpu_id, uint32_t reset_addr) "id %u, reset_addr 0x%" PRIx32
 allwinner_cpucfg_read(uint64_t offset, uint64_t data, unsigned size) "offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32
 allwinner_cpucfg_write(uint64_t offset, uint64_t data, unsigned size) "offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32
 
@@ -XXX,XX +XXX,XX @@ imx7_gpr_write(uint64_t offset, uint64_t value) "addr 0x%08" PRIx64 "value 0x%08
 
 # mos6522.c
 mos6522_set_counter(int index, unsigned int val) "T%d.counter=%d"
-mos6522_get_next_irq_time(uint16_t latch, int64_t d, int64_t delta) "latch=%d counter=0x%"PRId64 " delta_next=0x%"PRId64
+mos6522_get_next_irq_time(uint16_t latch, int64_t d, int64_t delta) "latch=%d counter=0x%"PRIx64 " delta_next=0x%"PRIx64
 mos6522_set_sr_int(void) "set sr_int"
 mos6522_write(uint64_t addr, const char *name, uint64_t val) "reg=0x%"PRIx64 " [%s] val=0x%"PRIx64
 mos6522_read(uint64_t addr, const char *name, unsigned val) "reg=0x%"PRIx64 " [%s] val=0x%x"
diff --git a/hw/scsi/trace-events b/hw/scsi/trace-events
index XXXXXXX..XXXXXXX 100644
--- a/hw/scsi/trace-events
+++ b/hw/scsi/trace-events
@@ -XXX,XX +XXX,XX @@ lsi_bad_phase_interrupt(void) "Phase mismatch interrupt"
 lsi_bad_selection(uint32_t id) "Selected absent target %"PRIu32
 lsi_do_dma_unavailable(void) "DMA no data available"
 lsi_do_dma(uint64_t addr, int len) "DMA addr=0x%"PRIx64" len=%d"
-lsi_queue_command(uint32_t tag) "Queueing tag=0x%"PRId32
+lsi_queue_command(uint32_t tag) "Queueing tag=0x%"PRIx32
 lsi_add_msg_byte_error(void) "MSG IN data too long"
 lsi_add_msg_byte(uint8_t data) "MSG IN 0x%02x"
 lsi_reselect(int id) "Reselected target %d"
@@ -XXX,XX +XXX,XX @@ lsi_do_msgout_noop(void) "MSG: No Operation"
 lsi_do_msgout_extended(uint8_t msg, uint8_t len) "Extended message 0x%x (len %d)"
 lsi_do_msgout_ignored(const char *msg) "%s (ignored)"
 lsi_do_msgout_simplequeue(uint8_t select_tag) "SIMPLE queue tag=0x%x"
-lsi_do_msgout_abort(uint32_t tag) "MSG: ABORT TAG tag=0x%"PRId32
+lsi_do_msgout_abort(uint32_t tag) "MSG: ABORT TAG tag=0x%"PRIx32
 lsi_do_msgout_clearqueue(uint32_t tag) "MSG: CLEAR QUEUE tag=0x%"PRIx32
 lsi_do_msgout_busdevicereset(uint32_t tag) "MSG: BUS DEVICE RESET tag=0x%"PRIx32
 lsi_do_msgout_select(int id) "Select LUN %d"
--
2.35.1
Deleted patch ("aio-wait: delegate polling of main AioContext if BQL not held", present only in the old series):

From: Paolo Bonzini <pbonzini@redhat.com>

Any thread that is not an iothread returns NULL for qemu_get_current_aio_context().
As a result, it would also return true for
in_aio_context_home_thread(qemu_get_aio_context()), causing
AIO_WAIT_WHILE to invoke aio_poll() directly. This is incorrect
if the BQL is not held, because aio_poll() does not expect to
run concurrently from multiple threads, and it can actually
happen when savevm writes to the vmstate file from the
migration thread.

Therefore, restrict in_aio_context_home_thread to return true
for the main AioContext only if the BQL is held.

The function is moved to aio-wait.h because it is mostly used
there and to avoid a circular reference between main-loop.h
and block/aio.h.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20200407140746.8041-5-pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/block/aio-wait.h | 22 ++++++++++++++++++++++
 include/block/aio.h      | 29 ++++++++++-------------------
 2 files changed, 32 insertions(+), 19 deletions(-)

diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -XXX,XX +XXX,XX @@
 #define QEMU_AIO_WAIT_H
 
 #include "block/aio.h"
+#include "qemu/main-loop.h"
 
 /**
  * AioWait:
@@ -XXX,XX +XXX,XX @@ void aio_wait_kick(void);
  */
 void aio_wait_bh_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
 
+/**
+ * in_aio_context_home_thread:
+ * @ctx: the aio context
+ *
+ * Return whether we are running in the thread that normally runs @ctx. Note
+ * that acquiring/releasing ctx does not affect the outcome, each AioContext
+ * still only has one home thread that is responsible for running it.
+ */
+static inline bool in_aio_context_home_thread(AioContext *ctx)
+{
+    if (ctx == qemu_get_current_aio_context()) {
+        return true;
+    }
+
+    if (ctx == qemu_get_aio_context()) {
+        return qemu_mutex_iothread_locked();
+    } else {
+        return false;
+    }
+}
+
 #endif /* QEMU_AIO_WAIT_H */
diff --git a/include/block/aio.h b/include/block/aio.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -XXX,XX +XXX,XX @@ struct AioContext {
     AioHandlerList deleted_aio_handlers;
 
     /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
-     * accessed with atomic primitives. If this field is 0, everything
-     * (file descriptors, bottom halves, timers) will be re-evaluated
-     * before the next blocking poll(), thus the event_notifier_set call
-     * can be skipped. If it is non-zero, you may need to wake up a
-     * concurrent aio_poll or the glib main event loop, making
-     * event_notifier_set necessary.
+     * only written from the AioContext home thread, or under the BQL in
+     * the case of the main AioContext. However, it is read from any
+     * thread so it is still accessed with atomic primitives.
+     *
+     * If this field is 0, everything (file descriptors, bottom halves,
+     * timers) will be re-evaluated before the next blocking poll() or
+     * io_uring wait; therefore, the event_notifier_set call can be
+     * skipped. If it is non-zero, you may need to wake up a concurrent
+     * aio_poll or the glib main event loop, making event_notifier_set
+     * necessary.
      *
      * Bit 0 is reserved for GSource usage of the AioContext, and is 1
      * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
@@ -XXX,XX +XXX,XX @@ void aio_co_enter(AioContext *ctx, struct Coroutine *co);
  */
 AioContext *qemu_get_current_aio_context(void);
 
-/**
- * in_aio_context_home_thread:
- * @ctx: the aio context
- *
- * Return whether we are running in the thread that normally runs @ctx. Note
- * that acquiring/releasing ctx does not affect the outcome, each AioContext
- * still only has one home thread that is responsible for running it.
- */
-static inline bool in_aio_context_home_thread(AioContext *ctx)
-{
-    return ctx == qemu_get_current_aio_context();
-}
-
 /**
  * aio_context_setup:
  * @ctx: the aio context
--
2.25.1
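
To make the new semantics concrete, here is a hedged sketch of the check a
waiter must pass before polling an AioContext directly (simplified; the real
AIO_WAIT_WHILE in include/block/aio-wait.h also handles waiting from the main
loop on another context's behalf and wakes waiters through aio_wait_kick()):

    /* Sketch only, not the actual QEMU macro: running @ctx's event loop
     * directly is safe only from its home thread, and for the main
     * AioContext that now additionally requires holding the BQL, since
     * the main loop thread also polls it under the BQL. */
    static void wait_until_done(AioContext *ctx, const bool *done)
    {
        assert(in_aio_context_home_thread(ctx));

        while (!*done) {
            aio_poll(ctx, true);
        }
    }

Without the BQL clause in in_aio_context_home_thread(), a thread such as the
migration thread could pass this check for the main AioContext and end up
running aio_poll() concurrently with the main loop thread, which is exactly
the savevm bug described above.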