The following changes since commit 825b96dbcee23d134b691fc75618b59c5f53da32:

  Merge tag 'migration-20250310-pull-request' of https://gitlab.com/farosas/qemu into staging (2025-03-11 09:32:07 +0800)

are available in the Git repository at:

  https://repo.or.cz/qemu/kevin.git tags/for-upstream

for you to fetch changes up to df957115c46845e2c0ccc29ac0a75eb9700a9a0d:

  scripts/qcow2-to-stdout.py: Add script to write qcow2 images to stdout (2025-03-13 17:57:23 +0100)

----------------------------------------------------------------
Block layer patches

- virtio-scsi: add iothread-vq-mapping parameter
- Improve writethrough performance
- Fix missing zero init in bdrv_snapshot_goto()
- Added scripts/qcow2-to-stdout.py
- Code cleanup and iotests fixes

----------------------------------------------------------------
Alberto Garcia (1):
      scripts/qcow2-to-stdout.py: Add script to write qcow2 images to stdout

Kevin Wolf (8):
      block: Remove unused blk_op_is_blocked()
      block: Zero block driver state before reopening
      file-posix: Support FUA writes
      block/io: Ignore FUA with cache.no-flush=on
      aio: Create AioPolledEvent
      aio-posix: Factor out adjust_polling_time()
      aio-posix: Separate AioPolledEvent per AioHandler
      aio-posix: Adjust polling time also for new handlers

Stefan Hajnoczi (13):
      scsi-disk: drop unused SCSIDiskState->bh field
      dma: use current AioContext for dma_blk_io()
      scsi: track per-SCSIRequest AioContext
      scsi: introduce requests_lock
      virtio-scsi: introduce event and ctrl virtqueue locks
      virtio-scsi: protect events_dropped field
      virtio-scsi: perform TMFs in appropriate AioContexts
      virtio-blk: extract cleanup_iothread_vq_mapping() function
      virtio-blk: tidy up iothread_vq_mapping functions
      virtio: extract iothread-vq-mapping.h API
      virtio-scsi: add iothread-vq-mapping parameter
      virtio-scsi: handle ctrl virtqueue in main loop
      virtio-scsi: only expose cmd vqs via iothread-vq-mapping

Thomas Huth (1):
      iotests: Limit qsd-migrate to working formats

 include/block/aio.h | 5 +-
 include/block/raw-aio.h | 19 +-
 include/hw/scsi/scsi.h | 8 +-
 include/hw/virtio/iothread-vq-mapping.h | 45 +++
 include/hw/virtio/virtio-scsi.h | 15 +-
 include/system/block-backend-global-state.h | 1 -
 include/system/dma.h | 3 +-
 util/aio-posix.h | 1 +
 block/block-backend.c | 12 -
 block/file-posix.c | 29 +-
 block/io.c | 4 +
 block/io_uring.c | 25 +-
 block/linux-aio.c | 25 +-
 block/snapshot.c | 1 +
 hw/block/virtio-blk.c | 132 +-------
 hw/ide/core.c | 3 +-
 hw/ide/macio.c | 3 +-
 hw/scsi/scsi-bus.c | 121 +++++--
 hw/scsi/scsi-disk.c | 24 +-
 hw/scsi/virtio-scsi-dataplane.c | 103 ++++--
 hw/scsi/virtio-scsi.c | 502 ++++++++++++++++------------
 hw/virtio/iothread-vq-mapping.c | 131 ++++++++
 system/dma-helpers.c | 8 +-
 util/aio-posix.c | 114 ++++---
 util/async.c | 1 -
 scripts/qcow2-to-stdout.py | 449 +++++++++++++++++++++++++
 hw/virtio/meson.build | 1 +
 meson.build | 8 +
 tests/qemu-iotests/051.pc.out | 2 +-
 tests/qemu-iotests/tests/qsd-migrate | 2 +-
 30 files changed, 1286 insertions(+), 511 deletions(-)
 create mode 100644 include/hw/virtio/iothread-vq-mapping.h
 create mode 100644 hw/virtio/iothread-vq-mapping.c
 create mode 100755 scripts/qcow2-to-stdout.py

----------------------------------------------------------------
The earlier "Block layer patches" pull request, whose patches follow below, was:

The following changes since commit ac5f7bf8e208cd7893dbb1a9520559e569a4677c:

  Merge tag 'migration-20230424-pull-request' of https://gitlab.com/juan.quintela/qemu into staging (2023-04-24 15:00:39 +0100)

are available in the Git repository at:

  https://repo.or.cz/qemu/kevin.git tags/for-upstream

for you to fetch changes up to 8c1e8fb2e7fc2cbeb57703e143965a4cd3ad301a:

  block/monitor: Fix crash when executing HMP commit (2023-04-25 15:11:57 +0200)

----------------------------------------------------------------
Block layer patches

- Protect BlockBackend.queued_requests with its own lock
- Switch to AIO_WAIT_WHILE_UNLOCKED() where possible
- AioContext removal: LinuxAioState/LuringState/ThreadPool
- Add more coroutine_fn annotations, use bdrv/blk_co_*
- Fix crash when executing HMP commit

----------------------------------------------------------------
Emanuele Giuseppe Esposito (4):
      linux-aio: use LinuxAioState from the running thread
      io_uring: use LuringState from the running thread
      thread-pool: use ThreadPool from the running thread
      thread-pool: avoid passing the pool parameter every time

Paolo Bonzini (9):
      vvfat: mark various functions as coroutine_fn
      blkdebug: add missing coroutine_fn annotation
      mirror: make mirror_flush a coroutine_fn, do not use co_wrappers
      nbd: mark more coroutine_fns, do not use co_wrappers
      9pfs: mark more coroutine_fns
      qemu-pr-helper: mark more coroutine_fns
      tests: mark more coroutine_fns
      qcow2: mark various functions as coroutine_fn and GRAPH_RDLOCK
      vmdk: make vmdk_is_cid_valid a coroutine_fn

Stefan Hajnoczi (10):
      block: make BlockBackend->quiesce_counter atomic
      block: make BlockBackend->disable_request_queuing atomic
      block: protect BlockBackend->queued_requests with a lock
      block: don't acquire AioContext lock in bdrv_drain_all()
      block: convert blk_exp_close_all_type() to AIO_WAIT_WHILE_UNLOCKED()
      block: convert bdrv_graph_wrlock() to AIO_WAIT_WHILE_UNLOCKED()
      block: convert bdrv_drain_all_begin() to AIO_WAIT_WHILE_UNLOCKED()
      hmp: convert handle_hmp_command() to AIO_WAIT_WHILE_UNLOCKED()
      monitor: convert monitor_cleanup() to AIO_WAIT_WHILE_UNLOCKED()
      block: add missing coroutine_fn to bdrv_sum_allocated_file_size()

Wang Liang (1):
      block/monitor: Fix crash when executing HMP commit

Wilfred Mallawa (1):
      include/block: fixup typos

 block/qcow2.h | 15 +++++-----
 hw/9pfs/9p.h | 4 +--
 include/block/aio-wait.h | 2 +-
 include/block/aio.h | 8 ------
 include/block/block_int-common.h | 2 +-
 include/block/raw-aio.h | 33 +++++++++++++++-------
 include/block/thread-pool.h | 15 ++++++----
 include/sysemu/block-backend-io.h | 5 ++++
 backends/tpm/tpm_backend.c | 4 +--
 block.c | 2 +-
 block/blkdebug.c | 4 +--
 block/block-backend.c | 45 ++++++++++++++++++------------
 block/export/export.c | 2 +-
 block/file-posix.c | 45 ++++++++++++------------
 block/file-win32.c | 4 +--
 block/graph-lock.c | 2 +-
 block/io.c | 2 +-
 block/io_uring.c | 23 ++++++++++------
 block/linux-aio.c | 29 ++++++++++++--------
 block/mirror.c | 4 +--
 block/monitor/block-hmp-cmds.c | 10 ++++---
 block/qcow2-bitmap.c | 2 +-
 block/qcow2-cluster.c | 21 ++++++++------
 block/qcow2-refcount.c | 8 +++---
 block/qcow2-snapshot.c | 25 +++++++++--------
 block/qcow2-threads.c | 3 +-
 block/qcow2.c | 27 +++++++++---------
 block/vmdk.c | 2 +-
 block/vvfat.c | 58 ++++++++++++++++++++-------------------
 hw/9pfs/codir.c | 6 ++--
 hw/9pfs/coth.c | 3 +-
 hw/ppc/spapr_nvdimm.c | 6 ++--
 hw/virtio/virtio-pmem.c | 3 +-
 monitor/hmp.c | 2 +-
 monitor/monitor.c | 4 +--
 nbd/server.c | 48 ++++++++++++++++----------------
 scsi/pr-manager.c | 3 +-
 scsi/qemu-pr-helper.c | 25 ++++++++---------
 tests/unit/test-thread-pool.c | 14 ++++------
 util/thread-pool.c | 25 ++++++++---------
 40 files changed, 283 insertions(+), 262 deletions(-)
From: Stefan Hajnoczi <stefanha@redhat.com>

The main loop thread increments/decrements BlockBackend->quiesce_counter
when drained sections begin/end. The counter is read in the I/O code
path. Therefore this field is used to communicate between threads
without a lock.

Acquire/release are not necessary because the BlockBackend->in_flight
counter already uses sequentially consistent accesses and running I/O
requests hold that counter when blk_wait_while_drained() is called.
qatomic_read() can be used.

Use qatomic_fetch_inc()/qatomic_fetch_dec() for modifications even
though sequentially consistent atomic accesses are not strictly required
here. They are, however, nicer to read than multiple calls to
qatomic_read() and qatomic_set(). Since beginning and ending drain is
not a hot path the extra cost doesn't matter.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20230307210427.269214-2-stefanha@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/block-backend.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/block/block-backend.c b/block/block-backend.c
index XXXXXXX..XXXXXXX 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -XXX,XX +XXX,XX @@ struct BlockBackend {
     NotifierList remove_bs_notifiers, insert_bs_notifiers;
     QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers;
 
-    int quiesce_counter;
+    int quiesce_counter; /* atomic: written under BQL, read by other threads */
     CoQueue queued_requests;
     bool disable_request_queuing;
 
@@ -XXX,XX +XXX,XX @@ void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
     blk->dev_opaque = opaque;
 
     /* Are we currently quiesced? Should we enforce this right now? */
-    if (blk->quiesce_counter && ops && ops->drained_begin) {
+    if (qatomic_read(&blk->quiesce_counter) && ops && ops->drained_begin) {
         ops->drained_begin(opaque);
     }
 }
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn blk_wait_while_drained(BlockBackend *blk)
 {
     assert(blk->in_flight > 0);
 
-    if (blk->quiesce_counter && !blk->disable_request_queuing) {
+    if (qatomic_read(&blk->quiesce_counter) && !blk->disable_request_queuing) {
         blk_dec_in_flight(blk);
         qemu_co_queue_wait(&blk->queued_requests, NULL);
         blk_inc_in_flight(blk);
@@ -XXX,XX +XXX,XX @@ static void blk_root_drained_begin(BdrvChild *child)
     BlockBackend *blk = child->opaque;
     ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
 
-    if (++blk->quiesce_counter == 1) {
+    if (qatomic_fetch_inc(&blk->quiesce_counter) == 0) {
         if (blk->dev_ops && blk->dev_ops->drained_begin) {
             blk->dev_ops->drained_begin(blk->dev_opaque);
         }
@@ -XXX,XX +XXX,XX @@ static bool blk_root_drained_poll(BdrvChild *child)
 {
     BlockBackend *blk = child->opaque;
     bool busy = false;
-    assert(blk->quiesce_counter);
+    assert(qatomic_read(&blk->quiesce_counter));
 
     if (blk->dev_ops && blk->dev_ops->drained_poll) {
         busy = blk->dev_ops->drained_poll(blk->dev_opaque);
@@ -XXX,XX +XXX,XX @@ static bool blk_root_drained_poll(BdrvChild *child)
 static void blk_root_drained_end(BdrvChild *child)
 {
     BlockBackend *blk = child->opaque;
-    assert(blk->quiesce_counter);
+    assert(qatomic_read(&blk->quiesce_counter));
 
     assert(blk->public.throttle_group_member.io_limits_disabled);
     qatomic_dec(&blk->public.throttle_group_member.io_limits_disabled);
 
-    if (--blk->quiesce_counter == 0) {
+    if (qatomic_fetch_dec(&blk->quiesce_counter) == 1) {
         if (blk->dev_ops && blk->dev_ops->drained_end) {
             blk->dev_ops->drained_end(blk->dev_opaque);
         }
--
2.40.0
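The counter pattern above can be read on its own, away from the BlockBackend code. Below is a minimal, self-contained sketch in plain C11 atomics, assuming nothing about QEMU: qatomic_fetch_inc()/qatomic_fetch_dec()/qatomic_read() correspond to the C11 calls used here, and the drain_begin()/drain_end()/drained_*_cb() names are invented placeholders rather than QEMU APIs.

    /* Illustrative sketch only, not QEMU code. */
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int quiesce_counter;

    static void drained_begin_cb(void) { puts("first drained section began"); }
    static void drained_end_cb(void)   { puts("last drained section ended"); }

    static void drain_begin(void)
    {
        /* fetch-and-increment returns the old value: 0 means "first drain" */
        if (atomic_fetch_add(&quiesce_counter, 1) == 0) {
            drained_begin_cb();
        }
    }

    static void drain_end(void)
    {
        /* fetch-and-decrement returns the old value: 1 means "last drain" */
        if (atomic_fetch_sub(&quiesce_counter, 1) == 1) {
            drained_end_cb();
        }
    }

    static int is_quiesced(void)
    {
        /* readers in the I/O path only need a plain atomic load */
        return atomic_load(&quiesce_counter) > 0;
    }

    int main(void)
    {
        drain_begin();
        drain_begin();               /* nested drain: no callback */
        printf("quiesced: %d\n", is_quiesced());
        drain_end();
        drain_end();                 /* back to zero: end callback fires */
        return 0;
    }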
From: Stefan Hajnoczi <stefanha@redhat.com>

This field is accessed by multiple threads without a lock. Use explicit
qatomic_read()/qatomic_set() calls. There is no need for acquire/release
because blk_set_disable_request_queuing() doesn't provide any
guarantees (it helps that it's used at BlockBackend creation time and
not when there is I/O in flight).

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Hanna Czenczek <hreitz@redhat.com>
Message-Id: <20230307210427.269214-3-stefanha@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/block-backend.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/block/block-backend.c b/block/block-backend.c
index XXXXXXX..XXXXXXX 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -XXX,XX +XXX,XX @@ struct BlockBackend {
 
     int quiesce_counter; /* atomic: written under BQL, read by other threads */
     CoQueue queued_requests;
-    bool disable_request_queuing;
+    bool disable_request_queuing; /* atomic */
 
     VMChangeStateEntry *vmsh;
     bool force_allow_inactivate;
@@ -XXX,XX +XXX,XX @@ void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow)
 void blk_set_disable_request_queuing(BlockBackend *blk, bool disable)
 {
     IO_CODE();
-    blk->disable_request_queuing = disable;
+    qatomic_set(&blk->disable_request_queuing, disable);
 }
 
 static int coroutine_fn GRAPH_RDLOCK
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn blk_wait_while_drained(BlockBackend *blk)
 {
     assert(blk->in_flight > 0);
 
-    if (qatomic_read(&blk->quiesce_counter) && !blk->disable_request_queuing) {
+    if (qatomic_read(&blk->quiesce_counter) &&
+        !qatomic_read(&blk->disable_request_queuing)) {
         blk_dec_in_flight(blk);
         qemu_co_queue_wait(&blk->queued_requests, NULL);
         blk_inc_in_flight(blk);
--
2.40.0
From: Stefan Hajnoczi <stefanha@redhat.com>

The CoQueue API offers thread-safety via the lock argument that
qemu_co_queue_wait() and qemu_co_enter_next() take. BlockBackend
currently does not make use of the lock argument. This means that
multiple threads submitting I/O requests can corrupt the CoQueue's
QSIMPLEQ.

Add a QemuMutex and pass it to CoQueue APIs so that the queue is
protected. While we're at it, also assert that the queue is empty when
the BlockBackend is deleted.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Hanna Czenczek <hreitz@redhat.com>
Message-Id: <20230307210427.269214-4-stefanha@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/block-backend.c | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git a/block/block-backend.c b/block/block-backend.c
index XXXXXXX..XXXXXXX 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -XXX,XX +XXX,XX @@ struct BlockBackend {
     QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers;
 
     int quiesce_counter; /* atomic: written under BQL, read by other threads */
+    QemuMutex queued_requests_lock; /* protects queued_requests */
     CoQueue queued_requests;
     bool disable_request_queuing; /* atomic */
 
@@ -XXX,XX +XXX,XX @@ BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm)
 
     block_acct_init(&blk->stats);
 
+    qemu_mutex_init(&blk->queued_requests_lock);
     qemu_co_queue_init(&blk->queued_requests);
     notifier_list_init(&blk->remove_bs_notifiers);
     notifier_list_init(&blk->insert_bs_notifiers);
@@ -XXX,XX +XXX,XX @@ static void blk_delete(BlockBackend *blk)
     assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
     assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
     assert(QLIST_EMPTY(&blk->aio_notifiers));
+    assert(qemu_co_queue_empty(&blk->queued_requests));
+    qemu_mutex_destroy(&blk->queued_requests_lock);
     QTAILQ_REMOVE(&block_backends, blk, link);
     drive_info_del(blk->legacy_dinfo);
     block_acct_cleanup(&blk->stats);
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn blk_wait_while_drained(BlockBackend *blk)
 
     if (qatomic_read(&blk->quiesce_counter) &&
         !qatomic_read(&blk->disable_request_queuing)) {
+        /*
+         * Take lock before decrementing in flight counter so main loop thread
+         * waits for us to enqueue ourselves before it can leave the drained
+         * section.
+         */
+        qemu_mutex_lock(&blk->queued_requests_lock);
         blk_dec_in_flight(blk);
-        qemu_co_queue_wait(&blk->queued_requests, NULL);
+        qemu_co_queue_wait(&blk->queued_requests, &blk->queued_requests_lock);
         blk_inc_in_flight(blk);
+        qemu_mutex_unlock(&blk->queued_requests_lock);
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void blk_root_drained_end(BdrvChild *child)
         if (blk->dev_ops && blk->dev_ops->drained_end) {
             blk->dev_ops->drained_end(blk->dev_opaque);
         }
-        while (qemu_co_enter_next(&blk->queued_requests, NULL)) {
+        qemu_mutex_lock(&blk->queued_requests_lock);
+        while (qemu_co_enter_next(&blk->queued_requests,
+                                  &blk->queued_requests_lock)) {
             /* Resume all queued requests */
         }
+        qemu_mutex_unlock(&blk->queued_requests_lock);
     }
 }
 
--
2.40.0
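The ordering that the new comment in blk_wait_while_drained() describes (take the lock first, then decrement the in-flight counter, then sleep on the queue while the lock is dropped atomically) has a close analogue in plain pthreads, where pthread_cond_wait() plays the role of qemu_co_queue_wait(&queue, &lock). The sketch below is an illustration under that analogy only; the names are invented and nothing here is QEMU API. Build with cc -pthread.

    /* Illustrative pthread analogue of the queued-requests locking, not QEMU code. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t queued_requests_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  queued_requests      = PTHREAD_COND_INITIALIZER;
    static int quiesced  = 1;   /* a drained section is active at startup */
    static int in_flight = 1;   /* the request below counts as in flight */

    static void *request_thread(void *arg)
    {
        pthread_mutex_lock(&queued_requests_lock);
        /*
         * Decrement in_flight only after taking the lock, so the draining
         * thread cannot observe in_flight == 0 before this request is queued.
         */
        in_flight--;
        while (quiesced) {
            /* drops the lock atomically while sleeping, like qemu_co_queue_wait() */
            pthread_cond_wait(&queued_requests, &queued_requests_lock);
        }
        in_flight++;
        pthread_mutex_unlock(&queued_requests_lock);
        puts("request resumed");
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, request_thread, NULL);

        /* "drained_end": wake all queued requests under the same lock */
        pthread_mutex_lock(&queued_requests_lock);
        quiesced = 0;
        pthread_cond_broadcast(&queued_requests);
        pthread_mutex_unlock(&queued_requests_lock);

        pthread_join(t, NULL);
        return 0;
    }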
From: Stefan Hajnoczi <stefanha@redhat.com>

There is no need for the AioContext lock in bdrv_drain_all() because
nothing in AIO_WAIT_WHILE() needs the lock and the condition is atomic.

AIO_WAIT_WHILE_UNLOCKED() has no use for the AioContext parameter other
than performing a check that is nowadays already done by the
GLOBAL_STATE_CODE()/IO_CODE() macros. Set the ctx argument to NULL here
to help us keep track of all converted callers. Eventually all callers
will have been converted and then the argument can be dropped entirely.

Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20230309190855.414275-2-stefanha@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Wilfred Mallawa <wilfred.mallawa@wdc.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/block-backend.c | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/block/block-backend.c b/block/block-backend.c
index XXXXXXX..XXXXXXX 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -XXX,XX +XXX,XX @@ void blk_drain_all(void)
     bdrv_drain_all_begin();
 
     while ((blk = blk_all_next(blk)) != NULL) {
-        AioContext *ctx = blk_get_aio_context(blk);
-
-        aio_context_acquire(ctx);
-
         /* We may have -ENOMEDIUM completions in flight */
-        AIO_WAIT_WHILE(ctx, qatomic_read(&blk->in_flight) > 0);
-
-        aio_context_release(ctx);
+        AIO_WAIT_WHILE_UNLOCKED(NULL, qatomic_read(&blk->in_flight) > 0);
     }
 
     bdrv_drain_all_end();
--
2.40.0
From: Stefan Hajnoczi <stefanha@redhat.com>

There is no change in behavior. Switch to AIO_WAIT_WHILE_UNLOCKED()
instead of AIO_WAIT_WHILE() to document that this code has already been
audited and converted. The AioContext argument is already NULL so
aio_context_release() is never called anyway.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Tested-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20230309190855.414275-3-stefanha@redhat.com>
Reviewed-by: Wilfred Mallawa <wilfred.mallawa@wdc.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/export/export.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/export/export.c b/block/export/export.c
index XXXXXXX..XXXXXXX 100644
--- a/block/export/export.c
+++ b/block/export/export.c
@@ -XXX,XX +XXX,XX @@ void blk_exp_close_all_type(BlockExportType type)
         blk_exp_request_shutdown(exp);
     }
 
-    AIO_WAIT_WHILE(NULL, blk_exp_has_type(type));
+    AIO_WAIT_WHILE_UNLOCKED(NULL, blk_exp_has_type(type));
 }
 
 void blk_exp_close_all(void)
--
2.40.0
From: Stefan Hajnoczi <stefanha@redhat.com>

The following conversion is safe and does not change behavior:

     GLOBAL_STATE_CODE();
     ...
  -  AIO_WAIT_WHILE(qemu_get_aio_context(), ...);
  +  AIO_WAIT_WHILE_UNLOCKED(NULL, ...);

Since we're in GLOBAL_STATE_CODE(), qemu_get_aio_context() is our home
thread's AioContext. Thus AIO_WAIT_WHILE() does not unlock the
AioContext:

    if (ctx_ && in_aio_context_home_thread(ctx_)) {  \
        while ((cond)) {                             \
            aio_poll(ctx_, true);                    \
            waited_ = true;                          \
        }                                            \

And that means AIO_WAIT_WHILE_UNLOCKED(NULL, ...) can be substituted.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Tested-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20230309190855.414275-4-stefanha@redhat.com>
Reviewed-by: Wilfred Mallawa <wilfred.mallawa@wdc.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/graph-lock.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/graph-lock.c b/block/graph-lock.c
index XXXXXXX..XXXXXXX 100644
--- a/block/graph-lock.c
+++ b/block/graph-lock.c
@@ -XXX,XX +XXX,XX @@ void bdrv_graph_wrlock(void)
      * reader lock.
      */
     qatomic_set(&has_writer, 0);
-    AIO_WAIT_WHILE(qemu_get_aio_context(), reader_count() >= 1);
+    AIO_WAIT_WHILE_UNLOCKED(NULL, reader_count() >= 1);
     qatomic_set(&has_writer, 1);
 
     /*
--
2.40.0
From: Stefan Hajnoczi <stefanha@redhat.com>

Since the AioContext argument was already NULL, AIO_WAIT_WHILE() was
never going to unlock the AioContext. Therefore it is possible to
replace AIO_WAIT_WHILE() with AIO_WAIT_WHILE_UNLOCKED().

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Tested-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20230309190855.414275-5-stefanha@redhat.com>
Reviewed-by: Wilfred Mallawa <wilfred.mallawa@wdc.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/io.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/io.c b/block/io.c
index XXXXXXX..XXXXXXX 100644
--- a/block/io.c
+++ b/block/io.c
@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_begin(void)
     bdrv_drain_all_begin_nopoll();
 
     /* Now poll the in-flight requests */
-    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());
+    AIO_WAIT_WHILE_UNLOCKED(NULL, bdrv_drain_all_poll());
 
     while ((bs = bdrv_next_all_states(bs))) {
         bdrv_drain_assert_idle(bs);
--
2.40.0
From: Stefan Hajnoczi <stefanha@redhat.com>

The HMP monitor runs in the main loop thread. Calling
AIO_WAIT_WHILE(qemu_get_aio_context(), ...) from the main loop thread is
equivalent to AIO_WAIT_WHILE_UNLOCKED(NULL, ...) because neither unlocks
the AioContext and the latter's assertion that we're in the main loop
succeeds.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Tested-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20230309190855.414275-6-stefanha@redhat.com>
Reviewed-by: Wilfred Mallawa <wilfred.mallawa@wdc.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 monitor/hmp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/monitor/hmp.c b/monitor/hmp.c
index XXXXXXX..XXXXXXX 100644
--- a/monitor/hmp.c
+++ b/monitor/hmp.c
@@ -XXX,XX +XXX,XX @@ void handle_hmp_command(MonitorHMP *mon, const char *cmdline)
         Coroutine *co = qemu_coroutine_create(handle_hmp_command_co, &data);
         monitor_set_cur(co, &mon->common);
         aio_co_enter(qemu_get_aio_context(), co);
-        AIO_WAIT_WHILE(qemu_get_aio_context(), !data.done);
+        AIO_WAIT_WHILE_UNLOCKED(NULL, !data.done);
     }
 
     qobject_unref(qdict);
--
2.40.0
From: Stefan Hajnoczi <stefanha@redhat.com>

monitor_cleanup() is called from the main loop thread. Calling
AIO_WAIT_WHILE(qemu_get_aio_context(), ...) from the main loop thread is
equivalent to AIO_WAIT_WHILE_UNLOCKED(NULL, ...) because neither unlocks
the AioContext and the latter's assertion that we're in the main loop
succeeds.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Tested-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20230309190855.414275-7-stefanha@redhat.com>
Reviewed-by: Wilfred Mallawa <wilfred.mallawa@wdc.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 monitor/monitor.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/monitor/monitor.c b/monitor/monitor.c
index XXXXXXX..XXXXXXX 100644
--- a/monitor/monitor.c
+++ b/monitor/monitor.c
@@ -XXX,XX +XXX,XX @@ void monitor_cleanup(void)
      * We need to poll both qemu_aio_context and iohandler_ctx to make
      * sure that the dispatcher coroutine keeps making progress and
      * eventually terminates. qemu_aio_context is automatically
-     * polled by calling AIO_WAIT_WHILE on it, but we must poll
+     * polled by calling AIO_WAIT_WHILE_UNLOCKED on it, but we must poll
      * iohandler_ctx manually.
      *
      * Letting the iothread continue while shutting down the dispatcher
@@ -XXX,XX +XXX,XX @@ void monitor_cleanup(void)
         aio_co_wake(qmp_dispatcher_co);
     }
 
-    AIO_WAIT_WHILE(qemu_get_aio_context(),
+    AIO_WAIT_WHILE_UNLOCKED(NULL,
                    (aio_poll(iohandler_get_aio_context(), false),
                     qatomic_mb_read(&qmp_dispatcher_co_busy)));
 
--
2.40.0
From: Wilfred Mallawa <wilfred.mallawa@wdc.com>

Fixup a few minor typos

Signed-off-by: Wilfred Mallawa <wilfred.mallawa@wdc.com>
Message-Id: <20230313003744.55476-1-wilfred.mallawa@opensource.wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/block/aio-wait.h | 2 +-
 include/block/block_int-common.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -XXX,XX +XXX,XX @@ extern AioWait global_aio_wait;
  * @ctx: the aio context, or NULL if multiple aio contexts (for which the
  *       caller does not hold a lock) are involved in the polling condition.
  * @cond: wait while this conditional expression is true
- * @unlock: whether to unlock and then lock again @ctx. This apples
+ * @unlock: whether to unlock and then lock again @ctx. This applies
  *          only when waiting for another AioContext from the main loop.
  *          Otherwise it's ignored.
  *
diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/block_int-common.h
+++ b/include/block/block_int-common.h
@@ -XXX,XX +XXX,XX @@ extern QemuOptsList bdrv_create_opts_simple;
 /*
  * Common functions that are neither I/O nor Global State.
  *
- * See include/block/block-commmon.h for more information about
+ * See include/block/block-common.h for more information about
  * the Common API.
  */
 
--
2.40.0
From: Stefan Hajnoczi <stefanha@redhat.com>

Not a coroutine_fn, you say?

  static int64_t bdrv_sum_allocated_file_size(BlockDriverState *bs)
  {
      BdrvChild *child;
      int64_t child_size, sum = 0;

      QLIST_FOREACH(child, &bs->children, next) {
          if (child->role & (BDRV_CHILD_DATA | BDRV_CHILD_METADATA |
                             BDRV_CHILD_FILTERED))
          {
              child_size = bdrv_co_get_allocated_file_size(child->bs);
                           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Well what do we have here?!

I rest my case, your honor.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20230308211435.346375-1-stefanha@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block.c b/block.c
index XXXXXXX..XXXXXXX 100644
--- a/block.c
+++ b/block.c
@@ -XXX,XX +XXX,XX @@ exit:
  * sums the size of all data-bearing children. (This excludes backing
  * children.)
  */
-static int64_t bdrv_sum_allocated_file_size(BlockDriverState *bs)
+static int64_t coroutine_fn bdrv_sum_allocated_file_size(BlockDriverState *bs)
 {
     BdrvChild *child;
     int64_t child_size, sum = 0;
--
2.40.0
1
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
2
1
3
Remove usage of aio_context_acquire by always submitting asynchronous
4
AIO to the current thread's LinuxAioState.
5
6
In order to prevent mistakes from the caller side, avoid passing LinuxAioState
7
in laio_io_{plug/unplug} and laio_co_submit, and document the functions
8
to make clear that they work in the current thread's AioContext.
9
10
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
11
Message-Id: <20230203131731.851116-2-eesposit@redhat.com>
12
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
13
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
14
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
15
---
16
include/block/aio.h | 4 ----
17
include/block/raw-aio.h | 18 ++++++++++++------
18
include/sysemu/block-backend-io.h | 5 +++++
19
block/file-posix.c | 10 +++-------
20
block/linux-aio.c | 29 +++++++++++++++++------------
21
5 files changed, 37 insertions(+), 29 deletions(-)
22
23
diff --git a/include/block/aio.h b/include/block/aio.h
24
index XXXXXXX..XXXXXXX 100644
25
--- a/include/block/aio.h
26
+++ b/include/block/aio.h
27
@@ -XXX,XX +XXX,XX @@ struct AioContext {
28
struct ThreadPool *thread_pool;
29
30
#ifdef CONFIG_LINUX_AIO
31
- /*
32
- * State for native Linux AIO. Uses aio_context_acquire/release for
33
- * locking.
34
- */
35
struct LinuxAioState *linux_aio;
36
#endif
37
#ifdef CONFIG_LINUX_IO_URING
38
diff --git a/include/block/raw-aio.h b/include/block/raw-aio.h
39
index XXXXXXX..XXXXXXX 100644
40
--- a/include/block/raw-aio.h
41
+++ b/include/block/raw-aio.h
42
@@ -XXX,XX +XXX,XX @@
43
typedef struct LinuxAioState LinuxAioState;
44
LinuxAioState *laio_init(Error **errp);
45
void laio_cleanup(LinuxAioState *s);
46
-int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
47
- uint64_t offset, QEMUIOVector *qiov, int type,
48
- uint64_t dev_max_batch);
49
+
50
+/* laio_co_submit: submit I/O requests in the thread's current AioContext. */
51
+int coroutine_fn laio_co_submit(int fd, uint64_t offset, QEMUIOVector *qiov,
52
+ int type, uint64_t dev_max_batch);
53
+
54
void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context);
55
void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context);
56
-void laio_io_plug(BlockDriverState *bs, LinuxAioState *s);
57
-void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s,
58
- uint64_t dev_max_batch);
59
+
60
+/*
61
+ * laio_io_plug/unplug work in the thread's current AioContext, therefore the
62
+ * caller must ensure that they are paired in the same IOThread.
63
+ */
64
+void laio_io_plug(void);
65
+void laio_io_unplug(uint64_t dev_max_batch);
66
#endif
67
/* io_uring.c - Linux io_uring implementation */
68
#ifdef CONFIG_LINUX_IO_URING
69
diff --git a/include/sysemu/block-backend-io.h b/include/sysemu/block-backend-io.h
70
index XXXXXXX..XXXXXXX 100644
71
--- a/include/sysemu/block-backend-io.h
72
+++ b/include/sysemu/block-backend-io.h
73
@@ -XXX,XX +XXX,XX @@ void blk_iostatus_set_err(BlockBackend *blk, int error);
74
int blk_get_max_iov(BlockBackend *blk);
75
int blk_get_max_hw_iov(BlockBackend *blk);
76
77
+/*
78
+ * blk_io_plug/unplug are thread-local operations. This means that multiple
79
+ * IOThreads can simultaneously call plug/unplug, but the caller must ensure
80
+ * that each unplug() is called in the same IOThread of the matching plug().
81
+ */
82
void coroutine_fn blk_co_io_plug(BlockBackend *blk);
83
void co_wrapper blk_io_plug(BlockBackend *blk);
84
85
diff --git a/block/file-posix.c b/block/file-posix.c
86
index XXXXXXX..XXXXXXX 100644
87
--- a/block/file-posix.c
88
+++ b/block/file-posix.c
89
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
90
#endif
91
#ifdef CONFIG_LINUX_AIO
92
} else if (s->use_linux_aio) {
93
- LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs));
94
assert(qiov->size == bytes);
95
- return laio_co_submit(bs, aio, s->fd, offset, qiov, type,
96
- s->aio_max_batch);
97
+ return laio_co_submit(s->fd, offset, qiov, type, s->aio_max_batch);
98
#endif
99
}
100
101
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn raw_co_io_plug(BlockDriverState *bs)
102
BDRVRawState __attribute__((unused)) *s = bs->opaque;
103
#ifdef CONFIG_LINUX_AIO
104
if (s->use_linux_aio) {
105
- LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs));
106
- laio_io_plug(bs, aio);
107
+ laio_io_plug();
108
}
109
#endif
110
#ifdef CONFIG_LINUX_IO_URING
111
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn raw_co_io_unplug(BlockDriverState *bs)
112
BDRVRawState __attribute__((unused)) *s = bs->opaque;
113
#ifdef CONFIG_LINUX_AIO
114
if (s->use_linux_aio) {
115
- LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs));
116
- laio_io_unplug(bs, aio, s->aio_max_batch);
117
+ laio_io_unplug(s->aio_max_batch);
118
}
119
#endif
120
#ifdef CONFIG_LINUX_IO_URING
121
diff --git a/block/linux-aio.c b/block/linux-aio.c
122
index XXXXXXX..XXXXXXX 100644
123
--- a/block/linux-aio.c
124
+++ b/block/linux-aio.c
125
@@ -XXX,XX +XXX,XX @@
126
#include "qemu/coroutine.h"
127
#include "qapi/error.h"
128
129
+/* Only used for assertions. */
130
+#include "qemu/coroutine_int.h"
131
+
132
#include <libaio.h>
133
134
/*
135
@@ -XXX,XX +XXX,XX @@ struct LinuxAioState {
136
io_context_t ctx;
137
EventNotifier e;
138
139
- /* io queue for submit at batch. Protected by AioContext lock. */
140
+ /* No locking required, only accessed from AioContext home thread */
141
LaioQueue io_q;
142
-
143
- /* I/O completion processing. Only runs in I/O thread. */
144
QEMUBH *completion_bh;
145
int event_idx;
146
int event_max;
147
@@ -XXX,XX +XXX,XX @@ static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
148
* later. Coroutines cannot be entered recursively so avoid doing
149
* that!
150
*/
151
+ assert(laiocb->co->ctx == laiocb->ctx->aio_context);
152
if (!qemu_coroutine_entered(laiocb->co)) {
153
aio_co_wake(laiocb->co);
154
}
155
@@ -XXX,XX +XXX,XX @@ static void qemu_laio_process_completions(LinuxAioState *s)
156
157
static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
158
{
159
- aio_context_acquire(s->aio_context);
160
qemu_laio_process_completions(s);
161
162
if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
163
ioq_submit(s);
164
}
165
- aio_context_release(s->aio_context);
166
}
167
168
static void qemu_laio_completion_bh(void *opaque)
169
@@ -XXX,XX +XXX,XX @@ static uint64_t laio_max_batch(LinuxAioState *s, uint64_t dev_max_batch)
170
return max_batch;
171
}
172
173
-void laio_io_plug(BlockDriverState *bs, LinuxAioState *s)
174
+void laio_io_plug(void)
175
{
176
+ AioContext *ctx = qemu_get_current_aio_context();
177
+ LinuxAioState *s = aio_get_linux_aio(ctx);
178
+
179
s->io_q.plugged++;
180
}
181
182
-void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s,
183
- uint64_t dev_max_batch)
184
+void laio_io_unplug(uint64_t dev_max_batch)
185
{
186
+ AioContext *ctx = qemu_get_current_aio_context();
187
+ LinuxAioState *s = aio_get_linux_aio(ctx);
188
+
189
assert(s->io_q.plugged);
190
s->io_q.plugged--;
191
192
@@ -XXX,XX +XXX,XX @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
193
return 0;
194
}
195
196
-int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
197
- uint64_t offset, QEMUIOVector *qiov, int type,
198
- uint64_t dev_max_batch)
199
+int coroutine_fn laio_co_submit(int fd, uint64_t offset, QEMUIOVector *qiov,
200
+ int type, uint64_t dev_max_batch)
201
{
202
int ret;
203
+ AioContext *ctx = qemu_get_current_aio_context();
204
struct qemu_laiocb laiocb = {
205
.co = qemu_coroutine_self(),
206
.nbytes = qiov->size,
207
- .ctx = s,
208
+ .ctx = aio_get_linux_aio(ctx),
209
.ret = -EINPROGRESS,
210
.is_read = (type == QEMU_AIO_READ),
211
.qiov = qiov,
212
--
213
2.40.0
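The heart of this change is that callers no longer pass a LinuxAioState around; laio_co_submit() now looks up the state that belongs to the thread it is running in, via qemu_get_current_aio_context()/aio_get_linux_aio(). As a rough, self-contained illustration of that design choice (not QEMU code; all names below are invented), per-thread state gives the same effect of an implicit, always-correct state argument:

    /* Illustrative sketch only: per-thread state instead of a passed-in pointer. */
    #include <pthread.h>
    #include <stdio.h>

    typedef struct AioState {
        int thread_id;     /* which thread owns this state */
        int submitted;     /* bytes submitted through it */
    } AioState;

    /* one instance per thread, found implicitly: no parameter needed */
    static _Thread_local AioState current_state;

    static void submit(int nbytes)
    {
        current_state.submitted += nbytes;
        printf("thread %d: submitted %d bytes (total %d)\n",
               current_state.thread_id, nbytes, current_state.submitted);
    }

    static void *worker(void *arg)
    {
        current_state.thread_id = *(int *)arg;
        submit(4096);
        submit(512);
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;
        int id1 = 1, id2 = 2;
        pthread_create(&t1, NULL, worker, &id1);
        pthread_create(&t2, NULL, worker, &id2);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
    }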
1
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
2
1
3
Remove usage of aio_context_acquire by always submitting asynchronous
4
AIO to the current thread's LuringState.
5
6
In order to prevent mistakes from the caller side, avoid passing LuringState
7
in luring_io_{plug/unplug} and luring_co_submit, and document the functions
8
to make clear that they work in the current thread's AioContext.
9
10
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
11
Message-Id: <20230203131731.851116-3-eesposit@redhat.com>
12
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
13
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
14
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
15
---
16
include/block/aio.h | 4 ----
17
include/block/raw-aio.h | 15 +++++++++++----
18
block/file-posix.c | 12 ++++--------
19
block/io_uring.c | 23 +++++++++++++++--------
20
4 files changed, 30 insertions(+), 24 deletions(-)
21
22
diff --git a/include/block/aio.h b/include/block/aio.h
23
index XXXXXXX..XXXXXXX 100644
24
--- a/include/block/aio.h
25
+++ b/include/block/aio.h
26
@@ -XXX,XX +XXX,XX @@ struct AioContext {
27
struct LinuxAioState *linux_aio;
28
#endif
29
#ifdef CONFIG_LINUX_IO_URING
30
- /*
31
- * State for Linux io_uring. Uses aio_context_acquire/release for
32
- * locking.
33
- */
34
struct LuringState *linux_io_uring;
35
36
/* State for file descriptor monitoring using Linux io_uring */
37
diff --git a/include/block/raw-aio.h b/include/block/raw-aio.h
38
index XXXXXXX..XXXXXXX 100644
39
--- a/include/block/raw-aio.h
40
+++ b/include/block/raw-aio.h
41
@@ -XXX,XX +XXX,XX @@ void laio_io_unplug(uint64_t dev_max_batch);
42
typedef struct LuringState LuringState;
43
LuringState *luring_init(Error **errp);
44
void luring_cleanup(LuringState *s);
45
-int coroutine_fn luring_co_submit(BlockDriverState *bs, LuringState *s, int fd,
46
- uint64_t offset, QEMUIOVector *qiov, int type);
47
+
48
+/* luring_co_submit: submit I/O requests in the thread's current AioContext. */
49
+int coroutine_fn luring_co_submit(BlockDriverState *bs, int fd, uint64_t offset,
50
+ QEMUIOVector *qiov, int type);
51
void luring_detach_aio_context(LuringState *s, AioContext *old_context);
52
void luring_attach_aio_context(LuringState *s, AioContext *new_context);
53
-void luring_io_plug(BlockDriverState *bs, LuringState *s);
54
-void luring_io_unplug(BlockDriverState *bs, LuringState *s);
55
+
56
+/*
57
+ * luring_io_plug/unplug work in the thread's current AioContext, therefore the
58
+ * caller must ensure that they are paired in the same IOThread.
59
+ */
60
+void luring_io_plug(void);
61
+void luring_io_unplug(void);
62
#endif
63
64
#ifdef _WIN32
65
diff --git a/block/file-posix.c b/block/file-posix.c
66
index XXXXXXX..XXXXXXX 100644
67
--- a/block/file-posix.c
68
+++ b/block/file-posix.c
69
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
70
type |= QEMU_AIO_MISALIGNED;
71
#ifdef CONFIG_LINUX_IO_URING
72
} else if (s->use_linux_io_uring) {
73
- LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs));
74
assert(qiov->size == bytes);
75
- return luring_co_submit(bs, aio, s->fd, offset, qiov, type);
76
+ return luring_co_submit(bs, s->fd, offset, qiov, type);
77
#endif
78
#ifdef CONFIG_LINUX_AIO
79
} else if (s->use_linux_aio) {
80
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn raw_co_io_plug(BlockDriverState *bs)
81
#endif
82
#ifdef CONFIG_LINUX_IO_URING
83
if (s->use_linux_io_uring) {
84
- LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs));
85
- luring_io_plug(bs, aio);
86
+ luring_io_plug();
87
}
88
#endif
89
}
90
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn raw_co_io_unplug(BlockDriverState *bs)
91
#endif
92
#ifdef CONFIG_LINUX_IO_URING
93
if (s->use_linux_io_uring) {
94
- LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs));
95
- luring_io_unplug(bs, aio);
96
+ luring_io_unplug();
97
}
98
#endif
99
}
100
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs)
101
102
#ifdef CONFIG_LINUX_IO_URING
103
if (s->use_linux_io_uring) {
104
- LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs));
105
- return luring_co_submit(bs, aio, s->fd, 0, NULL, QEMU_AIO_FLUSH);
106
+ return luring_co_submit(bs, s->fd, 0, NULL, QEMU_AIO_FLUSH);
107
}
108
#endif
109
return raw_thread_pool_submit(bs, handle_aiocb_flush, &acb);
110
diff --git a/block/io_uring.c b/block/io_uring.c
111
index XXXXXXX..XXXXXXX 100644
112
--- a/block/io_uring.c
113
+++ b/block/io_uring.c
114
@@ -XXX,XX +XXX,XX @@
115
#include "qapi/error.h"
116
#include "trace.h"
117
118
+/* Only used for assertions. */
119
+#include "qemu/coroutine_int.h"
120
+
121
/* io_uring ring size */
122
#define MAX_ENTRIES 128
123
124
@@ -XXX,XX +XXX,XX @@ typedef struct LuringState {
125
126
struct io_uring ring;
127
128
- /* io queue for submit at batch. Protected by AioContext lock. */
129
+ /* No locking required, only accessed from AioContext home thread */
130
LuringQueue io_q;
131
132
- /* I/O completion processing. Only runs in I/O thread. */
133
QEMUBH *completion_bh;
134
} LuringState;
135
136
@@ -XXX,XX +XXX,XX @@ end:
137
* eventually runs later. Coroutines cannot be entered recursively
138
* so avoid doing that!
139
*/
140
+ assert(luringcb->co->ctx == s->aio_context);
141
if (!qemu_coroutine_entered(luringcb->co)) {
142
aio_co_wake(luringcb->co);
143
}
144
@@ -XXX,XX +XXX,XX @@ static int ioq_submit(LuringState *s)
145
146
static void luring_process_completions_and_submit(LuringState *s)
147
{
148
- aio_context_acquire(s->aio_context);
149
luring_process_completions(s);
150
151
if (!s->io_q.plugged && s->io_q.in_queue > 0) {
152
ioq_submit(s);
153
}
154
- aio_context_release(s->aio_context);
155
}
156
157
static void qemu_luring_completion_bh(void *opaque)
158
@@ -XXX,XX +XXX,XX @@ static void ioq_init(LuringQueue *io_q)
159
io_q->blocked = false;
160
}
161
162
-void luring_io_plug(BlockDriverState *bs, LuringState *s)
163
+void luring_io_plug(void)
164
{
165
+ AioContext *ctx = qemu_get_current_aio_context();
166
+ LuringState *s = aio_get_linux_io_uring(ctx);
167
trace_luring_io_plug(s);
168
s->io_q.plugged++;
169
}
170
171
-void luring_io_unplug(BlockDriverState *bs, LuringState *s)
172
+void luring_io_unplug(void)
173
{
174
+ AioContext *ctx = qemu_get_current_aio_context();
175
+ LuringState *s = aio_get_linux_io_uring(ctx);
176
assert(s->io_q.plugged);
177
trace_luring_io_unplug(s, s->io_q.blocked, s->io_q.plugged,
178
s->io_q.in_queue, s->io_q.in_flight);
179
@@ -XXX,XX +XXX,XX @@ static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s,
180
return 0;
181
}
182
183
-int coroutine_fn luring_co_submit(BlockDriverState *bs, LuringState *s, int fd,
184
- uint64_t offset, QEMUIOVector *qiov, int type)
185
+int coroutine_fn luring_co_submit(BlockDriverState *bs, int fd, uint64_t offset,
186
+ QEMUIOVector *qiov, int type)
187
{
188
int ret;
189
+ AioContext *ctx = qemu_get_current_aio_context();
190
+ LuringState *s = aio_get_linux_io_uring(ctx);
191
LuringAIOCB luringcb = {
192
.co = qemu_coroutine_self(),
193
.ret = -EINPROGRESS,
194
--
195
2.40.0
1
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
2
1
3
Use qemu_get_current_aio_context() where possible, since we always
4
submit work to the current thread anyways.
5
6
We want to also be sure that the thread submitting the work is
7
the same as the one processing the pool, to avoid adding
8
synchronization to the pool list.
9
10
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
11
Message-Id: <20230203131731.851116-4-eesposit@redhat.com>
12
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
13
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
14
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
15
---
16
include/block/thread-pool.h | 5 +++++
17
block/file-posix.c | 21 ++++++++++-----------
18
block/file-win32.c | 2 +-
19
block/qcow2-threads.c | 2 +-
20
util/thread-pool.c | 9 ++++-----
21
5 files changed, 21 insertions(+), 18 deletions(-)
22
23
diff --git a/include/block/thread-pool.h b/include/block/thread-pool.h
24
index XXXXXXX..XXXXXXX 100644
25
--- a/include/block/thread-pool.h
26
+++ b/include/block/thread-pool.h
27
@@ -XXX,XX +XXX,XX @@ typedef struct ThreadPool ThreadPool;
28
ThreadPool *thread_pool_new(struct AioContext *ctx);
29
void thread_pool_free(ThreadPool *pool);
30
31
+/*
32
+ * thread_pool_submit* API: submit I/O requests in the thread's
33
+ * current AioContext.
34
+ */
35
BlockAIOCB *thread_pool_submit_aio(ThreadPool *pool,
36
ThreadPoolFunc *func, void *arg,
37
BlockCompletionFunc *cb, void *opaque);
38
int coroutine_fn thread_pool_submit_co(ThreadPool *pool,
39
ThreadPoolFunc *func, void *arg);
40
void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg);
41
+
42
void thread_pool_update_params(ThreadPool *pool, struct AioContext *ctx);
43
44
#endif
45
diff --git a/block/file-posix.c b/block/file-posix.c
46
index XXXXXXX..XXXXXXX 100644
47
--- a/block/file-posix.c
48
+++ b/block/file-posix.c
49
@@ -XXX,XX +XXX,XX @@ out:
50
return result;
51
}
52
53
-static int coroutine_fn raw_thread_pool_submit(BlockDriverState *bs,
54
- ThreadPoolFunc func, void *arg)
55
+static int coroutine_fn raw_thread_pool_submit(ThreadPoolFunc func, void *arg)
56
{
57
/* @bs can be NULL, bdrv_get_aio_context() returns the main context then */
58
- ThreadPool *pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
59
+ ThreadPool *pool = aio_get_thread_pool(qemu_get_current_aio_context());
60
return thread_pool_submit_co(pool, func, arg);
61
}
62
63
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
64
};
65
66
assert(qiov->size == bytes);
67
- return raw_thread_pool_submit(bs, handle_aiocb_rw, &acb);
68
+ return raw_thread_pool_submit(handle_aiocb_rw, &acb);
69
}
70
71
static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset,
72
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs)
73
return luring_co_submit(bs, s->fd, 0, NULL, QEMU_AIO_FLUSH);
74
}
75
#endif
76
- return raw_thread_pool_submit(bs, handle_aiocb_flush, &acb);
77
+ return raw_thread_pool_submit(handle_aiocb_flush, &acb);
78
}
79
80
static void raw_aio_attach_aio_context(BlockDriverState *bs,
81
@@ -XXX,XX +XXX,XX @@ raw_regular_truncate(BlockDriverState *bs, int fd, int64_t offset,
82
},
83
};
84
85
- return raw_thread_pool_submit(bs, handle_aiocb_truncate, &acb);
86
+ return raw_thread_pool_submit(handle_aiocb_truncate, &acb);
87
}
88
89
static int coroutine_fn raw_co_truncate(BlockDriverState *bs, int64_t offset,
90
@@ -XXX,XX +XXX,XX @@ raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes,
91
acb.aio_type |= QEMU_AIO_BLKDEV;
92
}
93
94
- ret = raw_thread_pool_submit(bs, handle_aiocb_discard, &acb);
95
+ ret = raw_thread_pool_submit(handle_aiocb_discard, &acb);
96
raw_account_discard(s, bytes, ret);
97
return ret;
98
}
99
@@ -XXX,XX +XXX,XX @@ raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
100
handler = handle_aiocb_write_zeroes;
101
}
102
103
- return raw_thread_pool_submit(bs, handler, &acb);
104
+ return raw_thread_pool_submit(handler, &acb);
105
}
106
107
static int coroutine_fn raw_co_pwrite_zeroes(
108
@@ -XXX,XX +XXX,XX @@ raw_co_copy_range_to(BlockDriverState *bs,
109
},
110
};
111
112
- return raw_thread_pool_submit(bs, handle_aiocb_copy_range, &acb);
113
+ return raw_thread_pool_submit(handle_aiocb_copy_range, &acb);
114
}
115
116
BlockDriver bdrv_file = {
117
@@ -XXX,XX +XXX,XX @@ hdev_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
118
struct sg_io_hdr *io_hdr = buf;
119
if (io_hdr->cmdp[0] == PERSISTENT_RESERVE_OUT ||
120
io_hdr->cmdp[0] == PERSISTENT_RESERVE_IN) {
121
- return pr_manager_execute(s->pr_mgr, bdrv_get_aio_context(bs),
122
+ return pr_manager_execute(s->pr_mgr, qemu_get_current_aio_context(),
123
s->fd, io_hdr);
124
}
125
}
126
@@ -XXX,XX +XXX,XX @@ hdev_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
127
},
128
};
129
130
- return raw_thread_pool_submit(bs, handle_aiocb_ioctl, &acb);
131
+ return raw_thread_pool_submit(handle_aiocb_ioctl, &acb);
132
}
133
#endif /* linux */
134
135
diff --git a/block/file-win32.c b/block/file-win32.c
136
index XXXXXXX..XXXXXXX 100644
137
--- a/block/file-win32.c
138
+++ b/block/file-win32.c
139
@@ -XXX,XX +XXX,XX @@ static BlockAIOCB *paio_submit(BlockDriverState *bs, HANDLE hfile,
140
acb->aio_offset = offset;
141
142
trace_file_paio_submit(acb, opaque, offset, count, type);
143
- pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
144
+ pool = aio_get_thread_pool(qemu_get_current_aio_context());
145
return thread_pool_submit_aio(pool, aio_worker, acb, cb, opaque);
146
}
147
148
diff --git a/block/qcow2-threads.c b/block/qcow2-threads.c
149
index XXXXXXX..XXXXXXX 100644
150
--- a/block/qcow2-threads.c
151
+++ b/block/qcow2-threads.c
152
@@ -XXX,XX +XXX,XX @@ qcow2_co_process(BlockDriverState *bs, ThreadPoolFunc *func, void *arg)
153
{
154
int ret;
155
BDRVQcow2State *s = bs->opaque;
156
- ThreadPool *pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
157
+ ThreadPool *pool = aio_get_thread_pool(qemu_get_current_aio_context());
158
159
qemu_co_mutex_lock(&s->lock);
160
while (s->nb_threads >= QCOW2_MAX_THREADS) {
161
diff --git a/util/thread-pool.c b/util/thread-pool.c
162
index XXXXXXX..XXXXXXX 100644
163
--- a/util/thread-pool.c
164
+++ b/util/thread-pool.c
165
@@ -XXX,XX +XXX,XX @@ struct ThreadPoolElement {
166
/* Access to this list is protected by lock. */
167
QTAILQ_ENTRY(ThreadPoolElement) reqs;
168
169
- /* Access to this list is protected by the global mutex. */
170
+ /* This list is only written by the thread pool's mother thread. */
171
QLIST_ENTRY(ThreadPoolElement) all;
172
};
173
174
@@ -XXX,XX +XXX,XX @@ static void thread_pool_completion_bh(void *opaque)
175
ThreadPool *pool = opaque;
176
ThreadPoolElement *elem, *next;
177
178
- aio_context_acquire(pool->ctx);
179
restart:
180
QLIST_FOREACH_SAFE(elem, &pool->head, all, next) {
181
if (elem->state != THREAD_DONE) {
182
@@ -XXX,XX +XXX,XX @@ restart:
183
*/
184
qemu_bh_schedule(pool->completion_bh);
185
186
- aio_context_release(pool->ctx);
187
elem->common.cb(elem->common.opaque, elem->ret);
188
- aio_context_acquire(pool->ctx);
189
190
/* We can safely cancel the completion_bh here regardless of someone
191
* else having scheduled it meanwhile because we reenter the
192
@@ -XXX,XX +XXX,XX @@ restart:
193
qemu_aio_unref(elem);
194
}
195
}
196
- aio_context_release(pool->ctx);
197
}
198
199
static void thread_pool_cancel(BlockAIOCB *acb)
200
@@ -XXX,XX +XXX,XX @@ BlockAIOCB *thread_pool_submit_aio(ThreadPool *pool,
201
{
202
ThreadPoolElement *req;
203
204
+ /* Assert that the thread submitting work is the same running the pool */
205
+ assert(pool->ctx == qemu_get_current_aio_context());
206
+
207
req = qemu_aio_get(&thread_pool_aiocb_info, NULL, cb, opaque);
208
req->func = func;
209
req->arg = arg;
210
--
211
2.40.0
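The new assert(pool->ctx == qemu_get_current_aio_context()) in thread_pool_submit_aio() encodes an ownership rule: work may only be submitted to a thread pool from the thread that runs that pool. A minimal stand-alone sketch of the same rule, using a toy pool and pthreads instead of QEMU's ThreadPool/AioContext (illustrative only, all names invented), could look like this:

    /* Illustrative sketch of the "submit only from the owning thread" rule. */
    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    typedef struct ToyPool {
        pthread_t owner;      /* stands in for ThreadPool.ctx */
        int queued;
    } ToyPool;

    static void toy_pool_init(ToyPool *pool)
    {
        pool->owner = pthread_self();
        pool->queued = 0;
    }

    static void toy_pool_submit(ToyPool *pool, const char *what)
    {
        /*
         * Submitting from another thread would require extra locking on the
         * request list; forbidding it keeps the list single-writer.
         */
        assert(pthread_equal(pool->owner, pthread_self()));
        pool->queued++;
        printf("queued '%s' (%d pending)\n", what, pool->queued);
    }

    int main(void)
    {
        ToyPool pool;
        toy_pool_init(&pool);
        toy_pool_submit(&pool, "read");
        toy_pool_submit(&pool, "flush");
        return 0;
    }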
1
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
2
1
3
thread_pool_submit_aio() is always called on a pool taken from
4
qemu_get_current_aio_context(), and that is the only intended
5
use: each pool runs only in the same thread that is submitting
6
work to it, it can't run anywhere else.
7
8
Therefore simplify the thread_pool_submit* API and remove the
9
ThreadPool function parameter.
10
11
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
12
Message-Id: <20230203131731.851116-5-eesposit@redhat.com>
13
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
14
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
15
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
16
---
17
include/block/thread-pool.h | 10 ++++------
18
backends/tpm/tpm_backend.c | 4 +---
19
block/file-posix.c | 4 +---
20
block/file-win32.c | 4 +---
21
block/qcow2-threads.c | 3 +--
22
hw/9pfs/coth.c | 3 +--
23
hw/ppc/spapr_nvdimm.c | 6 ++----
24
hw/virtio/virtio-pmem.c | 3 +--
25
scsi/pr-manager.c | 3 +--
26
scsi/qemu-pr-helper.c | 3 +--
27
tests/unit/test-thread-pool.c | 12 +++++-------
28
util/thread-pool.c | 16 ++++++++--------
29
12 files changed, 27 insertions(+), 44 deletions(-)
30
31
diff --git a/include/block/thread-pool.h b/include/block/thread-pool.h
32
index XXXXXXX..XXXXXXX 100644
33
--- a/include/block/thread-pool.h
34
+++ b/include/block/thread-pool.h
35
@@ -XXX,XX +XXX,XX @@ void thread_pool_free(ThreadPool *pool);
36
* thread_pool_submit* API: submit I/O requests in the thread's
37
* current AioContext.
38
*/
39
-BlockAIOCB *thread_pool_submit_aio(ThreadPool *pool,
40
- ThreadPoolFunc *func, void *arg,
41
- BlockCompletionFunc *cb, void *opaque);
42
-int coroutine_fn thread_pool_submit_co(ThreadPool *pool,
43
- ThreadPoolFunc *func, void *arg);
44
-void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg);
45
+BlockAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg,
46
+ BlockCompletionFunc *cb, void *opaque);
47
+int coroutine_fn thread_pool_submit_co(ThreadPoolFunc *func, void *arg);
48
+void thread_pool_submit(ThreadPoolFunc *func, void *arg);
49
50
void thread_pool_update_params(ThreadPool *pool, struct AioContext *ctx);
51
52
diff --git a/backends/tpm/tpm_backend.c b/backends/tpm/tpm_backend.c
53
index XXXXXXX..XXXXXXX 100644
54
--- a/backends/tpm/tpm_backend.c
55
+++ b/backends/tpm/tpm_backend.c
56
@@ -XXX,XX +XXX,XX @@ bool tpm_backend_had_startup_error(TPMBackend *s)
57
58
void tpm_backend_deliver_request(TPMBackend *s, TPMBackendCmd *cmd)
59
{
60
- ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context());
61
-
62
if (s->cmd != NULL) {
63
error_report("There is a TPM request pending");
64
return;
65
@@ -XXX,XX +XXX,XX @@ void tpm_backend_deliver_request(TPMBackend *s, TPMBackendCmd *cmd)
66
67
s->cmd = cmd;
68
object_ref(OBJECT(s));
69
- thread_pool_submit_aio(pool, tpm_backend_worker_thread, s,
70
+ thread_pool_submit_aio(tpm_backend_worker_thread, s,
71
tpm_backend_request_completed, s);
72
}
73
74
diff --git a/block/file-posix.c b/block/file-posix.c
75
index XXXXXXX..XXXXXXX 100644
76
--- a/block/file-posix.c
77
+++ b/block/file-posix.c
78
@@ -XXX,XX +XXX,XX @@ out:
79
80
static int coroutine_fn raw_thread_pool_submit(ThreadPoolFunc func, void *arg)
81
{
82
- /* @bs can be NULL, bdrv_get_aio_context() returns the main context then */
83
- ThreadPool *pool = aio_get_thread_pool(qemu_get_current_aio_context());
84
- return thread_pool_submit_co(pool, func, arg);
85
+ return thread_pool_submit_co(func, arg);
86
}
87
88
/*
89
diff --git a/block/file-win32.c b/block/file-win32.c
90
index XXXXXXX..XXXXXXX 100644
91
--- a/block/file-win32.c
92
+++ b/block/file-win32.c
93
@@ -XXX,XX +XXX,XX @@ static BlockAIOCB *paio_submit(BlockDriverState *bs, HANDLE hfile,
94
BlockCompletionFunc *cb, void *opaque, int type)
95
{
96
RawWin32AIOData *acb = g_new(RawWin32AIOData, 1);
97
- ThreadPool *pool;
98
99
acb->bs = bs;
100
acb->hfile = hfile;
101
@@ -XXX,XX +XXX,XX @@ static BlockAIOCB *paio_submit(BlockDriverState *bs, HANDLE hfile,
102
acb->aio_offset = offset;
103
104
trace_file_paio_submit(acb, opaque, offset, count, type);
105
- pool = aio_get_thread_pool(qemu_get_current_aio_context());
106
- return thread_pool_submit_aio(pool, aio_worker, acb, cb, opaque);
107
+ return thread_pool_submit_aio(aio_worker, acb, cb, opaque);
108
}
109
110
int qemu_ftruncate64(int fd, int64_t length)
111
diff --git a/block/qcow2-threads.c b/block/qcow2-threads.c
112
index XXXXXXX..XXXXXXX 100644
113
--- a/block/qcow2-threads.c
114
+++ b/block/qcow2-threads.c
115
@@ -XXX,XX +XXX,XX @@ qcow2_co_process(BlockDriverState *bs, ThreadPoolFunc *func, void *arg)
116
{
117
int ret;
118
BDRVQcow2State *s = bs->opaque;
119
- ThreadPool *pool = aio_get_thread_pool(qemu_get_current_aio_context());
120
121
qemu_co_mutex_lock(&s->lock);
122
while (s->nb_threads >= QCOW2_MAX_THREADS) {
123
@@ -XXX,XX +XXX,XX @@ qcow2_co_process(BlockDriverState *bs, ThreadPoolFunc *func, void *arg)
124
s->nb_threads++;
125
qemu_co_mutex_unlock(&s->lock);
126
127
- ret = thread_pool_submit_co(pool, func, arg);
128
+ ret = thread_pool_submit_co(func, arg);
129
130
qemu_co_mutex_lock(&s->lock);
131
s->nb_threads--;
132
diff --git a/hw/9pfs/coth.c b/hw/9pfs/coth.c
133
index XXXXXXX..XXXXXXX 100644
134
--- a/hw/9pfs/coth.c
135
+++ b/hw/9pfs/coth.c
136
@@ -XXX,XX +XXX,XX @@ static int coroutine_enter_func(void *arg)
137
void co_run_in_worker_bh(void *opaque)
138
{
139
Coroutine *co = opaque;
140
- thread_pool_submit_aio(aio_get_thread_pool(qemu_get_aio_context()),
141
- coroutine_enter_func, co, coroutine_enter_cb, co);
142
+ thread_pool_submit_aio(coroutine_enter_func, co, coroutine_enter_cb, co);
143
}
144
diff --git a/hw/ppc/spapr_nvdimm.c b/hw/ppc/spapr_nvdimm.c
145
index XXXXXXX..XXXXXXX 100644
146
--- a/hw/ppc/spapr_nvdimm.c
147
+++ b/hw/ppc/spapr_nvdimm.c
148
@@ -XXX,XX +XXX,XX @@ static int spapr_nvdimm_flush_post_load(void *opaque, int version_id)
149
{
150
SpaprNVDIMMDevice *s_nvdimm = (SpaprNVDIMMDevice *)opaque;
151
SpaprNVDIMMDeviceFlushState *state;
152
- ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context());
153
HostMemoryBackend *backend = MEMORY_BACKEND(PC_DIMM(s_nvdimm)->hostmem);
154
bool is_pmem = object_property_get_bool(OBJECT(backend), "pmem", NULL);
155
bool pmem_override = object_property_get_bool(OBJECT(s_nvdimm),
156
@@ -XXX,XX +XXX,XX @@ static int spapr_nvdimm_flush_post_load(void *opaque, int version_id)
157
}
158
159
QLIST_FOREACH(state, &s_nvdimm->pending_nvdimm_flush_states, node) {
160
- thread_pool_submit_aio(pool, flush_worker_cb, state,
161
+ thread_pool_submit_aio(flush_worker_cb, state,
162
spapr_nvdimm_flush_completion_cb, state);
163
}
164
165
@@ -XXX,XX +XXX,XX @@ static target_ulong h_scm_flush(PowerPCCPU *cpu, SpaprMachineState *spapr,
166
PCDIMMDevice *dimm;
167
HostMemoryBackend *backend = NULL;
168
SpaprNVDIMMDeviceFlushState *state;
169
- ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context());
170
int fd;
171
172
if (!drc || !drc->dev ||
173
@@ -XXX,XX +XXX,XX @@ static target_ulong h_scm_flush(PowerPCCPU *cpu, SpaprMachineState *spapr,
174
175
state->drcidx = drc_index;
176
177
- thread_pool_submit_aio(pool, flush_worker_cb, state,
178
+ thread_pool_submit_aio(flush_worker_cb, state,
179
spapr_nvdimm_flush_completion_cb, state);
180
181
continue_token = state->continue_token;
182
diff --git a/hw/virtio/virtio-pmem.c b/hw/virtio/virtio-pmem.c
183
index XXXXXXX..XXXXXXX 100644
184
--- a/hw/virtio/virtio-pmem.c
185
+++ b/hw/virtio/virtio-pmem.c
186
@@ -XXX,XX +XXX,XX @@ static void virtio_pmem_flush(VirtIODevice *vdev, VirtQueue *vq)
187
VirtIODeviceRequest *req_data;
188
VirtIOPMEM *pmem = VIRTIO_PMEM(vdev);
189
HostMemoryBackend *backend = MEMORY_BACKEND(pmem->memdev);
190
- ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context());
191
192
trace_virtio_pmem_flush_request();
193
req_data = virtqueue_pop(vq, sizeof(VirtIODeviceRequest));
194
@@ -XXX,XX +XXX,XX @@ static void virtio_pmem_flush(VirtIODevice *vdev, VirtQueue *vq)
195
req_data->fd = memory_region_get_fd(&backend->mr);
196
req_data->pmem = pmem;
197
req_data->vdev = vdev;
198
- thread_pool_submit_aio(pool, worker_cb, req_data, done_cb, req_data);
199
+ thread_pool_submit_aio(worker_cb, req_data, done_cb, req_data);
200
}
201
202
static void virtio_pmem_get_config(VirtIODevice *vdev, uint8_t *config)
203
diff --git a/scsi/pr-manager.c b/scsi/pr-manager.c
204
index XXXXXXX..XXXXXXX 100644
205
--- a/scsi/pr-manager.c
206
+++ b/scsi/pr-manager.c
207
@@ -XXX,XX +XXX,XX @@ static int pr_manager_worker(void *opaque)
208
int coroutine_fn pr_manager_execute(PRManager *pr_mgr, AioContext *ctx, int fd,
209
struct sg_io_hdr *hdr)
210
{
211
- ThreadPool *pool = aio_get_thread_pool(ctx);
212
PRManagerData data = {
213
.pr_mgr = pr_mgr,
214
.fd = fd,
215
@@ -XXX,XX +XXX,XX @@ int coroutine_fn pr_manager_execute(PRManager *pr_mgr, AioContext *ctx, int fd,
216
217
/* The matching object_unref is in pr_manager_worker. */
218
object_ref(OBJECT(pr_mgr));
219
- return thread_pool_submit_co(pool, pr_manager_worker, &data);
220
+ return thread_pool_submit_co(pr_manager_worker, &data);
221
}
222
223
bool pr_manager_is_connected(PRManager *pr_mgr)
224
diff --git a/scsi/qemu-pr-helper.c b/scsi/qemu-pr-helper.c
225
index XXXXXXX..XXXXXXX 100644
226
--- a/scsi/qemu-pr-helper.c
227
+++ b/scsi/qemu-pr-helper.c
228
@@ -XXX,XX +XXX,XX @@ static int do_sgio_worker(void *opaque)
229
static int do_sgio(int fd, const uint8_t *cdb, uint8_t *sense,
230
uint8_t *buf, int *sz, int dir)
231
{
232
- ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context());
233
int r;
234
235
PRHelperSGIOData data = {
236
@@ -XXX,XX +XXX,XX @@ static int do_sgio(int fd, const uint8_t *cdb, uint8_t *sense,
237
.dir = dir,
238
};
239
240
- r = thread_pool_submit_co(pool, do_sgio_worker, &data);
241
+ r = thread_pool_submit_co(do_sgio_worker, &data);
242
*sz = data.sz;
243
return r;
244
}
245
diff --git a/tests/unit/test-thread-pool.c b/tests/unit/test-thread-pool.c
246
index XXXXXXX..XXXXXXX 100644
247
--- a/tests/unit/test-thread-pool.c
248
+++ b/tests/unit/test-thread-pool.c
249
@@ -XXX,XX +XXX,XX @@
250
#include "qemu/main-loop.h"
251
252
static AioContext *ctx;
253
-static ThreadPool *pool;
254
static int active;
255
256
typedef struct {
257
@@ -XXX,XX +XXX,XX @@ static void done_cb(void *opaque, int ret)
258
static void test_submit(void)
259
{
260
WorkerTestData data = { .n = 0 };
261
- thread_pool_submit(pool, worker_cb, &data);
262
+ thread_pool_submit(worker_cb, &data);
263
while (data.n == 0) {
264
aio_poll(ctx, true);
265
}
266
@@ -XXX,XX +XXX,XX @@ static void test_submit(void)
267
static void test_submit_aio(void)
268
{
269
WorkerTestData data = { .n = 0, .ret = -EINPROGRESS };
270
- data.aiocb = thread_pool_submit_aio(pool, worker_cb, &data,
271
+ data.aiocb = thread_pool_submit_aio(worker_cb, &data,
272
done_cb, &data);
273
274
/* The callbacks are not called until after the first wait. */
275
@@ -XXX,XX +XXX,XX @@ static void co_test_cb(void *opaque)
276
active = 1;
277
data->n = 0;
278
data->ret = -EINPROGRESS;
279
- thread_pool_submit_co(pool, worker_cb, data);
280
+ thread_pool_submit_co(worker_cb, data);
281
282
/* The test continues in test_submit_co, after qemu_coroutine_enter... */
283
284
@@ -XXX,XX +XXX,XX @@ static void test_submit_many(void)
285
for (i = 0; i < 100; i++) {
286
data[i].n = 0;
287
data[i].ret = -EINPROGRESS;
288
- thread_pool_submit_aio(pool, worker_cb, &data[i], done_cb, &data[i]);
289
+ thread_pool_submit_aio(worker_cb, &data[i], done_cb, &data[i]);
290
}
291
292
active = 100;
293
@@ -XXX,XX +XXX,XX @@ static void do_test_cancel(bool sync)
294
for (i = 0; i < 100; i++) {
295
data[i].n = 0;
296
data[i].ret = -EINPROGRESS;
297
- data[i].aiocb = thread_pool_submit_aio(pool, long_cb, &data[i],
298
+ data[i].aiocb = thread_pool_submit_aio(long_cb, &data[i],
299
done_cb, &data[i]);
300
}
301
302
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
303
{
304
qemu_init_main_loop(&error_abort);
305
ctx = qemu_get_current_aio_context();
306
- pool = aio_get_thread_pool(ctx);
307
308
g_test_init(&argc, &argv, NULL);
309
g_test_add_func("/thread-pool/submit", test_submit);
310
diff --git a/util/thread-pool.c b/util/thread-pool.c
311
index XXXXXXX..XXXXXXX 100644
312
--- a/util/thread-pool.c
313
+++ b/util/thread-pool.c
314
@@ -XXX,XX +XXX,XX @@ static const AIOCBInfo thread_pool_aiocb_info = {
315
.get_aio_context = thread_pool_get_aio_context,
316
};
317
318
-BlockAIOCB *thread_pool_submit_aio(ThreadPool *pool,
319
- ThreadPoolFunc *func, void *arg,
320
- BlockCompletionFunc *cb, void *opaque)
321
+BlockAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg,
322
+ BlockCompletionFunc *cb, void *opaque)
323
{
324
ThreadPoolElement *req;
325
+ AioContext *ctx = qemu_get_current_aio_context();
326
+ ThreadPool *pool = aio_get_thread_pool(ctx);
327
328
/* Assert that the thread submitting work is the same running the pool */
329
assert(pool->ctx == qemu_get_current_aio_context());
330
@@ -XXX,XX +XXX,XX @@ static void thread_pool_co_cb(void *opaque, int ret)
331
aio_co_wake(co->co);
332
}
333
334
-int coroutine_fn thread_pool_submit_co(ThreadPool *pool, ThreadPoolFunc *func,
335
- void *arg)
336
+int coroutine_fn thread_pool_submit_co(ThreadPoolFunc *func, void *arg)
337
{
338
ThreadPoolCo tpc = { .co = qemu_coroutine_self(), .ret = -EINPROGRESS };
339
assert(qemu_in_coroutine());
340
- thread_pool_submit_aio(pool, func, arg, thread_pool_co_cb, &tpc);
341
+ thread_pool_submit_aio(func, arg, thread_pool_co_cb, &tpc);
342
qemu_coroutine_yield();
343
return tpc.ret;
344
}
345
346
-void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg)
347
+void thread_pool_submit(ThreadPoolFunc *func, void *arg)
348
{
349
- thread_pool_submit_aio(pool, func, arg, NULL, NULL);
350
+ thread_pool_submit_aio(func, arg, NULL, NULL);
351
}
352
353
void thread_pool_update_params(ThreadPool *pool, AioContext *ctx)
354
--
355
2.40.0
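
For readers skimming the diff above: after this change a caller no longer looks up a ThreadPool and passes it around; thread_pool_submit_aio() resolves the pool from the submitting thread's AioContext itself. A minimal sketch of a caller after the change follows; do_work(), work_done(), MyJob and submit_example() are made-up names, and the header paths are assumed to be the usual ones in the QEMU tree.

    #include "qemu/osdep.h"
    #include "block/thread-pool.h"

    typedef struct MyJob {
        int input;
        int result;
    } MyJob;

    /* Runs in a worker thread of the submitter's thread pool. */
    static int do_work(void *opaque)
    {
        MyJob *job = opaque;
        job->result = job->input * 2;
        return 0;
    }

    /* Completion callback, runs back in the submitting AioContext. */
    static void work_done(void *opaque, int ret)
    {
        MyJob *job = opaque;
        g_free(job);
    }

    static void submit_example(void)
    {
        MyJob *job = g_new0(MyJob, 1);
        job->input = 21;
        /* No ThreadPool argument: the current AioContext's pool is used. */
        thread_pool_submit_aio(do_work, job, work_done, job);
    }
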
Deleted patch
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
1
3
Functions that can do I/O are prime candidates for being coroutine_fns. Make the
4
change for those that are themselves called only from coroutine_fns.
5
6
In addition, coroutine_fns should do I/O using bdrv_co_*() functions, for
7
which it is required to hold the BlockDriverState graph lock. So also annotate
8
functions on the I/O path with TSA attributes, making it possible to
9
switch them to use bdrv_co_*() functions.
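
As an illustration of the pattern applied below, here is a sketch on a hypothetical helper (read_header() and its use of bs->file are illustrative, not vvfat code): a function that is only reachable from coroutine context gets the coroutine_fn and GRAPH_RDLOCK markers and can then use the bdrv_co_*() variants directly.

    /* Sketch only, assuming QEMU's block/block_int.h and graph-lock headers. */
    static int coroutine_fn GRAPH_RDLOCK
    read_header(BlockDriverState *bs, void *buf, size_t bytes)
    {
        /* GRAPH_RDLOCK is a TSA annotation: callers must already hold the
         * graph rdlock, which is what bdrv_co_pread() requires. */
        return bdrv_co_pread(bs->file, 0, bytes, buf, 0);
    }
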
10
11
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
12
Message-Id: <20230309084456.304669-2-pbonzini@redhat.com>
13
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
14
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
15
---
16
block/vvfat.c | 58 ++++++++++++++++++++++++++-------------------------
17
1 file changed, 30 insertions(+), 28 deletions(-)
18
19
diff --git a/block/vvfat.c b/block/vvfat.c
20
index XXXXXXX..XXXXXXX 100644
21
--- a/block/vvfat.c
22
+++ b/block/vvfat.c
23
@@ -XXX,XX +XXX,XX @@ static BDRVVVFATState *vvv = NULL;
24
#endif
25
26
static int enable_write_target(BlockDriverState *bs, Error **errp);
27
-static int is_consistent(BDRVVVFATState *s);
28
+static int coroutine_fn is_consistent(BDRVVVFATState *s);
29
30
static QemuOptsList runtime_opts = {
31
.name = "vvfat",
32
@@ -XXX,XX +XXX,XX @@ static void print_mapping(const mapping_t* mapping)
33
}
34
#endif
35
36
-static int vvfat_read(BlockDriverState *bs, int64_t sector_num,
37
- uint8_t *buf, int nb_sectors)
38
+static int coroutine_fn GRAPH_RDLOCK
39
+vvfat_read(BlockDriverState *bs, int64_t sector_num, uint8_t *buf, int nb_sectors)
40
{
41
BDRVVVFATState *s = bs->opaque;
42
int i;
43
@@ -XXX,XX +XXX,XX @@ static int vvfat_read(BlockDriverState *bs, int64_t sector_num,
44
DLOG(fprintf(stderr, "sectors %" PRId64 "+%" PRId64
45
" allocated\n", sector_num,
46
n >> BDRV_SECTOR_BITS));
47
- if (bdrv_pread(s->qcow, sector_num * BDRV_SECTOR_SIZE, n,
48
- buf + i * 0x200, 0) < 0) {
49
+ if (bdrv_co_pread(s->qcow, sector_num * BDRV_SECTOR_SIZE, n,
50
+ buf + i * 0x200, 0) < 0) {
51
return -1;
52
}
53
i += (n >> BDRV_SECTOR_BITS) - 1;
54
@@ -XXX,XX +XXX,XX @@ static int vvfat_read(BlockDriverState *bs, int64_t sector_num,
55
return 0;
56
}
57
58
-static int coroutine_fn
59
+static int coroutine_fn GRAPH_RDLOCK
60
vvfat_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
61
QEMUIOVector *qiov, BdrvRequestFlags flags)
62
{
63
@@ -XXX,XX +XXX,XX @@ static inline uint32_t modified_fat_get(BDRVVVFATState* s,
64
}
65
}
66
67
-static inline bool cluster_was_modified(BDRVVVFATState *s,
68
- uint32_t cluster_num)
69
+static inline bool coroutine_fn GRAPH_RDLOCK
70
+cluster_was_modified(BDRVVVFATState *s, uint32_t cluster_num)
71
{
72
int was_modified = 0;
73
int i;
74
@@ -XXX,XX +XXX,XX @@ typedef enum {
75
* Further, the files/directories handled by this function are
76
* assumed to be *not* deleted (and *only* those).
77
*/
78
-static uint32_t get_cluster_count_for_direntry(BDRVVVFATState* s,
79
- direntry_t* direntry, const char* path)
80
+static uint32_t coroutine_fn GRAPH_RDLOCK
81
+get_cluster_count_for_direntry(BDRVVVFATState* s, direntry_t* direntry, const char* path)
82
{
83
/*
84
* This is a little bit tricky:
85
@@ -XXX,XX +XXX,XX @@ static uint32_t get_cluster_count_for_direntry(BDRVVVFATState* s,
86
if (res) {
87
return -1;
88
}
89
- res = bdrv_pwrite(s->qcow, offset * BDRV_SECTOR_SIZE,
90
- BDRV_SECTOR_SIZE, s->cluster_buffer,
91
- 0);
92
+ res = bdrv_co_pwrite(s->qcow, offset * BDRV_SECTOR_SIZE,
93
+ BDRV_SECTOR_SIZE, s->cluster_buffer,
94
+ 0);
95
if (res < 0) {
96
return -2;
97
}
98
@@ -XXX,XX +XXX,XX @@ static uint32_t get_cluster_count_for_direntry(BDRVVVFATState* s,
99
* It returns 0 upon inconsistency or error, and the number of clusters
100
* used by the directory, its subdirectories and their files.
101
*/
102
-static int check_directory_consistency(BDRVVVFATState *s,
103
- int cluster_num, const char* path)
104
+static int coroutine_fn GRAPH_RDLOCK
105
+check_directory_consistency(BDRVVVFATState *s, int cluster_num, const char* path)
106
{
107
int ret = 0;
108
unsigned char* cluster = g_malloc(s->cluster_size);
109
@@ -XXX,XX +XXX,XX @@ DLOG(fprintf(stderr, "check direntry %d:\n", i); print_direntry(direntries + i))
110
}
111
112
/* returns 1 on success */
113
-static int is_consistent(BDRVVVFATState* s)
114
+static int coroutine_fn GRAPH_RDLOCK
115
+is_consistent(BDRVVVFATState* s)
116
{
117
int i, check;
118
int used_clusters_count = 0;
119
@@ -XXX,XX +XXX,XX @@ static int commit_mappings(BDRVVVFATState* s,
120
return 0;
121
}
122
123
-static int commit_direntries(BDRVVVFATState* s,
124
- int dir_index, int parent_mapping_index)
125
+static int coroutine_fn GRAPH_RDLOCK
126
+commit_direntries(BDRVVVFATState* s, int dir_index, int parent_mapping_index)
127
{
128
direntry_t* direntry = array_get(&(s->directory), dir_index);
129
uint32_t first_cluster = dir_index == 0 ? 0 : begin_of_direntry(direntry);
130
@@ -XXX,XX +XXX,XX @@ static int commit_direntries(BDRVVVFATState* s,
131
132
/* commit one file (adjust contents, adjust mapping),
133
return first_mapping_index */
134
-static int commit_one_file(BDRVVVFATState* s,
135
- int dir_index, uint32_t offset)
136
+static int coroutine_fn GRAPH_RDLOCK
137
+commit_one_file(BDRVVVFATState* s, int dir_index, uint32_t offset)
138
{
139
direntry_t* direntry = array_get(&(s->directory), dir_index);
140
uint32_t c = begin_of_direntry(direntry);
141
@@ -XXX,XX +XXX,XX @@ static int handle_renames_and_mkdirs(BDRVVVFATState* s)
142
/*
143
* TODO: make sure that the short name is not matching *another* file
144
*/
145
-static int handle_commits(BDRVVVFATState* s)
146
+static int coroutine_fn GRAPH_RDLOCK handle_commits(BDRVVVFATState* s)
147
{
148
int i, fail = 0;
149
150
@@ -XXX,XX +XXX,XX @@ static int handle_deletes(BDRVVVFATState* s)
151
* - recurse direntries from root (using bs->bdrv_pread)
152
* - delete files corresponding to mappings marked as deleted
153
*/
154
-static int do_commit(BDRVVVFATState* s)
155
+static int coroutine_fn GRAPH_RDLOCK do_commit(BDRVVVFATState* s)
156
{
157
int ret = 0;
158
159
@@ -XXX,XX +XXX,XX @@ DLOG(checkpoint());
160
return 0;
161
}
162
163
-static int try_commit(BDRVVVFATState* s)
164
+static int coroutine_fn GRAPH_RDLOCK try_commit(BDRVVVFATState* s)
165
{
166
vvfat_close_current_file(s);
167
DLOG(checkpoint());
168
@@ -XXX,XX +XXX,XX @@ DLOG(checkpoint());
169
return do_commit(s);
170
}
171
172
-static int vvfat_write(BlockDriverState *bs, int64_t sector_num,
173
- const uint8_t *buf, int nb_sectors)
174
+static int coroutine_fn GRAPH_RDLOCK
175
+vvfat_write(BlockDriverState *bs, int64_t sector_num,
176
+ const uint8_t *buf, int nb_sectors)
177
{
178
BDRVVVFATState *s = bs->opaque;
179
int i, ret;
180
@@ -XXX,XX +XXX,XX @@ DLOG(checkpoint());
181
* Use qcow backend. Commit later.
182
*/
183
DLOG(fprintf(stderr, "Write to qcow backend: %d + %d\n", (int)sector_num, nb_sectors));
184
- ret = bdrv_pwrite(s->qcow, sector_num * BDRV_SECTOR_SIZE,
185
- nb_sectors * BDRV_SECTOR_SIZE, buf, 0);
186
+ ret = bdrv_co_pwrite(s->qcow, sector_num * BDRV_SECTOR_SIZE,
187
+ nb_sectors * BDRV_SECTOR_SIZE, buf, 0);
188
if (ret < 0) {
189
fprintf(stderr, "Error writing to qcow backend\n");
190
return ret;
191
@@ -XXX,XX +XXX,XX @@ DLOG(checkpoint());
192
return 0;
193
}
194
195
-static int coroutine_fn
196
+static int coroutine_fn GRAPH_RDLOCK
197
vvfat_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
198
QEMUIOVector *qiov, BdrvRequestFlags flags)
199
{
200
--
201
2.40.0
Deleted patch
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
1
3
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
4
Message-Id: <20230309084456.304669-3-pbonzini@redhat.com>
5
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
6
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
7
---
8
block/blkdebug.c | 4 ++--
9
1 file changed, 2 insertions(+), 2 deletions(-)
10
11
diff --git a/block/blkdebug.c b/block/blkdebug.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/block/blkdebug.c
14
+++ b/block/blkdebug.c
15
@@ -XXX,XX +XXX,XX @@ out:
16
return ret;
17
}
18
19
-static int rule_check(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
20
- BlkdebugIOType iotype)
21
+static int coroutine_fn rule_check(BlockDriverState *bs, uint64_t offset,
22
+ uint64_t bytes, BlkdebugIOType iotype)
23
{
24
BDRVBlkdebugState *s = bs->opaque;
25
BlkdebugRule *rule = NULL;
26
--
27
2.40.0
Deleted patch
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
1
3
mirror_flush calls the mixed function blk_flush, but it is only called
4
from mirror_run; so call the coroutine version and make mirror_flush
5
a coroutine_fn too.
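
In other words, as a sketch rather than the actual mirror code: inside a coroutine_fn, call the coroutine variant directly instead of going through the mixed wrapper.

    static int coroutine_fn example_flush(BlockBackend *target)
    {
        /* blk_co_flush() is the coroutine version; calling the mixed
         * blk_flush() from coroutine context also works, but only via the
         * co_wrapper indirection this series is moving away from. */
        return blk_co_flush(target);
    }
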
6
7
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
8
Message-Id: <20230309084456.304669-4-pbonzini@redhat.com>
9
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
10
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
11
---
12
block/mirror.c | 4 ++--
13
1 file changed, 2 insertions(+), 2 deletions(-)
14
15
diff --git a/block/mirror.c b/block/mirror.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/block/mirror.c
18
+++ b/block/mirror.c
19
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
20
/* Called when going out of the streaming phase to flush the bulk of the
21
* data to the medium, or just before completing.
22
*/
23
-static int mirror_flush(MirrorBlockJob *s)
24
+static int coroutine_fn mirror_flush(MirrorBlockJob *s)
25
{
26
- int ret = blk_flush(s->target);
27
+ int ret = blk_co_flush(s->target);
28
if (ret < 0) {
29
if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
30
s->ret = ret;
31
--
32
2.40.0
Deleted patch
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
1
3
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
4
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
5
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
6
---
7
nbd/server.c | 48 ++++++++++++++++++++++++------------------------
8
1 file changed, 24 insertions(+), 24 deletions(-)
9
10
diff --git a/nbd/server.c b/nbd/server.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/nbd/server.c
13
+++ b/nbd/server.c
14
@@ -XXX,XX +XXX,XX @@ nbd_read_eof(NBDClient *client, void *buffer, size_t size, Error **errp)
15
return 1;
16
}
17
18
-static int nbd_receive_request(NBDClient *client, NBDRequest *request,
19
- Error **errp)
20
+static int coroutine_fn nbd_receive_request(NBDClient *client, NBDRequest *request,
21
+ Error **errp)
22
{
23
uint8_t buf[NBD_REQUEST_SIZE];
24
uint32_t magic;
25
@@ -XXX,XX +XXX,XX @@ static inline void set_be_simple_reply(NBDSimpleReply *reply, uint64_t error,
26
stq_be_p(&reply->handle, handle);
27
}
28
29
-static int nbd_co_send_simple_reply(NBDClient *client,
30
- uint64_t handle,
31
- uint32_t error,
32
- void *data,
33
- size_t len,
34
- Error **errp)
35
+static int coroutine_fn nbd_co_send_simple_reply(NBDClient *client,
36
+ uint64_t handle,
37
+ uint32_t error,
38
+ void *data,
39
+ size_t len,
40
+ Error **errp)
41
{
42
NBDSimpleReply reply;
43
int nbd_err = system_errno_to_nbd_errno(error);
44
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn nbd_co_send_sparse_read(NBDClient *client,
45
stl_be_p(&chunk.length, pnum);
46
ret = nbd_co_send_iov(client, iov, 1, errp);
47
} else {
48
- ret = blk_pread(exp->common.blk, offset + progress, pnum,
49
- data + progress, 0);
50
+ ret = blk_co_pread(exp->common.blk, offset + progress, pnum,
51
+ data + progress, 0);
52
if (ret < 0) {
53
error_setg_errno(errp, -ret, "reading from file failed");
54
break;
55
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn blockalloc_to_extents(BlockBackend *blk,
56
* @ea is converted to BE by the function
57
* @last controls whether NBD_REPLY_FLAG_DONE is sent.
58
*/
59
-static int nbd_co_send_extents(NBDClient *client, uint64_t handle,
60
- NBDExtentArray *ea,
61
- bool last, uint32_t context_id, Error **errp)
62
+static int coroutine_fn
63
+nbd_co_send_extents(NBDClient *client, uint64_t handle, NBDExtentArray *ea,
64
+ bool last, uint32_t context_id, Error **errp)
65
{
66
NBDStructuredMeta chunk;
67
struct iovec iov[] = {
68
@@ -XXX,XX +XXX,XX @@ static void bitmap_to_extents(BdrvDirtyBitmap *bitmap,
69
bdrv_dirty_bitmap_unlock(bitmap);
70
}
71
72
-static int nbd_co_send_bitmap(NBDClient *client, uint64_t handle,
73
- BdrvDirtyBitmap *bitmap, uint64_t offset,
74
- uint32_t length, bool dont_fragment, bool last,
75
- uint32_t context_id, Error **errp)
76
+static int coroutine_fn nbd_co_send_bitmap(NBDClient *client, uint64_t handle,
77
+ BdrvDirtyBitmap *bitmap, uint64_t offset,
78
+ uint32_t length, bool dont_fragment, bool last,
79
+ uint32_t context_id, Error **errp)
80
{
81
unsigned int nb_extents = dont_fragment ? 1 : NBD_MAX_BLOCK_STATUS_EXTENTS;
82
g_autoptr(NBDExtentArray) ea = nbd_extent_array_new(nb_extents);
83
@@ -XXX,XX +XXX,XX @@ static int nbd_co_send_bitmap(NBDClient *client, uint64_t handle,
84
* to the client (although the caller may still need to disconnect after
85
* reporting the error).
86
*/
87
-static int nbd_co_receive_request(NBDRequestData *req, NBDRequest *request,
88
- Error **errp)
89
+static int coroutine_fn nbd_co_receive_request(NBDRequestData *req, NBDRequest *request,
90
+ Error **errp)
91
{
92
NBDClient *client = req->client;
93
int valid_flags;
94
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int nbd_do_cmd_read(NBDClient *client, NBDRequest *request,
95
data, request->len, errp);
96
}
97
98
- ret = blk_pread(exp->common.blk, request->from, request->len, data, 0);
99
+ ret = blk_co_pread(exp->common.blk, request->from, request->len, data, 0);
100
if (ret < 0) {
101
return nbd_send_generic_reply(client, request->handle, ret,
102
"reading from file failed", errp);
103
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
104
if (request->flags & NBD_CMD_FLAG_FUA) {
105
flags |= BDRV_REQ_FUA;
106
}
107
- ret = blk_pwrite(exp->common.blk, request->from, request->len, data,
108
- flags);
109
+ ret = blk_co_pwrite(exp->common.blk, request->from, request->len, data,
110
+ flags);
111
return nbd_send_generic_reply(client, request->handle, ret,
112
"writing to file failed", errp);
113
114
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
115
if (request->flags & NBD_CMD_FLAG_FAST_ZERO) {
116
flags |= BDRV_REQ_NO_FALLBACK;
117
}
118
- ret = blk_pwrite_zeroes(exp->common.blk, request->from, request->len,
119
- flags);
120
+ ret = blk_co_pwrite_zeroes(exp->common.blk, request->from, request->len,
121
+ flags);
122
return nbd_send_generic_reply(client, request->handle, ret,
123
"writing to file failed", errp);
124
125
--
126
2.40.0
Deleted patch
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
1
3
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
4
Message-Id: <20230309084456.304669-6-pbonzini@redhat.com>
5
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
6
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
7
---
8
hw/9pfs/9p.h | 4 ++--
9
hw/9pfs/codir.c | 6 +++---
10
2 files changed, 5 insertions(+), 5 deletions(-)
11
12
diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h
13
index XXXXXXX..XXXXXXX 100644
14
--- a/hw/9pfs/9p.h
15
+++ b/hw/9pfs/9p.h
16
@@ -XXX,XX +XXX,XX @@ typedef struct V9fsDir {
17
QemuMutex readdir_mutex_L;
18
} V9fsDir;
19
20
-static inline void v9fs_readdir_lock(V9fsDir *dir)
21
+static inline void coroutine_fn v9fs_readdir_lock(V9fsDir *dir)
22
{
23
if (dir->proto_version == V9FS_PROTO_2000U) {
24
qemu_co_mutex_lock(&dir->readdir_mutex_u);
25
@@ -XXX,XX +XXX,XX @@ static inline void v9fs_readdir_lock(V9fsDir *dir)
26
}
27
}
28
29
-static inline void v9fs_readdir_unlock(V9fsDir *dir)
30
+static inline void coroutine_fn v9fs_readdir_unlock(V9fsDir *dir)
31
{
32
if (dir->proto_version == V9FS_PROTO_2000U) {
33
qemu_co_mutex_unlock(&dir->readdir_mutex_u);
34
diff --git a/hw/9pfs/codir.c b/hw/9pfs/codir.c
35
index XXXXXXX..XXXXXXX 100644
36
--- a/hw/9pfs/codir.c
37
+++ b/hw/9pfs/codir.c
38
@@ -XXX,XX +XXX,XX @@ int coroutine_fn v9fs_co_readdir(V9fsPDU *pdu, V9fsFidState *fidp,
39
*
40
* See v9fs_co_readdir_many() (as its only user) below for details.
41
*/
42
-static int do_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp,
43
- struct V9fsDirEnt **entries, off_t offset,
44
- int32_t maxsize, bool dostat)
45
+static int coroutine_fn
46
+do_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp, struct V9fsDirEnt **entries,
47
+ off_t offset, int32_t maxsize, bool dostat)
48
{
49
V9fsState *s = pdu->s;
50
V9fsString name;
51
--
52
2.40.0
Deleted patch
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
1
3
do_sgio can suspend via the coroutine function thread_pool_submit_co, so it
4
has to be coroutine_fn as well---and the same is true of all its direct and
5
indirect callers.
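
A condensed sketch of that rule (the function names are illustrative, not the helper's real call chain): once a helper yields in thread_pool_submit_co(), the coroutine_fn marker has to propagate up through every caller.

    /* Blocking work, executed in a worker thread. */
    static int sgio_worker(void *opaque)
    {
        return 0;
    }

    static int coroutine_fn submit_io(void *arg)
    {
        return thread_pool_submit_co(sgio_worker, arg);   /* yields here */
    }

    /* Every direct and indirect caller must be coroutine_fn as well. */
    static int coroutine_fn handle_request(void *arg)
    {
        return submit_io(arg);
    }
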
6
7
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
8
Message-Id: <20230309084456.304669-7-pbonzini@redhat.com>
9
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
10
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
11
---
12
scsi/qemu-pr-helper.c | 22 +++++++++++-----------
13
1 file changed, 11 insertions(+), 11 deletions(-)
14
15
diff --git a/scsi/qemu-pr-helper.c b/scsi/qemu-pr-helper.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/scsi/qemu-pr-helper.c
18
+++ b/scsi/qemu-pr-helper.c
19
@@ -XXX,XX +XXX,XX @@ static int do_sgio_worker(void *opaque)
20
return status;
21
}
22
23
-static int do_sgio(int fd, const uint8_t *cdb, uint8_t *sense,
24
- uint8_t *buf, int *sz, int dir)
25
+static int coroutine_fn do_sgio(int fd, const uint8_t *cdb, uint8_t *sense,
26
+ uint8_t *buf, int *sz, int dir)
27
{
28
int r;
29
30
@@ -XXX,XX +XXX,XX @@ static SCSISense mpath_generic_sense(int r)
31
}
32
}
33
34
-static int mpath_reconstruct_sense(int fd, int r, uint8_t *sense)
35
+static int coroutine_fn mpath_reconstruct_sense(int fd, int r, uint8_t *sense)
36
{
37
switch (r) {
38
case MPATH_PR_SUCCESS:
39
@@ -XXX,XX +XXX,XX @@ static int mpath_reconstruct_sense(int fd, int r, uint8_t *sense)
40
}
41
}
42
43
-static int multipath_pr_in(int fd, const uint8_t *cdb, uint8_t *sense,
44
- uint8_t *data, int sz)
45
+static int coroutine_fn multipath_pr_in(int fd, const uint8_t *cdb, uint8_t *sense,
46
+ uint8_t *data, int sz)
47
{
48
int rq_servact = cdb[1];
49
struct prin_resp resp;
50
@@ -XXX,XX +XXX,XX @@ static int multipath_pr_in(int fd, const uint8_t *cdb, uint8_t *sense,
51
return mpath_reconstruct_sense(fd, r, sense);
52
}
53
54
-static int multipath_pr_out(int fd, const uint8_t *cdb, uint8_t *sense,
55
- const uint8_t *param, int sz)
56
+static int coroutine_fn multipath_pr_out(int fd, const uint8_t *cdb, uint8_t *sense,
57
+ const uint8_t *param, int sz)
58
{
59
int rq_servact = cdb[1];
60
int rq_scope = cdb[2] >> 4;
61
@@ -XXX,XX +XXX,XX @@ static int multipath_pr_out(int fd, const uint8_t *cdb, uint8_t *sense,
62
}
63
#endif
64
65
-static int do_pr_in(int fd, const uint8_t *cdb, uint8_t *sense,
66
- uint8_t *data, int *resp_sz)
67
+static int coroutine_fn do_pr_in(int fd, const uint8_t *cdb, uint8_t *sense,
68
+ uint8_t *data, int *resp_sz)
69
{
70
#ifdef CONFIG_MPATH
71
if (is_mpath(fd)) {
72
@@ -XXX,XX +XXX,XX @@ static int do_pr_in(int fd, const uint8_t *cdb, uint8_t *sense,
73
SG_DXFER_FROM_DEV);
74
}
75
76
-static int do_pr_out(int fd, const uint8_t *cdb, uint8_t *sense,
77
- const uint8_t *param, int sz)
78
+static int coroutine_fn do_pr_out(int fd, const uint8_t *cdb, uint8_t *sense,
79
+ const uint8_t *param, int sz)
80
{
81
int resp_sz;
82
83
--
84
2.40.0
Deleted patch
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
1
3
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
4
Message-Id: <20230309084456.304669-8-pbonzini@redhat.com>
5
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
6
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
7
---
8
tests/unit/test-thread-pool.c | 2 +-
9
1 file changed, 1 insertion(+), 1 deletion(-)
10
11
diff --git a/tests/unit/test-thread-pool.c b/tests/unit/test-thread-pool.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tests/unit/test-thread-pool.c
14
+++ b/tests/unit/test-thread-pool.c
15
@@ -XXX,XX +XXX,XX @@ static void test_submit_aio(void)
16
g_assert_cmpint(data.ret, ==, 0);
17
}
18
19
-static void co_test_cb(void *opaque)
20
+static void coroutine_fn co_test_cb(void *opaque)
21
{
22
WorkerTestData *data = opaque;
23
24
--
25
2.40.0
Deleted patch
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
1
3
Functions that can do I/O (including calling bdrv_is_allocated
4
and bdrv_block_status functions) are prime candidates for being
5
coroutine_fns. Make the change for those that are themselves called
6
only from coroutine_fns. Also annotate that they are called with the
7
graph rdlock taken, thus allowing them to call bdrv_co_*() functions
8
for I/O.
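
The shape of the annotation, shown on a hypothetical helper rather than the real qcow2 functions: both the prototype and the definition carry coroutine_fn, plus GRAPH_RDLOCK where the function does bdrv_co_*() I/O, so the TSA attributes can check that callers hold the graph rdlock.

    /* In the header (sketch): */
    int coroutine_fn GRAPH_RDLOCK
    example_read_table(BlockDriverState *bs, uint64_t offset,
                       uint64_t *table, size_t entries);

    /* In the .c file (sketch): */
    int coroutine_fn GRAPH_RDLOCK
    example_read_table(BlockDriverState *bs, uint64_t offset,
                       uint64_t *table, size_t entries)
    {
        return bdrv_co_pread(bs->file, offset, entries * sizeof(*table),
                             table, 0);
    }
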
9
10
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
11
Message-Id: <20230309084456.304669-9-pbonzini@redhat.com>
12
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
13
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
14
---
15
block/qcow2.h | 15 ++++++++-------
16
block/qcow2-bitmap.c | 2 +-
17
block/qcow2-cluster.c | 21 +++++++++++++--------
18
block/qcow2-refcount.c | 8 ++++----
19
block/qcow2-snapshot.c | 25 +++++++++++++------------
20
block/qcow2.c | 27 ++++++++++++++-------------
21
6 files changed, 53 insertions(+), 45 deletions(-)
22
23
diff --git a/block/qcow2.h b/block/qcow2.h
24
index XXXXXXX..XXXXXXX 100644
25
--- a/block/qcow2.h
26
+++ b/block/qcow2.h
27
@@ -XXX,XX +XXX,XX @@ int64_t qcow2_refcount_area(BlockDriverState *bs, uint64_t offset,
28
uint64_t new_refblock_offset);
29
30
int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size);
31
-int64_t qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
32
- int64_t nb_clusters);
33
-int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size);
34
+int64_t coroutine_fn qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
35
+ int64_t nb_clusters);
36
+int64_t coroutine_fn qcow2_alloc_bytes(BlockDriverState *bs, int size);
37
void qcow2_free_clusters(BlockDriverState *bs,
38
int64_t offset, int64_t size,
39
enum qcow2_discard_type type);
40
@@ -XXX,XX +XXX,XX @@ int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order,
41
BlockDriverAmendStatusCB *status_cb,
42
void *cb_opaque, Error **errp);
43
int coroutine_fn GRAPH_RDLOCK qcow2_shrink_reftable(BlockDriverState *bs);
44
-int64_t qcow2_get_last_cluster(BlockDriverState *bs, int64_t size);
45
+int64_t coroutine_fn qcow2_get_last_cluster(BlockDriverState *bs, int64_t size);
46
int coroutine_fn qcow2_detect_metadata_preallocation(BlockDriverState *bs);
47
48
/* qcow2-cluster.c functions */
49
@@ -XXX,XX +XXX,XX @@ void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry,
50
int coroutine_fn GRAPH_RDLOCK
51
qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m);
52
53
-void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m);
54
+void coroutine_fn qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m);
55
int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
56
uint64_t bytes, enum qcow2_discard_type type,
57
bool full_discard);
58
@@ -XXX,XX +XXX,XX @@ int qcow2_snapshot_load_tmp(BlockDriverState *bs,
59
Error **errp);
60
61
void qcow2_free_snapshots(BlockDriverState *bs);
62
-int qcow2_read_snapshots(BlockDriverState *bs, Error **errp);
63
+int coroutine_fn GRAPH_RDLOCK
64
+qcow2_read_snapshots(BlockDriverState *bs, Error **errp);
65
int qcow2_write_snapshots(BlockDriverState *bs);
66
67
int coroutine_fn GRAPH_RDLOCK
68
@@ -XXX,XX +XXX,XX @@ bool coroutine_fn qcow2_load_dirty_bitmaps(BlockDriverState *bs,
69
bool qcow2_get_bitmap_info_list(BlockDriverState *bs,
70
Qcow2BitmapInfoList **info_list, Error **errp);
71
int qcow2_reopen_bitmaps_rw(BlockDriverState *bs, Error **errp);
72
-int qcow2_truncate_bitmaps_check(BlockDriverState *bs, Error **errp);
73
+int coroutine_fn qcow2_truncate_bitmaps_check(BlockDriverState *bs, Error **errp);
74
bool qcow2_store_persistent_dirty_bitmaps(BlockDriverState *bs,
75
bool release_stored, Error **errp);
76
int qcow2_reopen_bitmaps_ro(BlockDriverState *bs, Error **errp);
77
diff --git a/block/qcow2-bitmap.c b/block/qcow2-bitmap.c
78
index XXXXXXX..XXXXXXX 100644
79
--- a/block/qcow2-bitmap.c
80
+++ b/block/qcow2-bitmap.c
81
@@ -XXX,XX +XXX,XX @@ out:
82
}
83
84
/* Checks to see if it's safe to resize bitmaps */
85
-int qcow2_truncate_bitmaps_check(BlockDriverState *bs, Error **errp)
86
+int coroutine_fn qcow2_truncate_bitmaps_check(BlockDriverState *bs, Error **errp)
87
{
88
BDRVQcow2State *s = bs->opaque;
89
Qcow2BitmapList *bm_list;
90
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
91
index XXXXXXX..XXXXXXX 100644
92
--- a/block/qcow2-cluster.c
93
+++ b/block/qcow2-cluster.c
94
@@ -XXX,XX +XXX,XX @@ err:
95
* Frees the allocated clusters because the request failed and they won't
96
* actually be linked.
97
*/
98
-void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m)
99
+void coroutine_fn qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m)
100
{
101
BDRVQcow2State *s = bs->opaque;
102
if (!has_data_file(bs) && !m->keep_old_clusters) {
103
@@ -XXX,XX +XXX,XX @@ void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m)
104
*
105
* Returns 0 on success, -errno on failure.
106
*/
107
-static int calculate_l2_meta(BlockDriverState *bs, uint64_t host_cluster_offset,
108
- uint64_t guest_offset, unsigned bytes,
109
- uint64_t *l2_slice, QCowL2Meta **m, bool keep_old)
110
+static int coroutine_fn calculate_l2_meta(BlockDriverState *bs,
111
+ uint64_t host_cluster_offset,
112
+ uint64_t guest_offset, unsigned bytes,
113
+ uint64_t *l2_slice, QCowL2Meta **m,
114
+ bool keep_old)
115
{
116
BDRVQcow2State *s = bs->opaque;
117
int sc_index, l2_index = offset_to_l2_slice_index(s, guest_offset);
118
@@ -XXX,XX +XXX,XX @@ out:
119
* function has been waiting for another request and the allocation must be
120
* restarted, but the whole request should not be failed.
121
*/
122
-static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
123
- uint64_t *host_offset, uint64_t *nb_clusters)
124
+static int coroutine_fn do_alloc_cluster_offset(BlockDriverState *bs,
125
+ uint64_t guest_offset,
126
+ uint64_t *host_offset,
127
+ uint64_t *nb_clusters)
128
{
129
BDRVQcow2State *s = bs->opaque;
130
131
@@ -XXX,XX +XXX,XX @@ static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
132
return nb_clusters;
133
}
134
135
-static int zero_l2_subclusters(BlockDriverState *bs, uint64_t offset,
136
- unsigned nb_subclusters)
137
+static int coroutine_fn
138
+zero_l2_subclusters(BlockDriverState *bs, uint64_t offset,
139
+ unsigned nb_subclusters)
140
{
141
BDRVQcow2State *s = bs->opaque;
142
uint64_t *l2_slice;
143
diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c
144
index XXXXXXX..XXXXXXX 100644
145
--- a/block/qcow2-refcount.c
146
+++ b/block/qcow2-refcount.c
147
@@ -XXX,XX +XXX,XX @@ int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size)
148
return offset;
149
}
150
151
-int64_t qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
152
- int64_t nb_clusters)
153
+int64_t coroutine_fn qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
154
+ int64_t nb_clusters)
155
{
156
BDRVQcow2State *s = bs->opaque;
157
uint64_t cluster_index, refcount;
158
@@ -XXX,XX +XXX,XX @@ int64_t qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
159
160
/* only used to allocate compressed sectors. We try to allocate
161
contiguous sectors. size must be <= cluster_size */
162
-int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size)
163
+int64_t coroutine_fn qcow2_alloc_bytes(BlockDriverState *bs, int size)
164
{
165
BDRVQcow2State *s = bs->opaque;
166
int64_t offset;
167
@@ -XXX,XX +XXX,XX @@ out:
168
return ret;
169
}
170
171
-int64_t qcow2_get_last_cluster(BlockDriverState *bs, int64_t size)
172
+int64_t coroutine_fn qcow2_get_last_cluster(BlockDriverState *bs, int64_t size)
173
{
174
BDRVQcow2State *s = bs->opaque;
175
int64_t i;
176
diff --git a/block/qcow2-snapshot.c b/block/qcow2-snapshot.c
177
index XXXXXXX..XXXXXXX 100644
178
--- a/block/qcow2-snapshot.c
179
+++ b/block/qcow2-snapshot.c
180
@@ -XXX,XX +XXX,XX @@ void qcow2_free_snapshots(BlockDriverState *bs)
181
* qcow2_check_refcounts() does not do anything with snapshots'
182
* extra data.)
183
*/
184
-static int qcow2_do_read_snapshots(BlockDriverState *bs, bool repair,
185
- int *nb_clusters_reduced,
186
- int *extra_data_dropped,
187
- Error **errp)
188
+static coroutine_fn GRAPH_RDLOCK
189
+int qcow2_do_read_snapshots(BlockDriverState *bs, bool repair,
190
+ int *nb_clusters_reduced,
191
+ int *extra_data_dropped,
192
+ Error **errp)
193
{
194
BDRVQcow2State *s = bs->opaque;
195
QCowSnapshotHeader h;
196
@@ -XXX,XX +XXX,XX @@ static int qcow2_do_read_snapshots(BlockDriverState *bs, bool repair,
197
198
/* Read statically sized part of the snapshot header */
199
offset = ROUND_UP(offset, 8);
200
- ret = bdrv_pread(bs->file, offset, sizeof(h), &h, 0);
201
+ ret = bdrv_co_pread(bs->file, offset, sizeof(h), &h, 0);
202
if (ret < 0) {
203
error_setg_errno(errp, -ret, "Failed to read snapshot table");
204
goto fail;
205
@@ -XXX,XX +XXX,XX @@ static int qcow2_do_read_snapshots(BlockDriverState *bs, bool repair,
206
}
207
208
/* Read known extra data */
209
- ret = bdrv_pread(bs->file, offset,
210
- MIN(sizeof(extra), sn->extra_data_size), &extra, 0);
211
+ ret = bdrv_co_pread(bs->file, offset,
212
+ MIN(sizeof(extra), sn->extra_data_size), &extra, 0);
213
if (ret < 0) {
214
error_setg_errno(errp, -ret, "Failed to read snapshot table");
215
goto fail;
216
@@ -XXX,XX +XXX,XX @@ static int qcow2_do_read_snapshots(BlockDriverState *bs, bool repair,
217
/* Store unknown extra data */
218
unknown_extra_data_size = sn->extra_data_size - sizeof(extra);
219
sn->unknown_extra_data = g_malloc(unknown_extra_data_size);
220
- ret = bdrv_pread(bs->file, offset, unknown_extra_data_size,
221
- sn->unknown_extra_data, 0);
222
+ ret = bdrv_co_pread(bs->file, offset, unknown_extra_data_size,
223
+ sn->unknown_extra_data, 0);
224
if (ret < 0) {
225
error_setg_errno(errp, -ret,
226
"Failed to read snapshot table");
227
@@ -XXX,XX +XXX,XX @@ static int qcow2_do_read_snapshots(BlockDriverState *bs, bool repair,
228
229
/* Read snapshot ID */
230
sn->id_str = g_malloc(id_str_size + 1);
231
- ret = bdrv_pread(bs->file, offset, id_str_size, sn->id_str, 0);
232
+ ret = bdrv_co_pread(bs->file, offset, id_str_size, sn->id_str, 0);
233
if (ret < 0) {
234
error_setg_errno(errp, -ret, "Failed to read snapshot table");
235
goto fail;
236
@@ -XXX,XX +XXX,XX @@ static int qcow2_do_read_snapshots(BlockDriverState *bs, bool repair,
237
238
/* Read snapshot name */
239
sn->name = g_malloc(name_size + 1);
240
- ret = bdrv_pread(bs->file, offset, name_size, sn->name, 0);
241
+ ret = bdrv_co_pread(bs->file, offset, name_size, sn->name, 0);
242
if (ret < 0) {
243
error_setg_errno(errp, -ret, "Failed to read snapshot table");
244
goto fail;
245
@@ -XXX,XX +XXX,XX @@ fail:
246
return ret;
247
}
248
249
-int qcow2_read_snapshots(BlockDriverState *bs, Error **errp)
250
+int coroutine_fn qcow2_read_snapshots(BlockDriverState *bs, Error **errp)
251
{
252
return qcow2_do_read_snapshots(bs, false, NULL, NULL, errp);
253
}
254
diff --git a/block/qcow2.c b/block/qcow2.c
255
index XXXXXXX..XXXXXXX 100644
256
--- a/block/qcow2.c
257
+++ b/block/qcow2.c
258
@@ -XXX,XX +XXX,XX @@ qcow2_extract_crypto_opts(QemuOpts *opts, const char *fmt, Error **errp)
259
* unknown magic is skipped (future extension this version knows nothing about)
260
* return 0 upon success, non-0 otherwise
261
*/
262
-static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
263
- uint64_t end_offset, void **p_feature_table,
264
- int flags, bool *need_update_header,
265
- Error **errp)
266
+static int coroutine_fn GRAPH_RDLOCK
267
+qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
268
+ uint64_t end_offset, void **p_feature_table,
269
+ int flags, bool *need_update_header, Error **errp)
270
{
271
BDRVQcow2State *s = bs->opaque;
272
QCowExtension ext;
273
@@ -XXX,XX +XXX,XX @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
274
printf("attempting to read extended header in offset %lu\n", offset);
275
#endif
276
277
- ret = bdrv_pread(bs->file, offset, sizeof(ext), &ext, 0);
278
+ ret = bdrv_co_pread(bs->file, offset, sizeof(ext), &ext, 0);
279
if (ret < 0) {
280
error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: "
281
"pread fail from offset %" PRIu64, offset);
282
@@ -XXX,XX +XXX,XX @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
283
sizeof(bs->backing_format));
284
return 2;
285
}
286
- ret = bdrv_pread(bs->file, offset, ext.len, bs->backing_format, 0);
287
+ ret = bdrv_co_pread(bs->file, offset, ext.len, bs->backing_format, 0);
288
if (ret < 0) {
289
error_setg_errno(errp, -ret, "ERROR: ext_backing_format: "
290
"Could not read format name");
291
@@ -XXX,XX +XXX,XX @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
292
case QCOW2_EXT_MAGIC_FEATURE_TABLE:
293
if (p_feature_table != NULL) {
294
void *feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature));
295
- ret = bdrv_pread(bs->file, offset, ext.len, feature_table, 0);
296
+ ret = bdrv_co_pread(bs->file, offset, ext.len, feature_table, 0);
297
if (ret < 0) {
298
error_setg_errno(errp, -ret, "ERROR: ext_feature_table: "
299
"Could not read table");
300
@@ -XXX,XX +XXX,XX @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
301
return -EINVAL;
302
}
303
304
- ret = bdrv_pread(bs->file, offset, ext.len, &s->crypto_header, 0);
305
+ ret = bdrv_co_pread(bs->file, offset, ext.len, &s->crypto_header, 0);
306
if (ret < 0) {
307
error_setg_errno(errp, -ret,
308
"Unable to read CRYPTO header extension");
309
@@ -XXX,XX +XXX,XX @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
310
break;
311
}
312
313
- ret = bdrv_pread(bs->file, offset, ext.len, &bitmaps_ext, 0);
314
+ ret = bdrv_co_pread(bs->file, offset, ext.len, &bitmaps_ext, 0);
315
if (ret < 0) {
316
error_setg_errno(errp, -ret, "bitmaps_ext: "
317
"Could not read ext header");
318
@@ -XXX,XX +XXX,XX @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
319
case QCOW2_EXT_MAGIC_DATA_FILE:
320
{
321
s->image_data_file = g_malloc0(ext.len + 1);
322
- ret = bdrv_pread(bs->file, offset, ext.len, s->image_data_file, 0);
323
+ ret = bdrv_co_pread(bs->file, offset, ext.len, s->image_data_file, 0);
324
if (ret < 0) {
325
error_setg_errno(errp, -ret,
326
"ERROR: Could not read data file name");
327
@@ -XXX,XX +XXX,XX @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
328
uext->len = ext.len;
329
QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next);
330
331
- ret = bdrv_pread(bs->file, offset, uext->len, uext->data, 0);
332
+ ret = bdrv_co_pread(bs->file, offset, uext->len, uext->data, 0);
333
if (ret < 0) {
334
error_setg_errno(errp, -ret, "ERROR: unknown extension: "
335
"Could not read data");
336
@@ -XXX,XX +XXX,XX @@ static void qcow2_update_options_abort(BlockDriverState *bs,
337
qapi_free_QCryptoBlockOpenOptions(r->crypto_opts);
338
}
339
340
-static int qcow2_update_options(BlockDriverState *bs, QDict *options,
341
- int flags, Error **errp)
342
+static int coroutine_fn
343
+qcow2_update_options(BlockDriverState *bs, QDict *options, int flags,
344
+ Error **errp)
345
{
346
Qcow2ReopenState r = {};
347
int ret;
348
--
349
2.40.0
Deleted patch
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
1
3
Functions that can do I/O are prime candidates for being coroutine_fns. Make the
4
change for the one that is itself called only from coroutine_fns. Unfortunately
5
vmdk does not use a coroutine_fn for the bulk of the open (like qcow2 does), so
6
vmdk_read_cid cannot have the same treatment.
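
Put as a sketch (names invented, not vmdk's actual call graph): only a helper whose every caller already runs in coroutine context may take the coroutine_fn marker; one that is also reached from a non-coroutine path such as the open code has to stay a plain function.

    /* All of its callers are coroutine_fns, so it may be annotated. */
    static int coroutine_fn check_helper(BlockDriverState *bs)
    {
        return 1;
    }

    /* Also reached from the (non-coroutine) open path, so it must stay
     * a plain function for now. */
    static int open_helper(BlockDriverState *bs)
    {
        return 0;
    }
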
7
8
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
9
Message-Id: <20230309084456.304669-10-pbonzini@redhat.com>
10
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
11
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
12
---
13
block/vmdk.c | 2 +-
14
1 file changed, 1 insertion(+), 1 deletion(-)
15
16
diff --git a/block/vmdk.c b/block/vmdk.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/block/vmdk.c
19
+++ b/block/vmdk.c
20
@@ -XXX,XX +XXX,XX @@ out:
21
return ret;
22
}
23
24
-static int vmdk_is_cid_valid(BlockDriverState *bs)
25
+static int coroutine_fn vmdk_is_cid_valid(BlockDriverState *bs)
26
{
27
BDRVVmdkState *s = bs->opaque;
28
uint32_t cur_pcid;
29
--
30
2.40.0
Deleted patch
1
From: Wang Liang <wangliangzz@inspur.com>
2
1
3
hmp_commit() calls blk_is_available() from a non-coroutine context (and in
4
the main loop). blk_is_available() is a co_wrapper_mixed_bdrv_rdlock
5
function, and in the non-coroutine context it calls AIO_WAIT_WHILE(),
6
which crashes if the AioContext lock has not been taken beforehand.
7
8
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1615
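
The convention the fix below restores, as a stripped-down sketch with the monitor plumbing and error reporting omitted: take the AioContext lock before calling a co_wrapper_mixed_bdrv_rdlock function such as blk_is_available() from non-coroutine code, so that its internal AIO_WAIT_WHILE() finds the lock held.

    static void commit_sketch(BlockBackend *blk)
    {
        BlockDriverState *bs = bdrv_skip_implicit_filters(blk_bs(blk));
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        if (blk_is_available(blk)) {
            ret = bdrv_commit(bs);
            (void)ret;          /* a real caller would report errors */
        }
        aio_context_release(aio_context);
    }
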
9
Signed-off-by: Wang Liang <wangliangzz@inspur.com>
10
Message-Id: <20230424103902.45265-1-wangliangzz@126.com>
11
Reviewed-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
12
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
13
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
14
---
15
block/monitor/block-hmp-cmds.c | 10 ++++++----
16
1 file changed, 6 insertions(+), 4 deletions(-)
17
18
diff --git a/block/monitor/block-hmp-cmds.c b/block/monitor/block-hmp-cmds.c
19
index XXXXXXX..XXXXXXX 100644
20
--- a/block/monitor/block-hmp-cmds.c
21
+++ b/block/monitor/block-hmp-cmds.c
22
@@ -XXX,XX +XXX,XX @@ void hmp_commit(Monitor *mon, const QDict *qdict)
23
error_report("Device '%s' not found", device);
24
return;
25
}
26
- if (!blk_is_available(blk)) {
27
- error_report("Device '%s' has no medium", device);
28
- return;
29
- }
30
31
bs = bdrv_skip_implicit_filters(blk_bs(blk));
32
aio_context = bdrv_get_aio_context(bs);
33
aio_context_acquire(aio_context);
34
35
+ if (!blk_is_available(blk)) {
36
+ error_report("Device '%s' has no medium", device);
37
+ aio_context_release(aio_context);
38
+ return;
39
+ }
40
+
41
ret = bdrv_commit(bs);
42
43
aio_context_release(aio_context);
44
--
45
2.40.0