The following changes since commit 825b96dbcee23d134b691fc75618b59c5f53da32:

  Merge tag 'migration-20250310-pull-request' of https://gitlab.com/farosas/qemu into staging (2025-03-11 09:32:07 +0800)

are available in the Git repository at:

  https://repo.or.cz/qemu/kevin.git tags/for-upstream

for you to fetch changes up to a93c04f3cbe690877b3297a9df4767aa811fcd97:

  virtio-scsi: only expose cmd vqs via iothread-vq-mapping (2025-03-11 15:49:22 +0100)

----------------------------------------------------------------
Block layer patches

- virtio-scsi: add iothread-vq-mapping parameter
- Improve writethrough performance
- Fix missing zero init in bdrv_snapshot_goto()
- Code cleanup and iotests fixes

----------------------------------------------------------------
Kevin Wolf (8):
      block: Remove unused blk_op_is_blocked()
      block: Zero block driver state before reopening
      file-posix: Support FUA writes
      block/io: Ignore FUA with cache.no-flush=on
      aio: Create AioPolledEvent
      aio-posix: Factor out adjust_polling_time()
      aio-posix: Separate AioPolledEvent per AioHandler
      aio-posix: Adjust polling time also for new handlers

Stefan Hajnoczi (13):
      scsi-disk: drop unused SCSIDiskState->bh field
      dma: use current AioContext for dma_blk_io()
      scsi: track per-SCSIRequest AioContext
      scsi: introduce requests_lock
      virtio-scsi: introduce event and ctrl virtqueue locks
      virtio-scsi: protect events_dropped field
      virtio-scsi: perform TMFs in appropriate AioContexts
      virtio-blk: extract cleanup_iothread_vq_mapping() function
      virtio-blk: tidy up iothread_vq_mapping functions
      virtio: extract iothread-vq-mapping.h API
      virtio-scsi: add iothread-vq-mapping parameter
      virtio-scsi: handle ctrl virtqueue in main loop
      virtio-scsi: only expose cmd vqs via iothread-vq-mapping

Thomas Huth (1):
      iotests: Limit qsd-migrate to working formats

 include/block/aio.h                         |   5 +-
 include/block/raw-aio.h                     |   8 +-
 include/hw/scsi/scsi.h                      |   8 +-
 include/hw/virtio/iothread-vq-mapping.h     |  45 +++
 include/hw/virtio/virtio-scsi.h             |  15 +-
 include/system/block-backend-global-state.h |   1 -
 include/system/dma.h                        |   3 +-
 util/aio-posix.h                            |   1 +
 block/block-backend.c                       |  12 -
 block/file-posix.c                          |  26 +-
 block/io.c                                  |   4 +
 block/io_uring.c                            |  13 +-
 block/linux-aio.c                           |  24 +-
 block/snapshot.c                            |   1 +
 hw/block/virtio-blk.c                       | 132 +-------
 hw/ide/core.c                               |   3 +-
 hw/ide/macio.c                              |   3 +-
 hw/scsi/scsi-bus.c                          | 121 +++++--
 hw/scsi/scsi-disk.c                         |  24 +-
 hw/scsi/virtio-scsi-dataplane.c             | 103 ++++--
 hw/scsi/virtio-scsi.c                       | 502 ++++++++++++++------------
 hw/virtio/iothread-vq-mapping.c             | 131 ++++++++
 system/dma-helpers.c                        |   8 +-
 util/aio-posix.c                            | 114 ++++---
 util/async.c                                |   1 -
 hw/virtio/meson.build                       |   1 +
 meson.build                                 |   4 +
 tests/qemu-iotests/tests/qsd-migrate        |   2 +-
 28 files changed, 803 insertions(+), 512 deletions(-)
 create mode 100644 include/hw/virtio/iothread-vq-mapping.h
 create mode 100644 hw/virtio/iothread-vq-mapping.c

Commit fc4e394b28 removed the last caller of blk_op_is_blocked(). Remove
the now unused function.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20250206165331.379033-1-kwolf@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/system/block-backend-global-state.h |  1 -
 block/block-backend.c                       | 12 ------------
 2 files changed, 13 deletions(-)

diff --git a/include/system/block-backend-global-state.h b/include/system/block-backend-global-state.h
index XXXXXXX..XXXXXXX 100644
--- a/include/system/block-backend-global-state.h
+++ b/include/system/block-backend-global-state.h
@@ -XXX,XX +XXX,XX @@ bool blk_supports_write_perm(BlockBackend *blk);
 bool blk_is_sg(BlockBackend *blk);
 void blk_set_enable_write_cache(BlockBackend *blk, bool wce);
 int blk_get_flags(BlockBackend *blk);
-bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp);
 int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
                         Error **errp);
 void blk_add_aio_context_notifier(BlockBackend *blk,
diff --git a/block/block-backend.c b/block/block-backend.c
index XXXXXXX..XXXXXXX 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -XXX,XX +XXX,XX @@ void *blk_blockalign(BlockBackend *blk, size_t size)
     return qemu_blockalign(blk ? blk_bs(blk) : NULL, size);
 }
 
-bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
-{
-    BlockDriverState *bs = blk_bs(blk);
-    GLOBAL_STATE_CODE();
-    GRAPH_RDLOCK_GUARD_MAINLOOP();
-
-    if (!bs) {
-        return false;
-    }
-
-    return bdrv_op_is_blocked(bs, op, errp);
-}
-
 /**
  * Return BB's current AioContext. Note that this context may change
--
2.48.1

Block drivers assume in their .bdrv_open() implementation that their
state in bs->opaque has been zeroed; it is initially allocated with
g_malloc0() in bdrv_open_driver().

bdrv_snapshot_goto() needs to make sure that it is zeroed again before
calling drv->bdrv_open() to avoid that block drivers use stale values.

One symptom of this bug is VMDK running into a double free when the user
tries to apply an internal snapshot like 'qemu-img snapshot -a test
test.vmdk'. This should be a graceful error because VMDK doesn't support
internal snapshots.

==25507== Invalid free() / delete / delete[] / realloc()
==25507==    at 0x484B347: realloc (vg_replace_malloc.c:1801)
==25507==    by 0x54B592A: g_realloc (gmem.c:171)
==25507==    by 0x1B221D: vmdk_add_extent (../block/vmdk.c:570)
==25507==    by 0x1B1084: vmdk_open_sparse (../block/vmdk.c:1059)
==25507==    by 0x1AF3D8: vmdk_open (../block/vmdk.c:1371)
==25507==    by 0x1A2AE0: bdrv_snapshot_goto (../block/snapshot.c:299)
==25507==    by 0x205C77: img_snapshot (../qemu-img.c:3500)
==25507==    by 0x58FA087: (below main) (libc_start_call_main.h:58)
==25507==  Address 0x832f3e0 is 0 bytes inside a block of size 272 free'd
==25507==    at 0x4846B83: free (vg_replace_malloc.c:989)
==25507==    by 0x54AEAC4: g_free (gmem.c:208)
==25507==    by 0x1AF629: vmdk_close (../block/vmdk.c:2889)
==25507==    by 0x1A2A9C: bdrv_snapshot_goto (../block/snapshot.c:290)
==25507==    by 0x205C77: img_snapshot (../qemu-img.c:3500)
==25507==    by 0x58FA087: (below main) (libc_start_call_main.h:58)

This error was discovered by fuzzing qemu-img.

Cc: qemu-stable@nongnu.org
Closes: https://gitlab.com/qemu-project/qemu/-/issues/2853
Closes: https://gitlab.com/qemu-project/qemu/-/issues/2851
Reported-by: Denis Rastyogin <gerben@altlinux.org>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20250310104858.28221-1-kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/snapshot.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/block/snapshot.c b/block/snapshot.c
index XXXXXXX..XXXXXXX 100644
--- a/block/snapshot.c
+++ b/block/snapshot.c
@@ -XXX,XX +XXX,XX @@ int bdrv_snapshot_goto(BlockDriverState *bs,
     bdrv_graph_wrunlock();
 
     ret = bdrv_snapshot_goto(fallback_bs, snapshot_id, errp);
+    memset(bs->opaque, 0, drv->instance_size);
     open_ret = drv->bdrv_open(bs, options, bs->open_flags, &local_err);
     qobject_unref(options);
     if (open_ret < 0) {
--
2.48.1

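The invariant this fix restores - drivers may assume bs->opaque starts out
zeroed - can be reproduced in a stand-alone sketch (plain C, with calloc()
standing in for g_malloc0(); the names are illustrative, not QEMU APIs):

    #include <stdlib.h>
    #include <string.h>

    struct DriverState {
        char *buf;                  /* freed by drv_close(), left dangling */
    };

    /* Stands in for a block driver's close implementation */
    static void drv_close(struct DriverState *s)
    {
        free(s->buf);               /* does not set s->buf back to NULL */
    }

    /* Stands in for .bdrv_open(): relies on *s being zero-initialized */
    static void drv_open(struct DriverState *s)
    {
        if (!s->buf) {
            s->buf = malloc(16);
        }
    }

    int main(void)
    {
        struct DriverState *s = calloc(1, sizeof(*s)); /* g_malloc0() analogue */

        drv_open(s);
        drv_close(s);

        memset(s, 0, sizeof(*s));   /* the fix: re-establish the invariant */
        drv_open(s);                /* without the memset: s->buf is stale... */
        drv_close(s);               /* ...and this would be a double free */

        free(s);
        return 0;
    }

Without the memset(), the second drv_open() sees the stale s->buf pointer,
skips the allocation, and the final drv_close() frees it a second time -
the same shape as the VMDK double free in the valgrind trace above.
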
Until now, FUA was always emulated with a separate flush after the write
for file-posix. The overhead of processing a second request can reduce
performance significantly for a guest disk that has disabled the write
cache, especially if the host disk is already write through, too, and
the flush isn't actually doing anything.

Advertise support for REQ_FUA in write requests and implement it for
Linux AIO and io_uring using the RWF_DSYNC flag for write requests. The
thread pool still performs a separate fdatasync() call. This can be
improved later by using the pwritev2() syscall if available.

As an example, this is how fio numbers can be improved in some scenarios
with this patch (all using virtio-blk with cache=directsync on an nvme
block device for the VM, fio with ioengine=libaio,direct=1,sync=1):

                              | old           | with FUA support
------------------------------+---------------+-------------------
bs=4k, iodepth=1, numjobs=1   | 45.6k iops    | 56.1k iops
bs=4k, iodepth=1, numjobs=16  | 183.3k iops   | 236.0k iops
bs=4k, iodepth=16, numjobs=1  | 258.4k iops   | 311.1k iops

However, not all scenarios are clear wins. On another slower disk I saw
little to no improvement. In fact, in two corner case scenarios, I even
observed a regression, which I however consider acceptable:

1. On slow host disks in a write through cache mode, when the guest is
   using virtio-blk in a separate iothread so that polling can be
   enabled, and each completion is quickly followed up with a new
   request (so that polling gets it), it can happen that enabling FUA
   makes things slower - the additional very fast no-op flush we used to
   have gave the adaptive polling algorithm a success so that it kept
   polling. Without it, we only have the slow write request, which
   disables polling. This is a problem in the polling algorithm that
   will be fixed later in this series.

2. With a high queue depth, it can be beneficial to have flush requests
   for another reason: The optimisation in bdrv_co_flush() that flushes
   only once per write generation acts as a synchronisation mechanism
   that lets all requests complete at the same time. This can result in
   better batching and if the disk is very fast (I only saw this with a
   null_blk backend), this can make up for the overhead of the flush and
   improve throughput. In theory, we could optionally introduce a
   similar artificial latency in the normal completion path to achieve
   the same kind of completion batching. This is not implemented in this
   series.

Compatibility is not a concern for io_uring, it has supported RWF_DSYNC
from the start. Linux AIO started supporting it in Linux 4.13 and libaio
0.3.111. The kernel is not a problem for any supported build platform,
so it's not necessary to add runtime checks. However, openSUSE is still
stuck with an older libaio version that would break the build. We must
detect this at build time to avoid build failures.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20250307221634.71951-2-kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/block/raw-aio.h |  8 ++++++--
 block/file-posix.c      | 26 ++++++++++++++++++--------
 block/io_uring.c        | 13 ++++++++-----
 block/linux-aio.c       | 24 +++++++++++++++++++++---
 meson.build             |  4 ++++
 5 files changed, 57 insertions(+), 18 deletions(-)

diff --git a/include/block/raw-aio.h b/include/block/raw-aio.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/raw-aio.h
+++ b/include/block/raw-aio.h
@@ -XXX,XX +XXX,XX @@
 #define QEMU_RAW_AIO_H
 
 #include "block/aio.h"
+#include "block/block-common.h"
 #include "qemu/iov.h"
 
 /* AIO request types */
@@ -XXX,XX +XXX,XX @@ void laio_cleanup(LinuxAioState *s);
 
 /* laio_co_submit: submit I/O requests in the thread's current AioContext. */
 int coroutine_fn laio_co_submit(int fd, uint64_t offset, QEMUIOVector *qiov,
-                                int type, uint64_t dev_max_batch);
+                                int type, BdrvRequestFlags flags,
+                                uint64_t dev_max_batch);
 
 bool laio_has_fdsync(int);
+bool laio_has_fua(void);
 void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context);
 void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context);
 #endif
@@ -XXX,XX +XXX,XX @@ void luring_cleanup(LuringState *s);
 
 /* luring_co_submit: submit I/O requests in the thread's current AioContext. */
 int coroutine_fn luring_co_submit(BlockDriverState *bs, int fd, uint64_t offset,
-                                  QEMUIOVector *qiov, int type);
+                                  QEMUIOVector *qiov, int type,
+                                  BdrvRequestFlags flags);
 void luring_detach_aio_context(LuringState *s, AioContext *old_context);
 void luring_attach_aio_context(LuringState *s, AioContext *new_context);
 #endif
diff --git a/block/file-posix.c b/block/file-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/block/file-posix.c
+++ b/block/file-posix.c
@@ -XXX,XX +XXX,XX @@ static int fd_open(BlockDriverState *bs)
 }
 
 static int64_t raw_getlength(BlockDriverState *bs);
+static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs);
 
 typedef struct RawPosixAIOData {
     BlockDriverState *bs;
@@ -XXX,XX +XXX,XX @@ static int raw_open_common(BlockDriverState *bs, QDict *options,
 #endif
     s->needs_alignment = raw_needs_alignment(bs);
 
+    if (!s->use_linux_aio || laio_has_fua()) {
+        bs->supported_write_flags = BDRV_REQ_FUA;
+    }
+
     bs->supported_zero_flags = BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK;
     if (S_ISREG(st.st_mode)) {
         /* When extending regular files, we get zeros from the OS */
@@ -XXX,XX +XXX,XX @@ static inline bool raw_check_linux_aio(BDRVRawState *s)
 #endif
 
 static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr,
-                                   uint64_t bytes, QEMUIOVector *qiov, int type)
+                                   uint64_t bytes, QEMUIOVector *qiov, int type,
+                                   int flags)
 {
     BDRVRawState *s = bs->opaque;
     RawPosixAIOData acb;
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr,
 #ifdef CONFIG_LINUX_IO_URING
     } else if (raw_check_linux_io_uring(s)) {
         assert(qiov->size == bytes);
-        ret = luring_co_submit(bs, s->fd, offset, qiov, type);
+        ret = luring_co_submit(bs, s->fd, offset, qiov, type, flags);
         goto out;
 #endif
 #ifdef CONFIG_LINUX_AIO
     } else if (raw_check_linux_aio(s)) {
         assert(qiov->size == bytes);
-        ret = laio_co_submit(s->fd, offset, qiov, type,
+        ret = laio_co_submit(s->fd, offset, qiov, type, flags,
                              s->aio_max_batch);
         goto out;
 #endif
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr,
 
     assert(qiov->size == bytes);
     ret = raw_thread_pool_submit(handle_aiocb_rw, &acb);
+    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
+        /* TODO Use pwritev2() instead if it's available */
+        ret = raw_co_flush_to_disk(bs);
+    }
     goto out; /* Avoid the compiler err of unused label */
 
 out:
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset,
                                       int64_t bytes, QEMUIOVector *qiov,
                                       BdrvRequestFlags flags)
 {
-    return raw_co_prw(bs, &offset, bytes, qiov, QEMU_AIO_READ);
+    return raw_co_prw(bs, &offset, bytes, qiov, QEMU_AIO_READ, flags);
 }
 
 static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, int64_t offset,
                                        int64_t bytes, QEMUIOVector *qiov,
                                        BdrvRequestFlags flags)
 {
-    return raw_co_prw(bs, &offset, bytes, qiov, QEMU_AIO_WRITE);
+    return raw_co_prw(bs, &offset, bytes, qiov, QEMU_AIO_WRITE, flags);
 }
 
 static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs)
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs)
 
 #ifdef CONFIG_LINUX_IO_URING
     if (raw_check_linux_io_uring(s)) {
-        return luring_co_submit(bs, s->fd, 0, NULL, QEMU_AIO_FLUSH);
+        return luring_co_submit(bs, s->fd, 0, NULL, QEMU_AIO_FLUSH, 0);
     }
 #endif
 #ifdef CONFIG_LINUX_AIO
     if (s->has_laio_fdsync && raw_check_linux_aio(s)) {
-        return laio_co_submit(s->fd, 0, NULL, QEMU_AIO_FLUSH, 0);
+        return laio_co_submit(s->fd, 0, NULL, QEMU_AIO_FLUSH, 0, 0);
     }
 #endif
     return raw_thread_pool_submit(handle_aiocb_flush, &acb);
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_zone_append(BlockDriverState *bs,
     }
 
     trace_zbd_zone_append(bs, *offset >> BDRV_SECTOR_BITS);
-    return raw_co_prw(bs, offset, len, qiov, QEMU_AIO_ZONE_APPEND);
+    return raw_co_prw(bs, offset, len, qiov, QEMU_AIO_ZONE_APPEND, 0);
 }
 #endif
 
diff --git a/block/io_uring.c b/block/io_uring.c
index XXXXXXX..XXXXXXX 100644
--- a/block/io_uring.c
+++ b/block/io_uring.c
@@ -XXX,XX +XXX,XX @@ static void luring_deferred_fn(void *opaque)
  *
  */
 static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s,
-                            uint64_t offset, int type)
+                            uint64_t offset, int type, BdrvRequestFlags flags)
 {
     int ret;
     struct io_uring_sqe *sqes = &luringcb->sqeq;
+    int luring_flags;
 
     switch (type) {
     case QEMU_AIO_WRITE:
-        io_uring_prep_writev(sqes, fd, luringcb->qiov->iov,
-                             luringcb->qiov->niov, offset);
+        luring_flags = (flags & BDRV_REQ_FUA) ? RWF_DSYNC : 0;
+        io_uring_prep_writev2(sqes, fd, luringcb->qiov->iov,
+                              luringcb->qiov->niov, offset, luring_flags);
         break;
     case QEMU_AIO_ZONE_APPEND:
         io_uring_prep_writev(sqes, fd, luringcb->qiov->iov,
@@ -XXX,XX +XXX,XX @@ static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s,
 }
 
 int coroutine_fn luring_co_submit(BlockDriverState *bs, int fd, uint64_t offset,
-                                  QEMUIOVector *qiov, int type)
+                                  QEMUIOVector *qiov, int type,
+                                  BdrvRequestFlags flags)
 {
     int ret;
     AioContext *ctx = qemu_get_current_aio_context();
@@ -XXX,XX +XXX,XX @@ int coroutine_fn luring_co_submit(BlockDriverState *bs, int fd, uint64_t offset,
     };
     trace_luring_co_submit(bs, s, &luringcb, fd, offset, qiov ? qiov->size : 0,
                            type);
-    ret = luring_do_submit(fd, &luringcb, s, offset, type);
+    ret = luring_do_submit(fd, &luringcb, s, offset, type, flags);
 
     if (ret < 0) {
         return ret;
diff --git a/block/linux-aio.c b/block/linux-aio.c
index XXXXXXX..XXXXXXX 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -XXX,XX +XXX,XX @@ static void laio_deferred_fn(void *opaque)
 }
 
 static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
-                          int type, uint64_t dev_max_batch)
+                          int type, BdrvRequestFlags flags,
+                          uint64_t dev_max_batch)
 {
     LinuxAioState *s = laiocb->ctx;
     struct iocb *iocbs = &laiocb->iocb;
     QEMUIOVector *qiov = laiocb->qiov;
+    int laio_flags;
 
     switch (type) {
     case QEMU_AIO_WRITE:
+#ifdef HAVE_IO_PREP_PWRITEV2
+        laio_flags = (flags & BDRV_REQ_FUA) ? RWF_DSYNC : 0;
+        io_prep_pwritev2(iocbs, fd, qiov->iov, qiov->niov, offset, laio_flags);
+#else
+        assert(flags == 0);
         io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
+#endif
         break;
     case QEMU_AIO_ZONE_APPEND:
         io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
@@ -XXX,XX +XXX,XX @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
 }
 
 int coroutine_fn laio_co_submit(int fd, uint64_t offset, QEMUIOVector *qiov,
-                                int type, uint64_t dev_max_batch)
+                                int type, BdrvRequestFlags flags,
+                                uint64_t dev_max_batch)
 {
     int ret;
     AioContext *ctx = qemu_get_current_aio_context();
@@ -XXX,XX +XXX,XX @@ int coroutine_fn laio_co_submit(int fd, uint64_t offset, QEMUIOVector *qiov,
         .qiov = qiov,
     };
 
-    ret = laio_do_submit(fd, &laiocb, offset, type, dev_max_batch);
+    ret = laio_do_submit(fd, &laiocb, offset, type, flags, dev_max_batch);
     if (ret < 0) {
         return ret;
     }
@@ -XXX,XX +XXX,XX @@ bool laio_has_fdsync(int fd)
     io_destroy(ctx);
     return (ret == -EINVAL) ? false : true;
 }
+
+bool laio_has_fua(void)
+{
+#ifdef HAVE_IO_PREP_PWRITEV2
+    return true;
+#else
+    return false;
+#endif
+}
diff --git a/meson.build b/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/meson.build
+++ b/meson.build
@@ -XXX,XX +XXX,XX @@ config_host_data.set('HAVE_OPTRESET',
                      cc.has_header_symbol('getopt.h', 'optreset'))
 config_host_data.set('HAVE_IPPROTO_MPTCP',
                      cc.has_header_symbol('netinet/in.h', 'IPPROTO_MPTCP'))
+if libaio.found()
+  config_host_data.set('HAVE_IO_PREP_PWRITEV2',
+                       cc.has_header_symbol('libaio.h', 'io_prep_pwritev2'))
+endif
 
 # has_member
 config_host_data.set('HAVE_SIGEV_NOTIFY_THREAD_ID',
--
2.48.1

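For readers who want to see the flag in isolation: the behaviour that the
Linux AIO and io_uring paths get via RWF_DSYNC can be tried with a plain
pwritev2() call. A minimal stand-alone sketch (not one of the QEMU code
paths; assumes a kernel with RWF_DSYNC support and trims error handling):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>
    #include <unistd.h>

    int main(void)
    {
        char data[4096];
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
        int fd = open("fua-test.img", O_WRONLY | O_CREAT, 0644);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        memset(data, 0xaa, sizeof(data));

        /*
         * RWF_DSYNC gives this one write O_DSYNC semantics: when the call
         * returns, the data is durable without a separate fdatasync() -
         * which is exactly the FUA behaviour.
         */
        if (pwritev2(fd, &iov, 1, 0, RWF_DSYNC) < 0) {
            perror("pwritev2");
            return 1;
        }
        close(fd);
        return 0;
    }

This is the one-syscall replacement for the write-plus-fdatasync pair that
the thread pool fallback in the patch still performs.
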
For block drivers that don't advertise FUA support, we already call
bdrv_co_flush(), which considers BDRV_O_NO_FLUSH. However, drivers that
do support FUA still see the FUA flag with BDRV_O_NO_FLUSH and get the
associated performance penalty that cache.no-flush=on was supposed to
avoid.

Clear FUA for write requests if BDRV_O_NO_FLUSH is set.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20250307221634.71951-3-kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/io.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/block/io.c b/block/io.c
index XXXXXXX..XXXXXXX 100644
--- a/block/io.c
+++ b/block/io.c
@@ -XXX,XX +XXX,XX @@ bdrv_driver_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
         return -ENOMEDIUM;
     }
 
+    if (bs->open_flags & BDRV_O_NO_FLUSH) {
+        flags &= ~BDRV_REQ_FUA;
+    }
+
     if ((flags & BDRV_REQ_FUA) &&
         (~bs->supported_write_flags & BDRV_REQ_FUA)) {
         flags &= ~BDRV_REQ_FUA;
--
2.48.1

As a preparation for having multiple adaptive polling states per
AioContext, move the 'ns' field into a separate struct.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20250307221634.71951-4-kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/block/aio.h |  6 +++++-
 util/aio-posix.c    | 31 ++++++++++++++++---------------
 util/async.c        |  3 ++-
 3 files changed, 23 insertions(+), 17 deletions(-)

diff --git a/include/block/aio.h b/include/block/aio.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -XXX,XX +XXX,XX @@ struct BHListSlice {
 
 typedef QSLIST_HEAD(, AioHandler) AioHandlerSList;
 
+typedef struct AioPolledEvent {
+    int64_t ns;        /* current polling time in nanoseconds */
+} AioPolledEvent;
+
 struct AioContext {
     GSource source;
 
@@ -XXX,XX +XXX,XX @@ struct AioContext {
     int poll_disable_cnt;
 
     /* Polling mode parameters */
-    int64_t poll_ns;        /* current polling time in nanoseconds */
+    AioPolledEvent poll;
     int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
     int64_t poll_grow;      /* polling time growth factor */
     int64_t poll_shrink;    /* polling time shrink factor */
diff --git a/util/aio-posix.c b/util/aio-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -XXX,XX +XXX,XX @@ static bool try_poll_mode(AioContext *ctx, AioHandlerList *ready_list,
         return false;
     }
 
-    max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns);
+    max_ns = qemu_soonest_timeout(*timeout, ctx->poll.ns);
     if (max_ns && !ctx->fdmon_ops->need_wait(ctx)) {
         /*
          * Enable poll mode. It pairs with the poll_set_started() in
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
     if (ctx->poll_max_ns) {
         int64_t block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;
 
-        if (block_ns <= ctx->poll_ns) {
+        if (block_ns <= ctx->poll.ns) {
             /* This is the sweet spot, no adjustment needed */
         } else if (block_ns > ctx->poll_max_ns) {
             /* We'd have to poll for too long, poll less */
-            int64_t old = ctx->poll_ns;
+            int64_t old = ctx->poll.ns;
 
             if (ctx->poll_shrink) {
-                ctx->poll_ns /= ctx->poll_shrink;
+                ctx->poll.ns /= ctx->poll_shrink;
             } else {
-                ctx->poll_ns = 0;
+                ctx->poll.ns = 0;
             }
 
-            trace_poll_shrink(ctx, old, ctx->poll_ns);
-        } else if (ctx->poll_ns < ctx->poll_max_ns &&
+            trace_poll_shrink(ctx, old, ctx->poll.ns);
+        } else if (ctx->poll.ns < ctx->poll_max_ns &&
                    block_ns < ctx->poll_max_ns) {
             /* There is room to grow, poll longer */
-            int64_t old = ctx->poll_ns;
+            int64_t old = ctx->poll.ns;
             int64_t grow = ctx->poll_grow;
 
             if (grow == 0) {
                 grow = 2;
             }
 
-            if (ctx->poll_ns) {
-                ctx->poll_ns *= grow;
+            if (ctx->poll.ns) {
+                ctx->poll.ns *= grow;
             } else {
-                ctx->poll_ns = 4000; /* start polling at 4 microseconds */
+                ctx->poll.ns = 4000; /* start polling at 4 microseconds */
             }
 
-            if (ctx->poll_ns > ctx->poll_max_ns) {
-                ctx->poll_ns = ctx->poll_max_ns;
+            if (ctx->poll.ns > ctx->poll_max_ns) {
+                ctx->poll.ns = ctx->poll_max_ns;
             }
 
-            trace_poll_grow(ctx, old, ctx->poll_ns);
+            trace_poll_grow(ctx, old, ctx->poll.ns);
         }
     }
 
@@ -XXX,XX +XXX,XX @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
     /* No thread synchronization here, it doesn't matter if an incorrect value
      * is used once.
      */
+    ctx->poll.ns = 0;
+
     ctx->poll_max_ns = max_ns;
-    ctx->poll_ns = 0;
     ctx->poll_grow = grow;
     ctx->poll_shrink = shrink;
 
diff --git a/util/async.c b/util/async.c
index XXXXXXX..XXXXXXX 100644
--- a/util/async.c
+++ b/util/async.c
@@ -XXX,XX +XXX,XX @@ AioContext *aio_context_new(Error **errp)
     qemu_rec_mutex_init(&ctx->lock);
     timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);
 
-    ctx->poll_ns = 0;
+    ctx->poll.ns = 0;
+
     ctx->poll_max_ns = 0;
     ctx->poll_grow = 0;
     ctx->poll_shrink = 0;
--
2.48.1

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20250307221634.71951-5-kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 util/aio-posix.c | 77 ++++++++++++++++++++++++++----------------------
 1 file changed, 41 insertions(+), 36 deletions(-)

diff --git a/util/aio-posix.c b/util/aio-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -XXX,XX +XXX,XX @@ static bool try_poll_mode(AioContext *ctx, AioHandlerList *ready_list,
     return false;
 }
 
+static void adjust_polling_time(AioContext *ctx, AioPolledEvent *poll,
+                                int64_t block_ns)
+{
+    if (block_ns <= poll->ns) {
+        /* This is the sweet spot, no adjustment needed */
+    } else if (block_ns > ctx->poll_max_ns) {
+        /* We'd have to poll for too long, poll less */
+        int64_t old = poll->ns;
+
+        if (ctx->poll_shrink) {
+            poll->ns /= ctx->poll_shrink;
+        } else {
+            poll->ns = 0;
+        }
+
+        trace_poll_shrink(ctx, old, poll->ns);
+    } else if (poll->ns < ctx->poll_max_ns &&
+               block_ns < ctx->poll_max_ns) {
+        /* There is room to grow, poll longer */
+        int64_t old = poll->ns;
+        int64_t grow = ctx->poll_grow;
+
+        if (grow == 0) {
+            grow = 2;
+        }
+
+        if (poll->ns) {
+            poll->ns *= grow;
+        } else {
+            poll->ns = 4000; /* start polling at 4 microseconds */
+        }
+
+        if (poll->ns > ctx->poll_max_ns) {
+            poll->ns = ctx->poll_max_ns;
+        }
+
+        trace_poll_grow(ctx, old, poll->ns);
+    }
+}
+
 bool aio_poll(AioContext *ctx, bool blocking)
 {
     AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list);
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
     /* Adjust polling time */
     if (ctx->poll_max_ns) {
         int64_t block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;
-
-        if (block_ns <= ctx->poll.ns) {
-            /* This is the sweet spot, no adjustment needed */
-        } else if (block_ns > ctx->poll_max_ns) {
-            /* We'd have to poll for too long, poll less */
-            int64_t old = ctx->poll.ns;
-
-            if (ctx->poll_shrink) {
-                ctx->poll.ns /= ctx->poll_shrink;
-            } else {
-                ctx->poll.ns = 0;
-            }
-
-            trace_poll_shrink(ctx, old, ctx->poll.ns);
-        } else if (ctx->poll.ns < ctx->poll_max_ns &&
-                   block_ns < ctx->poll_max_ns) {
-            /* There is room to grow, poll longer */
-            int64_t old = ctx->poll.ns;
-            int64_t grow = ctx->poll_grow;
-
-            if (grow == 0) {
-                grow = 2;
-            }
-
-            if (ctx->poll.ns) {
-                ctx->poll.ns *= grow;
-            } else {
-                ctx->poll.ns = 4000; /* start polling at 4 microseconds */
-            }
-
-            if (ctx->poll.ns > ctx->poll_max_ns) {
-                ctx->poll.ns = ctx->poll_max_ns;
-            }
-
-            trace_poll_grow(ctx, old, ctx->poll.ns);
-        }
+        adjust_polling_time(ctx, &ctx->poll, block_ns);
     }
 
     progress |= aio_bh_poll(ctx);
--
2.48.1

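To get a feeling for the policy being factored out here, the grow/shrink
behaviour can be simulated outside of QEMU. A sketch with made-up numbers,
using the poll_grow=2 and poll_shrink=0 defaults from the code above:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define POLL_MAX_NS 32000   /* stand-in for ctx->poll_max_ns */

    /* Same policy as adjust_polling_time() with grow=2, shrink=0 */
    static int64_t adjust(int64_t ns, int64_t block_ns)
    {
        if (block_ns <= ns) {
            /* sweet spot, no adjustment needed */
        } else if (block_ns > POLL_MAX_NS) {
            ns = 0;                         /* too expensive, stop polling */
        } else if (ns < POLL_MAX_NS && block_ns < POLL_MAX_NS) {
            ns = ns ? ns * 2 : 4000;        /* start at 4 us, then double */
            if (ns > POLL_MAX_NS) {
                ns = POLL_MAX_NS;
            }
        }
        return ns;
    }

    int main(void)
    {
        int64_t ns = 0;
        int i;

        /* An event that fires every 10 us ramps the polling time up... */
        for (i = 0; i < 5; i++) {
            ns = adjust(ns, 10000);
            printf("fast event: polling time now %" PRId64 " ns\n", ns);
        }
        /* ...and a single slow 1 ms event switches polling off again. */
        ns = adjust(ns, 1000000);
        printf("slow event: polling time now %" PRId64 " ns\n", ns);
        return 0;
    }

The last two steps show exactly the pathology described in the FUA patch
earlier in this series: one slow event resets the polling time to zero,
which the next patch addresses by keeping the state per handler.
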
Adaptive polling has a big problem: It doesn't consider that an event
loop can wait for many different events that may have very different
typical latencies.

For example, think of a guest that tends to send a new I/O request soon
after the previous I/O request completes, but the storage on the host is
rather slow. In this case, getting the new request from the guest quickly
means that polling is enabled, but the next thing is performing the I/O
request on the backend, which is slow and disables polling again for the
next guest request. This means that in such a scenario, polling could
help for every other event, but is only ever enabled when it can't
succeed.

In order to fix this, keep a separate AioPolledEvent for each
AioHandler. We will then know that the backend file descriptor always
has a high latency and isn't worth polling for, but we also know that
the guest is always fast and we should poll for it. This solves at least
half of the problem, we can now keep polling for those cases where it
makes sense and get the improved performance from it.

Since the event loop doesn't know which event will be next, we still do
some unnecessary polling while we're waiting for the slow disk. I made
some attempts to be more clever than just randomly growing and shrinking
the polling time, and even to let callers be explicit about when they
expect a new event, but so far this hasn't resulted in improved
performance or even caused performance regressions. For now, let's just
fix the part that is easy enough to fix, we can revisit the rest later.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20250307221634.71951-6-kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/block/aio.h |  1 -
 util/aio-posix.h    |  1 +
 util/aio-posix.c    | 26 ++++++++++++++++++++++----
 util/async.c        |  2 --
 4 files changed, 23 insertions(+), 7 deletions(-)

diff --git a/include/block/aio.h b/include/block/aio.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -XXX,XX +XXX,XX @@ struct AioContext {
     int poll_disable_cnt;
 
     /* Polling mode parameters */
-    AioPolledEvent poll;
     int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
     int64_t poll_grow;      /* polling time growth factor */
     int64_t poll_shrink;    /* polling time shrink factor */
diff --git a/util/aio-posix.h b/util/aio-posix.h
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-posix.h
+++ b/util/aio-posix.h
@@ -XXX,XX +XXX,XX @@ struct AioHandler {
 #endif
     int64_t poll_idle_timeout; /* when to stop userspace polling */
     bool poll_ready;        /* has polling detected an event? */
+    AioPolledEvent poll;
 };
 
 /* Add a handler to a ready list */
diff --git a/util/aio-posix.c b/util/aio-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -XXX,XX +XXX,XX @@ static bool run_poll_handlers(AioContext *ctx, AioHandlerList *ready_list,
 static bool try_poll_mode(AioContext *ctx, AioHandlerList *ready_list,
                           int64_t *timeout)
 {
+    AioHandler *node;
     int64_t max_ns;
 
     if (QLIST_EMPTY_RCU(&ctx->poll_aio_handlers)) {
         return false;
     }
 
-    max_ns = qemu_soonest_timeout(*timeout, ctx->poll.ns);
+    max_ns = 0;
+    QLIST_FOREACH(node, &ctx->poll_aio_handlers, node_poll) {
+        max_ns = MAX(max_ns, node->poll.ns);
+    }
+    max_ns = qemu_soonest_timeout(*timeout, max_ns);
+
     if (max_ns && !ctx->fdmon_ops->need_wait(ctx)) {
         /*
          * Enable poll mode. It pairs with the poll_set_started() in
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
 
     /* Adjust polling time */
     if (ctx->poll_max_ns) {
+        AioHandler *node;
         int64_t block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;
-        adjust_polling_time(ctx, &ctx->poll, block_ns);
+
+        QLIST_FOREACH(node, &ctx->poll_aio_handlers, node_poll) {
+            if (QLIST_IS_INSERTED(node, node_ready)) {
+                adjust_polling_time(ctx, &node->poll, block_ns);
+            }
+        }
     }
 
     progress |= aio_bh_poll(ctx);
@@ -XXX,XX +XXX,XX @@ void aio_context_use_g_source(AioContext *ctx)
 void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                  int64_t grow, int64_t shrink, Error **errp)
 {
+    AioHandler *node;
+
+    qemu_lockcnt_inc(&ctx->list_lock);
+    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+        node->poll.ns = 0;
+    }
+    qemu_lockcnt_dec(&ctx->list_lock);
+
     /* No thread synchronization here, it doesn't matter if an incorrect value
      * is used once.
      */
-    ctx->poll.ns = 0;
-
     ctx->poll_max_ns = max_ns;
     ctx->poll_grow = grow;
     ctx->poll_shrink = shrink;
diff --git a/util/async.c b/util/async.c
index XXXXXXX..XXXXXXX 100644
--- a/util/async.c
+++ b/util/async.c
@@ -XXX,XX +XXX,XX @@ AioContext *aio_context_new(Error **errp)
     qemu_rec_mutex_init(&ctx->lock);
     timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);
 
-    ctx->poll.ns = 0;
-
     ctx->poll_max_ns = 0;
     ctx->poll_grow = 0;
     ctx->poll_shrink = 0;
--
2.48.1

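The effect of the per-handler state can be illustrated with a toy version
of the try_poll_mode() loop above (simplified structs and hypothetical
handler names, not the QEMU types):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    /* Toy stand-in for AioHandler with its embedded AioPolledEvent */
    struct handler {
        const char *name;
        int64_t poll_ns;    /* per-handler polling time, adapted separately */
    };

    int main(void)
    {
        /* After adaptation: the virtqueue kick fd turned out to be worth
         * polling for, the slow disk completion fd did not. */
        struct handler handlers[] = {
            { "virtqueue kick fd", 16000 },
            { "slow disk completion fd", 0 },
        };
        int64_t max_ns = 0;
        unsigned i;

        /* Like try_poll_mode() after this patch: poll for the maximum of
         * the per-handler times, so the fast event keeps its polling time
         * even though the slow one never earns any. */
        for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++) {
            max_ns = MAX(max_ns, handlers[i].poll_ns);
        }
        printf("event loop polls for %" PRId64 " ns\n", max_ns);
        return 0;
    }

With a single shared polling time, the slow completion would repeatedly
shrink it to zero and the fast virtqueue event would lose polling as well.
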
aio_dispatch_handler() adds handlers to ctx->poll_aio_handlers if
polling should be enabled. If we call adjust_polling_time() for all
polling handlers before this, new polling handlers are still left at
poll->ns = 0 and polling is only actually enabled after the next event.
Move the adjust_polling_time() call after aio_dispatch_handler().

This fixes test-nested-aio-poll, which expects that polling becomes
effective the first time around.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20250311141912.135657-1-kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 util/aio-posix.c | 28 +++++++++++++++++-----------
 1 file changed, 17 insertions(+), 11 deletions(-)

diff --git a/util/aio-posix.c b/util/aio-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -XXX,XX +XXX,XX @@
 /* Stop userspace polling on a handler if it isn't active for some time */
 #define POLL_IDLE_INTERVAL_NS (7 * NANOSECONDS_PER_SECOND)
 
+static void adjust_polling_time(AioContext *ctx, AioPolledEvent *poll,
+                                int64_t block_ns);
+
 bool aio_poll_disabled(AioContext *ctx)
 {
     return qatomic_read(&ctx->poll_disable_cnt);
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node)
  * scanning all handlers with aio_dispatch_handlers().
  */
 static bool aio_dispatch_ready_handlers(AioContext *ctx,
-                                        AioHandlerList *ready_list)
+                                        AioHandlerList *ready_list,
+                                        int64_t block_ns)
 {
     bool progress = false;
     AioHandler *node;
@@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_ready_handlers(AioContext *ctx,
     while ((node = QLIST_FIRST(ready_list))) {
         QLIST_REMOVE(node, node_ready);
         progress = aio_dispatch_handler(ctx, node) || progress;
+
+        /*
+         * Adjust polling time only after aio_dispatch_handler(), which can
+         * add the handler to ctx->poll_aio_handlers.
+         */
+        if (ctx->poll_max_ns && QLIST_IS_INSERTED(node, node_poll)) {
+            adjust_polling_time(ctx, &node->poll, block_ns);
+        }
     }
 
     return progress;
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
     bool use_notify_me;
     int64_t timeout;
     int64_t start = 0;
+    int64_t block_ns = 0;
 
     /*
      * There cannot be two concurrent aio_poll calls for the same AioContext (or
@@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking)
 
     aio_notify_accept(ctx);
 
-    /* Adjust polling time */
+    /* Calculate blocked time for adaptive polling */
     if (ctx->poll_max_ns) {
-        AioHandler *node;
-        int64_t block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;
-
-        QLIST_FOREACH(node, &ctx->poll_aio_handlers, node_poll) {
-            if (QLIST_IS_INSERTED(node, node_ready)) {
-                adjust_polling_time(ctx, &node->poll, block_ns);
-            }
-        }
+        block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;
     }
 
     progress |= aio_bh_poll(ctx);
-    progress |= aio_dispatch_ready_handlers(ctx, &ready_list);
+    progress |= aio_dispatch_ready_handlers(ctx, &ready_list, block_ns);
 
     aio_free_deleted_handlers(ctx);
 
--
2.48.1

From: Thomas Huth <thuth@redhat.com>

qsd-migrate is currently only working for raw, qcow2 and qed.
Other formats are failing, e.g. because they don't support migration.
Thus let's limit this test to the three usable formats now.

Suggested-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Thomas Huth <thuth@redhat.com>
Message-ID: <20250224214058.205889-1-thuth@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 tests/qemu-iotests/tests/qsd-migrate | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/qemu-iotests/tests/qsd-migrate b/tests/qemu-iotests/tests/qsd-migrate
index XXXXXXX..XXXXXXX 100755
--- a/tests/qemu-iotests/tests/qsd-migrate
+++ b/tests/qemu-iotests/tests/qsd-migrate
@@ -XXX,XX +XXX,XX @@ import iotests
 
 from iotests import filter_qemu_io, filter_qtest
 
-iotests.script_initialize(supported_fmts=['generic'],
+iotests.script_initialize(supported_fmts=['qcow2', 'qed', 'raw'],
                           supported_protocols=['file'],
                           supported_platforms=['linux'])
--
2.48.1

From: Stefan Hajnoczi <stefanha@redhat.com>

Commit 71544d30a6f8 ("scsi: push request restart to SCSIDevice") removed
the only user of SCSIDiskState->bh.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20250311132616.1049687-2-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 hw/scsi/scsi-disk.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/scsi/scsi-disk.c
+++ b/hw/scsi/scsi-disk.c
@@ -XXX,XX +XXX,XX @@ struct SCSIDiskState {
     uint64_t max_unmap_size;
     uint64_t max_io_size;
     uint32_t quirks;
-    QEMUBH *bh;
     char *version;
     char *serial;
     char *vendor;
--
2.48.1

From: Stefan Hajnoczi <stefanha@redhat.com>

In the past a single AioContext was used for block I/O and it was
fetched using blk_get_aio_context(). Nowadays the block layer supports
running I/O from any AioContext and multiple AioContexts at the same
time. Remove the dma_blk_io() AioContext argument and use the current
AioContext instead.

This makes calling the function easier and enables multiple IOThreads to
use dma_blk_io() concurrently for the same block device.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20250311132616.1049687-3-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
include/system/dma.h | 3 +--
hw/ide/core.c | 3 +--
hw/ide/macio.c | 3 +--
hw/scsi/scsi-disk.c | 6 ++----
system/dma-helpers.c | 8 ++++----
5 files changed, 9 insertions(+), 14 deletions(-)

diff --git a/include/system/dma.h b/include/system/dma.h
index XXXXXXX..XXXXXXX 100644
--- a/include/system/dma.h
+++ b/include/system/dma.h
@@ -XXX,XX +XXX,XX @@ typedef BlockAIOCB *DMAIOFunc(int64_t offset, QEMUIOVector *iov,
BlockCompletionFunc *cb, void *cb_opaque,
void *opaque);

-BlockAIOCB *dma_blk_io(AioContext *ctx,
- QEMUSGList *sg, uint64_t offset, uint32_t align,
+BlockAIOCB *dma_blk_io(QEMUSGList *sg, uint64_t offset, uint32_t align,
DMAIOFunc *io_func, void *io_func_opaque,
BlockCompletionFunc *cb, void *opaque, DMADirection dir);
BlockAIOCB *dma_blk_read(BlockBackend *blk,
diff --git a/hw/ide/core.c b/hw/ide/core.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -XXX,XX +XXX,XX @@ static void ide_dma_cb(void *opaque, int ret)
BDRV_SECTOR_SIZE, ide_dma_cb, s);
break;
case IDE_DMA_TRIM:
- s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
- &s->sg, offset, BDRV_SECTOR_SIZE,
+ s->bus->dma->aiocb = dma_blk_io(&s->sg, offset, BDRV_SECTOR_SIZE,
ide_issue_trim, s, ide_dma_cb, s,
DMA_DIRECTION_TO_DEVICE);
break;
diff --git a/hw/ide/macio.c b/hw/ide/macio.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/ide/macio.c
+++ b/hw/ide/macio.c
@@ -XXX,XX +XXX,XX @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
pmac_ide_transfer_cb, io);
break;
case IDE_DMA_TRIM:
- s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk), &s->sg,
- offset, 0x1, ide_issue_trim, s,
+ s->bus->dma->aiocb = dma_blk_io(&s->sg, offset, 0x1, ide_issue_trim, s,
pmac_ide_transfer_cb, io,
DMA_DIRECTION_TO_DEVICE);
break;
diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/scsi/scsi-disk.c
+++ b/hw/scsi/scsi-disk.c
@@ -XXX,XX +XXX,XX @@ static void scsi_do_read(SCSIDiskReq *r, int ret)
if (r->req.sg) {
dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
r->req.residual -= r->req.sg->size;
- r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
- r->req.sg, r->sector << BDRV_SECTOR_BITS,
+ r->req.aiocb = dma_blk_io(r->req.sg, r->sector << BDRV_SECTOR_BITS,
BDRV_SECTOR_SIZE,
sdc->dma_readv, r, scsi_dma_complete, r,
DMA_DIRECTION_FROM_DEVICE);
@@ -XXX,XX +XXX,XX @@ static void scsi_write_data(SCSIRequest *req)
if (r->req.sg) {
dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
r->req.residual -= r->req.sg->size;
- r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
- r->req.sg, r->sector << BDRV_SECTOR_BITS,
+ r->req.aiocb = dma_blk_io(r->req.sg, r->sector << BDRV_SECTOR_BITS,
BDRV_SECTOR_SIZE,
sdc->dma_writev, r, scsi_dma_complete, r,
DMA_DIRECTION_TO_DEVICE);
diff --git a/system/dma-helpers.c b/system/dma-helpers.c
index XXXXXXX..XXXXXXX 100644
--- a/system/dma-helpers.c
+++ b/system/dma-helpers.c
@@ -XXX,XX +XXX,XX @@ static const AIOCBInfo dma_aiocb_info = {
.cancel_async = dma_aio_cancel,
};

-BlockAIOCB *dma_blk_io(AioContext *ctx,
+BlockAIOCB *dma_blk_io(
QEMUSGList *sg, uint64_t offset, uint32_t align,
DMAIOFunc *io_func, void *io_func_opaque,
BlockCompletionFunc *cb,
@@ -XXX,XX +XXX,XX @@ BlockAIOCB *dma_blk_io(AioContext *ctx,

dbs->acb = NULL;
dbs->sg = sg;
- dbs->ctx = ctx;
+ dbs->ctx = qemu_get_current_aio_context();
dbs->offset = offset;
dbs->align = align;
dbs->sg_cur_index = 0;
@@ -XXX,XX +XXX,XX @@ BlockAIOCB *dma_blk_read(BlockBackend *blk,
QEMUSGList *sg, uint64_t offset, uint32_t align,
void (*cb)(void *opaque, int ret), void *opaque)
{
- return dma_blk_io(blk_get_aio_context(blk), sg, offset, align,
+ return dma_blk_io(sg, offset, align,
dma_blk_read_io_func, blk, cb, opaque,
DMA_DIRECTION_FROM_DEVICE);
}
@@ -XXX,XX +XXX,XX @@ BlockAIOCB *dma_blk_write(BlockBackend *blk,
QEMUSGList *sg, uint64_t offset, uint32_t align,
void (*cb)(void *opaque, int ret), void *opaque)
{
- return dma_blk_io(blk_get_aio_context(blk), sg, offset, align,
+ return dma_blk_io(sg, offset, align,
dma_blk_write_io_func, blk, cb, opaque,
DMA_DIRECTION_TO_DEVICE);
}
--
2.48.1
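To illustrate the calling convention change above: the context is captured at
submission time instead of being passed in by every caller, so each submitter
automatically completes in its own event loop. A minimal standalone C sketch
of that pattern follows (editorial illustration, not QEMU code; a thread-local
pointer stands in for qemu_get_current_aio_context(), and all names are
hypothetical):

#include <stdio.h>

typedef struct AioCtx { const char *name; } AioCtx;

/* Stand-in for qemu_get_current_aio_context(): one context per thread. */
static _Thread_local AioCtx *current_ctx;

typedef struct DmaOp { AioCtx *ctx; } DmaOp;

/* No AioCtx parameter: the submitting thread's context is recorded here,
 * and the completion callback would later be scheduled in op->ctx. */
static void dma_submit(DmaOp *op)
{
    op->ctx = current_ctx;
}

int main(void)
{
    AioCtx iothread0 = { "iothread0" };
    DmaOp op;

    current_ctx = &iothread0; /* as if running inside an IOThread */
    dma_submit(&op);
    printf("completion will run in %s\n", op.ctx->name);
    return 0;
}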
From: Stefan Hajnoczi <stefanha@redhat.com>

Until now, a SCSIDevice's I/O requests have run in a single AioContext.
In order to support multiple IOThreads it will be necessary to move to
the concept of a per-SCSIRequest AioContext.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20250311132616.1049687-4-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
include/hw/scsi/scsi.h | 1 +
hw/scsi/scsi-bus.c | 1 +
hw/scsi/scsi-disk.c | 17 ++++++-----------
3 files changed, 8 insertions(+), 11 deletions(-)

diff --git a/include/hw/scsi/scsi.h b/include/hw/scsi/scsi.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/scsi/scsi.h
+++ b/include/hw/scsi/scsi.h
@@ -XXX,XX +XXX,XX @@ struct SCSIRequest {
SCSIBus *bus;
SCSIDevice *dev;
const SCSIReqOps *ops;
+ AioContext *ctx;
uint32_t refcount;
uint32_t tag;
uint32_t lun;
diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/scsi/scsi-bus.c
+++ b/hw/scsi/scsi-bus.c
@@ -XXX,XX +XXX,XX @@ invalid_opcode:
}
}

+ req->ctx = qemu_get_current_aio_context();
req->cmd = cmd;
req->residual = req->cmd.xfer;

diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/scsi/scsi-disk.c
+++ b/hw/scsi/scsi-disk.c
@@ -XXX,XX +XXX,XX @@ static void scsi_aio_complete(void *opaque, int ret)
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

- /* The request must only run in the BlockBackend's AioContext */
- assert(blk_get_aio_context(s->qdev.conf.blk) ==
- qemu_get_current_aio_context());
+ /* The request must run in its AioContext */
+ assert(r->req.ctx == qemu_get_current_aio_context());

assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
@@ -XXX,XX +XXX,XX @@ static void scsi_dma_complete(void *opaque, int ret)

static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
{
- SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
uint32_t n;

- /* The request must only run in the BlockBackend's AioContext */
- assert(blk_get_aio_context(s->qdev.conf.blk) ==
- qemu_get_current_aio_context());
+ /* The request must run in its AioContext */
+ assert(r->req.ctx == qemu_get_current_aio_context());

assert(r->req.aiocb == NULL);
if (scsi_disk_req_check_error(r, ret, ret > 0)) {
@@ -XXX,XX +XXX,XX @@ static void scsi_read_data(SCSIRequest *req)

static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
{
- SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
uint32_t n;

- /* The request must only run in the BlockBackend's AioContext */
- assert(blk_get_aio_context(s->qdev.conf.blk) ==
- qemu_get_current_aio_context());
+ /* The request must run in its AioContext */
+ assert(r->req.ctx == qemu_get_current_aio_context());

assert (r->req.aiocb == NULL);
if (scsi_disk_req_check_error(r, ret, ret > 0)) {
--
2.48.1
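The patch above pairs the recorded per-request context with an assertion at
completion time. A minimal standalone C sketch of that record-then-assert
pattern (editorial illustration, not QEMU code; a thread-local pointer stands
in for qemu_get_current_aio_context(), and all names are hypothetical):

#include <assert.h>
#include <stdio.h>

typedef struct AioCtx { const char *name; } AioCtx;
static _Thread_local AioCtx *current_ctx;

typedef struct Req { AioCtx *ctx; } Req;

static void req_enqueue(Req *r)
{
    r->ctx = current_ctx; /* remember the submitting context */
}

static void req_complete(Req *r)
{
    /* counterpart of the patch's assertion: complete where submitted */
    assert(r->ctx == current_ctx);
    printf("completed in %s\n", r->ctx->name);
}

int main(void)
{
    AioCtx main_loop = { "main-loop" };
    Req r;

    current_ctx = &main_loop;
    req_enqueue(&r);
    req_complete(&r);
    return 0;
}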
From: Hanna Reitz <hreitz@redhat.com>

Invoke the transaction drivers' .clean() methods only after all
.commit() or .abort() handlers are done.

This makes it easier to have nested transactions where the top-level
transactions pass objects to lower transactions that the latter can
still use throughout their commit/abort phases, while the top-level
transaction keeps a reference that is released in its .clean() method.

(Before this commit, that is also possible, but the top-level
transaction would need to take care to invoke tran_add() before the
lower-level transaction does. This commit makes the ordering
irrelevant, which is just a bit nicer.)

Signed-off-by: Hanna Reitz <hreitz@redhat.com>
Message-Id: <20211111120829.81329-8-hreitz@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
include/qemu/transactions.h | 3 +++
util/transactions.c | 8 ++++++--
2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/include/qemu/transactions.h b/include/qemu/transactions.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/transactions.h
+++ b/include/qemu/transactions.h
@@ -XXX,XX +XXX,XX @@
* tran_create(), call your "prepare" functions on it, and finally call
* tran_abort() or tran_commit() to finalize the transaction by corresponding
* finalization actions in reverse order.
+ *
+ * The clean() functions registered by the drivers in a transaction are called
+ * last, after all abort() or commit() functions have been called.
*/

#ifndef QEMU_TRANSACTIONS_H
diff --git a/util/transactions.c b/util/transactions.c
index XXXXXXX..XXXXXXX 100644
--- a/util/transactions.c
+++ b/util/transactions.c
@@ -XXX,XX +XXX,XX @@ void tran_abort(Transaction *tran)
{
TransactionAction *act, *next;

- QSLIST_FOREACH_SAFE(act, &tran->actions, entry, next) {
+ QSLIST_FOREACH(act, &tran->actions, entry) {
if (act->drv->abort) {
act->drv->abort(act->opaque);
}
+ }

+ QSLIST_FOREACH_SAFE(act, &tran->actions, entry, next) {
if (act->drv->clean) {
act->drv->clean(act->opaque);
}
@@ -XXX,XX +XXX,XX @@ void tran_commit(Transaction *tran)
{
TransactionAction *act, *next;

- QSLIST_FOREACH_SAFE(act, &tran->actions, entry, next) {
+ QSLIST_FOREACH(act, &tran->actions, entry) {
if (act->drv->commit) {
act->drv->commit(act->opaque);
}
+ }

+ QSLIST_FOREACH_SAFE(act, &tran->actions, entry, next) {
if (act->drv->clean) {
act->drv->clean(act->opaque);
}
--
2.31.1

From: Stefan Hajnoczi <stefanha@redhat.com>

SCSIDevice keeps track of in-flight requests for device reset and Task
Management Functions (TMFs). The request list requires protection so
that multi-threaded SCSI emulation can be implemented in commits that
follow.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20250311132616.1049687-5-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
include/hw/scsi/scsi.h | 7 ++-
hw/scsi/scsi-bus.c | 120 +++++++++++++++++++++++++++++------------
2 files changed, 88 insertions(+), 39 deletions(-)

diff --git a/include/hw/scsi/scsi.h b/include/hw/scsi/scsi.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/scsi/scsi.h
+++ b/include/hw/scsi/scsi.h
@@ -XXX,XX +XXX,XX @@ struct SCSIRequest {
bool dma_started;
BlockAIOCB *aiocb;
QEMUSGList *sg;
+
+ /* Protected by SCSIDevice->requests_lock */
QTAILQ_ENTRY(SCSIRequest) next;
};

@@ -XXX,XX +XXX,XX @@ struct SCSIDevice
uint8_t sense[SCSI_SENSE_BUF_SIZE];
uint32_t sense_len;

- /*
- * The requests list is only accessed from the AioContext that executes
- * requests or from the main loop when IOThread processing is stopped.
- */
+ QemuMutex requests_lock; /* protects the requests list */
QTAILQ_HEAD(, SCSIRequest) requests;

uint32_t channel;
diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/scsi/scsi-bus.c
+++ b/hw/scsi/scsi-bus.c
@@ -XXX,XX +XXX,XX @@ static void scsi_device_for_each_req_sync(SCSIDevice *s,
assert(!runstate_is_running());
assert(qemu_in_main_thread());

- QTAILQ_FOREACH_SAFE(req, &s->requests, next, next_req) {
- fn(req, opaque);
+ /*
+ * Locking is not necessary because the guest is stopped and no other
+ * threads can be accessing the requests list, but take the lock for
+ * consistency.
+ */
+ WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
+ QTAILQ_FOREACH_SAFE(req, &s->requests, next, next_req) {
+ fn(req, opaque);
+ }
}
}

@@ -XXX,XX +XXX,XX @@ static void scsi_device_for_each_req_async_bh(void *opaque)
{
g_autofree SCSIDeviceForEachReqAsyncData *data = opaque;
SCSIDevice *s = data->s;
- AioContext *ctx;
- SCSIRequest *req;
- SCSIRequest *next;
+ g_autoptr(GList) reqs = NULL;

/*
- * The BB cannot have changed contexts between this BH being scheduled and
- * now: BBs' AioContexts, when they have a node attached, can only be
- * changed via bdrv_try_change_aio_context(), in a drained section. While
- * we have the in-flight counter incremented, that drain must block.
+ * Build a list of requests in this AioContext so fn() can be invoked later
+ * outside requests_lock.
*/
- ctx = blk_get_aio_context(s->conf.blk);
- assert(ctx == qemu_get_current_aio_context());
+ WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
+ AioContext *ctx = qemu_get_current_aio_context();
+ SCSIRequest *req;
+ SCSIRequest *next;
+
+ QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
+ if (req->ctx == ctx) {
+ scsi_req_ref(req); /* dropped after calling fn() */
+ reqs = g_list_prepend(reqs, req);
+ }
+ }
+ }

- QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
- data->fn(req, data->fn_opaque);
+ /* Call fn() on each request */
+ for (GList *elem = g_list_first(reqs); elem; elem = g_list_next(elem)) {
+ data->fn(elem->data, data->fn_opaque);
+ scsi_req_unref(elem->data);
}

/* Drop the reference taken by scsi_device_for_each_req_async() */
@@ -XXX,XX +XXX,XX @@ static void scsi_device_for_each_req_async_bh(void *opaque)
blk_dec_in_flight(s->conf.blk);
}

+static void scsi_device_for_each_req_async_do_ctx(gpointer key, gpointer value,
+ gpointer user_data)
+{
+ AioContext *ctx = key;
+ SCSIDeviceForEachReqAsyncData *params = user_data;
+ SCSIDeviceForEachReqAsyncData *data;
+
+ data = g_new(SCSIDeviceForEachReqAsyncData, 1);
+ data->s = params->s;
+ data->fn = params->fn;
+ data->fn_opaque = params->fn_opaque;
+
+ /*
+ * Hold a reference to the SCSIDevice until
+ * scsi_device_for_each_req_async_bh() finishes.
+ */
+ object_ref(OBJECT(data->s));
+
+ /* Paired with scsi_device_for_each_req_async_bh() */
+ blk_inc_in_flight(data->s->conf.blk);
+
+ aio_bh_schedule_oneshot(ctx, scsi_device_for_each_req_async_bh, data);
+}
+
/*
* Schedule @fn() to be invoked for each enqueued request in device @s. @fn()
- * runs in the AioContext that is executing the request.
+ * must be thread-safe because it runs concurrently in each AioContext that is
+ * executing a request.
+ *
* Keeps the BlockBackend's in-flight counter incremented until everything is
* done, so draining it will settle all scheduled @fn() calls.
*/
@@ -XXX,XX +XXX,XX @@ static void scsi_device_for_each_req_async(SCSIDevice *s,
{
assert(qemu_in_main_thread());

- SCSIDeviceForEachReqAsyncData *data =
- g_new(SCSIDeviceForEachReqAsyncData, 1);
-
- data->s = s;
- data->fn = fn;
- data->fn_opaque = opaque;
-
- /*
- * Hold a reference to the SCSIDevice until
- * scsi_device_for_each_req_async_bh() finishes.
- */
- object_ref(OBJECT(s));
+ /* The set of AioContexts where the requests are being processed */
+ g_autoptr(GHashTable) aio_contexts = g_hash_table_new(NULL, NULL);
+ WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
+ SCSIRequest *req;
+ QTAILQ_FOREACH(req, &s->requests, next) {
+ g_hash_table_add(aio_contexts, req->ctx);
+ }
+ }

- /* Paired with blk_dec_in_flight() in scsi_device_for_each_req_async_bh() */
- blk_inc_in_flight(s->conf.blk);
- aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.blk),
- scsi_device_for_each_req_async_bh,
- data);
+ /* Schedule a BH for each AioContext */
+ SCSIDeviceForEachReqAsyncData params = {
+ .s = s,
+ .fn = fn,
+ .fn_opaque = opaque,
+ };
+ g_hash_table_foreach(
+ aio_contexts,
+ scsi_device_for_each_req_async_do_ctx,
+ &params
+ );
}

static void scsi_device_realize(SCSIDevice *s, Error **errp)
@@ -XXX,XX +XXX,XX @@ static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
dev->lun = lun;
}

+ qemu_mutex_init(&dev->requests_lock);
QTAILQ_INIT(&dev->requests);
scsi_device_realize(dev, &local_err);
if (local_err) {
@@ -XXX,XX +XXX,XX @@ static void scsi_qdev_unrealize(DeviceState *qdev)

scsi_device_purge_requests(dev, SENSE_CODE(NO_SENSE));

+ qemu_mutex_destroy(&dev->requests_lock);
+
scsi_device_unrealize(dev);

blockdev_mark_auto_del(dev->conf.blk);
@@ -XXX,XX +XXX,XX @@ static void scsi_req_enqueue_internal(SCSIRequest *req)
req->sg = NULL;
}
req->enqueued = true;
- QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
+
+ WITH_QEMU_LOCK_GUARD(&req->dev->requests_lock) {
+ QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
+ }
}

int32_t scsi_req_enqueue(SCSIRequest *req)
@@ -XXX,XX +XXX,XX @@ static void scsi_req_dequeue(SCSIRequest *req)
trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag);
req->retry = false;
if (req->enqueued) {
- QTAILQ_REMOVE(&req->dev->requests, req, next);
+ WITH_QEMU_LOCK_GUARD(&req->dev->requests_lock) {
+ QTAILQ_REMOVE(&req->dev->requests, req, next);
+ }
req->enqueued = false;
scsi_req_unref(req);
}
@@ -XXX,XX +XXX,XX @@ static void scsi_device_class_init(ObjectClass *klass, void *data)

static void scsi_dev_instance_init(Object *obj)
{
- DeviceState *dev = DEVICE(obj);
- SCSIDevice *s = SCSI_DEVICE(dev);
+ SCSIDevice *s = SCSI_DEVICE(obj);

device_add_bootindex_property(obj, &s->conf.bootindex,
"bootindex", NULL,
--
2.48.1
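The transactions change above reorders finalization into two passes: all
commit (or abort) handlers first, then all clean handlers. A standalone C
sketch of that two-pass structure (editorial illustration; a plain singly
linked list stands in for QEMU's QSLIST, and all names are hypothetical):

#include <stdio.h>

typedef struct Action {
    void (*commit)(void *opaque);
    void (*clean)(void *opaque);
    void *opaque;
    struct Action *next;
} Action;

/* First pass runs every commit(); only then does the second pass run the
 * clean() handlers, so a clean() that releases a resource can no longer
 * pull it out from under a later action's commit(). */
static void tran_commit(Action *head)
{
    for (Action *a = head; a; a = a->next) {
        if (a->commit) {
            a->commit(a->opaque);
        }
    }
    for (Action *a = head; a; a = a->next) {
        if (a->clean) {
            a->clean(a->opaque);
        }
    }
}

static void say(void *opaque) { puts(opaque); }

int main(void)
{
    Action second = { say, say, "second", NULL };
    Action first = { say, say, "first", &second };

    tran_commit(&first); /* both commits print before any clean runs */
    return 0;
}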
From: Hanna Reitz <hreitz@redhat.com>

The children list is specific to BDS parents. We should not modify it
in the general children modification code, but let BDS parents deal with
it in their .attach() and .detach() methods.

This also has the advantage that a BdrvChild is removed from the
children list before its .bs pointer can become NULL. BDS parents
generally assume that their children's .bs pointer is never NULL, so
this is actually a bug fix.

Signed-off-by: Hanna Reitz <hreitz@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20211111120829.81329-3-hreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
block.c | 14 +++++---------
1 file changed, 5 insertions(+), 9 deletions(-)

diff --git a/block.c b/block.c
index XXXXXXX..XXXXXXX 100644
--- a/block.c
+++ b/block.c
@@ -XXX,XX +XXX,XX @@ static void bdrv_child_cb_attach(BdrvChild *child)
{
BlockDriverState *bs = child->opaque;

+ QLIST_INSERT_HEAD(&bs->children, child, next);
+
if (child->role & BDRV_CHILD_COW) {
bdrv_backing_attach(child);
}
@@ -XXX,XX +XXX,XX @@ static void bdrv_child_cb_detach(BdrvChild *child)
}

bdrv_unapply_subtree_drain(child, bs);
+
+ QLIST_REMOVE(child, next);
}

static int bdrv_child_cb_update_filename(BdrvChild *c, BlockDriverState *base,
@@ -XXX,XX +XXX,XX @@ static void bdrv_child_free(void *opaque)
static void bdrv_remove_empty_child(BdrvChild *child)
{
assert(!child->bs);
- QLIST_SAFE_REMOVE(child, next);
+ assert(!child->next.le_prev); /* not in children list */
bdrv_child_free(child);
}

@@ -XXX,XX +XXX,XX @@ static int bdrv_attach_child_noperm(BlockDriverState *parent_bs,
return ret;
}

- QLIST_INSERT_HEAD(&parent_bs->children, *child, next);
- /*
- * child is removed in bdrv_attach_child_common_abort(), so don't care to
- * abort this change separately.
- */
-
return 0;
}

@@ -XXX,XX +XXX,XX @@ static void bdrv_remove_filter_or_cow_child_abort(void *opaque)
BdrvRemoveFilterOrCowChild *s = opaque;
BlockDriverState *parent_bs = s->child->opaque;

- QLIST_INSERT_HEAD(&parent_bs->children, s->child, next);
if (s->is_backing) {
parent_bs->backing = s->child;
} else {
@@ -XXX,XX +XXX,XX @@ static void bdrv_remove_file_or_backing_child(BlockDriverState *bs,
};
tran_add(tran, &bdrv_remove_filter_or_cow_child_drv, s);

- QLIST_SAFE_REMOVE(child, next);
if (s->is_backing) {
bs->backing = NULL;
} else {
--
2.31.1

From: Stefan Hajnoczi <stefanha@redhat.com>

Virtqueues are not thread-safe. Until now this was not a major issue
since all virtqueue processing happened in the same thread. The ctrl
queue's Task Management Function (TMF) requests sometimes need the main
loop, so a BH was used to schedule the virtqueue completion back in the
thread that has virtqueue access.

When IOThread Virtqueue Mapping is introduced in later commits, event
and ctrl virtqueue accesses from other threads will become necessary.
Introduce an optional per-virtqueue lock so the event and ctrl
virtqueues can be protected in the commits that follow.

The addition of the ctrl virtqueue lock makes
virtio_scsi_complete_req_from_main_loop() and its BH unnecessary.
Instead, take the ctrl virtqueue lock from the main loop thread.

The cmd virtqueue does not have a lock because the entirety of SCSI
command processing happens in one thread. Only one thread accesses the
cmd virtqueue and a lock is unnecessary.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20250311132616.1049687-6-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
include/hw/virtio/virtio-scsi.h | 3 ++
hw/scsi/virtio-scsi.c | 84 ++++++++++++++++++---------------
2 files changed, 49 insertions(+), 38 deletions(-)

diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/virtio/virtio-scsi.h
+++ b/include/hw/virtio/virtio-scsi.h
@@ -XXX,XX +XXX,XX @@ struct VirtIOSCSI {
int resetting; /* written from main loop thread, read from any thread */
bool events_dropped;

+ QemuMutex ctrl_lock; /* protects ctrl_vq */
+ QemuMutex event_lock; /* protects event_vq */
+
/*
* TMFs deferred to main loop BH. These fields are protected by
* tmf_bh_lock.
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_free_req(VirtIOSCSIReq *req)
g_free(req);
}

-static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
+static void virtio_scsi_complete_req(VirtIOSCSIReq *req, QemuMutex *vq_lock)
{
VirtIOSCSI *s = req->dev;
VirtQueue *vq = req->vq;
VirtIODevice *vdev = VIRTIO_DEVICE(s);

qemu_iovec_from_buf(&req->resp_iov, 0, &req->resp, req->resp_size);
+
+ if (vq_lock) {
+ qemu_mutex_lock(vq_lock);
+ }
+
virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size);
if (s->dataplane_started && !s->dataplane_fenced) {
virtio_notify_irqfd(vdev, vq);
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
virtio_notify(vdev, vq);
}

+ if (vq_lock) {
+ qemu_mutex_unlock(vq_lock);
+ }
+
if (req->sreq) {
req->sreq->hba_private = NULL;
scsi_req_unref(req->sreq);
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
virtio_scsi_free_req(req);
}

-static void virtio_scsi_complete_req_bh(void *opaque)
+static void virtio_scsi_bad_req(VirtIOSCSIReq *req, QemuMutex *vq_lock)
{
- VirtIOSCSIReq *req = opaque;
+ virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");

- virtio_scsi_complete_req(req);
-}
+ if (vq_lock) {
+ qemu_mutex_lock(vq_lock);
+ }

-/*
- * Called from virtio_scsi_do_one_tmf_bh() in main loop thread. The main loop
- * thread cannot touch the virtqueue since that could race with an IOThread.
- */
-static void virtio_scsi_complete_req_from_main_loop(VirtIOSCSIReq *req)
-{
- VirtIOSCSI *s = req->dev;
+ virtqueue_detach_element(req->vq, &req->elem, 0);

- if (!s->ctx || s->ctx == qemu_get_aio_context()) {
- /* No need to schedule a BH when there is no IOThread */
- virtio_scsi_complete_req(req);
- } else {
- /* Run request completion in the IOThread */
- aio_wait_bh_oneshot(s->ctx, virtio_scsi_complete_req_bh, req);
+ if (vq_lock) {
+ qemu_mutex_unlock(vq_lock);
}
-}

-static void virtio_scsi_bad_req(VirtIOSCSIReq *req)
-{
- virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");
- virtqueue_detach_element(req->vq, &req->elem, 0);
virtio_scsi_free_req(req);
}

@@ -XXX,XX +XXX,XX @@ static int virtio_scsi_parse_req(VirtIOSCSIReq *req,
return 0;
}

-static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq)
+static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq, QemuMutex *vq_lock)
{
VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s;
VirtIOSCSIReq *req;

+ if (vq_lock) {
+ qemu_mutex_lock(vq_lock);
+ }
+
req = virtqueue_pop(vq, sizeof(VirtIOSCSIReq) + vs->cdb_size);
+
+ if (vq_lock) {
+ qemu_mutex_unlock(vq_lock);
+ }
+
if (!req) {
return NULL;
}
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)

trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun),
req->req.tmf.tag, req->resp.tmf.response);
- virtio_scsi_complete_req(req);
+ virtio_scsi_complete_req(req, &req->dev->ctrl_lock);
}
g_free(n);
}
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_do_one_tmf_bh(VirtIOSCSIReq *req)

out:
object_unref(OBJECT(d));
- virtio_scsi_complete_req_from_main_loop(req);
+ virtio_scsi_complete_req(req, &s->ctrl_lock);
}

/* Some TMFs must be processed from the main loop thread */
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_reset_tmf_bh(VirtIOSCSI *s)

/* SAM-6 6.3.2 Hard reset */
req->resp.tmf.response = VIRTIO_SCSI_S_TARGET_FAILURE;
- virtio_scsi_complete_req(req);
+ virtio_scsi_complete_req(req, &req->dev->ctrl_lock);
}
}

@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)

if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
&type, sizeof(type)) < sizeof(type)) {
- virtio_scsi_bad_req(req);
+ virtio_scsi_bad_req(req, &s->ctrl_lock);
return;
}

@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
if (type == VIRTIO_SCSI_T_TMF) {
if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlTMFReq),
sizeof(VirtIOSCSICtrlTMFResp)) < 0) {
- virtio_scsi_bad_req(req);
+ virtio_scsi_bad_req(req, &s->ctrl_lock);
return;
} else {
r = virtio_scsi_do_tmf(s, req);
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
type == VIRTIO_SCSI_T_AN_SUBSCRIBE) {
if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlANReq),
sizeof(VirtIOSCSICtrlANResp)) < 0) {
- virtio_scsi_bad_req(req);
+ virtio_scsi_bad_req(req, &s->ctrl_lock);
return;
} else {
req->req.an.event_requested =
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
type == VIRTIO_SCSI_T_AN_SUBSCRIBE)
trace_virtio_scsi_an_resp(virtio_scsi_get_lun(req->req.an.lun),
req->resp.an.response);
- virtio_scsi_complete_req(req);
+ virtio_scsi_complete_req(req, &s->ctrl_lock);
} else {
assert(r == -EINPROGRESS);
}
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
{
VirtIOSCSIReq *req;

- while ((req = virtio_scsi_pop_req(s, vq))) {
+ while ((req = virtio_scsi_pop_req(s, vq, &s->ctrl_lock))) {
virtio_scsi_handle_ctrl_req(s, req);
}
}
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req)
* in virtio_scsi_command_complete.
*/
req->resp_size = sizeof(VirtIOSCSICmdResp);
- virtio_scsi_complete_req(req);
+ virtio_scsi_complete_req(req, NULL);
}

static void virtio_scsi_command_failed(SCSIRequest *r)
@@ -XXX,XX +XXX,XX @@ static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
virtio_scsi_fail_cmd_req(req);
return -ENOTSUP;
} else {
- virtio_scsi_bad_req(req);
+ virtio_scsi_bad_req(req, NULL);
return -EINVAL;
}
}
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
virtio_queue_set_notification(vq, 0);
}

- while ((req = virtio_scsi_pop_req(s, vq))) {
+ while ((req = virtio_scsi_pop_req(s, vq, NULL))) {
ret = virtio_scsi_handle_cmd_req_prepare(s, req);
if (!ret) {
QTAILQ_INSERT_TAIL(&reqs, req, next);
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_push_event(VirtIOSCSI *s,
return;
}

- req = virtio_scsi_pop_req(s, vs->event_vq);
+ req = virtio_scsi_pop_req(s, vs->event_vq, &s->event_lock);
if (!req) {
s->events_dropped = true;
return;
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_push_event(VirtIOSCSI *s,
}

if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) {
- virtio_scsi_bad_req(req);
+ virtio_scsi_bad_req(req, &s->event_lock);
return;
}

@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_push_event(VirtIOSCSI *s,
}
trace_virtio_scsi_event(virtio_scsi_get_lun(evt->lun), event, reason);

- virtio_scsi_complete_req(req);
+ virtio_scsi_complete_req(req, &s->event_lock);
}

static void virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
Error *err = NULL;

QTAILQ_INIT(&s->tmf_bh_list);
+ qemu_mutex_init(&s->ctrl_lock);
+ qemu_mutex_init(&s->event_lock);
qemu_mutex_init(&s->tmf_bh_lock);

virtio_scsi_common_realize(dev,
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_device_unrealize(DeviceState *dev)
qbus_set_hotplug_handler(BUS(&s->bus), NULL);
virtio_scsi_common_unrealize(dev);
qemu_mutex_destroy(&s->tmf_bh_lock);
+ qemu_mutex_destroy(&s->event_lock);
+ qemu_mutex_destroy(&s->ctrl_lock);
}

static const Property virtio_scsi_properties[] = {
--
2.48.1
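The optional per-virtqueue lock above works by threading a nullable mutex
pointer through the completion path: callers whose queue is only ever touched
from one thread pass NULL and pay nothing, the rest pass the queue's mutex. A
standalone C sketch of the pattern (editorial illustration; pthreads stand in
for QemuMutex, and all names are hypothetical):

#include <pthread.h>
#include <stdio.h>

/* vq_lock may be NULL for queues that are never shared across threads. */
static void complete_req(int *queue_depth, pthread_mutex_t *vq_lock)
{
    if (vq_lock) {
        pthread_mutex_lock(vq_lock);
    }

    (*queue_depth)--; /* the virtqueue push happens here in the real code */

    if (vq_lock) {
        pthread_mutex_unlock(vq_lock);
    }
}

int main(void)
{
    pthread_mutex_t ctrl_lock = PTHREAD_MUTEX_INITIALIZER;
    int ctrl_depth = 1, cmd_depth = 1;

    complete_req(&ctrl_depth, &ctrl_lock); /* ctrl vq: shared, locked */
    complete_req(&cmd_depth, NULL);        /* cmd vq: single-threaded */
    printf("%d %d\n", ctrl_depth, cmd_depth);
    return 0;
}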
From: Hanna Reitz <hreitz@redhat.com>

In most of the block layer, especially when traversing down from other
BlockDriverStates, we assume that BdrvChild.bs can never be NULL. When
it becomes NULL, it is expected that the corresponding BdrvChild pointer
also becomes NULL and the BdrvChild object is freed.

Therefore, once bdrv_replace_child_noperm() sets the BdrvChild.bs
pointer to NULL, it should also immediately set the corresponding
BdrvChild pointer (like bs->file or bs->backing) to NULL.

In that context, it also makes sense for this function to free the
child. Sometimes we cannot do so, though, because it is called in a
transactional context where the caller might still want to reinstate the
child in the abort branch (and free it only on commit), so this behavior
has to remain optional.

In bdrv_replace_child_tran()'s abort handler, we now rely on the fact
that the BdrvChild passed to bdrv_replace_child_tran() must have had a
non-NULL .bs pointer initially. Make a note of that and assert it.

Signed-off-by: Hanna Reitz <hreitz@redhat.com>
Message-Id: <20211111120829.81329-10-hreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
block.c | 102 +++++++++++++++++++++++++++++++++++++++++++-------------
1 file changed, 79 insertions(+), 23 deletions(-)

diff --git a/block.c b/block.c
index XXXXXXX..XXXXXXX 100644
--- a/block.c
+++ b/block.c
@@ -XXX,XX +XXX,XX @@ static BlockDriverState *bdrv_open_inherit(const char *filename,
static bool bdrv_recurse_has_child(BlockDriverState *bs,
BlockDriverState *child);

+static void bdrv_child_free(BdrvChild *child);
static void bdrv_replace_child_noperm(BdrvChild **child,
- BlockDriverState *new_bs);
+ BlockDriverState *new_bs,
+ bool free_empty_child);
static void bdrv_remove_file_or_backing_child(BlockDriverState *bs,
BdrvChild *child,
Transaction *tran);
@@ -XXX,XX +XXX,XX @@ typedef struct BdrvReplaceChildState {
BdrvChild *child;
BdrvChild **childp;
BlockDriverState *old_bs;
+ bool free_empty_child;
} BdrvReplaceChildState;

static void bdrv_replace_child_commit(void *opaque)
{
BdrvReplaceChildState *s = opaque;

+ if (s->free_empty_child && !s->child->bs) {
+ bdrv_child_free(s->child);
+ }
bdrv_unref(s->old_bs);
}

@@ -XXX,XX +XXX,XX @@ static void bdrv_replace_child_abort(void *opaque)
* modify the BdrvChild * pointer we indirectly pass to it, i.e. it
* will not modify s->child. From that perspective, it does not matter
* whether we pass s->childp or &s->child.
- * (TODO: Right now, bdrv_replace_child_noperm() never modifies that
- * pointer anyway (though it will in the future), so at this point it
- * absolutely does not matter whether we pass s->childp or &s->child.)
* (2) If new_bs is not NULL, s->childp will be NULL. We then cannot use
* it here.
* (3) If new_bs is NULL, *s->childp will have been NULLed by
* bdrv_replace_child_tran()'s bdrv_replace_child_noperm() call, and we
* must not pass a NULL *s->childp here.
- * (TODO: In its current state, bdrv_replace_child_noperm() will not
- * have NULLed *s->childp, so this does not apply yet. It will in the
- * future.)
*
* So whether new_bs was NULL or not, we cannot pass s->childp here; and in
* any case, there is no reason to pass it anyway.
*/
- bdrv_replace_child_noperm(&s->child, s->old_bs);
+ bdrv_replace_child_noperm(&s->child, s->old_bs, true);
+ /*
+ * The child was pre-existing, so s->old_bs must be non-NULL, and
+ * s->child thus must not have been freed
+ */
+ assert(s->child != NULL);
+ if (!new_bs) {
+ /* As described above, *s->childp was cleared, so restore it */
+ assert(s->childp != NULL);
+ *s->childp = s->child;
+ }
bdrv_unref(new_bs);
}

@@ -XXX,XX +XXX,XX @@ static TransactionActionDrv bdrv_replace_child_drv = {
*
* The function doesn't update permissions, caller is responsible for this.
*
+ * (*childp)->bs must not be NULL.
+ *
* Note that if new_bs == NULL, @childp is stored in a state object attached
* to @tran, so that the old child can be reinstated in the abort handler.
* Therefore, if @new_bs can be NULL, @childp must stay valid until the
* transaction is committed or aborted.
*
- * (TODO: The reinstating does not happen yet, but it will once
- * bdrv_replace_child_noperm() NULLs *childp when new_bs is NULL.)
+ * If @free_empty_child is true and @new_bs is NULL, the BdrvChild is
+ * freed (on commit). @free_empty_child should only be false if the
+ * caller will free the BDrvChild themselves (which may be important
+ * if this is in turn called in another transactional context).
*/
static void bdrv_replace_child_tran(BdrvChild **childp,
BlockDriverState *new_bs,
- Transaction *tran)
+ Transaction *tran,
+ bool free_empty_child)
{
BdrvReplaceChildState *s = g_new(BdrvReplaceChildState, 1);
*s = (BdrvReplaceChildState) {
.child = *childp,
.childp = new_bs == NULL ? childp : NULL,
.old_bs = (*childp)->bs,
+ .free_empty_child = free_empty_child,
};
tran_add(tran, &bdrv_replace_child_drv, s);

+ /* The abort handler relies on this */
+ assert(s->old_bs != NULL);
+
if (new_bs) {
bdrv_ref(new_bs);
}
- bdrv_replace_child_noperm(childp, new_bs);
+ /*
+ * Pass free_empty_child=false, we will free the child (if
+ * necessary) in bdrv_replace_child_commit() (if our
+ * @free_empty_child parameter was true).
+ */
+ bdrv_replace_child_noperm(childp, new_bs, false);
/* old_bs reference is transparently moved from *childp to @s */
}

@@ -XXX,XX +XXX,XX @@ uint64_t bdrv_qapi_perm_to_blk_perm(BlockPermission qapi_perm)
return permissions[qapi_perm];
}

+/**
+ * Replace (*childp)->bs by @new_bs.
+ *
+ * If @new_bs is NULL, *childp will be set to NULL, too: BDS parents
+ * generally cannot handle a BdrvChild with .bs == NULL, so clearing
+ * BdrvChild.bs should generally immediately be followed by the
+ * BdrvChild pointer being cleared as well.
+ *
+ * If @free_empty_child is true and @new_bs is NULL, the BdrvChild is
+ * freed. @free_empty_child should only be false if the caller will
+ * free the BdrvChild themselves (this may be important in a
+ * transactional context, where it may only be freed on commit).
+ */
static void bdrv_replace_child_noperm(BdrvChild **childp,
- BlockDriverState *new_bs)
+ BlockDriverState *new_bs,
+ bool free_empty_child)
{
BdrvChild *child = *childp;
BlockDriverState *old_bs = child->bs;
@@ -XXX,XX +XXX,XX @@ static void bdrv_replace_child_noperm(BdrvChild **childp,
}

child->bs = new_bs;
+ if (!new_bs) {
+ *childp = NULL;
+ }

if (new_bs) {
QLIST_INSERT_HEAD(&new_bs->parents, child, next_parent);
@@ -XXX,XX +XXX,XX @@ static void bdrv_replace_child_noperm(BdrvChild **childp,
bdrv_parent_drained_end_single(child);
drain_saldo++;
}
+
+ if (free_empty_child && !child->bs) {
+ bdrv_child_free(child);
+ }
}

/**
@@ -XXX,XX +XXX,XX @@ static void bdrv_attach_child_common_abort(void *opaque)
BdrvChild *child = *s->child;
BlockDriverState *bs = child->bs;

- bdrv_replace_child_noperm(s->child, NULL);
+ /*
+ * Pass free_empty_child=false, because we still need the child
+ * for the AioContext operations on the parent below; those
+ * BdrvChildClass methods all work on a BdrvChild object, so we
+ * need to keep it as an empty shell (after this function, it will
+ * not be attached to any parent, and it will not have a .bs).
+ */
+ bdrv_replace_child_noperm(s->child, NULL, false);

if (bdrv_get_aio_context(bs) != s->old_child_ctx) {
bdrv_try_set_aio_context(bs, s->old_child_ctx, &error_abort);
@@ -XXX,XX +XXX,XX @@ static void bdrv_attach_child_common_abort(void *opaque)

bdrv_unref(bs);
bdrv_child_free(child);
- *s->child = NULL;
}

static TransactionActionDrv bdrv_attach_child_common_drv = {
@@ -XXX,XX +XXX,XX @@ static int bdrv_attach_child_common(BlockDriverState *child_bs,
}
}

bdrv_ref(child_bs);
- bdrv_replace_child_noperm(&new_child, child_bs);
+ bdrv_replace_child_noperm(&new_child, child_bs, true);
+ /* child_bs was non-NULL, so new_child must not have been freed */
+ assert(new_child != NULL);

*child = new_child;

@@ -XXX,XX +XXX,XX @@ static void bdrv_detach_child(BdrvChild **childp)
{
BlockDriverState *old_bs = (*childp)->bs;

- bdrv_replace_child_noperm(childp, NULL);
- bdrv_child_free(*childp);
+ bdrv_replace_child_noperm(childp, NULL, true);

if (old_bs) {
/*
@@ -XXX,XX +XXX,XX @@ static void bdrv_remove_file_or_backing_child(BlockDriverState *bs,
}

if (child->bs) {
- bdrv_replace_child_tran(childp, NULL, tran);
+ /*
+ * Pass free_empty_child=false, we will free the child in
+ * bdrv_remove_filter_or_cow_child_commit()
+ */
+ bdrv_replace_child_tran(childp, NULL, tran, false);
}

s = g_new(BdrvRemoveFilterOrCowChild, 1);
@@ -XXX,XX +XXX,XX @@ static void bdrv_remove_file_or_backing_child(BlockDriverState *bs,
.is_backing = (childp == &bs->backing),
};
tran_add(tran, &bdrv_remove_filter_or_cow_child_drv, s);
-
- *childp = NULL;
}

/*
@@ -XXX,XX +XXX,XX @@ static int bdrv_replace_node_noperm(BlockDriverState *from,
* Passing a pointer to the local variable @c is fine here, because
* @to is not NULL, and so &c will not be attached to the transaction.
*/
- bdrv_replace_child_tran(&c, to, tran);
+ bdrv_replace_child_tran(&c, to, tran, true);
}

return 0;
@@ -XXX,XX +XXX,XX @@ int bdrv_replace_child_bs(BdrvChild *child, BlockDriverState *new_bs,
bdrv_drained_begin(old_bs);
bdrv_drained_begin(new_bs);

- bdrv_replace_child_tran(&child, new_bs, tran);
+ bdrv_replace_child_tran(&child, new_bs, tran, true);
+ /* @new_bs must have been non-NULL, so @child must not have been freed */
+ assert(child != NULL);

found = g_hash_table_new(NULL, NULL);
refresh_list = bdrv_topological_dfs(refresh_list, found, old_bs);
--
2.31.1

From: Stefan Hajnoczi <stefanha@redhat.com>

The block layer can invoke the resize callback from any AioContext that
is processing requests. The virtqueue is already protected but the
events_dropped field also needs to be protected against races. Cover it
using the event virtqueue lock because it is closely associated with
accesses to the virtqueue.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20250311132616.1049687-7-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
include/hw/virtio/virtio-scsi.h | 3 ++-
hw/scsi/virtio-scsi.c | 29 ++++++++++++++++++++---------
2 files changed, 22 insertions(+), 10 deletions(-)

diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/virtio/virtio-scsi.h
+++ b/include/hw/virtio/virtio-scsi.h
@@ -XXX,XX +XXX,XX @@ struct VirtIOSCSI {

SCSIBus bus;
int resetting; /* written from main loop thread, read from any thread */
+
+ QemuMutex event_lock; /* protects event_vq and events_dropped */
bool events_dropped;

QemuMutex ctrl_lock; /* protects ctrl_vq */
- QemuMutex event_lock; /* protects event_vq */

/*
* TMFs deferred to main loop BH. These fields are protected by
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_reset(VirtIODevice *vdev)

vs->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
vs->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;
- s->events_dropped = false;
+
+ WITH_QEMU_LOCK_GUARD(&s->event_lock) {
+ s->events_dropped = false;
+ }
}

typedef struct {
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_push_event(VirtIOSCSI *s,
}

req = virtio_scsi_pop_req(s, vs->event_vq, &s->event_lock);
- if (!req) {
- s->events_dropped = true;
- return;
- }
+ WITH_QEMU_LOCK_GUARD(&s->event_lock) {
+ if (!req) {
+ s->events_dropped = true;
+ return;
+ }

- if (s->events_dropped) {
- event |= VIRTIO_SCSI_T_EVENTS_MISSED;
- s->events_dropped = false;
+ if (s->events_dropped) {
+ event |= VIRTIO_SCSI_T_EVENTS_MISSED;
+ s->events_dropped = false;
+ }
}

if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) {
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_push_event(VirtIOSCSI *s,

static void virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
{
- if (s->events_dropped) {
+ bool events_dropped;
+
+ WITH_QEMU_LOCK_GUARD(&s->event_lock) {
+ events_dropped = s->events_dropped;
+ }
+
+ if (events_dropped) {
VirtIOSCSIEventInfo info = {
.event = VIRTIO_SCSI_T_NO_EVENT,
};
--
2.48.1
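The events_dropped fix above snapshots the flag under the lock and then acts
on the copy outside the critical section. A standalone C sketch of that
snapshot pattern (editorial illustration; pthreads stand in for QemuMutex and
WITH_QEMU_LOCK_GUARD, and all names are hypothetical):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
static bool events_dropped;

/* Read the shared flag under the lock, then act on the local copy outside
 * the critical section, as virtio_scsi_handle_event_vq() does here. */
static void handle_event_vq(void)
{
    bool dropped;

    pthread_mutex_lock(&event_lock);
    dropped = events_dropped;
    pthread_mutex_unlock(&event_lock);

    if (dropped) {
        puts("push a VIRTIO_SCSI_T_NO_EVENT with EVENTS_MISSED set");
    }
}

int main(void)
{
    pthread_mutex_lock(&event_lock);
    events_dropped = true; /* as if an event could not be queued */
    pthread_mutex_unlock(&event_lock);

    handle_event_vq();
    return 0;
}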
From: Hanna Reitz <hreitz@redhat.com>

bdrv_replace_child_noperm() modifies BdrvChild.bs, and can potentially
set it to NULL. That is dangerous, because BDS parents generally assume
that their children's .bs pointer is never NULL. We therefore want to
let bdrv_replace_child_noperm() set the corresponding BdrvChild pointer
to NULL, too.

This patch lays the foundation for it by passing a BdrvChild ** pointer
to bdrv_replace_child_noperm() so that it can later use it to NULL the
BdrvChild pointer immediately after setting BdrvChild.bs to NULL.

(We will still need to undertake some intermediate steps, though.)

Signed-off-by: Hanna Reitz <hreitz@redhat.com>
Message-Id: <20211111120829.81329-6-hreitz@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
block.c | 23 ++++++++++++-----------
1 file changed, 12 insertions(+), 11 deletions(-)

diff --git a/block.c b/block.c
index XXXXXXX..XXXXXXX 100644
--- a/block.c
+++ b/block.c
@@ -XXX,XX +XXX,XX @@ static BlockDriverState *bdrv_open_inherit(const char *filename,
static bool bdrv_recurse_has_child(BlockDriverState *bs,
BlockDriverState *child);

-static void bdrv_replace_child_noperm(BdrvChild *child,
+static void bdrv_replace_child_noperm(BdrvChild **child,
BlockDriverState *new_bs);
static void bdrv_remove_file_or_backing_child(BlockDriverState *bs,
BdrvChild *child,
Transaction *tran);
@@ -XXX,XX +XXX,XX @@ static void bdrv_replace_child_abort(void *opaque)
BlockDriverState *new_bs = s->child->bs;

/* old_bs reference is transparently moved from @s to @s->child */
- bdrv_replace_child_noperm(s->child, s->old_bs);
+ bdrv_replace_child_noperm(&s->child, s->old_bs);
bdrv_unref(new_bs);
}

@@ -XXX,XX +XXX,XX @@ static void bdrv_replace_child_tran(BdrvChild *child, BlockDriverState *new_bs,
if (new_bs) {
bdrv_ref(new_bs);
}
- bdrv_replace_child_noperm(child, new_bs);
+ bdrv_replace_child_noperm(&child, new_bs);
/* old_bs reference is transparently moved from @child to @s */
}

@@ -XXX,XX +XXX,XX @@ uint64_t bdrv_qapi_perm_to_blk_perm(BlockPermission qapi_perm)
return permissions[qapi_perm];
}

-static void bdrv_replace_child_noperm(BdrvChild *child,
+static void bdrv_replace_child_noperm(BdrvChild **childp,
BlockDriverState *new_bs)
{
+ BdrvChild *child = *childp;
BlockDriverState *old_bs = child->bs;
int new_bs_quiesce_counter;
int drain_saldo;
@@ -XXX,XX +XXX,XX @@ static void bdrv_attach_child_common_abort(void *opaque)
BdrvChild *child = *s->child;
BlockDriverState *bs = child->bs;

- bdrv_replace_child_noperm(child, NULL);
+ bdrv_replace_child_noperm(s->child, NULL);

if (bdrv_get_aio_context(bs) != s->old_child_ctx) {
bdrv_try_set_aio_context(bs, s->old_child_ctx, &error_abort);
@@ -XXX,XX +XXX,XX @@ static int bdrv_attach_child_common(BlockDriverState *child_bs,
}
}

bdrv_ref(child_bs);
- bdrv_replace_child_noperm(new_child, child_bs);
+ bdrv_replace_child_noperm(&new_child, child_bs);

*child = new_child;

@@ -XXX,XX +XXX,XX @@ static int bdrv_attach_child_noperm(BlockDriverState *parent_bs,
return 0;
}

-static void bdrv_detach_child(BdrvChild *child)
+static void bdrv_detach_child(BdrvChild **childp)
{
- BlockDriverState *old_bs = child->bs;
+ BlockDriverState *old_bs = (*childp)->bs;

- bdrv_replace_child_noperm(child, NULL);
- bdrv_child_free(child);
+ bdrv_replace_child_noperm(childp, NULL);
+ bdrv_child_free(*childp);

if (old_bs) {
/*
@@ -XXX,XX +XXX,XX @@ void bdrv_root_unref_child(BdrvChild *child)
BlockDriverState *child_bs;

child_bs = child->bs;
- bdrv_detach_child(child);
+ bdrv_detach_child(&child);
bdrv_unref(child_bs);
}

From: Stefan Hajnoczi <stefanha@redhat.com>

With IOThread Virtqueue Mapping there will be multiple AioContexts
processing SCSI requests. scsi_req_cancel() and other SCSI request
operations must be performed from the AioContext where the request is
running.

Introduce a virtio_scsi_defer_tmf_to_aio_context() function and the
necessary VirtIOSCSIReq->remaining refcount infrastructure to move the
TMF code into the AioContext where the request is running.

For the time being there is still just one AioContext: the main loop or
the IOThread. When the iothread-vq-mapping parameter is added in a later
patch this will be changed to per-virtqueue AioContexts.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20250311132616.1049687-8-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
hw/scsi/virtio-scsi.c | 270 ++++++++++++++++++++++++++++++----------
1 file changed, 206 insertions(+), 64 deletions(-)

diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -XXX,XX +XXX,XX @@ typedef struct VirtIOSCSIReq {
/* Used for two-stage request submission and TMFs deferred to BH */
QTAILQ_ENTRY(VirtIOSCSIReq) next;

- /* Used for cancellation of request during TMFs */
+ /* Used for cancellation of request during TMFs. Atomic. */
int remaining;

SCSIRequest *sreq;
@@ -XXX,XX +XXX,XX @@ typedef struct {
VirtIOSCSIReq *tmf_req;
} VirtIOSCSICancelNotifier;

+static void virtio_scsi_tmf_dec_remaining(VirtIOSCSIReq *tmf)
+{
+ if (qatomic_fetch_dec(&tmf->remaining) == 1) {
+ trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(tmf->req.tmf.lun),
+ tmf->req.tmf.tag, tmf->resp.tmf.response);
+
+ virtio_scsi_complete_req(tmf, &tmf->dev->ctrl_lock);
+ }
+}
+
static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)
{
VirtIOSCSICancelNotifier *n = container_of(notifier,
VirtIOSCSICancelNotifier,
notifier);

- if (--n->tmf_req->remaining == 0) {
- VirtIOSCSIReq *req = n->tmf_req;
-
- trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun),
- req->req.tmf.tag, req->resp.tmf.response);
- virtio_scsi_complete_req(req, &req->dev->ctrl_lock);
- }
+ virtio_scsi_tmf_dec_remaining(n->tmf_req);
g_free(n);
}

@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_reset_tmf_bh(VirtIOSCSI *s)
}
}

-static void virtio_scsi_defer_tmf_to_bh(VirtIOSCSIReq *req)
+static void virtio_scsi_defer_tmf_to_main_loop(VirtIOSCSIReq *req)
{
VirtIOSCSI *s = req->dev;

@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_defer_tmf_to_bh(VirtIOSCSIReq *req)
}
}

+static void virtio_scsi_tmf_cancel_req(VirtIOSCSIReq *tmf, SCSIRequest *r)
+{
+ VirtIOSCSICancelNotifier *notifier;
+
+ assert(r->ctx == qemu_get_current_aio_context());
+
+ /* Decremented in virtio_scsi_cancel_notify() */
+ qatomic_inc(&tmf->remaining);
+
+ notifier = g_new(VirtIOSCSICancelNotifier, 1);
+ notifier->notifier.notify = virtio_scsi_cancel_notify;
+ notifier->tmf_req = tmf;
+ scsi_req_cancel_async(r, &notifier->notifier);
+}
+
+/* Execute a TMF on the requests in the current AioContext */
+static void virtio_scsi_do_tmf_aio_context(void *opaque)
+{
+ AioContext *ctx = qemu_get_current_aio_context();
+ VirtIOSCSIReq *tmf = opaque;
+ VirtIOSCSI *s = tmf->dev;
+ SCSIDevice *d = virtio_scsi_device_get(s, tmf->req.tmf.lun);
+ SCSIRequest *r;
+ bool match_tag;
+
+ if (!d) {
+ tmf->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
+ virtio_scsi_tmf_dec_remaining(tmf);
+ return;
+ }
+
+ /*
+ * This function could handle other subtypes that need to be processed in
+ * the request's AioContext in the future, but for now only request
+ * cancelation subtypes are performed here.
+ */
+ switch (tmf->req.tmf.subtype) {
+ case VIRTIO_SCSI_T_TMF_ABORT_TASK:
+ match_tag = true;
+ break;
+ case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
+ case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
+ match_tag = false;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
+ QTAILQ_FOREACH(r, &d->requests, next) {
+ VirtIOSCSIReq *cmd_req = r->hba_private;
+ assert(cmd_req); /* request has hba_private while enqueued */
+
+ if (r->ctx != ctx) {
+ continue;
+ }
+ if (match_tag && cmd_req->req.cmd.tag != tmf->req.tmf.tag) {
+ continue;
+ }
+ virtio_scsi_tmf_cancel_req(tmf, r);
+ }
+ }
+
+ /* Incremented by virtio_scsi_do_tmf() */
+ virtio_scsi_tmf_dec_remaining(tmf);
+
+ object_unref(d);
+}
+
+static void dummy_bh(void *opaque)
+{
+ /* Do nothing */
+}
+
+/*
+ * Wait for pending virtio_scsi_defer_tmf_to_aio_context() BHs.
+ */
+static void virtio_scsi_flush_defer_tmf_to_aio_context(VirtIOSCSI *s)
+{
+ GLOBAL_STATE_CODE();
+
+ assert(!s->dataplane_started);
+
+ if (s->ctx) {
+ /* Our BH only runs after previously scheduled BHs */
+ aio_wait_bh_oneshot(s->ctx, dummy_bh, NULL);
+ }
+}
+
+/*
+ * Run the TMF in a specific AioContext, handling only requests in that
+ * AioContext. This is necessary because requests can run in different
+ * AioContext and it is only possible to cancel them from the AioContext where
+ * they are running.
+ */
+static void virtio_scsi_defer_tmf_to_aio_context(VirtIOSCSIReq *tmf,
+ AioContext *ctx)
+{
+ /* Decremented in virtio_scsi_do_tmf_aio_context() */
+ qatomic_inc(&tmf->remaining);
+
+ /* See virtio_scsi_flush_defer_tmf_to_aio_context() cleanup during reset */
+ aio_bh_schedule_oneshot(ctx, virtio_scsi_do_tmf_aio_context, tmf);
+}
+
+/*
+ * Returns the AioContext for a given TMF's tag field or NULL. Note that the
+ * request identified by the tag may have completed by the time you can execute
+ * a BH in the AioContext, so don't assume the request still exists in your BH.
+ */
+static AioContext *find_aio_context_for_tmf_tag(SCSIDevice *d,
+ VirtIOSCSIReq *tmf)
+{
+ WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
+ SCSIRequest *r;
+ SCSIRequest *next;
+
+ QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
+ VirtIOSCSIReq *cmd_req = r->hba_private;
+
+ /* hba_private is non-NULL while the request is enqueued */
+ assert(cmd_req);
+
+ if (cmd_req->req.cmd.tag == tmf->req.tmf.tag) {
+ return r->ctx;
+ }
+ }
+ }
+ return NULL;
+}
+
/* Return 0 if the request is ready to be completed and return to guest;
* -EINPROGRESS if the request is submitted and will be completed later, in the
* case of async cancellation. */
@@ -XXX,XX +XXX,XX @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun);
SCSIRequest *r, *next;
+ AioContext *ctx;
int ret = 0;

virtio_scsi_ctx_check(s, d);
@@ -XXX,XX +XXX,XX @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
req->req.tmf.tag, req->req.tmf.subtype);

switch (req->req.tmf.subtype) {
- case VIRTIO_SCSI_T_TMF_ABORT_TASK:
- case VIRTIO_SCSI_T_TMF_QUERY_TASK:
+ case VIRTIO_SCSI_T_TMF_ABORT_TASK: {
if (!d) {
goto fail;
}
if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
goto incorrect_lun;
}
- QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
- VirtIOSCSIReq *cmd_req = r->hba_private;
- if (cmd_req && cmd_req->req.cmd.tag == req->req.tmf.tag) {
- break;
- }
- }
+
+ ctx = find_aio_context_for_tmf_tag(d, req);
+ if (ctx) {
+ virtio_scsi_defer_tmf_to_aio_context(req, ctx);
+ ret = -EINPROGRESS;
}
- if (r) {
- /*
- * Assert that the request has not been completed yet, we
- * check for it in the loop above.
- */
- assert(r->hba_private);
- if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK) {
- /* "If the specified command is present in the task set, then
- * return a service response set to FUNCTION SUCCEEDED".
- */
- req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
- } else {
- VirtIOSCSICancelNotifier *notifier;
-
- req->remaining = 1;
- notifier = g_new(VirtIOSCSICancelNotifier, 1);
- notifier->tmf_req = req;
- notifier->notifier.notify = virtio_scsi_cancel_notify;
- scsi_req_cancel_async(r, &notifier->notifier);
- ret = -EINPROGRESS;
+ break;
+ }
+
+ case VIRTIO_SCSI_T_TMF_QUERY_TASK:
+ if (!d) {
+ goto fail;
+ }
+ if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
+ goto incorrect_lun;
+ }
+
+ WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
+ QTAILQ_FOREACH(r, &d->requests, next) {
+ VirtIOSCSIReq *cmd_req = r->hba_private;
+ assert(cmd_req); /* request has hba_private while enqueued */
+
+ if (cmd_req->req.cmd.tag == req->req.tmf.tag) {
+ /*
+ * "If the specified command is present in the task set,
+ * then return a service response set to FUNCTION
+ * SUCCEEDED".
+ */
+ req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
+ }
+ }
}
break;

case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
- virtio_scsi_defer_tmf_to_bh(req);
+ virtio_scsi_defer_tmf_to_main_loop(req);
ret = -EINPROGRESS;
break;

case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
- case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
+ case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET: {
+ if (!d) {
+ goto fail;
+ }
+ if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
+ goto incorrect_lun;
+ }
+
+ qatomic_inc(&req->remaining);
+
+ ctx = s->ctx ?: qemu_get_aio_context();
+ virtio_scsi_defer_tmf_to_aio_context(req, ctx);
+
+ virtio_scsi_tmf_dec_remaining(req);
+ ret = -EINPROGRESS;
+ break;
+ }
+
case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
if (!d) {
goto fail;
@@ -XXX,XX +XXX,XX @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
goto incorrect_lun;
}

- /* Add 1 to "remaining" until virtio_scsi_do_tmf returns.
- * This way, if the bus starts calling back to the notifiers
- * even before we finish the loop, virtio_scsi_cancel_notify
- * will not complete the TMF too early.
- */
- req->remaining = 1;
- QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
- if (r->hba_private) {
- if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) {
- /* "If there is any command present in the task set, then
- * return a service response set to FUNCTION SUCCEEDED".
- */
- req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
- break;
- } else {
- VirtIOSCSICancelNotifier *notifier;
-
- req->remaining++;
- notifier = g_new(VirtIOSCSICancelNotifier, 1);
- notifier->notifier.notify = virtio_scsi_cancel_notify;
- notifier->tmf_req = req;
- scsi_req_cancel_async(r, &notifier->notifier);
- }
+ WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
+ QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
+ /* Request has hba_private while enqueued */
+ assert(r->hba_private);
+
+ /*
+ * "If there is any command present in the task set, then
+ * return a service response set to FUNCTION SUCCEEDED".
+ */
+ req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
+ break;
}
}
- if (--req->remaining > 0) {
- ret = -EINPROGRESS;
- }
break;

case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_reset(VirtIODevice *vdev)
assert(!s->dataplane_started);

virtio_scsi_reset_tmf_bh(s);
375
+ virtio_scsi_flush_defer_tmf_to_aio_context(s);
376
377
qatomic_inc(&s->resetting);
378
bus_cold_reset(BUS(&s->bus));
110
--
379
--
111
2.31.1
380
2.48.1
112
113
1
From: Hanna Reitz <hreitz@redhat.com>
1
From: Stefan Hajnoczi <stefanha@redhat.com>
2
2
3
As of a future commit, bdrv_replace_child_noperm() will clear the
3
This is the cleanup function that must be called after
4
indirect BdrvChild pointer passed to it if the new child BDS is NULL.
4
apply_iothread_vq_mapping() succeeds. virtio-scsi will need this
5
bdrv_replace_child_tran() will want to let it do that, but revert this
5
function too, so extract it.
6
change in its abort handler. For that, we need to have it receive a
7
BdrvChild ** pointer, too, and keep it stored in the
8
BdrvReplaceChildState object that we attach to the transaction.
9
6
10
Note that we do not need to store it in the BdrvReplaceChildState when
7
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
11
new_bs is not NULL, because then there is nothing to revert. This is
8
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
12
important so that bdrv_replace_node_noperm() can pass a pointer to a
9
Message-ID: <20250311132616.1049687-9-stefanha@redhat.com>
13
loop-local variable to bdrv_replace_child_tran() without worrying that
14
this pointer will outlive one loop iteration.
15
16
(Of course, for that to work, bdrv_replace_node_noperm() and in turn
17
bdrv_replace_node() and its relatives may not be called with a NULL @to
18
node. Luckily, they already are not, but now we should assert this.)
19
20
bdrv_remove_file_or_backing_child() on the other hand needs to ensure
21
that the indirect pointer it passes will stay valid for the duration of
22
the transaction. Ensure this by keeping a strong reference to the BDS
23
whose &bs->backing or &bs->file it passes to bdrv_replace_child_tran(),
24
and giving up that reference only in the transaction .clean() handler.
25
26
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
27
Message-Id: <20211111120829.81329-9-hreitz@redhat.com>
28
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
29
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
10
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
30
---
11
---
31
block.c | 83 ++++++++++++++++++++++++++++++++++++++++++++++++++-------
12
hw/block/virtio-blk.c | 27 +++++++++++++++++++++------
32
1 file changed, 73 insertions(+), 10 deletions(-)
13
1 file changed, 21 insertions(+), 6 deletions(-)
33
14
34
diff --git a/block.c b/block.c
15
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
35
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
36
--- a/block.c
17
--- a/hw/block/virtio-blk.c
37
+++ b/block.c
18
+++ b/hw/block/virtio-blk.c
38
@@ -XXX,XX +XXX,XX @@ static int bdrv_drv_set_perm(BlockDriverState *bs, uint64_t perm,
19
@@ -XXX,XX +XXX,XX @@ validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList *list,
39
20
* Fill in the AioContext for each virtqueue in the @vq_aio_context array given
40
typedef struct BdrvReplaceChildState {
21
* the iothread-vq-mapping parameter in @iothread_vq_mapping_list.
41
BdrvChild *child;
22
*
42
+ BdrvChild **childp;
23
+ * cleanup_iothread_vq_mapping() must be called to free IOThread object
43
BlockDriverState *old_bs;
24
+ * references after this function returns success.
44
} BdrvReplaceChildState;
25
+ *
45
26
* Returns: %true on success, %false on failure.
46
@@ -XXX,XX +XXX,XX @@ static void bdrv_replace_child_abort(void *opaque)
27
**/
47
BdrvReplaceChildState *s = opaque;
28
static bool apply_iothread_vq_mapping(
48
BlockDriverState *new_bs = s->child->bs;
29
@@ -XXX,XX +XXX,XX @@ static bool apply_iothread_vq_mapping(
49
30
return true;
50
- /* old_bs reference is transparently moved from @s to @s->child */
51
+ /*
52
+ * old_bs reference is transparently moved from @s to s->child.
53
+ *
54
+ * Pass &s->child here instead of s->childp, because:
55
+ * (1) s->old_bs must be non-NULL, so bdrv_replace_child_noperm() will not
56
+ * modify the BdrvChild * pointer we indirectly pass to it, i.e. it
57
+ * will not modify s->child. From that perspective, it does not matter
58
+ * whether we pass s->childp or &s->child.
59
+ * (TODO: Right now, bdrv_replace_child_noperm() never modifies that
60
+ * pointer anyway (though it will in the future), so at this point it
61
+ * absolutely does not matter whether we pass s->childp or &s->child.)
62
+ * (2) If new_bs is not NULL, s->childp will be NULL. We then cannot use
63
+ * it here.
64
+ * (3) If new_bs is NULL, *s->childp will have been NULLed by
65
+ * bdrv_replace_child_tran()'s bdrv_replace_child_noperm() call, and we
66
+ * must not pass a NULL *s->childp here.
67
+ * (TODO: In its current state, bdrv_replace_child_noperm() will not
68
+ * have NULLed *s->childp, so this does not apply yet. It will in the
69
+ * future.)
70
+ *
71
+ * So whether new_bs was NULL or not, we cannot pass s->childp here; and in
72
+ * any case, there is no reason to pass it anyway.
73
+ */
74
bdrv_replace_child_noperm(&s->child, s->old_bs);
75
bdrv_unref(new_bs);
76
}
31
}
77
@@ -XXX,XX +XXX,XX @@ static TransactionActionDrv bdrv_replace_child_drv = {
32
78
* Note: real unref of old_bs is done only on commit.
33
+/**
79
*
34
+ * cleanup_iothread_vq_mapping:
80
* The function doesn't update permissions, caller is responsible for this.
35
+ * @list: The mapping of virtqueues to IOThreads.
81
+ *
36
+ *
82
+ * Note that if new_bs == NULL, @childp is stored in a state object attached
37
+ * Release IOThread object references that were acquired by
83
+ * to @tran, so that the old child can be reinstated in the abort handler.
38
+ * apply_iothread_vq_mapping().
84
+ * Therefore, if @new_bs can be NULL, @childp must stay valid until the
39
+ */
85
+ * transaction is committed or aborted.
40
+static void cleanup_iothread_vq_mapping(IOThreadVirtQueueMappingList *list)
86
+ *
87
+ * (TODO: The reinstating does not happen yet, but it will once
88
+ * bdrv_replace_child_noperm() NULLs *childp when new_bs is NULL.)
89
*/
90
-static void bdrv_replace_child_tran(BdrvChild *child, BlockDriverState *new_bs,
91
+static void bdrv_replace_child_tran(BdrvChild **childp,
92
+ BlockDriverState *new_bs,
93
Transaction *tran)
94
{
95
BdrvReplaceChildState *s = g_new(BdrvReplaceChildState, 1);
96
*s = (BdrvReplaceChildState) {
97
- .child = child,
98
- .old_bs = child->bs,
99
+ .child = *childp,
100
+ .childp = new_bs == NULL ? childp : NULL,
101
+ .old_bs = (*childp)->bs,
102
};
103
tran_add(tran, &bdrv_replace_child_drv, s);
104
105
if (new_bs) {
106
bdrv_ref(new_bs);
107
}
108
- bdrv_replace_child_noperm(&child, new_bs);
109
- /* old_bs reference is transparently moved from @child to @s */
110
+ bdrv_replace_child_noperm(childp, new_bs);
111
+ /* old_bs reference is transparently moved from *childp to @s */
112
}
113
114
/*
115
@@ -XXX,XX +XXX,XX @@ static bool should_update_child(BdrvChild *c, BlockDriverState *to)
116
117
typedef struct BdrvRemoveFilterOrCowChild {
118
BdrvChild *child;
119
+ BlockDriverState *bs;
120
bool is_backing;
121
} BdrvRemoveFilterOrCowChild;
122
123
@@ -XXX,XX +XXX,XX @@ static void bdrv_remove_filter_or_cow_child_commit(void *opaque)
124
bdrv_child_free(s->child);
125
}
126
127
+static void bdrv_remove_filter_or_cow_child_clean(void *opaque)
128
+{
41
+{
129
+ BdrvRemoveFilterOrCowChild *s = opaque;
42
+ IOThreadVirtQueueMappingList *node;
130
+
43
+
131
+ /* Drop the bs reference after the transaction is done */
44
+ for (node = list; node; node = node->next) {
132
+ bdrv_unref(s->bs);
45
+ IOThread *iothread = iothread_by_id(node->value->iothread);
133
+ g_free(s);
46
+ object_unref(OBJECT(iothread));
47
+ }
134
+}
48
+}
135
+
49
+
136
static TransactionActionDrv bdrv_remove_filter_or_cow_child_drv = {
50
/* Context: BQL held */
137
.abort = bdrv_remove_filter_or_cow_child_abort,
51
static bool virtio_blk_vq_aio_context_init(VirtIOBlock *s, Error **errp)
138
.commit = bdrv_remove_filter_or_cow_child_commit,
52
{
139
- .clean = g_free,
53
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_vq_aio_context_cleanup(VirtIOBlock *s)
140
+ .clean = bdrv_remove_filter_or_cow_child_clean,
54
assert(!s->ioeventfd_started);
141
};
55
142
56
if (conf->iothread_vq_mapping_list) {
143
/*
57
- IOThreadVirtQueueMappingList *node;
144
@@ -XXX,XX +XXX,XX @@ static void bdrv_remove_file_or_backing_child(BlockDriverState *bs,
58
-
145
return;
59
- for (node = conf->iothread_vq_mapping_list; node; node = node->next) {
60
- IOThread *iothread = iothread_by_id(node->value->iothread);
61
- object_unref(OBJECT(iothread));
62
- }
63
+ cleanup_iothread_vq_mapping(conf->iothread_vq_mapping_list);
146
}
64
}
147
65
148
+ /*
66
if (conf->iothread) {
149
+ * Keep a reference to @bs so @childp will stay valid throughout the
150
+ * transaction (required by bdrv_replace_child_tran())
151
+ */
152
+ bdrv_ref(bs);
153
if (child == bs->backing) {
154
childp = &bs->backing;
155
} else if (child == bs->file) {
156
@@ -XXX,XX +XXX,XX @@ static void bdrv_remove_file_or_backing_child(BlockDriverState *bs,
157
}
158
159
if (child->bs) {
160
- bdrv_replace_child_tran(*childp, NULL, tran);
161
+ bdrv_replace_child_tran(childp, NULL, tran);
162
}
163
164
s = g_new(BdrvRemoveFilterOrCowChild, 1);
165
*s = (BdrvRemoveFilterOrCowChild) {
166
.child = child,
167
+ .bs = bs,
168
.is_backing = (childp == &bs->backing),
169
};
170
tran_add(tran, &bdrv_remove_filter_or_cow_child_drv, s);
171
@@ -XXX,XX +XXX,XX @@ static int bdrv_replace_node_noperm(BlockDriverState *from,
172
{
173
BdrvChild *c, *next;
174
175
+ assert(to != NULL);
176
+
177
QLIST_FOREACH_SAFE(c, &from->parents, next_parent, next) {
178
assert(c->bs == from);
179
if (!should_update_child(c, to)) {
180
@@ -XXX,XX +XXX,XX @@ static int bdrv_replace_node_noperm(BlockDriverState *from,
181
c->name, from->node_name);
182
return -EPERM;
183
}
184
- bdrv_replace_child_tran(c, to, tran);
185
+
186
+ /*
187
+ * Passing a pointer to the local variable @c is fine here, because
188
+ * @to is not NULL, and so &c will not be attached to the transaction.
189
+ */
190
+ bdrv_replace_child_tran(&c, to, tran);
191
}
192
193
return 0;
194
@@ -XXX,XX +XXX,XX @@ static int bdrv_replace_node_noperm(BlockDriverState *from,
195
*
196
* With @detach_subchain=true @to must be in a backing chain of @from. In this
197
* case backing link of the cow-parent of @to is removed.
198
+ *
199
+ * @to must not be NULL.
200
*/
201
static int bdrv_replace_node_common(BlockDriverState *from,
202
BlockDriverState *to,
203
@@ -XXX,XX +XXX,XX @@ static int bdrv_replace_node_common(BlockDriverState *from,
204
BlockDriverState *to_cow_parent = NULL;
205
int ret;
206
207
+ assert(to != NULL);
208
+
209
if (detach_subchain) {
210
assert(bdrv_chain_contains(from, to));
211
assert(from != to);
212
@@ -XXX,XX +XXX,XX @@ out:
213
return ret;
214
}
215
216
+/**
217
+ * Replace node @from by @to (where neither may be NULL).
218
+ */
219
int bdrv_replace_node(BlockDriverState *from, BlockDriverState *to,
220
Error **errp)
221
{
222
@@ -XXX,XX +XXX,XX @@ int bdrv_replace_child_bs(BdrvChild *child, BlockDriverState *new_bs,
223
bdrv_drained_begin(old_bs);
224
bdrv_drained_begin(new_bs);
225
226
- bdrv_replace_child_tran(child, new_bs, tran);
227
+ bdrv_replace_child_tran(&child, new_bs, tran);
228
229
found = g_hash_table_new(NULL, NULL);
230
refresh_list = bdrv_topological_dfs(refresh_list, found, old_bs);
231
--
67
--
232
2.31.1
68
2.48.1
233
234
1
From: Hanna Reitz <hreitz@redhat.com>
1
From: Stefan Hajnoczi <stefanha@redhat.com>
2
2
3
As of a future patch, bdrv_replace_child_tran() will take a BdrvChild **
3
Use noun_verb() function naming instead of verb_noun() because the
4
pointer. Prepare for that by getting such a pointer and using it where
4
former is the most common naming style for APIs. The next commit will
5
applicable, and (dereferenced) as a parameter for
5
move these functions into a header file so that virtio-scsi can call
6
bdrv_replace_child_tran().
6
them.
7
7
8
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
8
Shorten iothread_vq_mapping_apply()'s iothread_vq_mapping_list argument
9
Message-Id: <20211111120829.81329-7-hreitz@redhat.com>
9
to just "list" like in the other functions.
10
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
10
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
13
Message-ID: <20250311132616.1049687-10-stefanha@redhat.com>
11
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
14
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
12
---
15
---
13
block.c | 21 ++++++++++++---------
16
hw/block/virtio-blk.c | 33 ++++++++++++++++-----------------
14
1 file changed, 12 insertions(+), 9 deletions(-)
17
1 file changed, 16 insertions(+), 17 deletions(-)
15
18
16
diff --git a/block.c b/block.c
19
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
17
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
18
--- a/block.c
21
--- a/hw/block/virtio-blk.c
19
+++ b/block.c
22
+++ b/hw/block/virtio-blk.c
20
@@ -XXX,XX +XXX,XX @@ static void bdrv_remove_file_or_backing_child(BlockDriverState *bs,
23
@@ -XXX,XX +XXX,XX @@ static const BlockDevOps virtio_block_ops = {
21
BdrvChild *child,
24
};
22
Transaction *tran)
25
26
static bool
27
-validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList *list,
28
- uint16_t num_queues, Error **errp)
29
+iothread_vq_mapping_validate(IOThreadVirtQueueMappingList *list, uint16_t
30
+ num_queues, Error **errp)
23
{
31
{
24
+ BdrvChild **childp;
32
g_autofree unsigned long *vqs = bitmap_new(num_queues);
25
BdrvRemoveFilterOrCowChild *s;
33
g_autoptr(GHashTable) iothreads =
26
34
@@ -XXX,XX +XXX,XX @@ validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList *list,
27
- assert(child == bs->backing || child == bs->file);
35
}
28
-
36
29
if (!child) {
37
/**
30
return;
38
- * apply_iothread_vq_mapping:
39
- * @iothread_vq_mapping_list: The mapping of virtqueues to IOThreads.
40
+ * iothread_vq_mapping_apply:
41
+ * @list: The mapping of virtqueues to IOThreads.
42
* @vq_aio_context: The array of AioContext pointers to fill in.
43
* @num_queues: The length of @vq_aio_context.
44
* @errp: If an error occurs, a pointer to the area to store the error.
45
*
46
* Fill in the AioContext for each virtqueue in the @vq_aio_context array given
47
- * the iothread-vq-mapping parameter in @iothread_vq_mapping_list.
48
+ * the iothread-vq-mapping parameter in @list.
49
*
50
- * cleanup_iothread_vq_mapping() must be called to free IOThread object
51
+ * iothread_vq_mapping_cleanup() must be called to free IOThread object
52
* references after this function returns success.
53
*
54
* Returns: %true on success, %false on failure.
55
**/
56
-static bool apply_iothread_vq_mapping(
57
- IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
58
+static bool iothread_vq_mapping_apply(
59
+ IOThreadVirtQueueMappingList *list,
60
AioContext **vq_aio_context,
61
uint16_t num_queues,
62
Error **errp)
63
@@ -XXX,XX +XXX,XX @@ static bool apply_iothread_vq_mapping(
64
size_t num_iothreads = 0;
65
size_t cur_iothread = 0;
66
67
- if (!validate_iothread_vq_mapping_list(iothread_vq_mapping_list,
68
- num_queues, errp)) {
69
+ if (!iothread_vq_mapping_validate(list, num_queues, errp)) {
70
return false;
31
}
71
}
32
72
33
+ if (child == bs->backing) {
73
- for (node = iothread_vq_mapping_list; node; node = node->next) {
34
+ childp = &bs->backing;
74
+ for (node = list; node; node = node->next) {
35
+ } else if (child == bs->file) {
75
num_iothreads++;
36
+ childp = &bs->file;
37
+ } else {
38
+ g_assert_not_reached();
39
+ }
40
+
41
if (child->bs) {
42
- bdrv_replace_child_tran(child, NULL, tran);
43
+ bdrv_replace_child_tran(*childp, NULL, tran);
44
}
76
}
45
77
46
s = g_new(BdrvRemoveFilterOrCowChild, 1);
78
- for (node = iothread_vq_mapping_list; node; node = node->next) {
47
*s = (BdrvRemoveFilterOrCowChild) {
79
+ for (node = list; node; node = node->next) {
48
.child = child,
80
IOThread *iothread = iothread_by_id(node->value->iothread);
49
- .is_backing = (child == bs->backing),
81
AioContext *ctx = iothread_get_aio_context(iothread);
50
+ .is_backing = (childp == &bs->backing),
82
51
};
83
@@ -XXX,XX +XXX,XX @@ static bool apply_iothread_vq_mapping(
52
tran_add(tran, &bdrv_remove_filter_or_cow_child_drv, s);
53
54
- if (s->is_backing) {
55
- bs->backing = NULL;
56
- } else {
57
- bs->file = NULL;
58
- }
59
+ *childp = NULL;
60
}
84
}
61
85
62
/*
86
/**
87
- * cleanup_iothread_vq_mapping:
88
+ * iothread_vq_mapping_cleanup:
89
* @list: The mapping of virtqueues to IOThreads.
90
*
91
* Release IOThread object references that were acquired by
92
- * apply_iothread_vq_mapping().
93
+ * iothread_vq_mapping_apply().
94
*/
95
-static void cleanup_iothread_vq_mapping(IOThreadVirtQueueMappingList *list)
96
+static void iothread_vq_mapping_cleanup(IOThreadVirtQueueMappingList *list)
97
{
98
IOThreadVirtQueueMappingList *node;
99
100
@@ -XXX,XX +XXX,XX @@ static bool virtio_blk_vq_aio_context_init(VirtIOBlock *s, Error **errp)
101
s->vq_aio_context = g_new(AioContext *, conf->num_queues);
102
103
if (conf->iothread_vq_mapping_list) {
104
- if (!apply_iothread_vq_mapping(conf->iothread_vq_mapping_list,
105
+ if (!iothread_vq_mapping_apply(conf->iothread_vq_mapping_list,
106
s->vq_aio_context,
107
conf->num_queues,
108
errp)) {
109
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_vq_aio_context_cleanup(VirtIOBlock *s)
110
assert(!s->ioeventfd_started);
111
112
if (conf->iothread_vq_mapping_list) {
113
- cleanup_iothread_vq_mapping(conf->iothread_vq_mapping_list);
114
+ iothread_vq_mapping_cleanup(conf->iothread_vq_mapping_list);
115
}
116
117
if (conf->iothread) {
63
--
118
--
64
2.31.1
119
2.48.1
65
66
New patch
1
From: Stefan Hajnoczi <stefanha@redhat.com>
1
2
3
The code that builds an array of AioContext pointers indexed by the
4
virtqueue is not specific to virtio-blk. virtio-scsi will need to do the
5
same thing, so extract the functions.
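
As a rough usage sketch of the extracted API (hypothetical caller code, not
part of this patch; "MyDevState" and its fields are invented names), a device
fills one AioContext per virtqueue and frees the array again on failure:

    #include "hw/virtio/iothread-vq-mapping.h"

    static bool mydev_init_vq_contexts(MyDevState *s, uint16_t num_queues,
                                       Error **errp)
    {
        s->vq_aio_context = g_new(AioContext *, num_queues);

        if (!iothread_vq_mapping_apply(s->conf.iothread_vq_mapping_list,
                                       s->vq_aio_context, num_queues, errp)) {
            g_free(s->vq_aio_context);
            s->vq_aio_context = NULL;
            return false;
        }
        return true;
    }

    /* On unrealize, drop the IOThread references taken by apply(): */
    /* iothread_vq_mapping_cleanup(s->conf.iothread_vq_mapping_list); */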
6
7
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
8
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
9
Message-ID: <20250311132616.1049687-11-stefanha@redhat.com>
10
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
11
---
12
include/hw/virtio/iothread-vq-mapping.h | 45 ++++++++
13
hw/block/virtio-blk.c | 142 +-----------------------
14
hw/virtio/iothread-vq-mapping.c | 131 ++++++++++++++++++++++
15
hw/virtio/meson.build | 1 +
16
4 files changed, 178 insertions(+), 141 deletions(-)
17
create mode 100644 include/hw/virtio/iothread-vq-mapping.h
18
create mode 100644 hw/virtio/iothread-vq-mapping.c
19
20
diff --git a/include/hw/virtio/iothread-vq-mapping.h b/include/hw/virtio/iothread-vq-mapping.h
21
new file mode 100644
22
index XXXXXXX..XXXXXXX
23
--- /dev/null
24
+++ b/include/hw/virtio/iothread-vq-mapping.h
25
@@ -XXX,XX +XXX,XX @@
26
+/*
27
+ * IOThread Virtqueue Mapping
28
+ *
29
+ * Copyright Red Hat, Inc
30
+ *
31
+ * SPDX-License-Identifier: GPL-2.0-only
32
+ */
33
+
34
+#ifndef HW_VIRTIO_IOTHREAD_VQ_MAPPING_H
35
+#define HW_VIRTIO_IOTHREAD_VQ_MAPPING_H
36
+
37
+#include "qapi/error.h"
38
+#include "qapi/qapi-types-virtio.h"
39
+
40
+/**
41
+ * iothread_vq_mapping_apply:
42
+ * @list: The mapping of virtqueues to IOThreads.
43
+ * @vq_aio_context: The array of AioContext pointers to fill in.
44
+ * @num_queues: The length of @vq_aio_context.
45
+ * @errp: If an error occurs, a pointer to the area to store the error.
46
+ *
47
+ * Fill in the AioContext for each virtqueue in the @vq_aio_context array given
48
+ * the iothread-vq-mapping parameter in @list.
49
+ *
50
+ * iothread_vq_mapping_cleanup() must be called to free IOThread object
51
+ * references after this function returns success.
52
+ *
53
+ * Returns: %true on success, %false on failure.
54
+ **/
55
+bool iothread_vq_mapping_apply(
56
+ IOThreadVirtQueueMappingList *list,
57
+ AioContext **vq_aio_context,
58
+ uint16_t num_queues,
59
+ Error **errp);
60
+
61
+/**
62
+ * iothread_vq_mapping_cleanup:
63
+ * @list: The mapping of virtqueues to IOThreads.
64
+ *
65
+ * Release IOThread object references that were acquired by
66
+ * iothread_vq_mapping_apply().
67
+ */
68
+void iothread_vq_mapping_cleanup(IOThreadVirtQueueMappingList *list);
69
+
70
+#endif /* HW_VIRTIO_IOTHREAD_VQ_MAPPING_H */
71
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
72
index XXXXXXX..XXXXXXX 100644
73
--- a/hw/block/virtio-blk.c
74
+++ b/hw/block/virtio-blk.c
75
@@ -XXX,XX +XXX,XX @@
76
#endif
77
#include "hw/virtio/virtio-bus.h"
78
#include "migration/qemu-file-types.h"
79
+#include "hw/virtio/iothread-vq-mapping.h"
80
#include "hw/virtio/virtio-access.h"
81
#include "hw/virtio/virtio-blk-common.h"
82
#include "qemu/coroutine.h"
83
@@ -XXX,XX +XXX,XX @@ static const BlockDevOps virtio_block_ops = {
84
.drained_end = virtio_blk_drained_end,
85
};
86
87
-static bool
88
-iothread_vq_mapping_validate(IOThreadVirtQueueMappingList *list, uint16_t
89
- num_queues, Error **errp)
90
-{
91
- g_autofree unsigned long *vqs = bitmap_new(num_queues);
92
- g_autoptr(GHashTable) iothreads =
93
- g_hash_table_new(g_str_hash, g_str_equal);
94
-
95
- for (IOThreadVirtQueueMappingList *node = list; node; node = node->next) {
96
- const char *name = node->value->iothread;
97
- uint16List *vq;
98
-
99
- if (!iothread_by_id(name)) {
100
- error_setg(errp, "IOThread \"%s\" object does not exist", name);
101
- return false;
102
- }
103
-
104
- if (!g_hash_table_add(iothreads, (gpointer)name)) {
105
- error_setg(errp,
106
- "duplicate IOThread name \"%s\" in iothread-vq-mapping",
107
- name);
108
- return false;
109
- }
110
-
111
- if (node != list) {
112
- if (!!node->value->vqs != !!list->value->vqs) {
113
- error_setg(errp, "either all items in iothread-vq-mapping "
114
- "must have vqs or none of them must have it");
115
- return false;
116
- }
117
- }
118
-
119
- for (vq = node->value->vqs; vq; vq = vq->next) {
120
- if (vq->value >= num_queues) {
121
- error_setg(errp, "vq index %u for IOThread \"%s\" must be "
122
- "less than num_queues %u in iothread-vq-mapping",
123
- vq->value, name, num_queues);
124
- return false;
125
- }
126
-
127
- if (test_and_set_bit(vq->value, vqs)) {
128
- error_setg(errp, "cannot assign vq %u to IOThread \"%s\" "
129
- "because it is already assigned", vq->value, name);
130
- return false;
131
- }
132
- }
133
- }
134
-
135
- if (list->value->vqs) {
136
- for (uint16_t i = 0; i < num_queues; i++) {
137
- if (!test_bit(i, vqs)) {
138
- error_setg(errp,
139
- "missing vq %u IOThread assignment in iothread-vq-mapping",
140
- i);
141
- return false;
142
- }
143
- }
144
- }
145
-
146
- return true;
147
-}
148
-
149
-/**
150
- * iothread_vq_mapping_apply:
151
- * @list: The mapping of virtqueues to IOThreads.
152
- * @vq_aio_context: The array of AioContext pointers to fill in.
153
- * @num_queues: The length of @vq_aio_context.
154
- * @errp: If an error occurs, a pointer to the area to store the error.
155
- *
156
- * Fill in the AioContext for each virtqueue in the @vq_aio_context array given
157
- * the iothread-vq-mapping parameter in @list.
158
- *
159
- * iothread_vq_mapping_cleanup() must be called to free IOThread object
160
- * references after this function returns success.
161
- *
162
- * Returns: %true on success, %false on failure.
163
- **/
164
-static bool iothread_vq_mapping_apply(
165
- IOThreadVirtQueueMappingList *list,
166
- AioContext **vq_aio_context,
167
- uint16_t num_queues,
168
- Error **errp)
169
-{
170
- IOThreadVirtQueueMappingList *node;
171
- size_t num_iothreads = 0;
172
- size_t cur_iothread = 0;
173
-
174
- if (!iothread_vq_mapping_validate(list, num_queues, errp)) {
175
- return false;
176
- }
177
-
178
- for (node = list; node; node = node->next) {
179
- num_iothreads++;
180
- }
181
-
182
- for (node = list; node; node = node->next) {
183
- IOThread *iothread = iothread_by_id(node->value->iothread);
184
- AioContext *ctx = iothread_get_aio_context(iothread);
185
-
186
- /* Released in virtio_blk_vq_aio_context_cleanup() */
187
- object_ref(OBJECT(iothread));
188
-
189
- if (node->value->vqs) {
190
- uint16List *vq;
191
-
192
- /* Explicit vq:IOThread assignment */
193
- for (vq = node->value->vqs; vq; vq = vq->next) {
194
- assert(vq->value < num_queues);
195
- vq_aio_context[vq->value] = ctx;
196
- }
197
- } else {
198
- /* Round-robin vq:IOThread assignment */
199
- for (unsigned i = cur_iothread; i < num_queues;
200
- i += num_iothreads) {
201
- vq_aio_context[i] = ctx;
202
- }
203
- }
204
-
205
- cur_iothread++;
206
- }
207
-
208
- return true;
209
-}
210
-
211
-/**
212
- * iothread_vq_mapping_cleanup:
213
- * @list: The mapping of virtqueues to IOThreads.
214
- *
215
- * Release IOThread object references that were acquired by
216
- * iothread_vq_mapping_apply().
217
- */
218
-static void iothread_vq_mapping_cleanup(IOThreadVirtQueueMappingList *list)
219
-{
220
- IOThreadVirtQueueMappingList *node;
221
-
222
- for (node = list; node; node = node->next) {
223
- IOThread *iothread = iothread_by_id(node->value->iothread);
224
- object_unref(OBJECT(iothread));
225
- }
226
-}
227
-
228
/* Context: BQL held */
229
static bool virtio_blk_vq_aio_context_init(VirtIOBlock *s, Error **errp)
230
{
231
diff --git a/hw/virtio/iothread-vq-mapping.c b/hw/virtio/iothread-vq-mapping.c
232
new file mode 100644
233
index XXXXXXX..XXXXXXX
234
--- /dev/null
235
+++ b/hw/virtio/iothread-vq-mapping.c
236
@@ -XXX,XX +XXX,XX @@
237
+/*
238
+ * IOThread Virtqueue Mapping
239
+ *
240
+ * Copyright Red Hat, Inc
241
+ *
242
+ * SPDX-License-Identifier: GPL-2.0-only
243
+ */
244
+
245
+#include "qemu/osdep.h"
246
+#include "system/iothread.h"
247
+#include "hw/virtio/iothread-vq-mapping.h"
248
+
249
+static bool
250
+iothread_vq_mapping_validate(IOThreadVirtQueueMappingList *list, uint16_t
251
+ num_queues, Error **errp)
252
+{
253
+ g_autofree unsigned long *vqs = bitmap_new(num_queues);
254
+ g_autoptr(GHashTable) iothreads =
255
+ g_hash_table_new(g_str_hash, g_str_equal);
256
+
257
+ for (IOThreadVirtQueueMappingList *node = list; node; node = node->next) {
258
+ const char *name = node->value->iothread;
259
+ uint16List *vq;
260
+
261
+ if (!iothread_by_id(name)) {
262
+ error_setg(errp, "IOThread \"%s\" object does not exist", name);
263
+ return false;
264
+ }
265
+
266
+ if (!g_hash_table_add(iothreads, (gpointer)name)) {
267
+ error_setg(errp,
268
+ "duplicate IOThread name \"%s\" in iothread-vq-mapping",
269
+ name);
270
+ return false;
271
+ }
272
+
273
+ if (node != list) {
274
+ if (!!node->value->vqs != !!list->value->vqs) {
275
+ error_setg(errp, "either all items in iothread-vq-mapping "
276
+ "must have vqs or none of them must have it");
277
+ return false;
278
+ }
279
+ }
280
+
281
+ for (vq = node->value->vqs; vq; vq = vq->next) {
282
+ if (vq->value >= num_queues) {
283
+ error_setg(errp, "vq index %u for IOThread \"%s\" must be "
284
+ "less than num_queues %u in iothread-vq-mapping",
285
+ vq->value, name, num_queues);
286
+ return false;
287
+ }
288
+
289
+ if (test_and_set_bit(vq->value, vqs)) {
290
+ error_setg(errp, "cannot assign vq %u to IOThread \"%s\" "
291
+ "because it is already assigned", vq->value, name);
292
+ return false;
293
+ }
294
+ }
295
+ }
296
+
297
+ if (list->value->vqs) {
298
+ for (uint16_t i = 0; i < num_queues; i++) {
299
+ if (!test_bit(i, vqs)) {
300
+ error_setg(errp,
301
+ "missing vq %u IOThread assignment in iothread-vq-mapping",
302
+ i);
303
+ return false;
304
+ }
305
+ }
306
+ }
307
+
308
+ return true;
309
+}
310
+
311
+bool iothread_vq_mapping_apply(
312
+ IOThreadVirtQueueMappingList *list,
313
+ AioContext **vq_aio_context,
314
+ uint16_t num_queues,
315
+ Error **errp)
316
+{
317
+ IOThreadVirtQueueMappingList *node;
318
+ size_t num_iothreads = 0;
319
+ size_t cur_iothread = 0;
320
+
321
+ if (!iothread_vq_mapping_validate(list, num_queues, errp)) {
322
+ return false;
323
+ }
324
+
325
+ for (node = list; node; node = node->next) {
326
+ num_iothreads++;
327
+ }
328
+
329
+ for (node = list; node; node = node->next) {
330
+ IOThread *iothread = iothread_by_id(node->value->iothread);
331
+ AioContext *ctx = iothread_get_aio_context(iothread);
332
+
333
+ /* Released in virtio_blk_vq_aio_context_cleanup() */
334
+ object_ref(OBJECT(iothread));
335
+
336
+ if (node->value->vqs) {
337
+ uint16List *vq;
338
+
339
+ /* Explicit vq:IOThread assignment */
340
+ for (vq = node->value->vqs; vq; vq = vq->next) {
341
+ assert(vq->value < num_queues);
342
+ vq_aio_context[vq->value] = ctx;
343
+ }
344
+ } else {
345
+ /* Round-robin vq:IOThread assignment */
346
+ for (unsigned i = cur_iothread; i < num_queues;
347
+ i += num_iothreads) {
348
+ vq_aio_context[i] = ctx;
349
+ }
350
+ }
351
+
352
+ cur_iothread++;
353
+ }
354
+
355
+ return true;
356
+}
357
+
358
+void iothread_vq_mapping_cleanup(IOThreadVirtQueueMappingList *list)
359
+{
360
+ IOThreadVirtQueueMappingList *node;
361
+
362
+ for (node = list; node; node = node->next) {
363
+ IOThread *iothread = iothread_by_id(node->value->iothread);
364
+ object_unref(OBJECT(iothread));
365
+ }
366
+}
367
+
368
diff --git a/hw/virtio/meson.build b/hw/virtio/meson.build
369
index XXXXXXX..XXXXXXX 100644
370
--- a/hw/virtio/meson.build
371
+++ b/hw/virtio/meson.build
372
@@ -XXX,XX +XXX,XX @@
373
system_virtio_ss = ss.source_set()
374
system_virtio_ss.add(files('virtio-bus.c'))
375
+system_virtio_ss.add(files('iothread-vq-mapping.c'))
376
system_virtio_ss.add(when: 'CONFIG_VIRTIO_PCI', if_true: files('virtio-pci.c'))
377
system_virtio_ss.add(when: 'CONFIG_VIRTIO_MMIO', if_true: files('virtio-mmio.c'))
378
system_virtio_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('virtio-crypto.c'))
379
--
380
2.48.1
1
From: Hanna Reitz <hreitz@redhat.com>
1
From: Stefan Hajnoczi <stefanha@redhat.com>
2
2
3
bdrv_cor_filter_drop() modifies the block graph. That means that other
3
Allow virtio-scsi virtqueues to be assigned to different IOThreads. This
4
parties can also modify the block graph before it returns. Therefore,
4
makes it possible to take advantage of host multi-queue block layer
5
we cannot assume that the result of a graph traversal we did before
5
scalability by assigning virtqueues that have affinity with vCPUs to
6
remains valid afterwards.
6
different IOThreads that have affinity with host CPUs. The same feature
7
was introduced for virtio-blk in the past:
8
https://developers.redhat.com/articles/2024/09/05/scaling-virtio-blk-disk-io-iothread-virtqueue-mapping
7
9
8
We should thus fetch `base` and `unfiltered_base` afterwards instead of
10
Here are fio randread 4k iodepth=64 results from a 4 vCPU guest with an
9
before.
11
Intel P4800X SSD:
12
iothreads  IOPS
---------  ------
        1  189576
        2  312698
        4  346744
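
As a usage illustration only (the IOThread names here are invented, not taken
from the patch), a minimal round-robin mapping across two IOThreads looks
like:

    -object iothread,id=iot0 -object iothread,id=iot1 \
    -device '{"driver":"virtio-scsi-pci","id":"scsi0",
              "iothread-vq-mapping":[{"iothread":"iot0"},
                                     {"iothread":"iot1"}]}'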
10
17
11
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
18
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
19
Message-ID: <20250311132616.1049687-12-stefanha@redhat.com>
13
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
14
Message-Id: <20211111120829.81329-2-hreitz@redhat.com>
15
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
20
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
16
---
21
---
17
block/stream.c | 7 +++++--
22
include/hw/virtio/virtio-scsi.h | 5 +-
18
1 file changed, 5 insertions(+), 2 deletions(-)
23
hw/scsi/virtio-scsi-dataplane.c | 90 ++++++++++++++++++++++++---------
24
hw/scsi/virtio-scsi.c | 63 ++++++++++++++---------
25
3 files changed, 107 insertions(+), 51 deletions(-)
19
26
20
diff --git a/block/stream.c b/block/stream.c
27
diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h
21
index XXXXXXX..XXXXXXX 100644
28
index XXXXXXX..XXXXXXX 100644
22
--- a/block/stream.c
29
--- a/include/hw/virtio/virtio-scsi.h
23
+++ b/block/stream.c
30
+++ b/include/hw/virtio/virtio-scsi.h
24
@@ -XXX,XX +XXX,XX @@ static int stream_prepare(Job *job)
31
@@ -XXX,XX +XXX,XX @@
25
{
32
#include "hw/virtio/virtio.h"
26
StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
33
#include "hw/scsi/scsi.h"
27
BlockDriverState *unfiltered_bs = bdrv_skip_filters(s->target_bs);
34
#include "chardev/char-fe.h"
28
- BlockDriverState *base = bdrv_filter_or_cow_bs(s->above_base);
35
+#include "qapi/qapi-types-virtio.h"
29
- BlockDriverState *unfiltered_base = bdrv_skip_filters(base);
36
#include "system/iothread.h"
30
+ BlockDriverState *base;
37
31
+ BlockDriverState *unfiltered_base;
38
#define TYPE_VIRTIO_SCSI_COMMON "virtio-scsi-common"
32
Error *local_err = NULL;
39
@@ -XXX,XX +XXX,XX @@ struct VirtIOSCSIConf {
40
CharBackend chardev;
41
uint32_t boot_tpgt;
42
IOThread *iothread;
43
+ IOThreadVirtQueueMappingList *iothread_vq_mapping_list;
44
};
45
46
struct VirtIOSCSI;
47
@@ -XXX,XX +XXX,XX @@ struct VirtIOSCSI {
48
QTAILQ_HEAD(, VirtIOSCSIReq) tmf_bh_list;
49
50
/* Fields for dataplane below */
51
- AioContext *ctx; /* one iothread per virtio-scsi-pci for now */
52
+ AioContext **vq_aio_context; /* per-virtqueue AioContext pointer */
53
54
bool dataplane_started;
55
bool dataplane_starting;
56
@@ -XXX,XX +XXX,XX @@ void virtio_scsi_common_realize(DeviceState *dev,
57
void virtio_scsi_common_unrealize(DeviceState *dev);
58
59
void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp);
60
+void virtio_scsi_dataplane_cleanup(VirtIOSCSI *s);
61
int virtio_scsi_dataplane_start(VirtIODevice *s);
62
void virtio_scsi_dataplane_stop(VirtIODevice *s);
63
64
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
65
index XXXXXXX..XXXXXXX 100644
66
--- a/hw/scsi/virtio-scsi-dataplane.c
67
+++ b/hw/scsi/virtio-scsi-dataplane.c
68
@@ -XXX,XX +XXX,XX @@
69
#include "system/block-backend.h"
70
#include "hw/scsi/scsi.h"
71
#include "scsi/constants.h"
72
+#include "hw/virtio/iothread-vq-mapping.h"
73
#include "hw/virtio/virtio-bus.h"
74
75
/* Context: BQL held */
76
@@ -XXX,XX +XXX,XX @@ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
77
VirtIODevice *vdev = VIRTIO_DEVICE(s);
78
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
79
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
80
+ uint16_t num_vqs = vs->conf.num_queues + VIRTIO_SCSI_VQ_NUM_FIXED;
81
82
- if (vs->conf.iothread) {
83
+ if (vs->conf.iothread && vs->conf.iothread_vq_mapping_list) {
84
+ error_setg(errp,
85
+ "iothread and iothread-vq-mapping properties cannot be set "
86
+ "at the same time");
87
+ return;
88
+ }
89
+
90
+ if (vs->conf.iothread || vs->conf.iothread_vq_mapping_list) {
91
if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
92
error_setg(errp,
93
"device is incompatible with iothread "
94
@@ -XXX,XX +XXX,XX @@ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
95
error_setg(errp, "ioeventfd is required for iothread");
96
return;
97
}
98
- s->ctx = iothread_get_aio_context(vs->conf.iothread);
99
- } else {
100
- if (!virtio_device_ioeventfd_enabled(vdev)) {
101
+ }
102
+
103
+ s->vq_aio_context = g_new(AioContext *, num_vqs);
104
+
105
+ if (vs->conf.iothread_vq_mapping_list) {
106
+ if (!iothread_vq_mapping_apply(vs->conf.iothread_vq_mapping_list,
107
+ s->vq_aio_context, num_vqs, errp)) {
108
+ g_free(s->vq_aio_context);
109
+ s->vq_aio_context = NULL;
110
return;
111
}
112
- s->ctx = qemu_get_aio_context();
113
+ } else if (vs->conf.iothread) {
114
+ AioContext *ctx = iothread_get_aio_context(vs->conf.iothread);
115
+ for (uint16_t i = 0; i < num_vqs; i++) {
116
+ s->vq_aio_context[i] = ctx;
117
+ }
118
+
119
+ /* Released in virtio_scsi_dataplane_cleanup() */
120
+ object_ref(OBJECT(vs->conf.iothread));
121
+ } else {
122
+ AioContext *ctx = qemu_get_aio_context();
123
+ for (unsigned i = 0; i < num_vqs; i++) {
124
+ s->vq_aio_context[i] = ctx;
125
+ }
126
+ }
127
+}
128
+
129
+/* Context: BQL held */
130
+void virtio_scsi_dataplane_cleanup(VirtIOSCSI *s)
131
+{
132
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
133
+
134
+ if (vs->conf.iothread_vq_mapping_list) {
135
+ iothread_vq_mapping_cleanup(vs->conf.iothread_vq_mapping_list);
136
}
137
+
138
+ if (vs->conf.iothread) {
139
+ object_unref(OBJECT(vs->conf.iothread));
140
+ }
141
+
142
+ g_free(s->vq_aio_context);
143
+ s->vq_aio_context = NULL;
144
}
145
146
static int virtio_scsi_set_host_notifier(VirtIOSCSI *s, VirtQueue *vq, int n)
147
@@ -XXX,XX +XXX,XX @@ static int virtio_scsi_set_host_notifier(VirtIOSCSI *s, VirtQueue *vq, int n)
148
}
149
150
/* Context: BH in IOThread */
151
-static void virtio_scsi_dataplane_stop_bh(void *opaque)
152
+static void virtio_scsi_dataplane_stop_vq_bh(void *opaque)
153
{
154
- VirtIOSCSI *s = opaque;
155
- VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
156
+ AioContext *ctx = qemu_get_current_aio_context();
157
+ VirtQueue *vq = opaque;
158
EventNotifier *host_notifier;
159
- int i;
160
161
- virtio_queue_aio_detach_host_notifier(vs->ctrl_vq, s->ctx);
162
- host_notifier = virtio_queue_get_host_notifier(vs->ctrl_vq);
163
+ virtio_queue_aio_detach_host_notifier(vq, ctx);
164
+ host_notifier = virtio_queue_get_host_notifier(vq);
165
166
/*
167
* Test and clear notifier after disabling event, in case poll callback
168
* didn't have time to run.
169
*/
170
virtio_queue_host_notifier_read(host_notifier);
171
-
172
- virtio_queue_aio_detach_host_notifier(vs->event_vq, s->ctx);
173
- host_notifier = virtio_queue_get_host_notifier(vs->event_vq);
174
- virtio_queue_host_notifier_read(host_notifier);
175
-
176
- for (i = 0; i < vs->conf.num_queues; i++) {
177
- virtio_queue_aio_detach_host_notifier(vs->cmd_vqs[i], s->ctx);
178
- host_notifier = virtio_queue_get_host_notifier(vs->cmd_vqs[i]);
179
- virtio_queue_host_notifier_read(host_notifier);
180
- }
181
}
182
183
/* Context: BQL held */
184
@@ -XXX,XX +XXX,XX @@ int virtio_scsi_dataplane_start(VirtIODevice *vdev)
185
smp_wmb(); /* paired with aio_notify_accept() */
186
187
if (s->bus.drain_count == 0) {
188
- virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, s->ctx);
189
- virtio_queue_aio_attach_host_notifier_no_poll(vs->event_vq, s->ctx);
190
+ virtio_queue_aio_attach_host_notifier(vs->ctrl_vq,
191
+ s->vq_aio_context[0]);
192
+ virtio_queue_aio_attach_host_notifier_no_poll(vs->event_vq,
193
+ s->vq_aio_context[1]);
194
195
for (i = 0; i < vs->conf.num_queues; i++) {
196
- virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], s->ctx);
197
+ AioContext *ctx = s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED + i];
198
+ virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], ctx);
199
}
200
}
201
return 0;
202
@@ -XXX,XX +XXX,XX @@ void virtio_scsi_dataplane_stop(VirtIODevice *vdev)
203
s->dataplane_stopping = true;
204
205
if (s->bus.drain_count == 0) {
206
- aio_wait_bh_oneshot(s->ctx, virtio_scsi_dataplane_stop_bh, s);
207
+ for (i = 0; i < vs->conf.num_queues + VIRTIO_SCSI_VQ_NUM_FIXED; i++) {
208
+ VirtQueue *vq = virtio_get_queue(&vs->parent_obj, i);
209
+ AioContext *ctx = s->vq_aio_context[i];
210
+ aio_wait_bh_oneshot(ctx, virtio_scsi_dataplane_stop_vq_bh, vq);
211
+ }
212
}
213
214
blk_drain_all(); /* ensure there are no in-flight requests */
215
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
216
index XXXXXXX..XXXXXXX 100644
217
--- a/hw/scsi/virtio-scsi.c
218
+++ b/hw/scsi/virtio-scsi.c
219
@@ -XXX,XX +XXX,XX @@
220
#include "hw/qdev-properties.h"
221
#include "hw/scsi/scsi.h"
222
#include "scsi/constants.h"
223
+#include "hw/virtio/iothread-vq-mapping.h"
224
#include "hw/virtio/virtio-bus.h"
225
#include "hw/virtio/virtio-access.h"
226
#include "trace.h"
227
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)
228
g_free(n);
229
}
230
231
-static inline void virtio_scsi_ctx_check(VirtIOSCSI *s, SCSIDevice *d)
232
-{
233
- if (s->dataplane_started && d && blk_is_available(d->conf.blk)) {
234
- assert(blk_get_aio_context(d->conf.blk) == s->ctx);
235
- }
236
-}
237
-
238
static void virtio_scsi_do_one_tmf_bh(VirtIOSCSIReq *req)
239
{
240
VirtIOSCSI *s = req->dev;
241
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_flush_defer_tmf_to_aio_context(VirtIOSCSI *s)
242
243
assert(!s->dataplane_started);
244
245
- if (s->ctx) {
246
+ for (uint32_t i = 0; i < s->parent_obj.conf.num_queues; i++) {
247
+ AioContext *ctx = s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED + i];
248
+
249
/* Our BH only runs after previously scheduled BHs */
250
- aio_wait_bh_oneshot(s->ctx, dummy_bh, NULL);
251
+ aio_wait_bh_oneshot(ctx, dummy_bh, NULL);
252
}
253
}
254
255
@@ -XXX,XX +XXX,XX @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
256
AioContext *ctx;
33
int ret = 0;
257
int ret = 0;
34
258
35
@@ -XXX,XX +XXX,XX @@ static int stream_prepare(Job *job)
259
- virtio_scsi_ctx_check(s, d);
36
bdrv_cor_filter_drop(s->cor_filter_bs);
260
/* Here VIRTIO_SCSI_S_OK means "FUNCTION COMPLETE". */
37
s->cor_filter_bs = NULL;
261
req->resp.tmf.response = VIRTIO_SCSI_S_OK;
38
262
39
+ base = bdrv_filter_or_cow_bs(s->above_base);
263
@@ -XXX,XX +XXX,XX @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
40
+ unfiltered_base = bdrv_skip_filters(base);
264
41
+
265
case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
42
if (bdrv_cow_child(unfiltered_bs)) {
266
case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET: {
43
const char *base_id = NULL, *base_fmt = NULL;
267
+ g_autoptr(GHashTable) aio_contexts = g_hash_table_new(NULL, NULL);
44
if (unfiltered_base) {
268
+
269
if (!d) {
270
goto fail;
271
}
272
@@ -XXX,XX +XXX,XX @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
273
274
qatomic_inc(&req->remaining);
275
276
- ctx = s->ctx ?: qemu_get_aio_context();
277
- virtio_scsi_defer_tmf_to_aio_context(req, ctx);
278
+ for (uint32_t i = 0; i < s->parent_obj.conf.num_queues; i++) {
279
+ ctx = s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED + i];
280
+
281
+ if (!g_hash_table_add(aio_contexts, ctx)) {
282
+ continue; /* skip previously added AioContext */
283
+ }
284
+
285
+ virtio_scsi_defer_tmf_to_aio_context(req, ctx);
286
+ }
287
288
virtio_scsi_tmf_dec_remaining(req);
289
ret = -EINPROGRESS;
290
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
291
*/
292
static bool virtio_scsi_defer_to_dataplane(VirtIOSCSI *s)
293
{
294
- if (!s->ctx || s->dataplane_started) {
295
+ if (s->dataplane_started) {
296
return false;
297
}
298
+ if (s->vq_aio_context[0] == qemu_get_aio_context()) {
299
+ return false; /* not using IOThreads */
300
+ }
301
302
virtio_device_start_ioeventfd(&s->parent_obj.parent_obj);
303
return !s->dataplane_fenced;
304
@@ -XXX,XX +XXX,XX @@ static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
305
virtio_scsi_complete_cmd_req(req);
306
return -ENOENT;
307
}
308
- virtio_scsi_ctx_check(s, d);
309
req->sreq = scsi_req_new(d, req->req.cmd.tag,
310
virtio_scsi_get_lun(req->req.cmd.lun),
311
req->req.cmd.cdb, vs->cdb_size, req);
312
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
313
{
314
VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
315
VirtIOSCSI *s = VIRTIO_SCSI(vdev);
316
+ AioContext *ctx = s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED];
317
SCSIDevice *sd = SCSI_DEVICE(dev);
318
- int ret;
319
320
- if (s->ctx && !s->dataplane_fenced) {
321
- ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp);
322
- if (ret < 0) {
323
- return;
324
- }
325
+ if (ctx != qemu_get_aio_context() && !s->dataplane_fenced) {
326
+ /*
327
+ * Try to make the BlockBackend's AioContext match ours. Ignore failure
328
+ * because I/O will still work although block jobs and other users
329
+ * might be slower when multiple AioContexts use a BlockBackend.
330
+ */
331
+ blk_set_aio_context(sd->conf.blk, ctx, errp);
332
}
333
334
if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
335
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
336
337
qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
338
339
- if (s->ctx) {
340
+ if (s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED] != qemu_get_aio_context()) {
341
/* If other users keep the BlockBackend in the iothread, that's ok */
342
blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL);
343
}
344
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_drained_begin(SCSIBus *bus)
345
346
for (uint32_t i = 0; i < total_queues; i++) {
347
VirtQueue *vq = virtio_get_queue(vdev, i);
348
- virtio_queue_aio_detach_host_notifier(vq, s->ctx);
349
+ virtio_queue_aio_detach_host_notifier(vq, s->vq_aio_context[i]);
350
}
351
}
352
353
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_drained_end(SCSIBus *bus)
354
355
for (uint32_t i = 0; i < total_queues; i++) {
356
VirtQueue *vq = virtio_get_queue(vdev, i);
357
+ AioContext *ctx = s->vq_aio_context[i];
358
+
359
if (vq == vs->event_vq) {
360
- virtio_queue_aio_attach_host_notifier_no_poll(vq, s->ctx);
361
+ virtio_queue_aio_attach_host_notifier_no_poll(vq, ctx);
362
} else {
363
- virtio_queue_aio_attach_host_notifier(vq, s->ctx);
364
+ virtio_queue_aio_attach_host_notifier(vq, ctx);
365
}
366
}
367
}
368
@@ -XXX,XX +XXX,XX @@ void virtio_scsi_common_unrealize(DeviceState *dev)
369
virtio_cleanup(vdev);
370
}
371
372
+/* main loop */
373
static void virtio_scsi_device_unrealize(DeviceState *dev)
374
{
375
VirtIOSCSI *s = VIRTIO_SCSI(dev);
376
377
virtio_scsi_reset_tmf_bh(s);
378
-
379
+ virtio_scsi_dataplane_cleanup(s);
380
qbus_set_hotplug_handler(BUS(&s->bus), NULL);
381
virtio_scsi_common_unrealize(dev);
382
qemu_mutex_destroy(&s->tmf_bh_lock);
383
@@ -XXX,XX +XXX,XX @@ static const Property virtio_scsi_properties[] = {
384
VIRTIO_SCSI_F_CHANGE, true),
385
DEFINE_PROP_LINK("iothread", VirtIOSCSI, parent_obj.conf.iothread,
386
TYPE_IOTHREAD, IOThread *),
387
+ DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST("iothread-vq-mapping", VirtIOSCSI,
388
+ parent_obj.conf.iothread_vq_mapping_list),
389
};
390
391
static const VMStateDescription vmstate_virtio_scsi = {
45
--
392
--
46
2.31.1
393
2.48.1
47
48
New patch
1
1
From: Stefan Hajnoczi <stefanha@redhat.com>
2
3
Previously the ctrl virtqueue was handled in the AioContext where SCSI
4
requests are processed. When IOThread Virtqueue Mapping was added, things
5
became more complicated because SCSI requests could run in other
6
AioContexts.
7
8
Simplify by handling the ctrl virtqueue in the main loop where reset
9
operations can be performed. Note that BHs are still used for canceling SCSI
10
requests in their AioContexts, but at least the main loop activity
11
doesn't need BHs anymore.
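
For readers unfamiliar with the pattern, a minimal sketch of deferring work
into the AioContext that owns a request (invented helper names; this assumes
QEMU's aio_bh_schedule_oneshot() and the per-request AioContext added earlier
in this series, and is not code from the patch itself):

    static void cancel_request_bh(void *opaque)
    {
        SCSIRequest *r = opaque;

        /* Runs in r->ctx, the AioContext processing this request */
        scsi_req_cancel_async(r, NULL);
    }

    static void defer_cancel(SCSIRequest *r)
    {
        aio_bh_schedule_oneshot(r->ctx, cancel_request_bh, r);
    }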
12
13
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
14
Message-ID: <20250311132616.1049687-13-stefanha@redhat.com>
15
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
16
---
17
include/hw/virtio/virtio-scsi.h | 8 --
18
hw/scsi/virtio-scsi-dataplane.c | 6 ++
19
hw/scsi/virtio-scsi.c | 144 ++++++--------------------------
20
3 files changed, 33 insertions(+), 125 deletions(-)
21
22
diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h
23
index XXXXXXX..XXXXXXX 100644
24
--- a/include/hw/virtio/virtio-scsi.h
25
+++ b/include/hw/virtio/virtio-scsi.h
26
@@ -XXX,XX +XXX,XX @@ struct VirtIOSCSI {
27
28
QemuMutex ctrl_lock; /* protects ctrl_vq */
29
30
- /*
31
- * TMFs deferred to main loop BH. These fields are protected by
32
- * tmf_bh_lock.
33
- */
34
- QemuMutex tmf_bh_lock;
35
- QEMUBH *tmf_bh;
36
- QTAILQ_HEAD(, VirtIOSCSIReq) tmf_bh_list;
37
-
38
/* Fields for dataplane below */
39
AioContext **vq_aio_context; /* per-virtqueue AioContext pointer */
40
41
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
42
index XXXXXXX..XXXXXXX 100644
43
--- a/hw/scsi/virtio-scsi-dataplane.c
44
+++ b/hw/scsi/virtio-scsi-dataplane.c
45
@@ -XXX,XX +XXX,XX @@ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
46
s->vq_aio_context[i] = ctx;
47
}
48
}
49
+
50
+ /*
51
+ * Always handle the ctrl virtqueue in the main loop thread where device
52
+ * resets can be performed.
53
+ */
54
+ s->vq_aio_context[0] = qemu_get_aio_context();
55
}
56
57
/* Context: BQL held */
58
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
59
index XXXXXXX..XXXXXXX 100644
60
--- a/hw/scsi/virtio-scsi.c
61
+++ b/hw/scsi/virtio-scsi.c
62
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)
63
g_free(n);
64
}
65
66
-static void virtio_scsi_do_one_tmf_bh(VirtIOSCSIReq *req)
67
-{
68
- VirtIOSCSI *s = req->dev;
69
- SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun);
70
- BusChild *kid;
71
- int target;
72
-
73
- switch (req->req.tmf.subtype) {
74
- case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
75
- if (!d) {
76
- req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
77
- goto out;
78
- }
79
- if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
80
- req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
81
- goto out;
82
- }
83
- qatomic_inc(&s->resetting);
84
- device_cold_reset(&d->qdev);
85
- qatomic_dec(&s->resetting);
86
- break;
87
-
88
- case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
89
- target = req->req.tmf.lun[1];
90
- qatomic_inc(&s->resetting);
91
-
92
- rcu_read_lock();
93
- QTAILQ_FOREACH_RCU(kid, &s->bus.qbus.children, sibling) {
94
- SCSIDevice *d1 = SCSI_DEVICE(kid->child);
95
- if (d1->channel == 0 && d1->id == target) {
96
- device_cold_reset(&d1->qdev);
97
- }
98
- }
99
- rcu_read_unlock();
100
-
101
- qatomic_dec(&s->resetting);
102
- break;
103
-
104
- default:
105
- g_assert_not_reached();
106
- }
107
-
108
-out:
109
- object_unref(OBJECT(d));
110
- virtio_scsi_complete_req(req, &s->ctrl_lock);
111
-}
112
-
113
-/* Some TMFs must be processed from the main loop thread */
114
-static void virtio_scsi_do_tmf_bh(void *opaque)
115
-{
116
- VirtIOSCSI *s = opaque;
117
- QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
- VirtIOSCSIReq *req;
- VirtIOSCSIReq *tmp;
-
- GLOBAL_STATE_CODE();
-
- WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) {
- QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) {
- QTAILQ_REMOVE(&s->tmf_bh_list, req, next);
- QTAILQ_INSERT_TAIL(&reqs, req, next);
- }
-
- qemu_bh_delete(s->tmf_bh);
- s->tmf_bh = NULL;
- }
-
- QTAILQ_FOREACH_SAFE(req, &reqs, next, tmp) {
- QTAILQ_REMOVE(&reqs, req, next);
- virtio_scsi_do_one_tmf_bh(req);
- }
-}
-
-static void virtio_scsi_reset_tmf_bh(VirtIOSCSI *s)
-{
- VirtIOSCSIReq *req;
- VirtIOSCSIReq *tmp;
-
- GLOBAL_STATE_CODE();
-
- /* Called after ioeventfd has been stopped, so tmf_bh_lock is not needed */
- if (s->tmf_bh) {
- qemu_bh_delete(s->tmf_bh);
- s->tmf_bh = NULL;
- }
-
- QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) {
- QTAILQ_REMOVE(&s->tmf_bh_list, req, next);
-
- /* SAM-6 6.3.2 Hard reset */
- req->resp.tmf.response = VIRTIO_SCSI_S_TARGET_FAILURE;
- virtio_scsi_complete_req(req, &req->dev->ctrl_lock);
- }
-}
-
-static void virtio_scsi_defer_tmf_to_main_loop(VirtIOSCSIReq *req)
-{
- VirtIOSCSI *s = req->dev;
-
- WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) {
- QTAILQ_INSERT_TAIL(&s->tmf_bh_list, req, next);
-
- if (!s->tmf_bh) {
- s->tmf_bh = qemu_bh_new(virtio_scsi_do_tmf_bh, s);
- qemu_bh_schedule(s->tmf_bh);
- }
- }
-}
-
 static void virtio_scsi_tmf_cancel_req(VirtIOSCSIReq *tmf, SCSIRequest *r)
 {
 VirtIOSCSICancelNotifier *notifier;
@@ -XXX,XX +XXX,XX @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
 break;

 case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
- case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
- virtio_scsi_defer_tmf_to_main_loop(req);
- ret = -EINPROGRESS;
+ if (!d) {
+ goto fail;
+ }
+ if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
+ goto incorrect_lun;
+ }
+ qatomic_inc(&s->resetting);
+ device_cold_reset(&d->qdev);
+ qatomic_dec(&s->resetting);
 break;

+ case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET: {
+ BusChild *kid;
+ int target = req->req.tmf.lun[1];
+ qatomic_inc(&s->resetting);
+
+ rcu_read_lock();
+ QTAILQ_FOREACH_RCU(kid, &s->bus.qbus.children, sibling) {
+ SCSIDevice *d1 = SCSI_DEVICE(kid->child);
+ if (d1->channel == 0 && d1->id == target) {
+ device_cold_reset(&d1->qdev);
+ }
+ }
+ rcu_read_unlock();
+
+ qatomic_dec(&s->resetting);
+ break;
+ }
+
 case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
 case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET: {
 g_autoptr(GHashTable) aio_contexts = g_hash_table_new(NULL, NULL);
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_reset(VirtIODevice *vdev)

 assert(!s->dataplane_started);

- virtio_scsi_reset_tmf_bh(s);
 virtio_scsi_flush_defer_tmf_to_aio_context(s);

 qatomic_inc(&s->resetting);
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
 VirtIOSCSI *s = VIRTIO_SCSI(dev);
 Error *err = NULL;

- QTAILQ_INIT(&s->tmf_bh_list);
 qemu_mutex_init(&s->ctrl_lock);
 qemu_mutex_init(&s->event_lock);
- qemu_mutex_init(&s->tmf_bh_lock);

 virtio_scsi_common_realize(dev,
 virtio_scsi_handle_ctrl,
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_device_unrealize(DeviceState *dev)
 {
 VirtIOSCSI *s = VIRTIO_SCSI(dev);

- virtio_scsi_reset_tmf_bh(s);
 virtio_scsi_dataplane_cleanup(s);
 qbus_set_hotplug_handler(BUS(&s->bus), NULL);
 virtio_scsi_common_unrealize(dev);
- qemu_mutex_destroy(&s->tmf_bh_lock);
 qemu_mutex_destroy(&s->event_lock);
 qemu_mutex_destroy(&s->ctrl_lock);
 }
--
2.48.1
New patch
From: Stefan Hajnoczi <stefanha@redhat.com>

Peter Krempa and Kevin Wolf observed that iothread-vq-mapping is
confusing to use because the control and event virtqueues have a fixed
location before the command virtqueues but need to be treated
differently.

Only expose the command virtqueues via iothread-vq-mapping so that the
command-line parameter is intuitive: it controls where SCSI requests are
processed.
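
As an illustration, an invocation might look like the sketch below.
It assumes virtio-scsi accepts the same JSON list syntax as
virtio-blk's existing iothread-vq-mapping parameter, and the iothread
IDs are made up:

    qemu-system-x86_64 \
        -object iothread,id=iot0 \
        -object iothread,id=iot1 \
        -device '{"driver": "virtio-scsi-pci", "num_queues": 4,
                  "iothread-vq-mapping": [{"iothread": "iot0"},
                                          {"iothread": "iot1"}]}'

With no explicit "vqs" lists, the four command virtqueues would be
spread across iot0 and iot1 (following the semantics of the virtio-blk
parameter this mirrors), while the ctrl and event virtqueues stay in
the main loop regardless of the mapping.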

The control virtqueue needs to be hardcoded to the main loop thread for
technical reasons anyway. Kevin also pointed out that it's better to
place the event virtqueue in the main loop thread since its no_poll
behavior would prevent polling if assigned to an IOThread.

This change is its own commit to avoid squashing the previous commit.

Suggested-by: Kevin Wolf <kwolf@redhat.com>
Suggested-by: Peter Krempa <pkrempa@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-ID: <20250311132616.1049687-14-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 hw/scsi/virtio-scsi-dataplane.c | 33 ++++++++++++++++++++-------------
 1 file changed, 20 insertions(+), 13 deletions(-)

diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/scsi/virtio-scsi-dataplane.c
+++ b/hw/scsi/virtio-scsi-dataplane.c
@@ -XXX,XX +XXX,XX @@ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
 VirtIODevice *vdev = VIRTIO_DEVICE(s);
 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- uint16_t num_vqs = vs->conf.num_queues + VIRTIO_SCSI_VQ_NUM_FIXED;

 if (vs->conf.iothread && vs->conf.iothread_vq_mapping_list) {
 error_setg(errp,
@@ -XXX,XX +XXX,XX @@ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
 }
 }

- s->vq_aio_context = g_new(AioContext *, num_vqs);
+ s->vq_aio_context = g_new(AioContext *, vs->conf.num_queues +
+ VIRTIO_SCSI_VQ_NUM_FIXED);
+
+ /*
+ * Handle the ctrl virtqueue in the main loop thread where device resets
+ * can be performed.
+ */
+ s->vq_aio_context[0] = qemu_get_aio_context();
+
+ /*
+ * Handle the event virtqueue in the main loop thread where its no_poll
+ * behavior won't stop IOThread polling.
+ */
+ s->vq_aio_context[1] = qemu_get_aio_context();

 if (vs->conf.iothread_vq_mapping_list) {
 if (!iothread_vq_mapping_apply(vs->conf.iothread_vq_mapping_list,
- s->vq_aio_context, num_vqs, errp)) {
+ &s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED],
+ vs->conf.num_queues, errp)) {
 g_free(s->vq_aio_context);
 s->vq_aio_context = NULL;
 return;
 }
 } else if (vs->conf.iothread) {
 AioContext *ctx = iothread_get_aio_context(vs->conf.iothread);
- for (uint16_t i = 0; i < num_vqs; i++) {
- s->vq_aio_context[i] = ctx;
+ for (uint16_t i = 0; i < vs->conf.num_queues; i++) {
+ s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED + i] = ctx;
 }

 /* Released in virtio_scsi_dataplane_cleanup() */
 object_ref(OBJECT(vs->conf.iothread));
 } else {
 AioContext *ctx = qemu_get_aio_context();
- for (unsigned i = 0; i < num_vqs; i++) {
- s->vq_aio_context[i] = ctx;
+ for (unsigned i = 0; i < vs->conf.num_queues; i++) {
+ s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED + i] = ctx;
 }
 }
-
- /*
- * Always handle the ctrl virtqueue in the main loop thread where device
- * resets can be performed.
- */
- s->vq_aio_context[0] = qemu_get_aio_context();
 }

 /* Context: BQL held */
--
2.48.1
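
As a reading aid, the per-virtqueue AioContext layout this patch
produces can be sketched as a small standalone C program. The value 2
for VIRTIO_SCSI_VQ_NUM_FIXED and the queue count are assumptions made
for the sake of the example:

    /* sketch.c: illustrate the vq index arithmetic after this patch */
    #include <stdio.h>

    #define VIRTIO_SCSI_VQ_NUM_FIXED 2   /* ctrl + event vqs */

    int main(void)
    {
        int num_queues = 4;   /* example value of vs->conf.num_queues */

        printf("vq 0 -> ctrl vq, main loop (device resets run here)\n");
        printf("vq 1 -> event vq, main loop (no_poll)\n");
        for (int i = 0; i < num_queues; i++) {
            /* only the command vqs are covered by iothread-vq-mapping */
            printf("vq %d -> cmd vq %d, per iothread-vq-mapping\n",
                   VIRTIO_SCSI_VQ_NUM_FIXED + i, i);
        }
        return 0;
    }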