Cover letter (old series):

The following changes since commit 58560ad254fbda71d4daa6622d71683190070ee2:

  Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-4.2-20191024' into staging (2019-10-24 16:22:58 +0100)

are available in the Git repository at:

  https://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to d154ef37ff885918fa3e512fd7a8e42870291667:

  yield_until_fd_readable: make it work with any AioContext (2019-10-25 14:38:29 +0200)

----------------------------------------------------------------
Pull request

----------------------------------------------------------------

Dietmar Maurer (1):
  yield_until_fd_readable: make it work with any AioContext

Julia Suvorova (1):
  virtio-blk: Add blk_drain() to virtio_blk_device_unrealize()

 hw/block/virtio-blk.c    | 1 +
 util/qemu-coroutine-io.c | 7 +++++--
 2 files changed, 6 insertions(+), 2 deletions(-)

--
2.21.0

Cover letter (new series):

The following changes since commit 887cba855bb6ff4775256f7968409281350b568c:

  configure: Fix cross-building for RISCV host (v5) (2023-07-11 17:56:09 +0100)

are available in the Git repository at:

  https://gitlab.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 75dcb4d790bbe5327169fd72b185960ca58e2fa6:

  virtio-blk: fix host notifier issues during dataplane start/stop (2023-07-12 15:20:32 -0400)

----------------------------------------------------------------
Pull request

----------------------------------------------------------------

Stefan Hajnoczi (1):
  virtio-blk: fix host notifier issues during dataplane start/stop

 hw/block/dataplane/virtio-blk.c | 67 +++++++++++++++++++--------------
 1 file changed, 38 insertions(+), 29 deletions(-)

--
2.40.1
Deleted patch

From: Julia Suvorova <jusual@redhat.com>

QEMU does not wait for completed I/O requests, assuming that the guest
driver will reset the device before calling unrealize(). This does not
happen on Windows, and QEMU crashes in virtio_notify(), getting the
result of a completed I/O request on a hot-unplugged device.

Signed-off-by: Julia Suvorova <jusual@redhat.com>
Message-Id: <20191018142856.31870-1-jusual@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 hw/block/virtio-blk.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_device_unrealize(DeviceState *dev, Error **errp)
     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
     VirtIOBlock *s = VIRTIO_BLK(dev);
 
+    blk_drain(s->blk);
     virtio_blk_data_plane_destroy(s->dataplane);
     s->dataplane = NULL;
     qemu_del_vm_change_state_handler(s->change);
--
2.21.0
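The drain-before-teardown ordering this patch enforces can be shown with a
small standalone sketch. This is plain C with made-up names (Device,
complete_one, device_unrealize), not QEMU code: teardown must first wait for
in-flight requests so that no completion callback can touch freed state.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for a device with requests in flight. */
typedef struct Device {
    int in_flight;
} Device;

static void complete_one(Device *d)
{
    /* In QEMU, a completion like this ends up calling virtio_notify(). */
    d->in_flight--;
}

static void drain(Device *d)
{
    /* Analogous to blk_drain(s->blk): wait until nothing is in flight. */
    while (d->in_flight > 0) {
        complete_one(d);
    }
}

static void device_unrealize(Device *d)
{
    drain(d);   /* without this, a late completion would dereference
                   device state that free() is about to release */
    free(d);
}

int main(void)
{
    Device *d = malloc(sizeof(*d));
    d->in_flight = 3;   /* guest hot-unplugs with I/O still pending */
    device_unrealize(d);
    printf("teardown completed safely\n");
    return 0;
}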
Patch from the old series:

From: Dietmar Maurer <dietmar@proxmox.com>

The fd handler was registered with the main loop via qemu_set_fd_handler(),
so yield_until_fd_readable() only worked from the main AioContext. Simply
use qemu_get_current_aio_context() instead.

Signed-off-by: Dietmar Maurer <dietmar@proxmox.com>
Message-Id: <20191024045610.9071-1-dietmar@proxmox.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 util/qemu-coroutine-io.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/util/qemu-coroutine-io.c b/util/qemu-coroutine-io.c
index XXXXXXX..XXXXXXX 100644
--- a/util/qemu-coroutine-io.c
+++ b/util/qemu-coroutine-io.c
@@ -XXX,XX +XXX,XX @@ qemu_co_send_recv(int sockfd, void *buf, size_t bytes, bool do_send)
 }
 
 typedef struct {
+    AioContext *ctx;
     Coroutine *co;
     int fd;
 } FDYieldUntilData;
@@ -XXX,XX +XXX,XX @@ typedef struct {
 static void fd_coroutine_enter(void *opaque)
 {
     FDYieldUntilData *data = opaque;
-    qemu_set_fd_handler(data->fd, NULL, NULL, NULL);
+    aio_set_fd_handler(data->ctx, data->fd, false, NULL, NULL, NULL, NULL);
     qemu_coroutine_enter(data->co);
 }
 
@@ -XXX,XX +XXX,XX @@ void coroutine_fn yield_until_fd_readable(int fd)
     FDYieldUntilData data;
 
     assert(qemu_in_coroutine());
+    data.ctx = qemu_get_current_aio_context();
     data.co = qemu_coroutine_self();
     data.fd = fd;
-    qemu_set_fd_handler(fd, fd_coroutine_enter, NULL, &data);
+    aio_set_fd_handler(
+        data.ctx, fd, false, fd_coroutine_enter, NULL, NULL, &data);
     qemu_coroutine_yield();
 }
--
2.21.0
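With this change the handler is registered with the caller's AioContext, so
the helper works from coroutines running in IOThreads, not just the main
loop. A minimal usage sketch follows; the caller name is hypothetical and
the snippet assumes the QEMU tree (it is not compilable on its own):

/* Hypothetical caller, illustrative only: works in whichever AioContext
 * dispatched the coroutine (e.g. an IOThread), not just the main loop. */
static void coroutine_fn wait_for_input(int fd)
{
    yield_until_fd_readable(fd);
    /* fd is now readable; a subsequent read() will not spin on EAGAIN */
}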
Patch from the new series:

The main loop thread can consume 100% CPU when using --device
virtio-blk-pci,iothread=<iothread>. ppoll() constantly returns but
reading virtqueue host notifiers fails with EAGAIN. The file descriptors
are stale and remain registered with the AioContext because of bugs in
the virtio-blk dataplane start/stop code.

The problem is that the dataplane start/stop code involves drain
operations, which call virtio_blk_drained_begin() and
virtio_blk_drained_end() at points where the host notifier is not
operational:
- In virtio_blk_data_plane_start(), blk_set_aio_context() drains after
  vblk->dataplane_started has been set to true but the host notifier has
  not been attached yet.
- In virtio_blk_data_plane_stop(), blk_drain() and blk_set_aio_context()
  drain after the host notifier has already been detached but with
  vblk->dataplane_started still set to true.

I would like to simplify ->ioeventfd_start/stop() to avoid interactions
with drain entirely, but couldn't find a way to do that. Instead, this
patch accepts the fragile nature of the code and reorders it so that
vblk->dataplane_started is false during drain operations. This way the
virtio_blk_drained_begin() and virtio_blk_drained_end() calls don't
touch the host notifier. The result is that
virtio_blk_data_plane_start() and virtio_blk_data_plane_stop() have
complete control over the host notifier and stale file descriptors are
no longer left in the AioContext.

This patch fixes the 100% CPU consumption in the main loop thread and
correctly moves host notifier processing to the IOThread.

Fixes: 1665d9326fd2 ("virtio-blk: implement BlockDevOps->drained_begin()")
Reported-by: Lukáš Doktor <ldoktor@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Tested-by: Lukas Doktor <ldoktor@redhat.com>
Message-id: 20230704151527.193586-1-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 hw/block/dataplane/virtio-blk.c | 67 +++++++++++++++++++--------------
 1 file changed, 38 insertions(+), 29 deletions(-)

diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -XXX,XX +XXX,XX @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
 
     memory_region_transaction_commit();
 
-    /*
-     * These fields are visible to the IOThread so we rely on implicit barriers
-     * in aio_context_acquire() on the write side and aio_notify_accept() on
-     * the read side.
-     */
-    s->starting = false;
-    vblk->dataplane_started = true;
     trace_virtio_blk_data_plane_start(s);
 
     old_context = blk_get_aio_context(s->conf->conf.blk);
@@ -XXX,XX +XXX,XX @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
         event_notifier_set(virtio_queue_get_host_notifier(vq));
     }
 
+    /*
+     * These fields must be visible to the IOThread when it processes the
+     * virtqueue, otherwise it will think dataplane has not started yet.
+     *
+     * Make sure ->dataplane_started is false when blk_set_aio_context() is
+     * called above so that draining does not cause the host notifier to be
+     * detached/attached prematurely.
+     */
+    s->starting = false;
+    vblk->dataplane_started = true;
+    smp_wmb(); /* paired with aio_notify_accept() on the read side */
+
     /* Get this show started by hooking up our callbacks */
     if (!blk_in_drain(s->conf->conf.blk)) {
         aio_context_acquire(s->ctx);
@@ -XXX,XX +XXX,XX @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
  fail_guest_notifiers:
     vblk->dataplane_disabled = true;
     s->starting = false;
-    vblk->dataplane_started = true;
     return -ENOSYS;
 }
 
@@ -XXX,XX +XXX,XX @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev)
         aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s);
     }
 
+    /*
+     * Batch all the host notifiers in a single transaction to avoid
+     * quadratic time complexity in address_space_update_ioeventfds().
+     */
+    memory_region_transaction_begin();
+
+    for (i = 0; i < nvqs; i++) {
+        virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
+    }
+
+    /*
+     * The transaction expects the ioeventfds to be open when it
+     * commits. Do it now, before the cleanup loop.
+     */
+    memory_region_transaction_commit();
+
+    for (i = 0; i < nvqs; i++) {
+        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
+    }
+
+    /*
+     * Set ->dataplane_started to false before draining so that host notifiers
+     * are not detached/attached anymore.
+     */
+    vblk->dataplane_started = false;
+
     aio_context_acquire(s->ctx);
 
     /* Wait for virtio_blk_dma_restart_bh() and in flight I/O to complete */
@@ -XXX,XX +XXX,XX @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev)
 
     aio_context_release(s->ctx);
 
-    /*
-     * Batch all the host notifiers in a single transaction to avoid
-     * quadratic time complexity in address_space_update_ioeventfds().
-     */
-    memory_region_transaction_begin();
-
-    for (i = 0; i < nvqs; i++) {
-        virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
-    }
-
-    /*
-     * The transaction expects the ioeventfds to be open when it
-     * commits. Do it now, before the cleanup loop.
-     */
-    memory_region_transaction_commit();
-
-    for (i = 0; i < nvqs; i++) {
-        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
-    }
-
     qemu_bh_cancel(s->bh);
     notify_guest_bh(s); /* final chance to notify guest */
 
     /* Clean up guest notifier (irq) */
     k->set_guest_notifiers(qbus->parent, nvqs, false);
 
-    vblk->dataplane_started = false;
     s->stopping = false;
 }
--
2.40.1
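The reordering works because the drained callbacks consult
vblk->dataplane_started before touching the host notifier. A rough sketch of
that guard, paraphrased from the commit message rather than copied from
hw/block/virtio-blk.c, so the exact condition may differ:

/* Paraphrased guard, illustrative only. */
static void virtio_blk_drained_begin(void *opaque)
{
    VirtIOBlock *s = opaque;

    if (!s->dataplane || !s->dataplane_started) {
        return;  /* the start/stop code currently owns the host notifier */
    }
    /* otherwise detach the host notifier for the duration of the drain */
}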