When the vhost-user vdpa client restarts, the VQ notifier mmap address and MR
become invalid, so restoring the MR alone is wrong. The vdpa client will set
the VQ notifier again after reconnecting.
This patch removes the VQ notifier restore logic and its related flags.
Fixes: 44866521bd6e ("vhost-user: support registering external host notifiers")
Cc: qemu-stable@nongnu.org
Cc: Yuwei Zhang <zhangyuwei.9149@bytedance.com>
Signed-off-by: Xueming Li <xuemingl@nvidia.com>
---
hw/virtio/vhost-user.c | 19 +------------------
include/hw/virtio/vhost-user.h | 1 -
2 files changed, 1 insertion(+), 19 deletions(-)
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index cfca1b9adc..cc33f4b042 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -1144,19 +1144,6 @@ static int vhost_user_set_vring_num(struct vhost_dev *dev,
return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
}
-static void vhost_user_host_notifier_restore(struct vhost_dev *dev,
- int queue_idx)
-{
- struct vhost_user *u = dev->opaque;
- VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
- VirtIODevice *vdev = dev->vdev;
-
- if (n->addr && !n->set) {
- virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true);
- n->set = true;
- }
-}
-
static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
int queue_idx)
{
@@ -1164,7 +1151,7 @@ static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
VirtIODevice *vdev = dev->vdev;
- if (n->addr && n->set) {
+ if (n->addr) {
virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
if (!qemu_in_vcpu_thread()) { /* Avoid vCPU dead lock. */
/* Wait for VM threads accessing old flatview which contains notifier. */
@@ -1172,15 +1159,12 @@ static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
}
munmap(n->addr, qemu_real_host_page_size);
n->addr = NULL;
- n->set = false;
}
}
static int vhost_user_set_vring_base(struct vhost_dev *dev,
struct vhost_vring_state *ring)
{
- vhost_user_host_notifier_restore(dev, ring->index);
-
return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring);
}
@@ -1540,7 +1524,6 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
}
n->addr = addr;
- n->set = true;
return 0;
}
diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h
index a9abca3288..f6012b2078 100644
--- a/include/hw/virtio/vhost-user.h
+++ b/include/hw/virtio/vhost-user.h
@@ -14,7 +14,6 @@
typedef struct VhostUserHostNotifier {
MemoryRegion mr;
void *addr;
- bool set;
} VhostUserHostNotifier;
typedef struct VhostUserState {
--
2.33.0
On Tue, Oct 19, 2021 at 03:57:42PM +0800, Xueming Li wrote:
> When vhost-user device cleanup and unmmap notifier address, VM cpu
you mean cleanup is executed and un-mmaps ...
> thread that writing the notifier failed
that writing -> writing
failed -> fails
> with accessing invalid address.
... error
>
> To avoid this concurrent issue, wait memory flatview update by draining
wait -> wait for a
> rcu callbacks, then unmap notifiers.
>
> Fixes: 44866521bd6e ("vhost-user: support registering external host notifiers")
> Cc: qemu-stable@nongnu.org
> Cc: Yuwei Zhang <zhangyuwei.9149@bytedance.com>
> Signed-off-by: Xueming Li <xuemingl@nvidia.com>
If I apply this, qos-test hangs.
> ---
> hw/virtio/vhost-user.c | 22 +++++++++++++++-------
> 1 file changed, 15 insertions(+), 7 deletions(-)
>
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index bf6e50223c..cfca1b9adc 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -18,6 +18,7 @@
> #include "chardev/char-fe.h"
> #include "io/channel-socket.h"
> #include "sysemu/kvm.h"
> +#include "sysemu/cpus.h"
> #include "qemu/error-report.h"
> #include "qemu/main-loop.h"
> #include "qemu/sockets.h"
> @@ -1165,6 +1166,12 @@ static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
>
> if (n->addr && n->set) {
> virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
> + if (!qemu_in_vcpu_thread()) { /* Avoid vCPU dead lock. */
ok this avoids deadlock, but the comment does not explain:
why does the vCPU thread not need to wait for the other vCPU
threads? What if one of them is in the middle of a notify?
> + /* Wait for VM threads accessing old flatview which contains notifier. */
> + drain_call_rcu();
> + }
> + munmap(n->addr, qemu_real_host_page_size);
> + n->addr = NULL;
> n->set = false;
> }
> }
> @@ -1502,12 +1509,7 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
>
> n = &user->notifier[queue_idx];
>
> - if (n->addr) {
> - virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
> - object_unparent(OBJECT(&n->mr));
> - munmap(n->addr, page_size);
> - n->addr = NULL;
> - }
> + vhost_user_host_notifier_remove(dev, queue_idx);
>
> if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
> return 0;
> @@ -2485,11 +2487,17 @@ void vhost_user_cleanup(VhostUserState *user)
> for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
> if (user->notifier[i].addr) {
> object_unparent(OBJECT(&user->notifier[i].mr));
> + }
> + }
> + memory_region_transaction_commit();
> + /* Wait for VM threads accessing old flatview which contains notifier. */
> + drain_call_rcu();
> + for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
> + if (user->notifier[i].addr) {
> munmap(user->notifier[i].addr, qemu_real_host_page_size);
> user->notifier[i].addr = NULL;
> }
> }
> - memory_region_transaction_commit();
> user->chr = NULL;
> }
>
> --
> 2.33.0
On Tue, 2021-10-19 at 06:52 -0400, Michael S. Tsirkin wrote:
> On Tue, Oct 19, 2021 at 03:57:42PM +0800, Xueming Li wrote:
> > When vhost-user device cleanup and unmmap notifier address, VM cpu
>
> you mean cleanup is executed and un-mmaps ...
>
> > thread that writing the notifier failed
>
> that writing -> writing
> failed -> fails
>
> > with accessing invalid address.
>
> ... error
>
> >
> > To avoid this concurrent issue, wait memory flatview update by draining
>
> wait -> wait for a
>
> > rcu callbacks, then unmap notifiers.
> >
> > Fixes: 44866521bd6e ("vhost-user: support registering external host notifiers")
> > Cc: qemu-stable@nongnu.org
> > Cc: Yuwei Zhang <zhangyuwei.9149@bytedance.com>
> > Signed-off-by: Xueming Li <xuemingl@nvidia.com>
>
>
> If I apply this, qos-test hangs.
Hi Michael,
Thanks for all the comments; this one is critical. The blocking RCU
drain inside an RCU read-side critical section caused the hang. I'm planning
to use an asynchronous RCU callback to do the munmap — thanks again for the
heads-up.
>
>
>
>
> > ---
> > hw/virtio/vhost-user.c | 22 +++++++++++++++-------
> > 1 file changed, 15 insertions(+), 7 deletions(-)
> >
> > diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> > index bf6e50223c..cfca1b9adc 100644
> > --- a/hw/virtio/vhost-user.c
> > +++ b/hw/virtio/vhost-user.c
> > @@ -18,6 +18,7 @@
> > #include "chardev/char-fe.h"
> > #include "io/channel-socket.h"
> > #include "sysemu/kvm.h"
> > +#include "sysemu/cpus.h"
> > #include "qemu/error-report.h"
> > #include "qemu/main-loop.h"
> > #include "qemu/sockets.h"
> > @@ -1165,6 +1166,12 @@ static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
> >
> > if (n->addr && n->set) {
> > virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
> > + if (!qemu_in_vcpu_thread()) { /* Avoid vCPU dead lock. */
>
> ok this avoids deadlock, but the comment does not explain:
> why does not vcpu thread
> need to wait for other VCPU threads? what if one of these
> is in the middle of notify?
>
>
> > + /* Wait for VM threads accessing old flatview which contains notifier. */
> > + drain_call_rcu();
> > + }
> > + munmap(n->addr, qemu_real_host_page_size);
> > + n->addr = NULL;
> > n->set = false;
> > }
> > }
> > @@ -1502,12 +1509,7 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
> >
> > n = &user->notifier[queue_idx];
> >
> > - if (n->addr) {
> > - virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
> > - object_unparent(OBJECT(&n->mr));
> > - munmap(n->addr, page_size);
> > - n->addr = NULL;
> > - }
> > + vhost_user_host_notifier_remove(dev, queue_idx);
> >
> > if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
> > return 0;
> > @@ -2485,11 +2487,17 @@ void vhost_user_cleanup(VhostUserState *user)
> > for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
> > if (user->notifier[i].addr) {
> > object_unparent(OBJECT(&user->notifier[i].mr));
> > + }
> > + }
> > + memory_region_transaction_commit();
> > + /* Wait for VM threads accessing old flatview which contains notifier. */
> > + drain_call_rcu();
> > + for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
> > + if (user->notifier[i].addr) {
> > munmap(user->notifier[i].addr, qemu_real_host_page_size);
> > user->notifier[i].addr = NULL;
> > }
> > }
> > - memory_region_transaction_commit();
> > user->chr = NULL;
> > }
> >
> > --
> > 2.33.0
>
On Tue, Oct 19, 2021 at 03:57:43PM +0800, Xueming Li wrote:
> When vhost-user vdpa client restart, VQ notifier mmap address and MR
> become invalid, restore MR only is wrong.
wrong how? as opposed to what?
> vdpa client will set VQ
> notifier after reconnect.
>
> This patch removes VQ notifier restore and related flags.
with the result that ...
>
> Fixes: 44866521bd6e ("vhost-user: support registering external host notifiers")
> Cc: qemu-stable@nongnu.org
> Cc: Yuwei Zhang <zhangyuwei.9149@bytedance.com>
> Signed-off-by: Xueming Li <xuemingl@nvidia.com>
> ---
> hw/virtio/vhost-user.c | 19 +------------------
> include/hw/virtio/vhost-user.h | 1 -
> 2 files changed, 1 insertion(+), 19 deletions(-)
>
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index cfca1b9adc..cc33f4b042 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -1144,19 +1144,6 @@ static int vhost_user_set_vring_num(struct vhost_dev *dev,
> return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
> }
>
> -static void vhost_user_host_notifier_restore(struct vhost_dev *dev,
> - int queue_idx)
> -{
> - struct vhost_user *u = dev->opaque;
> - VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
> - VirtIODevice *vdev = dev->vdev;
> -
> - if (n->addr && !n->set) {
> - virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true);
> - n->set = true;
> - }
> -}
> -
> static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
> int queue_idx)
> {
> @@ -1164,7 +1151,7 @@ static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
> VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
> VirtIODevice *vdev = dev->vdev;
>
> - if (n->addr && n->set) {
> + if (n->addr) {
> virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
> if (!qemu_in_vcpu_thread()) { /* Avoid vCPU dead lock. */
> /* Wait for VM threads accessing old flatview which contains notifier. */
> @@ -1172,15 +1159,12 @@ static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
> }
> munmap(n->addr, qemu_real_host_page_size);
> n->addr = NULL;
> - n->set = false;
> }
> }
>
> static int vhost_user_set_vring_base(struct vhost_dev *dev,
> struct vhost_vring_state *ring)
> {
> - vhost_user_host_notifier_restore(dev, ring->index);
> -
> return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring);
> }
>
> @@ -1540,7 +1524,6 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
> }
>
> n->addr = addr;
> - n->set = true;
>
> return 0;
> }
> diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h
> index a9abca3288..f6012b2078 100644
> --- a/include/hw/virtio/vhost-user.h
> +++ b/include/hw/virtio/vhost-user.h
> @@ -14,7 +14,6 @@
> typedef struct VhostUserHostNotifier {
> MemoryRegion mr;
> void *addr;
> - bool set;
> } VhostUserHostNotifier;
>
> typedef struct VhostUserState {
> --
> 2.33.0
© 2016 - 2026 Red Hat, Inc.