When a vhost-user device is cleaned up and the notifier address is
unmapped, a vCPU thread that is still writing to the notifier faults
on the now-invalid address.

To avoid this race, wait for the memory flatview update to complete by
draining the RCU callbacks, then unmap the notifiers.
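A minimal sketch of the intended ordering (illustrative only; the
notifier_unmap_safe() helper is a made-up name, not code from this
patch):

    #include <sys/mman.h>    /* munmap() */
    #include "qemu/rcu.h"    /* drain_call_rcu() */

    /*
     * Assumes the notifier MemoryRegion was already removed from the
     * memory map, so the new flatview no longer references it, while
     * vCPU threads may still be reading the old flatview under RCU.
     */
    static void notifier_unmap_safe(void *addr, size_t len)
    {
        drain_call_rcu();   /* wait for readers of the old flatview */
        munmap(addr, len);  /* nobody can fault on this mapping now */
    }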
Fixes: 44866521bd6e ("vhost-user: support registering external host notifiers")
Cc: tiwei.bie@intel.com
Cc: qemu-stable@nongnu.org
Cc: Yuwei Zhang <zhangyuwei.9149@bytedance.com>
Signed-off-by: Xueming Li <xuemingl@nvidia.com>
---
hw/virtio/vhost-user.c | 21 ++++++++++++++-------
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index bf6e50223c..b2e948bdc7 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -1165,6 +1165,12 @@ static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
if (n->addr && n->set) {
virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
+ if (!qemu_in_vcpu_thread()) { /* Avoid vCPU dead lock. */
+ /* Wait for VM threads accessing old flatview which contains notifier. */
+ drain_call_rcu();
+ }
+ munmap(n->addr, qemu_real_host_page_size);
+ n->addr = NULL;
n->set = false;
}
}
@@ -1502,12 +1508,7 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
n = &user->notifier[queue_idx];
- if (n->addr) {
- virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
- object_unparent(OBJECT(&n->mr));
- munmap(n->addr, page_size);
- n->addr = NULL;
- }
+ vhost_user_host_notifier_remove(dev, queue_idx);
if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
return 0;
@@ -2485,11 +2486,17 @@ void vhost_user_cleanup(VhostUserState *user)
for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
if (user->notifier[i].addr) {
object_unparent(OBJECT(&user->notifier[i].mr));
+ }
+ }
+ memory_region_transaction_commit();
+ /* Wait for VM threads accessing old flatview which contains notifier. */
+ drain_call_rcu();
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ if (user->notifier[i].addr) {
munmap(user->notifier[i].addr, qemu_real_host_page_size);
user->notifier[i].addr = NULL;
}
}
- memory_region_transaction_commit();
user->chr = NULL;
}
--
2.33.0
On Fri, Oct 08, 2021 at 03:58:04PM +0800, Xueming Li wrote:
> When a vhost-user device is cleaned up and the notifier address is
> unmapped, a vCPU thread that is still writing to the notifier faults
> on the now-invalid address.
>
> To avoid this race, wait for the memory flatview update to complete by
> draining the RCU callbacks, then unmap the notifiers.
>
> Fixes: 44866521bd6e ("vhost-user: support registering external host notifiers")
> Cc: tiwei.bie@intel.com
> Cc: qemu-stable@nongnu.org
> Cc: Yuwei Zhang <zhangyuwei.9149@bytedance.com>
> Signed-off-by: Xueming Li <xuemingl@nvidia.com>
> ---
> hw/virtio/vhost-user.c | 21 ++++++++++++++-------
> 1 file changed, 14 insertions(+), 7 deletions(-)
>
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index bf6e50223c..b2e948bdc7 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -1165,6 +1165,12 @@ static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
>
> if (n->addr && n->set) {
> virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
> + if (!qemu_in_vcpu_thread()) { /* Avoid vCPU dead lock. */
> + /* Wait for VM threads accessing old flatview which contains notifier. */
> + drain_call_rcu();
> + }
> + munmap(n->addr, qemu_real_host_page_size);
> + n->addr = NULL;
> n->set = false;
> }
> }
../hw/virtio/vhost-user.c: In function ‘vhost_user_host_notifier_remove’:
../hw/virtio/vhost-user.c:1168:14: error: implicit declaration of function ‘qemu_in_vcpu_thread’ [-Werror=implicit-function-declaration]
1168 | if (!qemu_in_vcpu_thread()) { /* Avoid vCPU dead lock. */
| ^~~~~~~~~~~~~~~~~~~
../hw/virtio/vhost-user.c:1168:14: error: nested extern declaration of ‘qemu_in_vcpu_thread’ [-Werror=nested-externs]
cc1: all warnings being treated as errors
ninja: build stopped: subcommand failed.
make[1]: *** [Makefile:162: run-ninja] Error 1
make[1]: Leaving directory '/scm/qemu/build'
make: *** [GNUmakefile:11: all] Error 2
Although the following patch fixes it, bisect is broken.
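To keep bisect working the declaration has to come with this patch,
e.g. by pulling in the header that declares qemu_in_vcpu_thread()
(assumed to be "sysemu/cpus.h" here, based on where the function lives
in current trees):

    /* hw/virtio/vhost-user.c: the header name is an assumption; use
     * whichever header actually declares qemu_in_vcpu_thread(). */
    #include "sysemu/cpus.h"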
[...]
On Tue, 2021-10-19 at 02:15 -0400, Michael S. Tsirkin wrote:
> On Fri, Oct 08, 2021 at 03:58:04PM +0800, Xueming Li wrote:
[...]
> Although the following patch fixes it, bisect is broken.
Yes, really an issue, v4 posted, thanks!
On Tue, Oct 19, 2021 at 06:45:19AM +0000, Xueming(Steven) Li wrote:
> On Tue, 2021-10-19 at 02:15 -0400, Michael S. Tsirkin wrote:
[...]
> Yes, really an issue, v4 posted, thanks!
Pls address the comment on 2/2 too.