hw/virtio/vhost-user.c | 38 ++++++++++++---------------------- include/hw/virtio/vhost-user.h | 1 - 2 files changed, 13 insertions(+), 26 deletions(-)
When a vDPA application in client mode shuts down, the unmapped VQ notifier might be accessed by a VM thread under high tx traffic, which can crash the VM in rare conditions. This patch tries to fix it with better RCU synchronization of the new flatview. Xueming Li (2): vhost-user: fix VirtQ notifier cleanup vhost-user: remove VirtQ notifier restore hw/virtio/vhost-user.c | 38 ++++++++++++---------------------- include/hw/virtio/vhost-user.h | 1 - 2 files changed, 13 insertions(+), 26 deletions(-) -- 2.33.0
When a vDPA application in client mode shuts down, the unmapped VQ notifier might be accessed by a vCPU thread under high tx traffic, which can crash the VM in rare conditions. This patch tries to fix it with better RCU synchronization of the new flatview. v2: no RCU draining on vCPU thread Xueming Li (2): vhost-user: fix VirtQ notifier cleanup vhost-user: remove VirtQ notifier restore hw/virtio/vhost-user.c | 40 +++++++++++++--------------------- include/hw/virtio/vhost-user.h | 1 - 2 files changed, 15 insertions(+), 26 deletions(-) -- 2.33.0
When a vhost-user device stops and unmaps the notifier address, a vCPU
thread that writes to the notifier via the old flatview fails by
accessing an invalid address.
To avoid this concurrency issue, wait for the memory flatview update by
draining RCU callbacks before unmapping the notifiers.
Fixes: 44866521bd6e ("vhost-user: support registering external host notifiers")
Cc: tiwei.bie@intel.com
Cc: qemu-stable@nongnu.org
Cc: Yuwei Zhang <zhangyuwei.9149@bytedance.com>
Signed-off-by: Xueming Li <xuemingl@nvidia.com>
---
hw/virtio/vhost-user.c | 20 +++++++++++++-------
1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 2c8556237f..08581e6711 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -1165,6 +1165,11 @@ static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
if (n->addr && n->set) {
virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
+ if (!qemu_in_vcpu_thread())
+ /* Wait vCPU threads accessing notifier via old flatview. */
+ drain_call_rcu();
+ munmap(n->addr, qemu_real_host_page_size);
+ n->addr = NULL;
n->set = false;
}
}
@@ -1502,12 +1507,7 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
n = &user->notifier[queue_idx];
- if (n->addr) {
- virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
- object_unparent(OBJECT(&n->mr));
- munmap(n->addr, page_size);
- n->addr = NULL;
- }
+ vhost_user_host_notifier_remove(dev, queue_idx);
if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
return 0;
@@ -2484,11 +2484,17 @@ void vhost_user_cleanup(VhostUserState *user)
for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
if (user->notifier[i].addr) {
object_unparent(OBJECT(&user->notifier[i].mr));
+ }
+ }
+ memory_region_transaction_commit();
+ /* Wait VM threads accessing old flatview which contains notifier. */
+ drain_call_rcu();
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ if (user->notifier[i].addr) {
munmap(user->notifier[i].addr, qemu_real_host_page_size);
user->notifier[i].addr = NULL;
}
}
- memory_region_transaction_commit();
user->chr = NULL;
}
--
2.33.0
On Fri, Sep 17, 2021 at 08:26:15PM +0800, Xueming Li wrote: > When vhost-user device stop and unmmap notifier address, vCPU thread > that writing the notifier via old flatview failed with accessing invalid > address. > > To avoid this concurrent issue, wait memory flatview update by draining > rcu callbacks before unmaping notifiers. > > Fixes: 44866521bd6e ("vhost-user: support registering external host notifiers") > Cc: tiwei.bie@intel.com > Cc: qemu-stable@nongnu.org > Cc: Yuwei Zhang <zhangyuwei.9149@bytedance.com> > Signed-off-by: Xueming Li <xuemingl@nvidia.com> Pls post v2 as a new thread, with changelog in the cover letter. > --- > hw/virtio/vhost-user.c | 20 +++++++++++++------- > 1 file changed, 13 insertions(+), 7 deletions(-) > > diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c > index 2c8556237f..08581e6711 100644 > --- a/hw/virtio/vhost-user.c > +++ b/hw/virtio/vhost-user.c > @@ -1165,6 +1165,11 @@ static void vhost_user_host_notifier_remove(struct vhost_dev *dev, > > if (n->addr && n->set) { > virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false); > + if (!qemu_in_vcpu_thread()) > + /* Wait vCPU threads accessing notifier via old flatview. */ Wait VM - Wait for VM > + drain_call_rcu(); okay. but this has a coding style violation: should use {} in if. 
> + munmap(n->addr, qemu_real_host_page_size); > + n->addr = NULL; > n->set = false; > } > } > @@ -1502,12 +1507,7 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev, > > n = &user->notifier[queue_idx]; > > - if (n->addr) { > - virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false); > - object_unparent(OBJECT(&n->mr)); > - munmap(n->addr, page_size); > - n->addr = NULL; > - } > + vhost_user_host_notifier_remove(dev, queue_idx); > > if (area->u64 & VHOST_USER_VRING_NOFD_MASK) { > return 0; > @@ -2484,11 +2484,17 @@ void vhost_user_cleanup(VhostUserState *user) > for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { > if (user->notifier[i].addr) { > object_unparent(OBJECT(&user->notifier[i].mr)); > + } > + } > + memory_region_transaction_commit(); > + /* Wait VM threads accessing old flatview which contains notifier. */ Wait VM - Wait for VM > + drain_call_rcu(); > + for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { > + if (user->notifier[i].addr) { > munmap(user->notifier[i].addr, qemu_real_host_page_size); > user->notifier[i].addr = NULL; > } > } > - memory_region_transaction_commit(); > user->chr = NULL; > } > > -- > 2.33.0
On Tue, 2021-10-05 at 10:40 -0400, Michael S. Tsirkin wrote: > On Fri, Sep 17, 2021 at 08:26:15PM +0800, Xueming Li wrote: > > When vhost-user device stop and unmmap notifier address, vCPU thread > > that writing the notifier via old flatview failed with accessing invalid > > address. > > > > To avoid this concurrent issue, wait memory flatview update by draining > > rcu callbacks before unmaping notifiers. > > > > Fixes: 44866521bd6e ("vhost-user: support registering external host notifiers") > > Cc: tiwei.bie@intel.com > > Cc: qemu-stable@nongnu.org > > Cc: Yuwei Zhang <zhangyuwei.9149@bytedance.com> > > Signed-off-by: Xueming Li <xuemingl@nvidia.com> > > > Pls post v2 as a new thread, with changelog in the cover letter. Thanks, v3 posted with below coding style and comment fixes. > > > --- > > hw/virtio/vhost-user.c | 20 +++++++++++++------- > > 1 file changed, 13 insertions(+), 7 deletions(-) > > > > diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c > > index 2c8556237f..08581e6711 100644 > > --- a/hw/virtio/vhost-user.c > > +++ b/hw/virtio/vhost-user.c > > @@ -1165,6 +1165,11 @@ static void vhost_user_host_notifier_remove(struct vhost_dev *dev, > > > > if (n->addr && n->set) { > > virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false); > > + if (!qemu_in_vcpu_thread()) > > + /* Wait vCPU threads accessing notifier via old flatview. */ > > Wait VM - Wait for VM > > > + drain_call_rcu(); > > okay. > but this has a coding style violation: > should use {} in if. 
> > > > + munmap(n->addr, qemu_real_host_page_size); > > + n->addr = NULL; > > n->set = false; > > } > > } > > @@ -1502,12 +1507,7 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev, > > > > n = &user->notifier[queue_idx]; > > > > - if (n->addr) { > > - virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false); > > - object_unparent(OBJECT(&n->mr)); > > - munmap(n->addr, page_size); > > - n->addr = NULL; > > - } > > + vhost_user_host_notifier_remove(dev, queue_idx); > > > > if (area->u64 & VHOST_USER_VRING_NOFD_MASK) { > > return 0; > > @@ -2484,11 +2484,17 @@ void vhost_user_cleanup(VhostUserState *user) > > for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { > > if (user->notifier[i].addr) { > > object_unparent(OBJECT(&user->notifier[i].mr)); > > + } > > + } > > + memory_region_transaction_commit(); > > + /* Wait VM threads accessing old flatview which contains notifier. */ > > Wait VM - Wait for VM > > > + drain_call_rcu(); > > + for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { > > + if (user->notifier[i].addr) { > > munmap(user->notifier[i].addr, qemu_real_host_page_size); > > user->notifier[i].addr = NULL; > > } > > } > > - memory_region_transaction_commit(); > > user->chr = NULL; > > } > > > > -- > > 2.33.0 >
When a vhost-user vDPA client restarts, the VQ notifier resources become
invalid, so there is no need to keep the mmap; the vDPA client will set
the VQ notifier again after reconnecting.
Remove the VQ notifier restore logic and the related flags.
Fixes: 44866521bd6e ("vhost-user: support registering external host notifiers")
Cc: tiwei.bie@intel.com
Cc: qemu-stable@nongnu.org
Cc: Yuwei Zhang <zhangyuwei.9149@bytedance.com>
Signed-off-by: Xueming Li <xuemingl@nvidia.com>
---
hw/virtio/vhost-user.c | 20 ++------------------
include/hw/virtio/vhost-user.h | 1 -
2 files changed, 2 insertions(+), 19 deletions(-)
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 08581e6711..15a4b4ee76 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -22,6 +22,7 @@
#include "qemu/main-loop.h"
#include "qemu/sockets.h"
#include "sysemu/cryptodev.h"
+#include "sysemu/cpus.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "trace.h"
@@ -1143,19 +1144,6 @@ static int vhost_user_set_vring_num(struct vhost_dev *dev,
return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
}
-static void vhost_user_host_notifier_restore(struct vhost_dev *dev,
- int queue_idx)
-{
- struct vhost_user *u = dev->opaque;
- VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
- VirtIODevice *vdev = dev->vdev;
-
- if (n->addr && !n->set) {
- virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true);
- n->set = true;
- }
-}
-
static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
int queue_idx)
{
@@ -1163,22 +1151,19 @@ static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
VirtIODevice *vdev = dev->vdev;
- if (n->addr && n->set) {
+ if (n->addr) {
virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
if (!qemu_in_vcpu_thread())
/* Wait vCPU threads accessing notifier via old flatview. */
drain_call_rcu();
munmap(n->addr, qemu_real_host_page_size);
n->addr = NULL;
- n->set = false;
}
}
static int vhost_user_set_vring_base(struct vhost_dev *dev,
struct vhost_vring_state *ring)
{
- vhost_user_host_notifier_restore(dev, ring->index);
-
return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring);
}
@@ -1537,7 +1522,6 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
}
n->addr = addr;
- n->set = true;
return 0;
}
diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h
index a9abca3288..f6012b2078 100644
--- a/include/hw/virtio/vhost-user.h
+++ b/include/hw/virtio/vhost-user.h
@@ -14,7 +14,6 @@
typedef struct VhostUserHostNotifier {
MemoryRegion mr;
void *addr;
- bool set;
} VhostUserHostNotifier;
typedef struct VhostUserState {
--
2.33.0
© 2016 - 2024 Red Hat, Inc.