Today, if QEMU (or any other VMM) sends multiple copies of the same
region to a libvhost-user based backend and then attempts to remove that
region, only one copy will be removed, leaving stale duplicates of the
region in dev->regions[].

Fix this by having vu_rem_mem_reg() iterate through all regions in
dev->regions[] and remove every matching region.

Suggested-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Raphael Norwitz <raphael.norwitz@nutanix.com>
---
subprojects/libvhost-user/libvhost-user.c | 27 ++++++++++++-----------
1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/subprojects/libvhost-user/libvhost-user.c b/subprojects/libvhost-user/libvhost-user.c
index 74a9980194..2f465a4f0e 100644
--- a/subprojects/libvhost-user/libvhost-user.c
+++ b/subprojects/libvhost-user/libvhost-user.c
@@ -809,6 +809,7 @@ static bool
 vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
     VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;
     int i;
+    bool found = false;
 
     if (vmsg->fd_num != 1 ||
         vmsg->size != sizeof(vmsg->payload.memreg)) {
@@ -831,25 +832,25 @@ vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
             VuDevRegion *r = &dev->regions[i];
             void *m = (void *) (uintptr_t) r->mmap_addr;
 
-            if (m) {
+            if (m && !found) {
                 munmap(m, r->size + r->mmap_offset);
             }
 
-            break;
+            /*
+             * Shift all affected entries by 1 to close the hole at index i and
+             * zero out the last entry.
+             */
+            memmove(dev->regions + i, dev->regions + i + 1,
+                    sizeof(VuDevRegion) * (dev->nregions - i - 1));
+            memset(dev->regions + dev->nregions - 1, 0, sizeof(VuDevRegion));
+            DPRINT("Successfully removed a region\n");
+            dev->nregions--;
+
+            found = true;
         }
     }
 
-    if (i < dev->nregions) {
-        /*
-         * Shift all affected entries by 1 to close the hole at index i and
-         * zero out the last entry.
-         */
-        memmove(dev->regions + i, dev->regions + i + 1,
-                sizeof(VuDevRegion) * (dev->nregions - i - 1));
-        memset(dev->regions + dev->nregions - 1, 0,
-               sizeof(VuDevRegion));
-        DPRINT("Successfully removed a region\n");
-        dev->nregions--;
+    if (found) {
         vmsg_set_reply_u64(vmsg, 0);
     } else {
         vu_panic(dev, "Specified region not found\n");
--
2.20.1
On Wed, Dec 15, 2021 at 10:29:55PM +0000, Raphael Norwitz wrote:
> diff --git a/subprojects/libvhost-user/libvhost-user.c b/subprojects/libvhost-user/libvhost-user.c
> index 74a9980194..2f465a4f0e 100644
> --- a/subprojects/libvhost-user/libvhost-user.c
> +++ b/subprojects/libvhost-user/libvhost-user.c
> @@ -809,6 +809,7 @@ static bool
> vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
> VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;
> int i;
> + bool found = false;
>
> if (vmsg->fd_num != 1 ||
> vmsg->size != sizeof(vmsg->payload.memreg)) {
> @@ -831,25 +832,25 @@ vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
> VuDevRegion *r = &dev->regions[i];
> void *m = (void *) (uintptr_t) r->mmap_addr;
>
> - if (m) {
> + if (m && !found) {
> munmap(m, r->size + r->mmap_offset);
> }
Why is only the first region unmapped? My interpretation of
vu_add_mem_reg() is that it mmaps duplicate regions to unique mmap_addr
addresses, so we need to munmap each of them.
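I'd expect the loop to munmap every copy it walks over, i.e. something
like this (just a sketch against the names used in this patch, untested):

    for (i = 0; i < dev->nregions; i++) {
        if (reg_equal(&dev->regions[i], msg_region)) {
            VuDevRegion *r = &dev->regions[i];
            void *m = (void *) (uintptr_t) r->mmap_addr;

            /* unmap this copy too, even if an earlier match was unmapped */
            if (m) {
                munmap(m, r->size + r->mmap_offset);
            }

            /* ... then shift/zero the array as the patch does below ... */
        }
    }
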
>
> - break;
> + /*
> + * Shift all affected entries by 1 to close the hole at index i and
> + * zero out the last entry.
> + */
> + memmove(dev->regions + i, dev->regions + i + 1,
> + sizeof(VuDevRegion) * (dev->nregions - i - 1));
> + memset(dev->regions + dev->nregions - 1, 0, sizeof(VuDevRegion));
> + DPRINT("Successfully removed a region\n");
> + dev->nregions--;
> +
> + found = true;
> }
i-- is missing. dev->regions[] has been shortened so we need to check
the same element again.
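For example (again just a sketch, untested):

            memmove(dev->regions + i, dev->regions + i + 1,
                    sizeof(VuDevRegion) * (dev->nregions - i - 1));
            memset(dev->regions + dev->nregions - 1, 0, sizeof(VuDevRegion));
            DPRINT("Successfully removed a region\n");
            dev->nregions--;
            i--; /* the next entry just moved into slot i, so re-check it */

            found = true;
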
On Wed, Jan 05, 2022 at 11:18:52AM +0000, Stefan Hajnoczi wrote:
> On Wed, Dec 15, 2021 at 10:29:55PM +0000, Raphael Norwitz wrote:
> > diff --git a/subprojects/libvhost-user/libvhost-user.c b/subprojects/libvhost-user/libvhost-user.c
> > index 74a9980194..2f465a4f0e 100644
> > --- a/subprojects/libvhost-user/libvhost-user.c
> > +++ b/subprojects/libvhost-user/libvhost-user.c
> > @@ -809,6 +809,7 @@ static bool
> > vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
> > VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;
> > int i;
> > + bool found = false;
> >
> > if (vmsg->fd_num != 1 ||
> > vmsg->size != sizeof(vmsg->payload.memreg)) {
> > @@ -831,25 +832,25 @@ vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
> > VuDevRegion *r = &dev->regions[i];
> > void *m = (void *) (uintptr_t) r->mmap_addr;
> >
> > - if (m) {
> > + if (m && !found) {
> > munmap(m, r->size + r->mmap_offset);
> > }
>
> Why is only the first region unmapped? My interpretation of
> vu_add_mem_reg() is that it mmaps duplicate regions to unique mmap_addr
> addresses, so we need to munmap each of them.
I agree - I will remove the found check here.
>
> >
> > - break;
> > + /*
> > + * Shift all affected entries by 1 to close the hole at index i and
> > + * zero out the last entry.
> > + */
> > + memmove(dev->regions + i, dev->regions + i + 1,
> > + sizeof(VuDevRegion) * (dev->nregions - i - 1));
> > + memset(dev->regions + dev->nregions - 1, 0, sizeof(VuDevRegion));
> > + DPRINT("Successfully removed a region\n");
> > + dev->nregions--;
> > +
> > + found = true;
> > }
>
> i-- is missing. dev->regions[] has been shortened so we need to check
> the same element again.
Ack
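With both comments addressed, I'm thinking the removal loop for the
respin would look roughly like this (sketch only, not yet tested):

    for (i = 0; i < dev->nregions; i++) {
        if (reg_equal(&dev->regions[i], msg_region)) {
            VuDevRegion *r = &dev->regions[i];
            void *m = (void *) (uintptr_t) r->mmap_addr;

            /* unmap every duplicate copy of the region */
            if (m) {
                munmap(m, r->size + r->mmap_offset);
            }

            /*
             * Shift all affected entries by 1 to close the hole at index i
             * and zero out the last entry.
             */
            memmove(dev->regions + i, dev->regions + i + 1,
                    sizeof(VuDevRegion) * (dev->nregions - i - 1));
            memset(dev->regions + dev->nregions - 1, 0, sizeof(VuDevRegion));
            DPRINT("Successfully removed a region\n");
            dev->nregions--;
            /* the array was shortened, so check index i again */
            i--;

            found = true;
        }
    }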