[PATCH v1 12/15] libvhost-user: Use most of mmap_offset as fd_offset

David Hildenbrand posted 15 patches 9 months, 3 weeks ago
Maintainers: "Michael S. Tsirkin" <mst@redhat.com>
In the past, QEMU would create memory regions that could partially cover
hugetlb pages, making mmap() fail if we used the mmap_offset as an
fd_offset. For that reason, we never used the mmap_offset as an offset into
the fd and instead always mapped the fd from the very start.

However, that can easily result in us mmap'ing a lot of unnecessary
parts of an fd, possibly repeatedly.

QEMU nowadays does not create memory regions that partially cover huge
pages -- that never really worked with postcopy anyway. Since 2018, QEMU
has merged regions that partially cover huge pages (due to holes in boot
memory); see c1ece84e7c93 ("vhost: Huge page align and merge").

Let's be a bit careful and not unconditionally convert the
mmap_offset into an fd_offset. Instead, let's detect the hugetlb page
size and pass as much of the mmap_offset as we can as fd_offset, making
sure that we call mmap() with a properly aligned file offset.
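
For illustration only -- a minimal standalone sketch with made-up helper
names, not the libvhost-user code itself -- the split boils down to:

  #include <stdint.h>
  #include <sys/mman.h>

  #define ALIGN_DOWN(n, m) ((n) / (m) * (m))

  /*
   * Sketch: map only what a region needs by pushing the aligned part of
   * mmap_offset into the file offset. mmap() on hugetlbfs requires the
   * file offset to be a multiple of the huge page size.
   */
  static void *map_region_sketch(int fd, uint64_t mmap_offset,
                                 uint64_t memory_size, size_t fd_pagesize)
  {
      uint64_t fd_offset = ALIGN_DOWN(mmap_offset, fd_pagesize);
      uint64_t residual = mmap_offset - fd_offset; /* usually 0 nowadays */

      /* Before: mmap(NULL, memory_size + mmap_offset, ..., fd, 0) */
      return mmap(NULL, memory_size + residual, PROT_READ | PROT_WRITE,
                  MAP_SHARED | MAP_NORESERVE, fd, fd_offset);
  }

Address translation then keeps working with mmap_addr plus the small
residual, which is why the patch stores the reduced mmap_offset in the
region instead of the original one.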

With QEMU and a fully plugged virtio-mem device (50GiB spread over 50
memslots), the qemu-storage-daemon process consumes 1281GiB of VA space
before this change and 58GiB after it.
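
(Rough back-of-the-envelope explanation of those numbers: the old code
mapped memory_size + mmap_offset from file offset 0 for every region, so
50 regions whose mmap_offsets step up in 1GiB increments map on the order
of 1GiB + 2GiB + ... + 50GiB ~= 1275GiB, plus the boot-memory mappings,
consistent with the ~1281GiB observed. With the change, each region maps
only about its 1GiB, hence the ~58GiB.)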

Example debug output:
  ================ Vhost user message ================
  Request: VHOST_USER_ADD_MEM_REG (37)
  Flags:   0x9
  Size:    40
  Fds: 59
  Adding region 50
      guest_phys_addr: 0x0000000d80000000
      memory_size:     0x0000000040000000
      userspace_addr   0x00007f54ebffe000
      mmap_offset      0x0000000c00000000
      fd_offset:       0x0000000c00000000
      new mmap_offset: 0x0000000000000000
      mmap_addr:       0x00007f7ecc000000
  Successfully added new region
  ================ Vhost user message ================
  Request: VHOST_USER_ADD_MEM_REG (37)
  Flags:   0x9
  Size:    40
  Fds: 59
  Adding region 51
      guest_phys_addr: 0x0000000dc0000000
      memory_size:     0x0000000040000000
      userspace_addr   0x00007f552bffe000
      mmap_offset      0x0000000c40000000
      fd_offset:       0x0000000c40000000
      new mmap_offset: 0x0000000000000000
      mmap_addr:       0x00007f7e8c000000
  Successfully added new region

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 subprojects/libvhost-user/libvhost-user.c | 50 ++++++++++++++++++++---
 1 file changed, 45 insertions(+), 5 deletions(-)

diff --git a/subprojects/libvhost-user/libvhost-user.c b/subprojects/libvhost-user/libvhost-user.c
index 75e47b7bb3..7d8293dc84 100644
--- a/subprojects/libvhost-user/libvhost-user.c
+++ b/subprojects/libvhost-user/libvhost-user.c
@@ -43,6 +43,8 @@
 #include <fcntl.h>
 #include <sys/ioctl.h>
 #include <linux/vhost.h>
+#include <sys/vfs.h>
+#include <linux/magic.h>
 
 #ifdef __NR_userfaultfd
 #include <linux/userfaultfd.h>
@@ -281,12 +283,36 @@ vu_remove_all_mem_regs(VuDev *dev)
     dev->nregions = 0;
 }
 
+static size_t
+get_fd_pagesize(int fd)
+{
+    static size_t pagesize;
+#if defined(__linux__)
+    struct statfs fs;
+    int ret;
+
+    do {
+        ret = fstatfs(fd, &fs);
+    } while (ret != 0 && errno == EINTR);
+
+    if (!ret && fs.f_type == HUGETLBFS_MAGIC) {
+        return fs.f_bsize;
+    }
+#endif
+
+    if (!pagesize) {
+        pagesize = getpagesize();
+    }
+    return pagesize;
+}
+
 static void
 _vu_add_mem_reg(VuDev *dev, VhostUserMemoryRegion *msg_region, int fd)
 {
     const uint64_t start_gpa = msg_region->guest_phys_addr;
     const uint64_t end_gpa = start_gpa + msg_region->memory_size;
     int prot = PROT_READ | PROT_WRITE;
+    uint64_t mmap_offset, fd_offset;
     VuDevRegion *r;
     void *mmap_addr;
     int low = 0;
@@ -335,11 +361,25 @@ _vu_add_mem_reg(VuDev *dev, VhostUserMemoryRegion *msg_region, int fd)
     idx = low;
 
     /*
-     * We don't use offset argument of mmap() since the mapped address has
-     * to be page aligned, and we use huge pages.
+     * Convert most of msg_region->mmap_offset to fd_offset. In almost all
+     * cases, this will leave us with mmap_offset == 0, mmap()'ing only
+     * what we really need. Only if a memory region would partially cover
+     * hugetlb pages, we'd get mmap_offset != 0, which usually doesn't happen
+     * anymore (i.e., modern QEMU).
+     *
+     * Note that mmap() with hugetlb would fail if the offset into the file
+     * is not aligned to the huge page size.
      */
-    mmap_addr = mmap(0, msg_region->memory_size + msg_region->mmap_offset,
-                     prot, MAP_SHARED | MAP_NORESERVE, fd, 0);
+    fd_offset = ALIGN_DOWN(msg_region->mmap_offset, get_fd_pagesize(fd));
+    mmap_offset = msg_region->mmap_offset - fd_offset;
+
+    DPRINT("    fd_offset:       0x%016"PRIx64"\n",
+           fd_offset);
+    DPRINT("    adj mmap_offset: 0x%016"PRIx64"\n",
+           mmap_offset);
+
+    mmap_addr = mmap(0, msg_region->memory_size + mmap_offset,
+                     prot, MAP_SHARED | MAP_NORESERVE, fd, fd_offset);
     if (mmap_addr == MAP_FAILED) {
         vu_panic(dev, "region mmap error: %s", strerror(errno));
         return;
@@ -354,7 +394,7 @@ _vu_add_mem_reg(VuDev *dev, VhostUserMemoryRegion *msg_region, int fd)
     r->size = msg_region->memory_size;
     r->qva = msg_region->userspace_addr;
     r->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
-    r->mmap_offset = msg_region->mmap_offset;
+    r->mmap_offset = mmap_offset;
     dev->nregions++;
 
     if (dev->postcopy_listening) {
-- 
2.43.0
Re: [PATCH v1 12/15] libvhost-user: Use most of mmap_offset as fd_offset
Posted by Raphael Norwitz 9 months, 3 weeks ago
On Fri, Feb 2, 2024 at 4:55 PM David Hildenbrand <david@redhat.com> wrote:
> Signed-off-by: David Hildenbrand <david@redhat.com>

Reviewed-by: Raphael Norwitz <raphael@enfabrica.net>
