In addition to memfd, a blob resource can also have its backing
storage in a VFIO device region. Therefore, we first need to
determine whether the blob is backed by a VFIO device region or a
memfd before we can call the right API to create a dmabuf fd.

So, once we have the ramblock and the associated mr, we rely on
memory_region_is_ram_device() to tell us where the backing storage
is located. If the blob resource is VFIO backed, we try to find the
right VFIO device that contains the blob and then invoke the API
vfio_device_create_dmabuf_fd().

Note that in virtio_gpu_remap_dmabuf(), we first test whether the
VFIO dmabuf exporter supports mmap. If it doesn't, we use the VFIO
device fd directly to create the CPU mapping.
Cc: Marc-André Lureau <marcandre.lureau@redhat.com>
Cc: Alex Bennée <alex.bennee@linaro.org>
Cc: Akihiko Odaki <odaki@rsg.ci.i.u-tokyo.ac.jp>
Cc: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Cédric Le Goater <clg@redhat.com>
Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
---
hw/display/Kconfig | 5 ++
hw/display/virtio-gpu-dmabuf.c | 114 +++++++++++++++++++++++++++++++--
2 files changed, 112 insertions(+), 7 deletions(-)
diff --git a/hw/display/Kconfig b/hw/display/Kconfig
index 1e95ab28ef..0d090f25f5 100644
--- a/hw/display/Kconfig
+++ b/hw/display/Kconfig
@@ -106,6 +106,11 @@ config VIRTIO_VGA
depends on VIRTIO_PCI
select VGA
+config VIRTIO_GPU_VFIO_BLOB
+ bool
+ default y
+ depends on VFIO
+
config VHOST_USER_GPU
bool
default y
diff --git a/hw/display/virtio-gpu-dmabuf.c b/hw/display/virtio-gpu-dmabuf.c
index 80143034d4..940b0e0411 100644
--- a/hw/display/virtio-gpu-dmabuf.c
+++ b/hw/display/virtio-gpu-dmabuf.c
@@ -18,6 +18,7 @@
#include "ui/console.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-pixman.h"
+#include "hw/vfio/vfio-device.h"
#include "trace.h"
#include "system/ramblock.h"
#include "system/hostmem.h"
@@ -52,6 +53,19 @@ static bool qemu_iovec_same_memory_regions(const struct iovec *iov, int iov_cnt)
return true;
}
+static void vfio_create_dmabuf(VFIODevice *vdev,
+ struct virtio_gpu_simple_resource *res)
+{
+#if defined(VIRTIO_GPU_VFIO_BLOB)
+ res->dmabuf_fd = vfio_device_create_dmabuf_fd(vdev, res->iov, res->iov_cnt);
+ if (res->dmabuf_fd < 0) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: VFIO_DEVICE_FEATURE_DMA_BUF: %s\n",
+ __func__, strerror(errno));
+ }
+#endif
+}
+
static void virtio_gpu_create_udmabuf(struct virtio_gpu_simple_resource *res)
{
struct udmabuf_create_list *list;
@@ -93,11 +107,69 @@ static void virtio_gpu_create_udmabuf(struct virtio_gpu_simple_resource *res)
g_free(list);
}
-static void virtio_gpu_remap_dmabuf(struct virtio_gpu_simple_resource *res)
+static void *vfio_dmabuf_mmap(struct virtio_gpu_simple_resource *res,
+ VFIODevice *vdev)
+{
+ struct vfio_region_info *info = NULL;
+ ram_addr_t offset, len = 0;
+ void *map, *submap;
+ int i, ret = -1;
+ RAMBlock *rb;
+
+ /*
+ * We first reserve a contiguous chunk of address space for the entire
+ * dmabuf, then replace it with smaller mappings that correspond to the
+ * individual segments of the dmabuf.
+ */
+ map = mmap(NULL, res->blob_size, PROT_READ, MAP_SHARED, vdev->fd, 0);
+ if (map == MAP_FAILED) {
+ return map;
+ }
+
+ for (i = 0; i < res->iov_cnt; i++) {
+ rb = qemu_ram_block_from_host(res->iov[i].iov_base, false, &offset);
+ if (!rb) {
+ goto err;
+ }
+#if defined(VIRTIO_GPU_VFIO_BLOB)
+ ret = vfio_get_region_index_from_mr(rb->mr);
+ if (ret < 0) {
+ goto err;
+ }
+
+ ret = vfio_device_get_region_info(vdev, ret, &info);
+#endif
+ if (ret < 0 || !info) {
+ goto err;
+ }
+
+ submap = mmap(map + len, res->iov[i].iov_len, PROT_READ,
+ MAP_SHARED | MAP_FIXED, vdev->fd,
+ info->offset + offset);
+ if (submap == MAP_FAILED) {
+ goto err;
+ }
+
+ len += res->iov[i].iov_len;
+ }
+ return map;
+err:
+ munmap(map, res->blob_size);
+ return MAP_FAILED;
+}
+
+static void virtio_gpu_remap_dmabuf(struct virtio_gpu_simple_resource *res,
+ VFIODevice *vdev)
{
res->remapped = mmap(NULL, res->blob_size, PROT_READ,
MAP_SHARED, res->dmabuf_fd, 0);
if (res->remapped == MAP_FAILED) {
+ if (vdev) {
+ res->remapped = vfio_dmabuf_mmap(res, vdev);
+ if (res->remapped != MAP_FAILED) {
+ return;
+ }
+ }
warn_report("%s: dmabuf mmap failed: %s", __func__,
strerror(errno));
res->remapped = NULL;
@@ -155,7 +227,10 @@ bool virtio_gpu_have_udmabuf(void)
void virtio_gpu_init_dmabuf(struct virtio_gpu_simple_resource *res)
{
+ VFIODevice *vdev = NULL;
void *pdata = NULL;
+ ram_addr_t offset;
+ RAMBlock *rb;
res->dmabuf_fd = -1;
if (res->iov_cnt == 1 &&
@@ -166,11 +241,38 @@ void virtio_gpu_init_dmabuf(struct virtio_gpu_simple_resource *res)
return;
}
- virtio_gpu_create_udmabuf(res);
- if (res->dmabuf_fd < 0) {
+ rb = qemu_ram_block_from_host(res->iov[0].iov_base, false, &offset);
+ if (memory_region_is_ram_device(rb->mr)) {
+ vdev = vfio_device_lookup(rb->mr);
+ if (!vdev) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Could not find device to create dmabuf\n",
+ __func__);
+ return;
+ }
+
+ vfio_create_dmabuf(vdev, res);
+ if (res->dmabuf_fd < 0) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Could not create dmabuf from vfio device\n",
+ __func__);
+ return;
+ }
+ } else if (memory_region_is_ram(rb->mr)) {
+ virtio_gpu_create_udmabuf(res);
+ if (res->dmabuf_fd < 0) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Could not create dmabuf from memfd\n",
+ __func__);
+ return;
+ }
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: memory region cannot be used to create dmabuf\n",
+ __func__);
return;
}
- virtio_gpu_remap_dmabuf(res);
+ virtio_gpu_remap_dmabuf(res, vdev);
if (!res->remapped) {
return;
}
@@ -182,9 +284,7 @@ void virtio_gpu_init_dmabuf(struct virtio_gpu_simple_resource *res)
void virtio_gpu_fini_dmabuf(struct virtio_gpu_simple_resource *res)
{
- if (res->remapped) {
- virtio_gpu_destroy_dmabuf(res);
- }
+ virtio_gpu_destroy_dmabuf(res);
}
static void virtio_gpu_free_dmabuf(VirtIOGPU *g, VGPUDMABuf *dmabuf)
--
2.50.1
On 2025/11/09 14:33, Vivek Kasireddy wrote:
> In addition to memfd, a blob resource can also have its backing
> storage in a VFIO device region. Therefore, we first need to figure
> out if the blob is backed by a VFIO device region or a memfd before
> we can call the right API to get a dmabuf fd created.
>
> So, once we have the ramblock and the associated mr, we rely on
> memory_region_is_ram_device() to tell us where the backing storage
> is located. If the blob resource is VFIO backed, we try to find the
> right VFIO device that contains the blob and then invoke the API
> vfio_device_create_dmabuf().
>
> Note that in virtio_gpu_remap_udmabuf(), we first try to test if
> the VFIO dmabuf exporter supports mmap or not. If it doesn't, we
> use the VFIO device fd directly to create the CPU mapping.
I have just remembered that mremap() will work for both udmabuf and
VFIO. That will avoid having two different methods and make
vfio_get_region_index_from_mr() and vfio_device_get_region_info()
unnecessary.
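
For reference, a minimal standalone sketch of that trick (illustrative
helper name, not part of the series): with old_size set to 0 on a
MAP_SHARED mapping, mremap() leaves the original mapping intact and
creates a second mapping of the same pages, so no device-fd offsets are
needed.

#define _GNU_SOURCE
#include <stddef.h>
#include <sys/mman.h>

/*
 * Hypothetical helper: clone "len" bytes of an existing MAP_SHARED
 * mapping at "src" to the fixed address "dst". MREMAP_FIXED supplies
 * the target address and requires MREMAP_MAYMOVE.
 */
static void *clone_shared_mapping(void *src, void *dst, size_t len)
{
    return mremap(src, 0, len, MREMAP_MAYMOVE | MREMAP_FIXED, dst);
}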
>
> Cc: Marc-André Lureau <marcandre.lureau@redhat.com>
> Cc: Alex Bennée <alex.bennee@linaro.org>
> Cc: Akihiko Odaki <odaki@rsg.ci.i.u-tokyo.ac.jp>
> Cc: Dmitry Osipenko <dmitry.osipenko@collabora.com>
> Cc: Alex Williamson <alex.williamson@redhat.com>
> Cc: Cédric Le Goater <clg@redhat.com>
> Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
> ---
> hw/display/Kconfig | 5 ++
> hw/display/virtio-gpu-dmabuf.c | 114 +++++++++++++++++++++++++++++++--
> 2 files changed, 112 insertions(+), 7 deletions(-)
>
> diff --git a/hw/display/Kconfig b/hw/display/Kconfig
> index 1e95ab28ef..0d090f25f5 100644
> --- a/hw/display/Kconfig
> +++ b/hw/display/Kconfig
> @@ -106,6 +106,11 @@ config VIRTIO_VGA
> depends on VIRTIO_PCI
> select VGA
>
> +config VIRTIO_GPU_VFIO_BLOB
> + bool
> + default y
> + depends on VFIO
> +
> config VHOST_USER_GPU
> bool
> default y
> diff --git a/hw/display/virtio-gpu-dmabuf.c b/hw/display/virtio-gpu-dmabuf.c
> index 80143034d4..940b0e0411 100644
> --- a/hw/display/virtio-gpu-dmabuf.c
> +++ b/hw/display/virtio-gpu-dmabuf.c
> @@ -18,6 +18,7 @@
> #include "ui/console.h"
> #include "hw/virtio/virtio-gpu.h"
> #include "hw/virtio/virtio-gpu-pixman.h"
> +#include "hw/vfio/vfio-device.h"
> #include "trace.h"
> #include "system/ramblock.h"
> #include "system/hostmem.h"
> @@ -52,6 +53,19 @@ static bool qemu_iovec_same_memory_regions(const struct iovec *iov, int iov_cnt)
> return true;
> }
>
> +static void vfio_create_dmabuf(VFIODevice *vdev,
> + struct virtio_gpu_simple_resource *res)
> +{
> +#if defined(VIRTIO_GPU_VFIO_BLOB)
> + res->dmabuf_fd = vfio_device_create_dmabuf_fd(vdev, res->iov, res->iov_cnt);
> + if (res->dmabuf_fd < 0) {
> + qemu_log_mask(LOG_GUEST_ERROR,
> + "%s: VFIO_DEVICE_FEATURE_DMA_BUF: %s\n",
> + __func__, strerror(errno));
> + }
> +#endif
> +}
> +
> static void virtio_gpu_create_udmabuf(struct virtio_gpu_simple_resource *res)
> {
> struct udmabuf_create_list *list;
> @@ -93,11 +107,69 @@ static void virtio_gpu_create_udmabuf(struct virtio_gpu_simple_resource *res)
> g_free(list);
> }
>
> -static void virtio_gpu_remap_dmabuf(struct virtio_gpu_simple_resource *res)
> +static void *vfio_dmabuf_mmap(struct virtio_gpu_simple_resource *res,
> + VFIODevice *vdev)
> +{
> + struct vfio_region_info *info = NULL;
> + ram_addr_t offset, len = 0;
> + void *map, *submap;
> + int i, ret = -1;
> + RAMBlock *rb;
> +
> + /*
> + * We first reserve a contiguous chunk of address space for the entire
> + * dmabuf, then replace it with smaller mappings that correspond to the
> + * individual segments of the dmabuf.
> + */
> + map = mmap(NULL, res->blob_size, PROT_READ, MAP_SHARED, vdev->fd, 0);
> + if (map == MAP_FAILED) {
> + return map;
> + }
> +
> + for (i = 0; i < res->iov_cnt; i++) {
> + rb = qemu_ram_block_from_host(res->iov[i].iov_base, false, &offset);
> + if (!rb) {
> + goto err;
> + }
> +#if defined(VIRTIO_GPU_VFIO_BLOB)
> + ret = vfio_get_region_index_from_mr(rb->mr);
> + if (ret < 0) {
> + goto err;
> + }
> +
> + ret = vfio_device_get_region_info(vdev, ret, &info);
> +#endif
> + if (ret < 0 || !info) {
> + goto err;
> + }
> +
> + submap = mmap(map + len, res->iov[i].iov_len, PROT_READ,
> + MAP_SHARED | MAP_FIXED, vdev->fd,
> + info->offset + offset);
> + if (submap == MAP_FAILED) {
> + goto err;
> + }
> +
> + len += res->iov[i].iov_len;
> + }
> + return map;
> +err:
> + munmap(map, res->blob_size);
> + return MAP_FAILED;
> +}
> +
> +static void virtio_gpu_remap_dmabuf(struct virtio_gpu_simple_resource *res,
> + VFIODevice *vdev)
> {
> res->remapped = mmap(NULL, res->blob_size, PROT_READ,
> MAP_SHARED, res->dmabuf_fd, 0);
> if (res->remapped == MAP_FAILED) {
> + if (vdev) {
> + res->remapped = vfio_dmabuf_mmap(res, vdev);
> + if (res->remapped != MAP_FAILED) {
> + return;
> + }
> + }
> warn_report("%s: dmabuf mmap failed: %s", __func__,
> strerror(errno));
> res->remapped = NULL;
> @@ -155,7 +227,10 @@ bool virtio_gpu_have_udmabuf(void)
>
> void virtio_gpu_init_dmabuf(struct virtio_gpu_simple_resource *res)
> {
> + VFIODevice *vdev = NULL;
> void *pdata = NULL;
> + ram_addr_t offset;
> + RAMBlock *rb;
>
> res->dmabuf_fd = -1;
> if (res->iov_cnt == 1 &&
> @@ -166,11 +241,38 @@ void virtio_gpu_init_dmabuf(struct virtio_gpu_simple_resource *res)
> return;
> }
>
> - virtio_gpu_create_udmabuf(res);
> - if (res->dmabuf_fd < 0) {
> + rb = qemu_ram_block_from_host(res->iov[0].iov_base, false, &offset);
> + if (memory_region_is_ram_device(rb->mr)) {
> + vdev = vfio_device_lookup(rb->mr);
> + if (!vdev) {
> + qemu_log_mask(LOG_GUEST_ERROR,
> + "%s: Could not find device to create dmabuf\n",
> + __func__);
This should say "VFIO device" since no other RAM device is supported.
> + return;
> + }
> +
> + vfio_create_dmabuf(vdev, res);
> + if (res->dmabuf_fd < 0) {
> + qemu_log_mask(LOG_GUEST_ERROR,
> + "%s: Could not create dmabuf from vfio device\n",
Nitpick: let's make VFIO uppercase as other user-visible messages do.
> + __func__);
> + return;
> + }
> + } else if (memory_region_is_ram(rb->mr)) {
> + virtio_gpu_create_udmabuf(res);
> + if (res->dmabuf_fd < 0) {
> + qemu_log_mask(LOG_GUEST_ERROR,
> + "%s: Could not create dmabuf from memfd\n",
> + __func__);
> + return;
> + }
> + } else {
> + qemu_log_mask(LOG_GUEST_ERROR,
> + "%s: memory region cannot be used to create dmabuf\n",
> + __func__);
> return;
> }
> - virtio_gpu_remap_dmabuf(res);
> + virtio_gpu_remap_dmabuf(res, vdev);
> if (!res->remapped) {
> return;
> }
> @@ -182,9 +284,7 @@ void virtio_gpu_init_dmabuf(struct virtio_gpu_simple_resource *res)
>
> void virtio_gpu_fini_dmabuf(struct virtio_gpu_simple_resource *res)
> {
> - if (res->remapped) {
> - virtio_gpu_destroy_dmabuf(res);
> - }
> + virtio_gpu_destroy_dmabuf(res);
> }
>
> static void virtio_gpu_free_dmabuf(VirtIOGPU *g, VGPUDMABuf *dmabuf)
Hi Akihiko,
> Subject: Re: [PATCH v2 10/10] virtio-gpu-dmabuf: Create dmabuf for blobs
> associated with VFIO devices
>
> On 2025/11/09 14:33, Vivek Kasireddy wrote:
> > In addition to memfd, a blob resource can also have its backing
> > storage in a VFIO device region. Therefore, we first need to figure
> > out if the blob is backed by a VFIO device region or a memfd before
> > we can call the right API to get a dmabuf fd created.
> >
> > So, once we have the ramblock and the associated mr, we rely on
> > memory_region_is_ram_device() to tell us where the backing storage
> > is located. If the blob resource is VFIO backed, we try to find the
> > right VFIO device that contains the blob and then invoke the API
> > vfio_device_create_dmabuf().
> >
> > Note that in virtio_gpu_remap_udmabuf(), we first try to test if
> > the VFIO dmabuf exporter supports mmap or not. If it doesn't, we
> > use the VFIO device fd directly to create the CPU mapping.
>
> I have just remembered that mremap() will work for either of udmabuf and
> VFIO. That will avoid having two different methods and make
> vfio_get_region_index_from_mr() and vfio_device_get_region_info()
> unnecessary.
IIUC, the name virtio_gpu_remap_dmabuf() is misleading because we are not
actually doing a remap but are simply calling mmap(). In other words, we
are not expanding or shrinking an existing mapping but are creating a new
one. And for dmabufs associated with VFIO devices, I don't see any way to
determine the region offset without calling vfio_get_region_index_from_mr()
and vfio_device_get_region_info().

So, I guess I'll create a new patch to do s/remapped/map.
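
For context, a standalone POSIX sketch (hypothetical fd and offsets, not
code from the series) of the reserve-then-overlay pattern that
vfio_dmabuf_mmap() uses: reserve one contiguous window, then replace
pieces of it with MAP_FIXED mappings of the real segments.

#include <stddef.h>
#include <sys/types.h>
#include <sys/mman.h>

/* Map two equally sized file segments back to back in one window. */
static void *overlay_two_segments(int fd, off_t off0, off_t off1, size_t seg)
{
    /* PROT_NONE anonymous mapping only reserves the address range. */
    void *win = mmap(NULL, 2 * seg, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (win == MAP_FAILED) {
        return NULL;
    }
    /* MAP_FIXED atomically replaces the reservation with real mappings. */
    if (mmap(win, seg, PROT_READ, MAP_SHARED | MAP_FIXED,
             fd, off0) == MAP_FAILED ||
        mmap((char *)win + seg, seg, PROT_READ, MAP_SHARED | MAP_FIXED,
             fd, off1) == MAP_FAILED) {
        munmap(win, 2 * seg);
        return NULL;
    }
    return win;
}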
>
> >
> > Cc: Marc-André Lureau <marcandre.lureau@redhat.com>
> > Cc: Alex Bennée <alex.bennee@linaro.org>
> > Cc: Akihiko Odaki <odaki@rsg.ci.i.u-tokyo.ac.jp>
> > Cc: Dmitry Osipenko <dmitry.osipenko@collabora.com>
> > Cc: Alex Williamson <alex.williamson@redhat.com>
> > Cc: Cédric Le Goater <clg@redhat.com>
> > Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
> > ---
> > hw/display/Kconfig | 5 ++
> >   hw/display/virtio-gpu-dmabuf.c | 114 +++++++++++++++++++++++++++++++--
> > 2 files changed, 112 insertions(+), 7 deletions(-)
> >
> > diff --git a/hw/display/Kconfig b/hw/display/Kconfig
> > index 1e95ab28ef..0d090f25f5 100644
> > --- a/hw/display/Kconfig
> > +++ b/hw/display/Kconfig
> > @@ -106,6 +106,11 @@ config VIRTIO_VGA
> > depends on VIRTIO_PCI
> > select VGA
> >
> > +config VIRTIO_GPU_VFIO_BLOB
> > + bool
> > + default y
> > + depends on VFIO
> > +
> > config VHOST_USER_GPU
> > bool
> > default y
> > diff --git a/hw/display/virtio-gpu-dmabuf.c b/hw/display/virtio-gpu-dmabuf.c
> > index 80143034d4..940b0e0411 100644
> > --- a/hw/display/virtio-gpu-dmabuf.c
> > +++ b/hw/display/virtio-gpu-dmabuf.c
> > @@ -18,6 +18,7 @@
> > #include "ui/console.h"
> > #include "hw/virtio/virtio-gpu.h"
> > #include "hw/virtio/virtio-gpu-pixman.h"
> > +#include "hw/vfio/vfio-device.h"
> > #include "trace.h"
> > #include "system/ramblock.h"
> > #include "system/hostmem.h"
> > @@ -52,6 +53,19 @@ static bool qemu_iovec_same_memory_regions(const struct iovec *iov, int iov_cnt)
> > return true;
> > }
> >
> > +static void vfio_create_dmabuf(VFIODevice *vdev,
> > + struct virtio_gpu_simple_resource *res)
> > +{
> > +#if defined(VIRTIO_GPU_VFIO_BLOB)
> > + res->dmabuf_fd = vfio_device_create_dmabuf_fd(vdev, res->iov, res->iov_cnt);
> > + if (res->dmabuf_fd < 0) {
> > + qemu_log_mask(LOG_GUEST_ERROR,
> > + "%s: VFIO_DEVICE_FEATURE_DMA_BUF: %s\n",
> > + __func__, strerror(errno));
> > + }
> > +#endif
> > +}
> > +
> > static void virtio_gpu_create_udmabuf(struct virtio_gpu_simple_resource *res)
> > {
> > struct udmabuf_create_list *list;
> > @@ -93,11 +107,69 @@ static void virtio_gpu_create_udmabuf(struct virtio_gpu_simple_resource *res)
> > g_free(list);
> > }
> >
> > -static void virtio_gpu_remap_dmabuf(struct virtio_gpu_simple_resource *res)
> > +static void *vfio_dmabuf_mmap(struct virtio_gpu_simple_resource *res,
> > + VFIODevice *vdev)
> > +{
> > + struct vfio_region_info *info = NULL;
> > + ram_addr_t offset, len = 0;
> > + void *map, *submap;
> > + int i, ret = -1;
> > + RAMBlock *rb;
> > +
> > + /*
> > + * We first reserve a contiguous chunk of address space for the entire
> > + * dmabuf, then replace it with smaller mappings that correspond to the
> > + * individual segments of the dmabuf.
> > + */
> > + map = mmap(NULL, res->blob_size, PROT_READ, MAP_SHARED, vdev->fd, 0);
> > + if (map == MAP_FAILED) {
> > + return map;
> > + }
> > +
> > + for (i = 0; i < res->iov_cnt; i++) {
> > + rb = qemu_ram_block_from_host(res->iov[i].iov_base, false, &offset);
> > + if (!rb) {
> > + goto err;
> > + }
> > +#if defined(VIRTIO_GPU_VFIO_BLOB)
> > + ret = vfio_get_region_index_from_mr(rb->mr);
> > + if (ret < 0) {
> > + goto err;
> > + }
> > +
> > + ret = vfio_device_get_region_info(vdev, ret, &info);
> > +#endif
> > + if (ret < 0 || !info) {
> > + goto err;
> > + }
> > +
> > + submap = mmap(map + len, res->iov[i].iov_len, PROT_READ,
> > + MAP_SHARED | MAP_FIXED, vdev->fd,
> > + info->offset + offset);
> > + if (submap == MAP_FAILED) {
> > + goto err;
> > + }
> > +
> > + len += res->iov[i].iov_len;
> > + }
> > + return map;
> > +err:
> > + munmap(map, res->blob_size);
> > + return MAP_FAILED;
> > +}
> > +
> > +static void virtio_gpu_remap_dmabuf(struct virtio_gpu_simple_resource *res,
> > + VFIODevice *vdev)
> > {
> > res->remapped = mmap(NULL, res->blob_size, PROT_READ,
> > MAP_SHARED, res->dmabuf_fd, 0);
> > if (res->remapped == MAP_FAILED) {
> > + if (vdev) {
> > + res->remapped = vfio_dmabuf_mmap(res, vdev);
> > + if (res->remapped != MAP_FAILED) {
> > + return;
> > + }
> > + }
> > warn_report("%s: dmabuf mmap failed: %s", __func__,
> > strerror(errno));
> > res->remapped = NULL;
> > @@ -155,7 +227,10 @@ bool virtio_gpu_have_udmabuf(void)
> >
> > void virtio_gpu_init_dmabuf(struct virtio_gpu_simple_resource *res)
> > {
> > + VFIODevice *vdev = NULL;
> > void *pdata = NULL;
> > + ram_addr_t offset;
> > + RAMBlock *rb;
> >
> > res->dmabuf_fd = -1;
> > if (res->iov_cnt == 1 &&
> > @@ -166,11 +241,38 @@ void virtio_gpu_init_dmabuf(struct virtio_gpu_simple_resource *res)
> > return;
> > }
> >
> > - virtio_gpu_create_udmabuf(res);
> > - if (res->dmabuf_fd < 0) {
> > + rb = qemu_ram_block_from_host(res->iov[0].iov_base, false, &offset);
> > + if (memory_region_is_ram_device(rb->mr)) {
> > + vdev = vfio_device_lookup(rb->mr);
> > + if (!vdev) {
> > + qemu_log_mask(LOG_GUEST_ERROR,
> > + "%s: Could not find device to create dmabuf\n",
> > + __func__);
>
> This should say "VFIO device" since no other RAM device is supported.
Ok, I'll make the change.
>
> > + return;
> > + }
> > +
> > + vfio_create_dmabuf(vdev, res);
> > + if (res->dmabuf_fd < 0) {
> > + qemu_log_mask(LOG_GUEST_ERROR,
> > + "%s: Could not create dmabuf from vfio device\n",
>
> Nitpick: let's make VFIO uppercase as other user-visible messages do.
Sure, will do.
Thanks,
Vivek
>
> > + __func__);
> > + return;
> > + }
> > + } else if (memory_region_is_ram(rb->mr)) {
> > + virtio_gpu_create_udmabuf(res);
> > + if (res->dmabuf_fd < 0) {
> > + qemu_log_mask(LOG_GUEST_ERROR,
> > + "%s: Could not create dmabuf from memfd\n",
> > + __func__);
> > + return;
> > + }
> > + } else {
> > + qemu_log_mask(LOG_GUEST_ERROR,
> > + "%s: memory region cannot be used to create dmabuf\n",
> > + __func__);
> > return;
> > }
> > - virtio_gpu_remap_dmabuf(res);
> > + virtio_gpu_remap_dmabuf(res, vdev);
> > if (!res->remapped) {
> > return;
> > }
> > @@ -182,9 +284,7 @@ void virtio_gpu_init_dmabuf(struct virtio_gpu_simple_resource *res)
> >
> > void virtio_gpu_fini_dmabuf(struct virtio_gpu_simple_resource *res)
> > {
> > - if (res->remapped) {
> > - virtio_gpu_destroy_dmabuf(res);
> > - }
> > + virtio_gpu_destroy_dmabuf(res);
> > }
> >
> > static void virtio_gpu_free_dmabuf(VirtIOGPU *g, VGPUDMABuf *dmabuf)
On 2025/11/12 13:26, Kasireddy, Vivek wrote:
> Hi Akihiko,
>
>> Subject: Re: [PATCH v2 10/10] virtio-gpu-dmabuf: Create dmabuf for blobs
>> associated with VFIO devices
>>
>> On 2025/11/09 14:33, Vivek Kasireddy wrote:
>>> In addition to memfd, a blob resource can also have its backing
>>> storage in a VFIO device region. Therefore, we first need to figure
>>> out if the blob is backed by a VFIO device region or a memfd before
>>> we can call the right API to get a dmabuf fd created.
>>>
>>> So, once we have the ramblock and the associated mr, we rely on
>>> memory_region_is_ram_device() to tell us where the backing storage
>>> is located. If the blob resource is VFIO backed, we try to find the
>>> right VFIO device that contains the blob and then invoke the API
>>> vfio_device_create_dmabuf().
>>>
>>> Note that in virtio_gpu_remap_udmabuf(), we first try to test if
>>> the VFIO dmabuf exporter supports mmap or not. If it doesn't, we
>>> use the VFIO device fd directly to create the CPU mapping.
>>
>> I have just remembered that mremap() will work for either of udmabuf and
>> VFIO. That will avoid having two different methods and make
>> vfio_get_region_index_from_mr() and vfio_device_get_region_info()
>> unnecessary.
> IIUC, the name virtio_gpu_remap_dmabuf() is misleading because we are not
> actually doing remap but are simply calling mmap(). In other words, we are not
> expanding or shrinking existing mapping but are creating a new mapping.
> And, for dmabufs associated with VFIO devices, without having to call
> vfio_get_region_index_from_mr() and vfio_device_get_region_info(), I don't see
> any other way to determine the region offset.
>
> So, I guess I'll create a new patch to do s/remapped/map.

I mean calling mremap() with 0 as the old_size parameter. The man page says:

> If the value of old_size is zero, and old_address refers to a
> shareable mapping (see the description of MAP_SHARED in mmap(2)), then
> mremap() will create a new mapping of the same pages.

Regards,
Akihiko Odaki
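
To make the man-page behavior concrete, here is a minimal standalone
sketch (illustrative names, not code from the series) of a remap step
that works the same way for udmabuf- and VFIO-backed blobs, assuming
page-aligned iov segments that point into MAP_SHARED mappings:

#define _GNU_SOURCE
#include <stddef.h>
#include <sys/mman.h>
#include <sys/uio.h>

static void *remap_blob(const struct iovec *iov, int iov_cnt, size_t blob_size)
{
    /* Reserve one contiguous window for the whole blob. */
    void *map = mmap(NULL, blob_size, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    size_t off = 0;

    if (map == MAP_FAILED) {
        return NULL;
    }
    for (int i = 0; i < iov_cnt; i++) {
        /* old_size == 0 clones the shared mapping backing iov_base
         * instead of moving it; MREMAP_FIXED overlays the window. */
        if (mremap(iov[i].iov_base, 0, iov[i].iov_len,
                   MREMAP_MAYMOVE | MREMAP_FIXED,
                   (char *)map + off) == MAP_FAILED) {
            munmap(map, blob_size);
            return NULL;
        }
        off += iov[i].iov_len;
    }
    return map;
}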