From: Antonio Caggiano <antonio.caggiano@collabora.com>

Support BLOB resource creation, mapping and unmapping by calling the
new stable virglrenderer 0.10 interface. This is only enabled when the
interface is available and the blob config is set, e.g.
-device virtio-vga-gl,blob=true

Signed-off-by: Antonio Caggiano <antonio.caggiano@collabora.com>
Signed-off-by: Xenia Ragiadakou <xenia.ragiadakou@amd.com>
Signed-off-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
---
hw/display/virtio-gpu-virgl.c | 248 +++++++++++++++++++++++++++++++++
hw/display/virtio-gpu.c | 4 +-
include/hw/virtio/virtio-gpu.h | 4 +
3 files changed, 255 insertions(+), 1 deletion(-)
diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c
index bb9ee1eba9a0..de132b22f554 100644
--- a/hw/display/virtio-gpu-virgl.c
+++ b/hw/display/virtio-gpu-virgl.c
@@ -32,6 +32,102 @@ virgl_get_egl_display(G_GNUC_UNUSED void *cookie)
}
#endif
+#ifdef HAVE_VIRGL_RESOURCE_BLOB
+struct virtio_gpu_virgl_hostmem_region {
+ MemoryRegion mr;
+ VirtIOGPUBase *b;
+ struct virtio_gpu_simple_resource *res;
+};
+
+static void virtio_gpu_virgl_hostmem_region_free(void *obj)
+{
+ MemoryRegion *mr = MEMORY_REGION(obj);
+ struct virtio_gpu_virgl_hostmem_region *vmr;
+
+ vmr = container_of(mr, struct virtio_gpu_virgl_hostmem_region, mr);
+ vmr->res->async_unmap_in_progress = false;
+ vmr->res->async_unmap_completed = true;
+ vmr->b->renderer_blocked--;
+
+ g_free(vmr);
+}
+
+static int
+virtio_gpu_virgl_map_resource_blob(VirtIOGPU *g,
+ struct virtio_gpu_simple_resource *res,
+ uint64_t offset)
+{
+ struct virtio_gpu_virgl_hostmem_region *vmr;
+ VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
+ uint64_t size;
+ void *data;
+ int ret;
+
+ if (!virtio_gpu_hostmem_enabled(b->conf)) {
+ return -EOPNOTSUPP;
+ }
+
+ ret = virgl_renderer_resource_map(res->resource_id, &data, &size);
+ if (ret) {
+ return -ret;
+ }
+
+ vmr = g_new0(struct virtio_gpu_virgl_hostmem_region, 1);
+ MemoryRegion *mr = &vmr->mr;
+ vmr->res = res;
+ vmr->b = b;
+
+ memory_region_init_ram_ptr(mr, OBJECT(mr), "blob", size, data);
+ memory_region_add_subregion(&b->hostmem, offset, mr);
+ memory_region_set_enabled(mr, true);
+
+ /*
+ * Potentially, MR could outlive the resource if MR's reference is held
+ * outside of virtio-gpu. In order to prevent unmapping resource while
+ * MR is alive, and thus, making the data pointer invalid, we will block
+ * virtio-gpu command processing until MR is fully unreferenced and
+ * released.
+ */
+ OBJECT(mr)->free = virtio_gpu_virgl_hostmem_region_free;
+
+ res->mr = mr;
+
+ return 0;
+}
+
+static bool
+virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g,
+ struct virtio_gpu_simple_resource *res)
+{
+ VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
+
+ if (!res->async_unmap_in_progress && !res->async_unmap_completed) {
+ /* memory region owns self res->mr object and frees it by itself */
+ MemoryRegion *mr = res->mr;
+ res->mr = NULL;
+
+ res->async_unmap_in_progress = true;
+
+ /* render will be unblocked when MR is freed */
+ b->renderer_blocked++;
+
+ memory_region_set_enabled(mr, false);
+ memory_region_del_subregion(&b->hostmem, mr);
+ object_unparent(OBJECT(mr));
+ }
+
+ if (!res->async_unmap_completed) {
+ return false;
+ }
+
+ virgl_renderer_resource_unmap(res->resource_id);
+ res->async_unmap_completed = false;
+
+ return true;
+
+}
+#endif /* HAVE_VIRGL_RESOURCE_BLOB */
+
static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
@@ -145,6 +241,14 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g,
return;
}
+ if (res->mr || cmd->suspended) {
+ bool unmapped = virtio_gpu_virgl_unmap_resource_blob(g, res);
+ cmd->suspended = !unmapped;
+ if (cmd->suspended) {
+ return;
+ }
+ }
+
virgl_renderer_resource_detach_iov(unref.resource_id,
&res_iovs,
&num_iovs);
@@ -495,6 +599,141 @@ static void virgl_cmd_get_capset(VirtIOGPU *g,
}
#ifdef HAVE_VIRGL_RESOURCE_BLOB
+static void virgl_cmd_resource_create_blob(VirtIOGPU *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virgl_renderer_resource_create_blob_args virgl_args = { 0 };
+ struct virtio_gpu_resource_create_blob cblob;
+ struct virtio_gpu_simple_resource *res;
+ int ret;
+
+ if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+ return;
+ }
+
+ VIRTIO_GPU_FILL_CMD(cblob);
+ virtio_gpu_create_blob_bswap(&cblob);
+ trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);
+
+ if (cblob.resource_id == 0) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
+ __func__);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ res = virtio_gpu_find_resource(g, cblob.resource_id);
+ if (res) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
+ __func__, cblob.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ res = g_new0(struct virtio_gpu_simple_resource, 1);
+ res->resource_id = cblob.resource_id;
+ res->blob_size = cblob.size;
+ res->dmabuf_fd = -1;
+
+ if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) {
+ ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
+ cmd, &res->addrs,
+ &res->iov, &res->iov_cnt);
+ if (!ret) {
+ g_free(res);
+ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+ return;
+ }
+ }
+
+ QTAILQ_INSERT_HEAD(&g->reslist, res, next);
+
+ virgl_args.res_handle = cblob.resource_id;
+ virgl_args.ctx_id = cblob.hdr.ctx_id;
+ virgl_args.blob_mem = cblob.blob_mem;
+ virgl_args.blob_id = cblob.blob_id;
+ virgl_args.blob_flags = cblob.blob_flags;
+ virgl_args.size = cblob.size;
+ virgl_args.iovecs = res->iov;
+ virgl_args.num_iovs = res->iov_cnt;
+
+ ret = virgl_renderer_resource_create_blob(&virgl_args);
+ if (ret) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n",
+ __func__, strerror(-ret));
+ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+ }
+}
+
+static void virgl_cmd_resource_map_blob(VirtIOGPU *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_resource_map_blob mblob;
+ struct virtio_gpu_simple_resource *res;
+ struct virtio_gpu_resp_map_info resp;
+ int ret;
+
+ VIRTIO_GPU_FILL_CMD(mblob);
+ virtio_gpu_map_blob_bswap(&mblob);
+
+ res = virtio_gpu_find_resource(g, mblob.resource_id);
+ if (!res) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
+ __func__, mblob.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ if (res->mr) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already mapped %d\n",
+ __func__, mblob.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ ret = virtio_gpu_virgl_map_resource_blob(g, res, mblob.offset);
+ if (ret) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource map error: %s\n",
+ __func__, strerror(ret));
+ cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
+ return;
+ }
+
+ memset(&resp, 0, sizeof(resp));
+ resp.hdr.type = VIRTIO_GPU_RESP_OK_MAP_INFO;
+ virgl_renderer_resource_get_map_info(mblob.resource_id, &resp.map_info);
+ virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
+}
+
+static void virgl_cmd_resource_unmap_blob(VirtIOGPU *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_resource_unmap_blob ublob;
+ struct virtio_gpu_simple_resource *res;
+
+ VIRTIO_GPU_FILL_CMD(ublob);
+ virtio_gpu_unmap_blob_bswap(&ublob);
+
+ res = virtio_gpu_find_resource(g, ublob.resource_id);
+ if (!res) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
+ __func__, ublob.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ if (!res->mr && !cmd->suspended) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already unmapped %d\n",
+ __func__, ublob.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ bool unmapped = virtio_gpu_virgl_unmap_resource_blob(g, res);
+ cmd->suspended = !unmapped;
+}
+
static void virgl_cmd_set_scanout_blob(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
@@ -661,6 +900,15 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
virtio_gpu_get_edid(g, cmd);
break;
#ifdef HAVE_VIRGL_RESOURCE_BLOB
+ case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
+ virgl_cmd_resource_create_blob(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB:
+ virgl_cmd_resource_map_blob(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB:
+ virgl_cmd_resource_unmap_blob(g, cmd);
+ break;
case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
virgl_cmd_set_scanout_blob(g, cmd);
break;
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index a1bd4d6914c4..45c1f2006712 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -1483,10 +1483,12 @@ void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
return;
}
+#ifndef HAVE_VIRGL_RESOURCE_BLOB
if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
- error_setg(errp, "blobs and virgl are not compatible (yet)");
+ error_setg(errp, "old virglrenderer, blob resources unsupported");
return;
}
+#endif
}
if (!virtio_gpu_base_device_realize(qdev,
diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h
index dc24360656ce..b9d5e106f3c5 100644
--- a/include/hw/virtio/virtio-gpu.h
+++ b/include/hw/virtio/virtio-gpu.h
@@ -61,6 +61,10 @@ struct virtio_gpu_simple_resource {
int dmabuf_fd;
uint8_t *remapped;
+ MemoryRegion *mr;
+ bool async_unmap_completed;
+ bool async_unmap_in_progress;
+
QTAILQ_ENTRY(virtio_gpu_simple_resource) next;
};
--
2.44.0

On 2024/04/19 4:00, Dmitry Osipenko wrote:
> From: Antonio Caggiano <antonio.caggiano@collabora.com>
>
> Support BLOB resource creation, mapping and unmapping by calling the
> new stable virglrenderer 0.10 interface. This is only enabled when the
> interface is available and the blob config is set, e.g.
> -device virtio-vga-gl,blob=true
>
> Signed-off-by: Antonio Caggiano <antonio.caggiano@collabora.com>
> Signed-off-by: Xenia Ragiadakou <xenia.ragiadakou@amd.com>
> Signed-off-by: Huang Rui <ray.huang@amd.com>
> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
> ---
> hw/display/virtio-gpu-virgl.c | 248 +++++++++++++++++++++++++++++++++
> hw/display/virtio-gpu.c | 4 +-
> include/hw/virtio/virtio-gpu.h | 4 +
> 3 files changed, 255 insertions(+), 1 deletion(-)
>
> diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c
> index bb9ee1eba9a0..de132b22f554 100644
> --- a/hw/display/virtio-gpu-virgl.c
> +++ b/hw/display/virtio-gpu-virgl.c
> @@ -32,6 +32,102 @@ virgl_get_egl_display(G_GNUC_UNUSED void *cookie)
> }
> #endif
>
> +#ifdef HAVE_VIRGL_RESOURCE_BLOB
> +struct virtio_gpu_virgl_hostmem_region {
> + MemoryRegion mr;
> + VirtIOGPUBase *b;
> + struct virtio_gpu_simple_resource *res;
> +};
> +
> +static void virtio_gpu_virgl_hostmem_region_free(void *obj)
> +{
> + MemoryRegion *mr = MEMORY_REGION(obj);
> + struct virtio_gpu_virgl_hostmem_region *vmr;
> +
> + vmr = container_of(mr, struct virtio_gpu_virgl_hostmem_region, mr);
> + vmr->res->async_unmap_in_progress = false;
> + vmr->res->async_unmap_completed = true;
> + vmr->b->renderer_blocked--;
Resume the command queue processing here.
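One possible shape (untested sketch; it assumes the hostmem region also keeps a
VirtIOGPU pointer and that a hypothetical cmdq_resume_bh bottom half is created
at realize time with aio_bh_new(), because this free callback is not guaranteed
to run on the main-loop thread that owns the GL context):

static void virtio_gpu_virgl_resume_cmdq_bh(void *opaque)
{
    VirtIOGPU *g = opaque;

    /* re-run the commands that were suspended while the mapping was alive */
    virtio_gpu_process_cmdq(g);
}

virtio_gpu_virgl_hostmem_region_free() would then do
qemu_bh_schedule(vmr->g->cmdq_resume_bh) right after decrementing
renderer_blocked.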
> +
> + g_free(vmr);
> +}
> +
> +static int
> +virtio_gpu_virgl_map_resource_blob(VirtIOGPU *g,
> + struct virtio_gpu_simple_resource *res,
> + uint64_t offset)
> +{
> + struct virtio_gpu_virgl_hostmem_region *vmr;
> + VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
> + uint64_t size;
> + void *data;
> + int ret;
> +
> + if (!virtio_gpu_hostmem_enabled(b->conf)) {
> + return -EOPNOTSUPP;
Log a message here instead of picking an error number.
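I.e. report the reason at this point instead of leaving the caller to decode
the errno; a rough sketch (the message wording and the errno kept here are
only illustrative):

    if (!virtio_gpu_hostmem_enabled(b->conf)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: hostmem must be enabled for blob mapping\n",
                      __func__);
        return -EOPNOTSUPP;
    }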
> + }
> +
> + ret = virgl_renderer_resource_map(res->resource_id, &data, &size);
> + if (ret) {
> + return -ret;
> + }
> +
> + vmr = g_new0(struct virtio_gpu_virgl_hostmem_region, 1);
> + MemoryRegion *mr = &vmr->mr;
Mixed declarations are not allowed; see: docs/devel/style.rst
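I.e. declare mr together with the other locals at the top of the function,
for example:

    struct virtio_gpu_virgl_hostmem_region *vmr;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    MemoryRegion *mr;
    uint64_t size;
    void *data;
    int ret;

and later:

    vmr = g_new0(struct virtio_gpu_virgl_hostmem_region, 1);
    mr = &vmr->mr;
    vmr->res = res;
    vmr->b = b;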
> + vmr->res = res;
> + vmr->b = b;
> +
> + memory_region_init_ram_ptr(mr, OBJECT(mr), "blob", size, data);
> + memory_region_add_subregion(&b->hostmem, offset, mr);
> + memory_region_set_enabled(mr, true);
> +
> + /*
> + * Potentially, MR could outlive the resource if MR's reference is held
> + * outside of virtio-gpu. In order to prevent unmapping resource while
> + * MR is alive, and thus, making the data pointer invalid, we will block
> + * virtio-gpu command processing until MR is fully unreferenced and
> + * released.
> + */
> + OBJECT(mr)->free = virtio_gpu_virgl_hostmem_region_free;
> +
> + res->mr = mr;
> +
> + return 0;
> +}
> +
> +static bool
> +virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g,
> + struct virtio_gpu_simple_resource *res)
> +{
> + VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
> +
> + if (!res->async_unmap_in_progress && !res->async_unmap_completed) {
> + /* memory region owns self res->mr object and frees it by itself */
> + MemoryRegion *mr = res->mr;
> + res->mr = NULL;
> +
> + res->async_unmap_in_progress = true;
> +
> + /* render will be unblocked when MR is freed */
> + b->renderer_blocked++;
> +
> + memory_region_set_enabled(mr, false);
> + memory_region_del_subregion(&b->hostmem, mr);
> + object_unparent(OBJECT(mr));
> + }
> +
> + if (!res->async_unmap_completed) {
This check is unnecessary as the command processing is blocked until the
unmap operation completes.
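If so, the two flags could presumably be collapsed into one; a rough, untested
sketch of what the unmap helper might then look like (finish_unmapping being a
hypothetical single flag set by the MR free callback in place of the two
async_unmap_* flags):

    if (res->mr) {
        MemoryRegion *mr = res->mr;

        res->mr = NULL;

        /* renderer will be unblocked by the MR free callback */
        b->renderer_blocked++;

        memory_region_set_enabled(mr, false);
        memory_region_del_subregion(&b->hostmem, mr);
        object_unparent(OBJECT(mr));

        /* suspend the command until the MR is actually released */
        return false;
    }

    if (res->finish_unmapping) {
        virgl_renderer_resource_unmap(res->resource_id);
        res->finish_unmapping = false;
    }

    return true;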
> + return false;
> + }
> +
> + virgl_renderer_resource_unmap(res->resource_id);
> + res->async_unmap_completed = false;
> +
> + return true;
> +
> +}
> +#endif /* HAVE_VIRGL_RESOURCE_BLOB */
> +
> static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
> struct virtio_gpu_ctrl_command *cmd)
> {
> @@ -145,6 +241,14 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g,
> return;
> }
>
> + if (res->mr || cmd->suspended) {
> + bool unmapped = virtio_gpu_virgl_unmap_resource_blob(g, res);
> + cmd->suspended = !unmapped;
> + if (cmd->suspended) {
> + return;
> + }
> + }
> +
> virgl_renderer_resource_detach_iov(unref.resource_id,
> &res_iovs,
> &num_iovs);
> @@ -495,6 +599,141 @@ static void virgl_cmd_get_capset(VirtIOGPU *g,
> }
>
> #ifdef HAVE_VIRGL_RESOURCE_BLOB
> +static void virgl_cmd_resource_create_blob(VirtIOGPU *g,
> + struct virtio_gpu_ctrl_command *cmd)
> +{
> + struct virgl_renderer_resource_create_blob_args virgl_args = { 0 };
> + struct virtio_gpu_resource_create_blob cblob;
> + struct virtio_gpu_simple_resource *res;
> + int ret;
> +
> + if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
> + return;
> + }
> +
> + VIRTIO_GPU_FILL_CMD(cblob);
> + virtio_gpu_create_blob_bswap(&cblob);
> + trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);
> +
> + if (cblob.resource_id == 0) {
> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
> + __func__);
> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
> + return;
> + }
> +
> + res = virtio_gpu_find_resource(g, cblob.resource_id);
> + if (res) {
> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
> + __func__, cblob.resource_id);
> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
> + return;
> + }
> +
> + res = g_new0(struct virtio_gpu_simple_resource, 1);
> + res->resource_id = cblob.resource_id;
> + res->blob_size = cblob.size;
> + res->dmabuf_fd = -1;
> +
> + if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) {
> + ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
> + cmd, &res->addrs,
> + &res->iov, &res->iov_cnt);
> + if (!ret) {
> + g_free(res);
> + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
> + return;
> + }
> + }
> +
> + QTAILQ_INSERT_HEAD(&g->reslist, res, next);
> +
> + virgl_args.res_handle = cblob.resource_id;
> + virgl_args.ctx_id = cblob.hdr.ctx_id;
> + virgl_args.blob_mem = cblob.blob_mem;
> + virgl_args.blob_id = cblob.blob_id;
> + virgl_args.blob_flags = cblob.blob_flags;
> + virgl_args.size = cblob.size;
> + virgl_args.iovecs = res->iov;
> + virgl_args.num_iovs = res->iov_cnt;
> +
> + ret = virgl_renderer_resource_create_blob(&virgl_args);
> + if (ret) {
> + qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n",
> + __func__, strerror(-ret));
> + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
> + }
> +}
> +
> +static void virgl_cmd_resource_map_blob(VirtIOGPU *g,
> + struct virtio_gpu_ctrl_command *cmd)
> +{
> + struct virtio_gpu_resource_map_blob mblob;
> + struct virtio_gpu_simple_resource *res;
> + struct virtio_gpu_resp_map_info resp;
> + int ret;
> +
> + VIRTIO_GPU_FILL_CMD(mblob);
> + virtio_gpu_map_blob_bswap(&mblob);
> +
> + res = virtio_gpu_find_resource(g, mblob.resource_id);
> + if (!res) {
> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
> + __func__, mblob.resource_id);
> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
> + return;
> + }
> +
> + if (res->mr) {
> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already mapped %d\n",
> + __func__, mblob.resource_id);
> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
> + return;
> + }
> +
> + ret = virtio_gpu_virgl_map_resource_blob(g, res, mblob.offset);
> + if (ret) {
> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource map error: %s\n",
> + __func__, strerror(ret));
> + cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
> + return;
> + }
> +
> + memset(&resp, 0, sizeof(resp));
> + resp.hdr.type = VIRTIO_GPU_RESP_OK_MAP_INFO;
> + virgl_renderer_resource_get_map_info(mblob.resource_id, &resp.map_info);
> + virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
> +}
> +
> +static void virgl_cmd_resource_unmap_blob(VirtIOGPU *g,
> + struct virtio_gpu_ctrl_command *cmd)
> +{
> + struct virtio_gpu_resource_unmap_blob ublob;
> + struct virtio_gpu_simple_resource *res;
> +
> + VIRTIO_GPU_FILL_CMD(ublob);
> + virtio_gpu_unmap_blob_bswap(&ublob);
> +
> + res = virtio_gpu_find_resource(g, ublob.resource_id);
> + if (!res) {
> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
> + __func__, ublob.resource_id);
> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
> + return;
> + }
> +
> + if (!res->mr && !cmd->suspended) {
> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already unmapped %d\n",
> + __func__, ublob.resource_id);
> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
> + return;
> + }
> +
> + bool unmapped = virtio_gpu_virgl_unmap_resource_blob(g, res);
> + cmd->suspended = !unmapped;
> +}
> +
> static void virgl_cmd_set_scanout_blob(VirtIOGPU *g,
> struct virtio_gpu_ctrl_command *cmd)
> {
> @@ -661,6 +900,15 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
> virtio_gpu_get_edid(g, cmd);
> break;
> #ifdef HAVE_VIRGL_RESOURCE_BLOB
> + case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
> + virgl_cmd_resource_create_blob(g, cmd);
> + break;
> + case VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB:
> + virgl_cmd_resource_map_blob(g, cmd);
> + break;
> + case VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB:
> + virgl_cmd_resource_unmap_blob(g, cmd);
> + break;
> case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
> virgl_cmd_set_scanout_blob(g, cmd);
> break;
> diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
> index a1bd4d6914c4..45c1f2006712 100644
> --- a/hw/display/virtio-gpu.c
> +++ b/hw/display/virtio-gpu.c
> @@ -1483,10 +1483,12 @@ void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
> return;
> }
>
> +#ifndef HAVE_VIRGL_RESOURCE_BLOB
> if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
> - error_setg(errp, "blobs and virgl are not compatible (yet)");
> + error_setg(errp, "old virglrenderer, blob resources unsupported");
> return;
> }
> +#endif
> }
>
> if (!virtio_gpu_base_device_realize(qdev,
> diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h
> index dc24360656ce..b9d5e106f3c5 100644
> --- a/include/hw/virtio/virtio-gpu.h
> +++ b/include/hw/virtio/virtio-gpu.h
> @@ -61,6 +61,10 @@ struct virtio_gpu_simple_resource {
> int dmabuf_fd;
> uint8_t *remapped;
>
> + MemoryRegion *mr;
> + bool async_unmap_completed;
> + bool async_unmap_in_progress;
> +
Don't add fields to virtio_gpu_simple_resource but instead create a
struct that embeds virtio_gpu_simple_resource in virtio-gpu-virgl.c.
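For instance, something along these lines in virtio-gpu-virgl.c (sketch; the
names are only illustrative):

struct virtio_gpu_virgl_resource {
    struct virtio_gpu_simple_resource base;

    MemoryRegion *mr;
    bool async_unmap_completed;
    bool async_unmap_in_progress;
};

static struct virtio_gpu_virgl_resource *
virtio_gpu_virgl_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return NULL;
    }

    /* the -gl device would only ever allocate virtio_gpu_virgl_resource */
    return container_of(res, struct virtio_gpu_virgl_resource, base);
}

The blob create path would then allocate struct virtio_gpu_virgl_resource and
insert res->base into the reslist.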
> QTAILQ_ENTRY(virtio_gpu_simple_resource) next;
> };
>

On 4/19/24 12:18, Akihiko Odaki wrote:
>> @@ -61,6 +61,10 @@ struct virtio_gpu_simple_resource {
>> int dmabuf_fd;
>> uint8_t *remapped;
>> + MemoryRegion *mr;
>> + bool async_unmap_completed;
>> + bool async_unmap_in_progress;
>> +
>
> Don't add fields to virtio_gpu_simple_resource but instead create a
> struct that embeds virtio_gpu_simple_resource in virtio-gpu-virgl.c.
Please give a justification. I'd rather rename
virtio_gpu_simple_resource s/_simple//. Simple resource already supports
blob and the added fields are directly related to the blob. Don't see
why another struct is needed.
--
Best regards,
Dmitry

On 2024/04/24 19:30, Dmitry Osipenko wrote:
> On 4/19/24 12:18, Akihiko Odaki wrote:
>>> @@ -61,6 +61,10 @@ struct virtio_gpu_simple_resource {
>>> int dmabuf_fd;
>>> uint8_t *remapped;
>>> + MemoryRegion *mr;
>>> + bool async_unmap_completed;
>>> + bool async_unmap_in_progress;
>>> +
>>
>> Don't add fields to virtio_gpu_simple_resource but instead create a
>> struct that embeds virtio_gpu_simple_resource in virtio-gpu-virgl.c.
>
> Please give a justification. I'd rather rename
> virtio_gpu_simple_resource s/_simple//. Simple resource already supports
> blob and the added fields are directly related to the blob. Don't see
> why another struct is needed.
>
Because mapping is only implemented in virtio-gpu-gl while blob itself
is implemented also in virtio-gpu.

On 4/27/24 08:52, Akihiko Odaki wrote:
> On 2024/04/24 19:30, Dmitry Osipenko wrote:
>> On 4/19/24 12:18, Akihiko Odaki wrote:
>>>> @@ -61,6 +61,10 @@ struct virtio_gpu_simple_resource {
>>>> int dmabuf_fd;
>>>> uint8_t *remapped;
>>>> + MemoryRegion *mr;
>>>> + bool async_unmap_completed;
>>>> + bool async_unmap_in_progress;
>>>> +
>>>
>>> Don't add fields to virtio_gpu_simple_resource but instead create a
>>> struct that embeds virtio_gpu_simple_resource in virtio-gpu-virgl.c.
>>
>> Please give a justification. I'd rather rename
>> virtio_gpu_simple_resource s/_simple//. Simple resource already supports
>> blob and the added fields are directly related to the blob. Don't see
>> why another struct is needed.
>>
>
> Because mapping is only implemented in virtio-gpu-gl while blob itself
> is implemented also in virtio-gpu.

Rutabaga maps blobs and it should unmap blobs asynchronously as well,
AFAICT.
--
Best regards,
Dmitry

On 2024/05/02 4:20, Dmitry Osipenko wrote:
> On 4/27/24 08:52, Akihiko Odaki wrote:
>> On 2024/04/24 19:30, Dmitry Osipenko wrote:
>>> On 4/19/24 12:18, Akihiko Odaki wrote:
>>>>> @@ -61,6 +61,10 @@ struct virtio_gpu_simple_resource {
>>>>> int dmabuf_fd;
>>>>> uint8_t *remapped;
>>>>> + MemoryRegion *mr;
>>>>> + bool async_unmap_completed;
>>>>> + bool async_unmap_in_progress;
>>>>> +
>>>>
>>>> Don't add fields to virtio_gpu_simple_resource but instead create a
>>>> struct that embeds virtio_gpu_simple_resource in virtio-gpu-virgl.c.
>>>
>>> Please give a justification. I'd rather rename
>>> virtio_gpu_simple_resource s/_simple//. Simple resource already supports
>>> blob and the added fields are directly related to the blob. Don't see
>>> why another struct is needed.
>>>
>>
>> Because mapping is only implemented in virtio-gpu-gl while blob itself
>> is implemented also in virtio-gpu.
>
> Rutabaga maps blobs and it should unmap blobs asynchronously as well,
> AFAICT.
>
Right. It makes sense to put mr in struct virtio_gpu_simple_resource in
preparation for such a situation.
Based on this discussion, I think it is fine to put mr either in struct
virtio_gpu_simple_resource or a distinct struct. However if you put mr
in struct virtio_gpu_simple_resource, the logic that manages
MemoryRegion should also be moved to virtio-gpu.c for consistency.

On 5/5/24 09:47, Akihiko Odaki wrote:
> On 2024/05/02 4:20, Dmitry Osipenko wrote:
>> On 4/27/24 08:52, Akihiko Odaki wrote:
>>> On 2024/04/24 19:30, Dmitry Osipenko wrote:
>>>> On 4/19/24 12:18, Akihiko Odaki wrote:
>>>>>> @@ -61,6 +61,10 @@ struct virtio_gpu_simple_resource {
>>>>>> int dmabuf_fd;
>>>>>> uint8_t *remapped;
>>>>>> + MemoryRegion *mr;
>>>>>> + bool async_unmap_completed;
>>>>>> + bool async_unmap_in_progress;
>>>>>> +
>>>>>
>>>>> Don't add fields to virtio_gpu_simple_resource but instead create a
>>>>> struct that embeds virtio_gpu_simple_resource in virtio-gpu-virgl.c.
>>>>
>>>> Please give a justification. I'd rather rename
>>>> virtio_gpu_simple_resource s/_simple//. Simple resource already
>>>> supports
>>>> blob and the added fields are directly related to the blob. Don't see
>>>> why another struct is needed.
>>>>
>>>
>>> Because mapping is only implemented in virtio-gpu-gl while blob itself
>>> is implemented also in virtio-gpu.
>>
>> Rutabaga maps blobs and it should unmap blobs asynchronously as well,
>> AFAICT.
>>
>
> Right. It makes sense to put mr in struct virtio_gpu_simple_resource in
> preparation for such a situation.
>
> Based on this discussion, I think it is fine to put mr either in struct
> virtio_gpu_simple_resource or a distinct struct. However if you put mr
> in struct virtio_gpu_simple_resource, the logic that manages
> MemoryRegion should also be moved to virtio-gpu.c for consistency.

Rutabaga uses static MRs. It will either need a different workaround or
will have to move to dynamic MRs. I'll keep using a distinct struct for now.

AFAICT, it's a lesser problem for rutabaga because a static MR isn't
subject to the dynamic-MR use-after-free problem that virgl has. On the
other hand, rutabaga re-initializes an already-initialized static MR
object on each new mapping, which looks like a bug, and it will need to
move to dynamic MRs.
--
Best regards,
Dmitry