[PATCH v11 08/10] virtio-gpu: Handle resource blob commands
Posted by Dmitry Osipenko 6 months, 2 weeks ago
From: Antonio Caggiano <antonio.caggiano@collabora.com>

Support BLOB resource creation, mapping and unmapping by calling the new
stable virglrenderer 0.10 interface. This is only enabled when the
interface is available and the blob config option is set, e.g.
-device virtio-vga-gl,blob=true
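
Note that VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB additionally requires a hostmem
memory region to be configured; a typical invocation (assuming the
existing hostmem= property of virtio-vga-gl) would be:

  -device virtio-vga-gl,blob=true,hostmem=4G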

Signed-off-by: Antonio Caggiano <antonio.caggiano@collabora.com>
Signed-off-by: Xenia Ragiadakou <xenia.ragiadakou@amd.com>
Signed-off-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
---
 hw/display/virtio-gpu-virgl.c  | 274 ++++++++++++++++++++++++++++++++-
 hw/display/virtio-gpu.c        |   4 +-
 include/hw/virtio/virtio-gpu.h |   2 +
 3 files changed, 277 insertions(+), 3 deletions(-)

diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c
index 524f15220b7f..3f2e406be3a4 100644
--- a/hw/display/virtio-gpu-virgl.c
+++ b/hw/display/virtio-gpu-virgl.c
@@ -26,6 +26,7 @@
 
 struct virtio_gpu_virgl_resource {
     struct virtio_gpu_simple_resource base;
+    MemoryRegion *mr;
 };
 
 static struct virtio_gpu_virgl_resource *
@@ -49,6 +50,117 @@ virgl_get_egl_display(G_GNUC_UNUSED void *cookie)
 }
 #endif
 
+#ifdef HAVE_VIRGL_RESOURCE_BLOB
+struct virtio_gpu_virgl_hostmem_region {
+    MemoryRegion mr;
+    struct VirtIOGPU *g;
+    struct virtio_gpu_virgl_resource *res;
+};
+
+static void virtio_gpu_virgl_resume_cmdq_bh(void *opaque)
+{
+    VirtIOGPU *g = opaque;
+
+    virtio_gpu_process_cmdq(g);
+}
+
+static void virtio_gpu_virgl_hostmem_region_free(void *obj)
+{
+    MemoryRegion *mr = MEMORY_REGION(obj);
+    struct virtio_gpu_virgl_hostmem_region *vmr;
+    VirtIOGPUBase *b;
+
+    vmr = container_of(mr, struct virtio_gpu_virgl_hostmem_region, mr);
+    vmr->res->mr = NULL;
+
+    b = VIRTIO_GPU_BASE(vmr->g);
+    b->renderer_blocked--;
+
+    /*
+     * memory_region_unref() is executed from RCU thread context, while
+     * virglrenderer works only on the main-loop thread that holds the GL
+     * context.
+     */
+    qemu_bh_schedule(vmr->g->cmdq_resume_bh);
+    g_free(vmr);
+}
+
+static int
+virtio_gpu_virgl_map_resource_blob(VirtIOGPU *g,
+                                   struct virtio_gpu_virgl_resource *res,
+                                   uint64_t offset)
+{
+    struct virtio_gpu_virgl_hostmem_region *vmr;
+    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
+    MemoryRegion *mr;
+    uint64_t size;
+    void *data;
+    int ret;
+
+    if (!virtio_gpu_hostmem_enabled(b->conf)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: hostmem disabled\n", __func__);
+        return -EOPNOTSUPP;
+    }
+
+    ret = virgl_renderer_resource_map(res->base.resource_id, &data, &size);
+    if (ret) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map virgl resource: %s\n",
+                      __func__, strerror(-ret));
+        return ret;
+    }
+
+    vmr = g_new0(struct virtio_gpu_virgl_hostmem_region, 1);
+    vmr->res = res;
+    vmr->g = g;
+
+    mr = &vmr->mr;
+    memory_region_init_ram_ptr(mr, OBJECT(mr), "blob", size, data);
+    memory_region_add_subregion(&b->hostmem, offset, mr);
+    memory_region_set_enabled(mr, true);
+
+    /*
+     * MR could outlive the resource if MR's reference is held outside of
+     * virtio-gpu. In order to prevent unmapping the resource while the MR
+     * is alive, and thus making the data pointer invalid, we block virtio-gpu
+     * command processing until MR is fully unreferenced and freed.
+     */
+    OBJECT(mr)->free = virtio_gpu_virgl_hostmem_region_free;
+
+    res->mr = mr;
+
+    return 0;
+}
+
+static int
+virtio_gpu_virgl_async_unmap_resource_blob(VirtIOGPU *g,
+                                           struct virtio_gpu_virgl_resource *res)
+{
+    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
+    MemoryRegion *mr = res->mr;
+    int ret;
+
+    if (mr) {
+        /* renderer will be unblocked once the MR is freed */
+        b->renderer_blocked++;
+
+        /* the memory region owns the res->mr object and frees it by itself */
+        memory_region_set_enabled(mr, false);
+        memory_region_del_subregion(&b->hostmem, mr);
+        object_unparent(OBJECT(mr));
+    } else {
+        ret = virgl_renderer_resource_unmap(res->base.resource_id);
+        if (ret) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "%s: failed to unmap virgl resource: %s\n",
+                          __func__, strerror(-ret));
+            return ret;
+        }
+    }
+
+    return 0;
+}
+#endif /* HAVE_VIRGL_RESOURCE_BLOB */
+
 static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
 {
@@ -146,7 +258,8 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
 }
 
 static void virgl_cmd_resource_unref(VirtIOGPU *g,
-                                     struct virtio_gpu_ctrl_command *cmd)
+                                     struct virtio_gpu_ctrl_command *cmd,
+                                     bool *cmd_suspended)
 {
     struct virtio_gpu_resource_unref unref;
     struct virtio_gpu_virgl_resource *res;
@@ -164,6 +277,12 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g,
         return;
     }
 
+    if (res->mr) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource is mapped\n", __func__);
+        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+        return;
+    }
+
     virgl_renderer_resource_detach_iov(unref.resource_id,
                                        &res_iovs,
                                        &num_iovs);
@@ -514,6 +633,134 @@ static void virgl_cmd_get_capset(VirtIOGPU *g,
 }
 
 #ifdef HAVE_VIRGL_RESOURCE_BLOB
+static void virgl_cmd_resource_create_blob(VirtIOGPU *g,
+                                           struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virgl_renderer_resource_create_blob_args virgl_args = { 0 };
+    struct virtio_gpu_resource_create_blob cblob;
+    struct virtio_gpu_virgl_resource *res;
+    int ret;
+
+    if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+        return;
+    }
+
+    VIRTIO_GPU_FILL_CMD(cblob);
+    virtio_gpu_create_blob_bswap(&cblob);
+    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);
+
+    if (cblob.resource_id == 0) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
+                      __func__);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
+    }
+
+    res = virtio_gpu_virgl_find_resource(g, cblob.resource_id);
+    if (res) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
+                      __func__, cblob.resource_id);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
+    }
+
+    res = g_new0(struct virtio_gpu_virgl_resource, 1);
+    res->base.resource_id = cblob.resource_id;
+    res->base.blob_size = cblob.size;
+    res->base.dmabuf_fd = -1;
+
+    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) {
+        ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
+                                            cmd, &res->base.addrs,
+                                            &res->base.iov, &res->base.iov_cnt);
+        if (!ret) {
+            g_free(res);
+            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+            return;
+        }
+    }
+
+    QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);
+
+    virgl_args.res_handle = cblob.resource_id;
+    virgl_args.ctx_id = cblob.hdr.ctx_id;
+    virgl_args.blob_mem = cblob.blob_mem;
+    virgl_args.blob_id = cblob.blob_id;
+    virgl_args.blob_flags = cblob.blob_flags;
+    virgl_args.size = cblob.size;
+    virgl_args.iovecs = res->base.iov;
+    virgl_args.num_iovs = res->base.iov_cnt;
+
+    ret = virgl_renderer_resource_create_blob(&virgl_args);
+    if (ret) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n",
+                      __func__, strerror(-ret));
+        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+    }
+}
+
+static void virgl_cmd_resource_map_blob(VirtIOGPU *g,
+                                        struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_resource_map_blob mblob;
+    struct virtio_gpu_virgl_resource *res;
+    struct virtio_gpu_resp_map_info resp;
+    int ret;
+
+    VIRTIO_GPU_FILL_CMD(mblob);
+    virtio_gpu_map_blob_bswap(&mblob);
+
+    res = virtio_gpu_virgl_find_resource(g, mblob.resource_id);
+    if (!res) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
+                      __func__, mblob.resource_id);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
+    }
+
+    ret = virtio_gpu_virgl_map_resource_blob(g, res, mblob.offset);
+    if (ret) {
+        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+        return;
+    }
+
+    memset(&resp, 0, sizeof(resp));
+    resp.hdr.type = VIRTIO_GPU_RESP_OK_MAP_INFO;
+    virgl_renderer_resource_get_map_info(mblob.resource_id, &resp.map_info);
+    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
+}
+
+static void virgl_cmd_resource_unmap_blob(VirtIOGPU *g,
+                                          struct virtio_gpu_ctrl_command *cmd,
+                                          bool *cmd_suspended)
+{
+    struct virtio_gpu_resource_unmap_blob ublob;
+    struct virtio_gpu_virgl_resource *res;
+    int ret;
+
+    VIRTIO_GPU_FILL_CMD(ublob);
+    virtio_gpu_unmap_blob_bswap(&ublob);
+
+    res = virtio_gpu_virgl_find_resource(g, ublob.resource_id);
+    if (!res) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
+                      __func__, ublob.resource_id);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
+    }
+
+    ret = virtio_gpu_virgl_async_unmap_resource_blob(g, res);
+    if (ret) {
+        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+        return;
+    }
+
+    if (res->mr) {
+        *cmd_suspended = true;
+    }
+}
+
 static void virgl_cmd_set_scanout_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
 {
@@ -616,6 +863,8 @@ static void virgl_cmd_set_scanout_blob(VirtIOGPU *g,
 void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                       struct virtio_gpu_ctrl_command *cmd)
 {
+    bool cmd_suspended = false;
+
     VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
 
     virgl_renderer_force_ctx_0();
@@ -657,7 +906,7 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
         virgl_cmd_resource_flush(g, cmd);
         break;
     case VIRTIO_GPU_CMD_RESOURCE_UNREF:
-        virgl_cmd_resource_unref(g, cmd);
+        virgl_cmd_resource_unref(g, cmd, &cmd_suspended);
         break;
     case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
         /* TODO add security */
@@ -680,6 +929,15 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
         virtio_gpu_get_edid(g, cmd);
         break;
 #ifdef HAVE_VIRGL_RESOURCE_BLOB
+    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
+        virgl_cmd_resource_create_blob(g, cmd);
+        break;
+    case VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB:
+        virgl_cmd_resource_map_blob(g, cmd);
+        break;
+    case VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB:
+        virgl_cmd_resource_unmap_blob(g, cmd, &cmd_suspended);
+        break;
     case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
         virgl_cmd_set_scanout_blob(g, cmd);
         break;
@@ -689,6 +947,9 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
         break;
     }
 
+    if (cmd_suspended) {
+        return;
+    }
     if (cmd->finished) {
         return;
     }
@@ -854,6 +1115,11 @@ int virtio_gpu_virgl_init(VirtIOGPU *g)
                                       virtio_gpu_print_stats, g);
         timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
     }
+
+    g->cmdq_resume_bh = aio_bh_new(qemu_get_aio_context(),
+                                   virtio_gpu_virgl_resume_cmdq_bh,
+                                   g);
+
     return 0;
 }
 
@@ -869,6 +1135,10 @@ int virtio_gpu_virgl_get_num_capsets(VirtIOGPU *g)
 
 void virtio_gpu_virgl_deinit(VirtIOGPU *g)
 {
+    if (g->cmdq_resume_bh) {
+        qemu_bh_delete(g->cmdq_resume_bh);
+    }
+
     if (g->fence_poll) {
         timer_free(g->fence_poll);
     }
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index 6c8c7213bafa..052ab493a00b 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -1483,10 +1483,12 @@ void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
             return;
         }
 
+#ifndef HAVE_VIRGL_RESOURCE_BLOB
         if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
-            error_setg(errp, "blobs and virgl are not compatible (yet)");
+            error_setg(errp, "old virglrenderer, blob resources unsupported");
             return;
         }
+#endif
     }
 
     if (!virtio_gpu_base_device_realize(qdev,
diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h
index ba36497c477f..c0b0b0eac08b 100644
--- a/include/hw/virtio/virtio-gpu.h
+++ b/include/hw/virtio/virtio-gpu.h
@@ -210,6 +210,8 @@ struct VirtIOGPU {
         QTAILQ_HEAD(, VGPUDMABuf) bufs;
         VGPUDMABuf *primary[VIRTIO_GPU_MAX_SCANOUTS];
     } dmabuf;
+
+    QEMUBH *cmdq_resume_bh;
 };
 
 struct VirtIOGPUClass {
-- 
2.44.0
Re: [PATCH v11 08/10] virtio-gpu: Handle resource blob commands
Posted by Akihiko Odaki 6 months, 2 weeks ago
On 2024/05/12 3:22, Dmitry Osipenko wrote:
> [...]
> 
>   static void virgl_cmd_resource_unref(VirtIOGPU *g,
> -                                     struct virtio_gpu_ctrl_command *cmd)
> +                                     struct virtio_gpu_ctrl_command *cmd,
> +                                     bool *cmd_suspended)

This parameter is not used as it returns an error if the resource is 
still mapped.

It may be better to actually implement unmapping instead of returning an 
error for consistency with the iov operation. Apparently crosvm also 
unmaps blobs with VIRTIO_GPU_CMD_RESOURCE_UNREF.
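
For example, a minimal sketch of that direction (untested; it reuses
virtio_gpu_virgl_async_unmap_resource_blob() from this patch and suspends
the command like the unmap-blob path does):

    /*
     * NB: as written, unref of a resource that was never mapped would
     * fail here, since the helper's else branch calls
     * virgl_renderer_resource_unmap() unconditionally -- see the
     * discussion below.
     */
    ret = virtio_gpu_virgl_async_unmap_resource_blob(g, res);
    if (ret) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
    if (res->mr) {
        /* async unmap started; retry UNREF once the MR teardown is done */
        *cmd_suspended = true;
        return;
    }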

>   {
>       struct virtio_gpu_resource_unref unref;
>       struct virtio_gpu_virgl_resource *res;
> @@ -164,6 +277,12 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g,
>           return;
>       }
>   
> +    if (res->mr) {
> +        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource is mapped\n", __func__);
> +        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
> +        return;
> +    }
> [...]
> 
> +    QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);
> +
> +    virgl_args.res_handle = cblob.resource_id;
> +    virgl_args.ctx_id = cblob.hdr.ctx_id;
> +    virgl_args.blob_mem = cblob.blob_mem;
> +    virgl_args.blob_id = cblob.blob_id;
> +    virgl_args.blob_flags = cblob.blob_flags;
> +    virgl_args.size = cblob.size;
> +    virgl_args.iovecs = res->base.iov;
> +    virgl_args.num_iovs = res->base.iov_cnt;
> +
> +    ret = virgl_renderer_resource_create_blob(&virgl_args);
> +    if (ret) {
> +        qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n",
> +                      __func__, strerror(-ret));
> +        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;

reslist keeps the stale res even if an error happens.

> +    }
> +}
> [...]
> 
> @@ -689,6 +947,9 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
>           break;
>       }
>   
> +    if (cmd_suspended) {
> +        return;
> +    }
>       if (cmd->finished) {
>           return;
>       }

Nitpick: I would write cmd_suspended || cmd->finished to save a few lines.

> [...]
> 
> diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h
> index ba36497c477f..c0b0b0eac08b 100644
> --- a/include/hw/virtio/virtio-gpu.h
> +++ b/include/hw/virtio/virtio-gpu.h
> @@ -210,6 +210,8 @@ struct VirtIOGPU {
>           QTAILQ_HEAD(, VGPUDMABuf) bufs;
>           VGPUDMABuf *primary[VIRTIO_GPU_MAX_SCANOUTS];
>       } dmabuf;
> +
> +    QEMUBH *cmdq_resume_bh;

Move this to VirtIOGPUGL.

>   };
>   
>   struct VirtIOGPUClass {
Re: [PATCH v11 08/10] virtio-gpu: Handle resource blob commands
Posted by Dmitry Osipenko 6 months, 1 week ago
On 5/13/24 12:18, Akihiko Odaki wrote:
>>     static void virgl_cmd_resource_unref(VirtIOGPU *g,
>> -                                     struct virtio_gpu_ctrl_command *cmd)
>> +                                     struct virtio_gpu_ctrl_command *cmd,
>> +                                     bool *cmd_suspended)
> 
> This parameter is not used as it returns an error if the resource is
> still mapped.

I missed removing it by accident.

> It may be better to actually implement unmapping instead of returning an
> error for consistency with the iov operation. Apparently crosvm also
> unmaps blobs with VIRTIO_GPU_CMD_RESOURCE_UNREF.

Then I'll add back `async_unmap_in_progress`, because a resource can be
either mapped or unmapped on unref, and we'll need a flag to know whether
the async unmapping has finished before doing the final unmapping of the
resource.
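
I.e. something like this (hypothetical; the flag name and placement are
illustrative only):

    if (res->mr) {
        /* mapped: start the async unmap and suspend the command */
        virtio_gpu_virgl_async_unmap_resource_blob(g, res);
        res->async_unmap_in_progress = true;
        *cmd_suspended = true;
        return;
    }
    if (res->async_unmap_in_progress) {
        /* resumed: MR is gone, do the final virgl unmap before unreffing */
        virgl_renderer_resource_unmap(res->base.resource_id);
        res->async_unmap_in_progress = false;
    }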

...
>> +    QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);
>> +
>> +    virgl_args.res_handle = cblob.resource_id;
>> +    virgl_args.ctx_id = cblob.hdr.ctx_id;
>> +    virgl_args.blob_mem = cblob.blob_mem;
>> +    virgl_args.blob_id = cblob.blob_id;
>> +    virgl_args.blob_flags = cblob.blob_flags;
>> +    virgl_args.size = cblob.size;
>> +    virgl_args.iovecs = res->base.iov;
>> +    virgl_args.num_iovs = res->base.iov_cnt;
>> +
>> +    ret = virgl_renderer_resource_create_blob(&virgl_args);
>> +    if (ret) {
>> +        qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n",
>> +                      __func__, strerror(-ret));
>> +        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
> 
> reslist keeps the stale res even if an error happens.

Good catch

-- 
Best regards,
Dmitry


Re: [PATCH v11 08/10] virtio-gpu: Handle resource blob commands
Posted by Akihiko Odaki 6 months, 1 week ago
On 2024/05/16 1:39, Dmitry Osipenko wrote:
> On 5/13/24 12:18, Akihiko Odaki wrote:
>>>      static void virgl_cmd_resource_unref(VirtIOGPU *g,
>>> -                                     struct virtio_gpu_ctrl_command *cmd)
>>> +                                     struct virtio_gpu_ctrl_command *cmd,
>>> +                                     bool *cmd_suspended)
>>
>> This parameter is not used as it returns an error if the resource is
>> still mapped.
> 
> I missed removing it by accident.
> 
>> It may be better to actually implement unmapping instead of returning an
>> error for consistency with the iov operation. Apparently crosvm also
>> unmaps blobs with VIRTIO_GPU_CMD_RESOURCE_UNREF.
> 
> Then I'll add back `async_unmap_in_progress`, because a resource can be
> either mapped or unmapped on unref, and we'll need a flag to know whether
> the async unmapping has finished before doing the final unmapping of the
> resource.

Such a situation should already be handled, since an unmapping in progress
blocks all commands (not just VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB but
literally all of them, including VIRTIO_GPU_CMD_RESOURCE_UNREF).
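
(That blocking comes from the renderer_blocked check in the command queue;
roughly, the existing loop in virtio_gpu_process_cmdq() does:)

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        /* stays blocked while an async unmap holds renderer_blocked */
        if (g->parent_obj.renderer_blocked) {
            break;
        }
        ...
    }

(and with this patch the MR free callback drops renderer_blocked and
schedules cmdq_resume_bh to re-run the loop on the main-loop thread.)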

Re: [PATCH v11 08/10] virtio-gpu: Handle resource blob commands
Posted by Dmitry Osipenko 6 months, 1 week ago
On 5/15/24 19:42, Akihiko Odaki wrote:
>>> It may be better to actually implement unmapping instead of returning an
>>> error for consistency with the iov operation. Apparently crosvm also
>>> unmaps blobs with VIRTIO_GPU_CMD_RESOURCE_UNREF.
>>
>> Then I'll add back `async_unmap_in_progress`, because a resource can be
>> either mapped or unmapped on unref, and we'll need a flag to know whether
>> the async unmapping has finished before doing the final unmapping of the
>> resource.
> 
> Such a situation should already be handled, since an unmapping in progress
> blocks all commands (not just VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB but
> literally all of them, including VIRTIO_GPU_CMD_RESOURCE_UNREF).

The async unmapping consists of 3 parts:

1. begin the async unmapping with memory_region_del_subregion() and suspend
2. wait for res->mr to be freed and resume
3. finish the unmapping with the final virgl_renderer_resource_unmap()

Parts 1 and 3 are handled by virtio_gpu_virgl_async_unmap_resource_blob().

The VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB case is different because we know
that the blob is mapped in the first place. Hence we can safely perform
part 3, assuming that parts 1/2 have been completed.

In the case of VIRTIO_GPU_CMD_RESOURCE_UNREF, the blob can be unmapped in
the first place, and we can't do part 3 because it will error out for an
unmapped resource, since parts 1/2 were not performed.
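
In code terms (just annotating the existing helpers from the patch, not
new logic):

    /* part 1: virtio_gpu_virgl_async_unmap_resource_blob(), mapped case */
    b->renderer_blocked++;
    memory_region_del_subregion(&b->hostmem, mr);
    object_unparent(OBJECT(mr));               /* MR teardown begins */

    /* part 2: virtio_gpu_virgl_hostmem_region_free(), last MR ref gone */
    vmr->res->mr = NULL;
    b->renderer_blocked--;
    qemu_bh_schedule(vmr->g->cmdq_resume_bh);  /* resume on the main loop */

    /* part 3: virtio_gpu_virgl_async_unmap_resource_blob(), mr == NULL */
    virgl_renderer_resource_unmap(res->base.resource_id);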

-- 
Best regards,
Dmitry
Re: [PATCH v11 08/10] virtio-gpu: Handle resource blob commands
Posted by Akihiko Odaki 6 months, 1 week ago
On 2024/05/16 2:01, Dmitry Osipenko wrote:
> On 5/15/24 19:42, Akihiko Odaki wrote:
>>>> It may be better to actually implement unmapping instead of returning an
>>>> error for consistency with the iov operation. Apparently crosvm also
>>>> unmaps blobs with VIRTIO_GPU_CMD_RESOURCE_UNREF.
>>>
>>> Then I'll add back `async_unmap_in_progress`, because a resource can be
>>> either mapped or unmapped on unref, and we'll need a flag to know whether
>>> the async unmapping has finished before doing the final unmapping of the
>>> resource.
>>
>> Such a situation should already be handled, since an unmapping in progress
>> blocks all commands (not just VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB but
>> literally all of them, including VIRTIO_GPU_CMD_RESOURCE_UNREF).
> 
> The async unmapping consists of 3 parts:
> 
> 1. begin the async unmapping with memory_region_del_subregion() and suspend
> 2. wait for res->mr to be freed and resume
> 3. finish the unmapping with the final virgl_renderer_resource_unmap()
> 
> Parts 1 and 3 are handled by virtio_gpu_virgl_async_unmap_resource_blob().
> 
> The VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB case is different because we know
> that the blob is mapped in the first place. Hence we can safely perform
> part 3, assuming that parts 1/2 have been completed.
> 
> In the case of VIRTIO_GPU_CMD_RESOURCE_UNREF, the blob can be unmapped in
> the first place, and we can't do part 3 because it will error out for an
> unmapped resource, since parts 1/2 were not performed.
> 

VIRTIO_GPU_CMD_RESOURCE_UNREF should also call
virtio_gpu_virgl_async_unmap_resource_blob(). I guess that's the original
intention of having a function for this instead of inlining its content
into virgl_cmd_resource_unmap_blob().
Re: [PATCH v11 08/10] virtio-gpu: Handle resource blob commands
Posted by Dmitry Osipenko 6 months, 1 week ago
On 5/15/24 20:04, Akihiko Odaki wrote:
>>
> 
> VIRTIO_GPU_CMD_RESOURCE_UNREF should also call
> virtio_gpu_virgl_async_unmap_resource_blob(). I guess that's the original
> intention of having a function for this instead of inlining its content
> into virgl_cmd_resource_unmap_blob().

Correct, previous versions of the patchset unmapped the resource on unref.

In v11 I dropped the unmapping from unref to avoid adding an additional
`async_unmap_in_progress` flag, because normally map/unmap will be
balanced by the guest anyway.

The virtio-gpu spec doesn't say that a resource has to be implicitly
unmapped on unref. In the case of a Linux guest, it would actually be a
bug to unref a mapped resource, because the guest would continue to map
and use the destroyed resource.

-- 
Best regards,
Dmitry
Re: [PATCH v11 08/10] virtio-gpu: Handle resource blob commands
Posted by Akihiko Odaki 6 months, 1 week ago
On 2024/05/16 2:15, Dmitry Osipenko wrote:
> On 5/15/24 20:04, Akihiko Odaki wrote:
>>>
>>
>> VIRTIO_GPU_CMD_RESOURCE_UNREF should also call
>> virtio_gpu_virgl_async_unmap_resource_blob(). I guess that's the original
>> intention of having a function for this instead of inlining its content
>> into virgl_cmd_resource_unmap_blob().
> 
> Correct, previous versions of the patchset unmapped the resource on unref.
> 
> In v11 I dropped the unmapping from unref to avoid adding an additional
> `async_unmap_in_progress` flag, because normally map/unmap will be
> balanced by the guest anyway.
> 
> The virtio-gpu spec doesn't say that a resource has to be implicitly
> unmapped on unref. In the case of a Linux guest, it would actually be a
> bug to unref a mapped resource, because the guest would continue to map
> and use the destroyed resource.
> 

An additional `async_unmap_in_progress` flag should not be necessary, as
explained earlier.

It is a valid design not to issue UNMAP_BLOB before UNREF if the VMM
automatically performs the unmapping operation. A guest needs to ensure
the blob is not mapped in a guest userspace virtual address space, but
that does not require issuing UNMAP_BLOB, which unmaps the blob from the
guest physical address space.

In the case of Linux, virtio_gpu_vram_free() calls virtio_gpu_cmd_unmap()
to issue UNMAP_BLOB before UNREF, which is actually not necessary. Linux
still needs to ensure that the blob is not mapped in a guest userspace
virtual address space, but that is done before virtio_gpu_vram_free()
gets called, and virtio_gpu_cmd_unmap() has nothing to do with that.

It is still good practice for a guest to issue UNMAP_BLOB in such a case,
because the spec does not say the VMM will automatically unmap the blob
on UNREF, and that's what Linux does. From the VMM perspective, it's
better to perform the unmapping operation on UNREF, because the spec does
not say the guest will always issue UNMAP_BLOB before UNREF.