Allow virtio_gpu_virgl_unmap_resource_blob() to be invoked while async
unmapping is in progress. This is done in preparation for an upcoming
improvement of virtio-gpu resetting that will require this change.
Suggested-by: Akihiko Odaki <odaki@rsg.ci.i.u-tokyo.ac.jp>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
---
hw/display/trace-events | 2 +-
hw/display/virtio-gpu-virgl.c | 28 +++++++++++++++++++++++-----
2 files changed, 24 insertions(+), 6 deletions(-)
diff --git a/hw/display/trace-events b/hw/display/trace-events
index e323a82cff24..4bfc457fbac1 100644
--- a/hw/display/trace-events
+++ b/hw/display/trace-events
@@ -39,7 +39,7 @@ virtio_gpu_cmd_res_create_2d(uint32_t res, uint32_t fmt, uint32_t w, uint32_t h)
virtio_gpu_cmd_res_create_3d(uint32_t res, uint32_t fmt, uint32_t w, uint32_t h, uint32_t d) "res 0x%x, fmt 0x%x, w %d, h %d, d %d"
virtio_gpu_cmd_res_create_blob(uint32_t res, uint64_t size) "res 0x%x, size %" PRId64
virtio_gpu_cmd_res_map_blob(uint32_t res, void *vmr, void *mr) "res 0x%x, vmr %p, mr %p"
-virtio_gpu_cmd_res_unmap_blob(uint32_t res, void *mr, bool finish_unmapping) "res 0x%x, mr %p, finish_unmapping %d"
+virtio_gpu_cmd_res_unmap_blob(uint32_t res, void *mr, int mapping_state) "res 0x%x, mr %p, mapping_state %d"
virtio_gpu_cmd_res_unref(uint32_t res) "res 0x%x"
virtio_gpu_cmd_res_back_attach(uint32_t res) "res 0x%x"
virtio_gpu_cmd_res_back_detach(uint32_t res) "res 0x%x"
diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c
index 6a2aac0b6e5c..342e93728df0 100644
--- a/hw/display/virtio-gpu-virgl.c
+++ b/hw/display/virtio-gpu-virgl.c
@@ -68,10 +68,16 @@ virgl_get_egl_display(G_GNUC_UNUSED void *cookie)
#endif
#if VIRGL_VERSION_MAJOR >= 1
+enum virtio_gpu_virgl_hostmem_region_mapping_state {
+ VIRTIO_GPU_MR_MAPPED,
+ VIRTIO_GPU_MR_UNMAP_STARTED,
+ VIRTIO_GPU_MR_UNMAP_COMPLETED,
+};
+
struct virtio_gpu_virgl_hostmem_region {
MemoryRegion mr;
struct VirtIOGPU *g;
- bool finish_unmapping;
+ enum virtio_gpu_virgl_hostmem_region_mapping_state mapping_state;
};
static struct virtio_gpu_virgl_hostmem_region *
@@ -95,7 +101,7 @@ static void virtio_gpu_virgl_hostmem_region_free(void *obj)
VirtIOGPUGL *gl;
vmr = to_hostmem_region(mr);
- vmr->finish_unmapping = true;
+ vmr->mapping_state = VIRTIO_GPU_MR_UNMAP_COMPLETED;
b = VIRTIO_GPU_BASE(vmr->g);
b->renderer_blocked--;
@@ -135,6 +141,7 @@ virtio_gpu_virgl_map_resource_blob(VirtIOGPU *g,
vmr = g_new0(struct virtio_gpu_virgl_hostmem_region, 1);
vmr->g = g;
+ vmr->mapping_state = VIRTIO_GPU_MR_MAPPED;
mr = &vmr->mr;
memory_region_init_ram_ptr(mr, OBJECT(mr), "blob", size, data);
@@ -171,7 +178,8 @@ virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g,
vmr = to_hostmem_region(res->mr);
- trace_virtio_gpu_cmd_res_unmap_blob(res->base.resource_id, mr, vmr->finish_unmapping);
+ trace_virtio_gpu_cmd_res_unmap_blob(res->base.resource_id, mr,
+ vmr->mapping_state);
/*
* Perform async unmapping in 3 steps:
@@ -182,7 +190,8 @@ virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g,
* asynchronously by virtio_gpu_virgl_hostmem_region_free().
* 3. Finish the unmapping with final virgl_renderer_resource_unmap().
*/
- if (vmr->finish_unmapping) {
+ switch (vmr->mapping_state) {
+ case VIRTIO_GPU_MR_UNMAP_COMPLETED:
res->mr = NULL;
g_free(vmr);
@@ -193,15 +202,24 @@ virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g,
__func__, strerror(-ret));
return ret;
}
- } else {
+ break;
+
+ case VIRTIO_GPU_MR_MAPPED:
*cmd_suspended = true;
/* render will be unblocked once MR is freed */
b->renderer_blocked++;
+ vmr->mapping_state = VIRTIO_GPU_MR_UNMAP_STARTED;
+
/* memory region owns self res->mr object and frees it by itself */
memory_region_del_subregion(&b->hostmem, mr);
object_unparent(OBJECT(mr));
+ break;
+
+ case VIRTIO_GPU_MR_UNMAP_STARTED:
+ *cmd_suspended = true;
+ break;
}
return 0;
--
2.52.0
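
Before the review discussion below, here is a minimal standalone model of the
unmap state machine this patch introduces. It is only an illustration of the
control flow, not QEMU code: the QEMU types and helpers (VirtIOGPU,
MemoryRegion, virgl_renderer_resource_unmap(), the BH-driven free callback)
are replaced by hypothetical stand-ins so that repeated calls to the unmap
handler can be traced in isolation.

    /*
     * Standalone model of the three mapping states, with the QEMU pieces
     * replaced by stand-ins.
     */
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    enum mapping_state {
        MR_MAPPED,
        MR_UNMAP_STARTED,
        MR_UNMAP_COMPLETED,
    };

    struct hostmem_region {
        enum mapping_state mapping_state;
    };

    /* Stand-in for virtio_gpu_virgl_hostmem_region_free(): runs once the
     * guest-visible memory region is really gone. */
    static void hostmem_region_free(struct hostmem_region *vmr)
    {
        vmr->mapping_state = MR_UNMAP_COMPLETED;
    }

    /* Stand-in for virtio_gpu_virgl_unmap_resource_blob(): with this patch
     * it may be called in any of the three states. */
    static int unmap_resource_blob(struct hostmem_region **vmr,
                                   bool *cmd_suspended)
    {
        switch ((*vmr)->mapping_state) {
        case MR_UNMAP_COMPLETED:
            /* The region is gone: do the final renderer unmap and free. */
            free(*vmr);
            *vmr = NULL;
            break;
        case MR_MAPPED:
            /* Kick off the asynchronous teardown; in QEMU this is where
             * the MemoryRegion is unparented and the renderer blocked. */
            (*vmr)->mapping_state = MR_UNMAP_STARTED;
            *cmd_suspended = true;      /* the command is retried later */
            break;
        case MR_UNMAP_STARTED:
            /* New with this patch: a second call while the teardown is
             * still in flight just suspends the command again. */
            *cmd_suspended = true;
            break;
        }
        return 0;
    }

    int main(void)
    {
        struct hostmem_region *vmr = calloc(1, sizeof(*vmr));
        bool suspended;

        vmr->mapping_state = MR_MAPPED;

        suspended = false;
        unmap_resource_blob(&vmr, &suspended);  /* MAPPED -> UNMAP_STARTED */
        printf("1st call: suspended=%d\n", suspended);

        suspended = false;
        unmap_resource_blob(&vmr, &suspended);  /* re-entered while pending */
        printf("2nd call: suspended=%d\n", suspended);

        hostmem_region_free(vmr);               /* async completion arrives */

        suspended = false;
        unmap_resource_blob(&vmr, &suspended);  /* UNMAP_COMPLETED -> freed */
        printf("3rd call: suspended=%d, vmr=%p\n", suspended, (void *)vmr);
        return 0;
    }

The third case is the point of the patch: re-invoking the handler while an
earlier teardown is still pending now simply suspends the command again.
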
On 2026/01/13 7:52, Dmitry Osipenko wrote:
> [...]
> - } else {
> + break;
> +
> + case VIRTIO_GPU_MR_MAPPED:
> *cmd_suspended = true;
>
> /* render will be unblocked once MR is freed */
> b->renderer_blocked++;
>
> + vmr->mapping_state = VIRTIO_GPU_MR_UNMAP_STARTED;
> +
> /* memory region owns self res->mr object and frees it by itself */
> memory_region_del_subregion(&b->hostmem, mr);
> object_unparent(OBJECT(mr));
> + break;
I suggest:
- Put vmr->mapping_state = VIRTIO_GPU_MR_UNMAP_STARTED; here
- Remove *cmd_suspended = true for VIRTIO_GPU_MR_MAPPED.
- Let it fall through.
This way, it is clear that we need to execute *cmd_suspended = true
because the state is now VIRTIO_GPU_MR_UNMAP_STARTED, and we can save one
line by not having a duplicate *cmd_suspended = true.
> +
> + case VIRTIO_GPU_MR_UNMAP_STARTED:
> + *cmd_suspended = true;
> + break;
> }
>
> return 0;
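
Read literally, the suggestion above would collapse the MAPPED and
UNMAP_STARTED cases into roughly the following shape (a sketch of the
proposal only, not code that was posted; the UNMAP_COMPLETED case is
unchanged and elided):

    case VIRTIO_GPU_MR_MAPPED:
        /* render will be unblocked once MR is freed */
        b->renderer_blocked++;

        /* memory region owns res->mr object and frees it by itself */
        memory_region_del_subregion(&b->hostmem, mr);
        object_unparent(OBJECT(mr));

        vmr->mapping_state = VIRTIO_GPU_MR_UNMAP_STARTED;
        /* fall through */

    case VIRTIO_GPU_MR_UNMAP_STARTED:
        *cmd_suspended = true;
        break;
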
On 1/13/26 07:51, Akihiko Odaki wrote:
> On 2026/01/13 7:52, Dmitry Osipenko wrote:
>> [...]
>> + case VIRTIO_GPU_MR_MAPPED:
>> *cmd_suspended = true;
>> /* render will be unblocked once MR is freed */
>> b->renderer_blocked++;
>> + vmr->mapping_state = VIRTIO_GPU_MR_UNMAP_STARTED;
>> +
>> /* memory region owns self res->mr object and frees it by
>> itself */
>> memory_region_del_subregion(&b->hostmem, mr);
>> object_unparent(OBJECT(mr));
>> + break;
>
> I suggest:
>
> - Put vmr->mapping_state = VIRTIO_GPU_MR_UNMAP_STARTED; here
> - Remove *cmd_suspended = true for VIRTIO_GPU_MR_MAPPED.
> - Let it fall through.
>
> This way, it is clear that we need to execute *cmd_suspended = true
> because the state is now VIRTIO_GPU_MR_UNMAP_STARTED, and we can save on
> line by not having a duplicate *cmd_suspended = true.
The `mapping_state = VIRTIO_GPU_MR_UNMAP_STARTED` shall be set before
memory_region_del_subregion() is invoked because, technically, the
refcounting logic may change in the future and the MR may then be freed
instantly. I will only add the fall-through if there are no objections;
otherwise, please comment on v10.
Best regards,
Dmitry
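
To spell out the ordering hazard described in the reply above: under the
hypothetical assumption that dropping the MR's last reference could free it
synchronously (the follow-up notes this is not the case today), putting the
state change after object_unparent() could lose the completion:

    case VIRTIO_GPU_MR_MAPPED:
        b->renderer_blocked++;

        memory_region_del_subregion(&b->hostmem, mr);
        object_unparent(OBJECT(mr));
        /*
         * If the MR were freed right here,
         * virtio_gpu_virgl_hostmem_region_free() would already have set
         * mapping_state to VIRTIO_GPU_MR_UNMAP_COMPLETED ...
         */
        vmr->mapping_state = VIRTIO_GPU_MR_UNMAP_STARTED;
        /*
         * ... and this assignment would overwrite it, so later calls would
         * keep suspending the command and the unmap would never finish.
         * Setting UNMAP_STARTED before the MR is dropped, as in v10,
         * avoids depending on when the free callback runs.
         */
        /* fall through */
    case VIRTIO_GPU_MR_UNMAP_STARTED:
        *cmd_suspended = true;
        break;
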
On 2026/01/19 1:28, Dmitry Osipenko wrote:
> [...]
>
> The `mapping_state = VIRTIO_GPU_MR_UNMAP_STARTED` shall be set before
> memory_region_del_subregion() is invoked because technically refcounting
> logic may change in future and MR may become freed instantly. Will only
> add the fall-through if no objections, otherwise please comment on v10.
That makes sense.
Strictly speaking, even if the refcounting change happens,
qemu_bh_schedule(gl->cmdq_resume_bh) will delay the next invocation of
this function in virtio_gpu_virgl_hostmem_region_free(). But even
virtio_gpu_virgl_hostmem_region_free() can be changed in the future so I
no longer believe moving `mapping_state = VIRTIO_GPU_MR_UNMAP_STARTED`
is absolutely good. Please choose a design option you prefer.
Regards,
Akihiko Odaki
On 1/19/26 08:54, Akihiko Odaki wrote:
> On 2026/01/19 1:28, Dmitry Osipenko wrote:
>> [...]
>>
>> The `mapping_state = VIRTIO_GPU_MR_UNMAP_STARTED` shall be set before
>> memory_region_del_subregion() is invoked because technically refcounting
>> logic may change in future and MR may become freed instantly. Will only
>> add the fall-through if no objections, otherwise please comment on v10.
>
> That makes sense.
>
> Strictly speaking, even if the refcounting change happens,
> qemu_bh_schedule(gl->cmdq_resume_bh) will delay the next invocation of
> this function in virtio_gpu_virgl_hostmem_region_free(). But even
> virtio_gpu_virgl_hostmem_region_free() can be changed in the future so I
> no longer believe moving `mapping_state = VIRTIO_GPU_MR_UNMAP_STARTED`
> is absolutely good. Please choose a design option you prefer.
virtio_gpu_virgl_hostmem_region_free() changes the state to UNMAP_COMPLETED,
hence UNMAP_STARTED needs to be set before region_free() is invoked.
I will keep the v10 variant then, thanks.
--
Best regards,
Dmitry