[PATCH 11/33] vhost: make vhost_memory_unmap() null-safe

Vladimir Sementsov-Ogievskiy posted 33 patches 3 months ago
Maintainers: "Michael S. Tsirkin" <mst@redhat.com>, Stefano Garzarella <sgarzare@redhat.com>, "Gonglei (Arei)" <arei.gonglei@huawei.com>, Zhenwei Pi <pizhenwei@bytedance.com>, "Marc-André Lureau" <marcandre.lureau@redhat.com>, Paolo Bonzini <pbonzini@redhat.com>, Kevin Wolf <kwolf@redhat.com>, Hanna Reitz <hreitz@redhat.com>, Raphael Norwitz <raphael@enfabrica.net>, Jason Wang <jasowang@redhat.com>, Fam Zheng <fam@euphon.net>, "Alex Bennée" <alex.bennee@linaro.org>, "Daniel P. Berrangé" <berrange@redhat.com>, Peter Xu <peterx@redhat.com>, Fabiano Rosas <farosas@suse.de>, Eric Blake <eblake@redhat.com>, Markus Armbruster <armbru@redhat.com>, Thomas Huth <thuth@redhat.com>, "Philippe Mathieu-Daudé" <philmd@linaro.org>, Laurent Vivier <lvivier@redhat.com>
There is a newer version of this series
[PATCH 11/33] vhost: make vhost_memory_unmap() null-safe
Posted by Vladimir Sementsov-Ogievskiy 3 months ago
This helps to simplify failure paths of vhost_virtqueue_start()
a lot.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
---
 hw/virtio/vhost.c | 23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)

diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 1e14987cd5..1fdc1937b6 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -489,6 +489,10 @@ static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
                                hwaddr len, int is_write,
                                hwaddr access_len)
 {
+    if (!buffer) {
+        return;
+    }
+
     if (!vhost_dev_has_iommu(dev)) {
         cpu_physical_memory_unmap(buffer, len, is_write, access_len);
     }
@@ -1313,33 +1317,33 @@ int vhost_virtqueue_start(struct vhost_dev *dev,
     vq->desc = vhost_memory_map(dev, a, l, false);
     if (!vq->desc) {
         r = -ENOMEM;
-        goto fail_alloc_desc;
+        goto fail;
     }
     vq->avail_size = l = virtio_queue_get_avail_size(vdev, idx);
     vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
     vq->avail = vhost_memory_map(dev, a, l, false);
     if (!vq->avail) {
         r = -ENOMEM;
-        goto fail_alloc_avail;
+        goto fail;
     }
     vq->used_size = l = virtio_queue_get_used_size(vdev, idx);
     vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
     vq->used = vhost_memory_map(dev, a, l, true);
     if (!vq->used) {
         r = -ENOMEM;
-        goto fail_alloc_used;
+        goto fail;
     }
 
     r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
     if (r < 0) {
-        goto fail_alloc;
+        goto fail;
     }
 
     file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
     r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
     if (r) {
         VHOST_OPS_DEBUG(r, "vhost_set_vring_kick failed");
-        goto fail_kick;
+        goto fail;
     }
 
     /* Clear and discard previous events if any. */
@@ -1359,24 +1363,19 @@ int vhost_virtqueue_start(struct vhost_dev *dev,
         file.fd = -1;
         r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
         if (r) {
-            goto fail_vector;
+            goto fail;
         }
     }
 
     return 0;
 
-fail_vector:
-fail_kick:
-fail_alloc:
+fail:
     vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                        0, 0);
-fail_alloc_used:
     vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                        0, 0);
-fail_alloc_avail:
     vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                        0, 0);
-fail_alloc_desc:
     return r;
 }
 
-- 
2.48.1
Re: [PATCH 11/33] vhost: make vhost_memory_unmap() null-safe
Posted by Raphael Norwitz 1 month ago
On Wed, Aug 13, 2025 at 12:56 PM Vladimir Sementsov-Ogievskiy
<vsementsov@yandex-team.ru> wrote:
>
> This helps to simplify failure paths of vhost_virtqueue_start()
> a lot.
>
> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
> ---
>  hw/virtio/vhost.c | 23 +++++++++++------------
>  1 file changed, 11 insertions(+), 12 deletions(-)
>
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index 1e14987cd5..1fdc1937b6 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -489,6 +489,10 @@ static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
>                                 hwaddr len, int is_write,
>                                 hwaddr access_len)
>  {
> +    if (!buffer) {
> +        return;
> +    }
> +
>      if (!vhost_dev_has_iommu(dev)) {
>          cpu_physical_memory_unmap(buffer, len, is_write, access_len);
>      }
> @@ -1313,33 +1317,33 @@ int vhost_virtqueue_start(struct vhost_dev *dev,
>      vq->desc = vhost_memory_map(dev, a, l, false);
>      if (!vq->desc) {
>          r = -ENOMEM;
> -        goto fail_alloc_desc;
> +        goto fail;
>      }
>      vq->avail_size = l = virtio_queue_get_avail_size(vdev, idx);
>      vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
>      vq->avail = vhost_memory_map(dev, a, l, false);
>      if (!vq->avail) {
>          r = -ENOMEM;
> -        goto fail_alloc_avail;
> +        goto fail;
>      }
>      vq->used_size = l = virtio_queue_get_used_size(vdev, idx);
>      vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
>      vq->used = vhost_memory_map(dev, a, l, true);
>      if (!vq->used) {
>          r = -ENOMEM;
> -        goto fail_alloc_used;
> +        goto fail;
>      }
>
>      r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
>      if (r < 0) {
> -        goto fail_alloc;
> +        goto fail;
>      }
>
>      file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
>      r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
>      if (r) {
>          VHOST_OPS_DEBUG(r, "vhost_set_vring_kick failed");
> -        goto fail_kick;
> +        goto fail;
>      }
>
>      /* Clear and discard previous events if any. */
> @@ -1359,24 +1363,19 @@ int vhost_virtqueue_start(struct vhost_dev *dev,
>          file.fd = -1;
>          r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
>          if (r) {
> -            goto fail_vector;
> +            goto fail;
>          }
>      }
>
>      return 0;
>
> -fail_vector:
> -fail_kick:
> -fail_alloc:
> +fail:
>      vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
>                         0, 0);
> -fail_alloc_used:
>      vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
>                         0, 0);
> -fail_alloc_avail:
>      vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
>                         0, 0);
> -fail_alloc_desc:
>      return r;

This assumes that vq->{used, avail, desc} will be nulled out. I’m not
totally convinced that will be the case when a device is started and
stopped, or at least I don’t see the unmap path doing it.

>  }

>
> --
> 2.48.1
>
>
Re: [PATCH 11/33] vhost: make vhost_memory_unmap() null-safe
Posted by Vladimir Sementsov-Ogievskiy 1 month ago
On 09.10.25 22:00, Raphael Norwitz wrote:
> On Wed, Aug 13, 2025 at 12:56 PM Vladimir Sementsov-Ogievskiy
> <vsementsov@yandex-team.ru> wrote:
>>
>> This helps to simplify failure paths of vhost_virtqueue_start()
>> a lot.
>>
>> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
>> ---
>>   hw/virtio/vhost.c | 23 +++++++++++------------
>>   1 file changed, 11 insertions(+), 12 deletions(-)
>>
>> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
>> index 1e14987cd5..1fdc1937b6 100644
>> --- a/hw/virtio/vhost.c
>> +++ b/hw/virtio/vhost.c
>> @@ -489,6 +489,10 @@ static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
>>                                  hwaddr len, int is_write,
>>                                  hwaddr access_len)
>>   {
>> +    if (!buffer) {
>> +        return;
>> +    }
>> +
>>       if (!vhost_dev_has_iommu(dev)) {
>>           cpu_physical_memory_unmap(buffer, len, is_write, access_len);
>>       }
>> @@ -1313,33 +1317,33 @@ int vhost_virtqueue_start(struct vhost_dev *dev,
>>       vq->desc = vhost_memory_map(dev, a, l, false);
>>       if (!vq->desc) {
>>           r = -ENOMEM;
>> -        goto fail_alloc_desc;
>> +        goto fail;
>>       }
>>       vq->avail_size = l = virtio_queue_get_avail_size(vdev, idx);
>>       vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
>>       vq->avail = vhost_memory_map(dev, a, l, false);
>>       if (!vq->avail) {
>>           r = -ENOMEM;
>> -        goto fail_alloc_avail;
>> +        goto fail;
>>       }
>>       vq->used_size = l = virtio_queue_get_used_size(vdev, idx);
>>       vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
>>       vq->used = vhost_memory_map(dev, a, l, true);
>>       if (!vq->used) {
>>           r = -ENOMEM;
>> -        goto fail_alloc_used;
>> +        goto fail;
>>       }
>>
>>       r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
>>       if (r < 0) {
>> -        goto fail_alloc;
>> +        goto fail;
>>       }
>>
>>       file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
>>       r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
>>       if (r) {
>>           VHOST_OPS_DEBUG(r, "vhost_set_vring_kick failed");
>> -        goto fail_kick;
>> +        goto fail;
>>       }
>>
>>       /* Clear and discard previous events if any. */
>> @@ -1359,24 +1363,19 @@ int vhost_virtqueue_start(struct vhost_dev *dev,
>>           file.fd = -1;
>>           r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
>>           if (r) {
>> -            goto fail_vector;
>> +            goto fail;
>>           }
>>       }
>>
>>       return 0;
>>
>> -fail_vector:
>> -fail_kick:
>> -fail_alloc:
>> +fail:
>>       vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
>>                          0, 0);
>> -fail_alloc_used:
>>       vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
>>                          0, 0);
>> -fail_alloc_avail:
>>       vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
>>                          0, 0);
>> -fail_alloc_desc:
>>       return r;
> 
> This assumes that vq->{used, avail, desc} will be nulled out. I’m not
> totally convinced that will be the case when a device is started and
> stopped, or at least I don’t see the unmap path doing it.
> 

Oh, right, good catch. It seems we never zero these fields, and after do_vhost_virtqueue_stop()
they become invalid. I'll rework it somehow.

I also notice now, that we do

     vq->used = vhost_memory_map(dev, a, &l, true);
     if (!vq->used || l != s) {
         r = -ENOMEM;
         goto fail_alloc_used;
     }

So, theoretically, pre-patch we may leak the mapping in the case where vq->used is not NULL but l != s after vhost_memory_map().

This should be fixed by this commit (provided, of course, that we also fix the problem you pointed out).

>>   }
> 
>>
>> --
>> 2.48.1
>>
>>


-- 
Best regards,
Vladimir

Re: [PATCH 11/33] vhost: make vhost_memory_unmap() null-safe
Posted by Vladimir Sementsov-Ogievskiy 1 month ago
On 09.10.25 23:00, Vladimir Sementsov-Ogievskiy wrote:
> On 09.10.25 22:00, Raphael Norwitz wrote:
>> On Wed, Aug 13, 2025 at 12:56 PM Vladimir Sementsov-Ogievskiy
>> <vsementsov@yandex-team.ru> wrote:
>>>
>>> This helps to simplify failure paths of vhost_virtqueue_start()
>>> a lot.
>>>
>>> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
>>> ---
>>>   hw/virtio/vhost.c | 23 +++++++++++------------
>>>   1 file changed, 11 insertions(+), 12 deletions(-)
>>>
>>> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
>>> index 1e14987cd5..1fdc1937b6 100644
>>> --- a/hw/virtio/vhost.c
>>> +++ b/hw/virtio/vhost.c
>>> @@ -489,6 +489,10 @@ static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
>>>                                  hwaddr len, int is_write,
>>>                                  hwaddr access_len)
>>>   {
>>> +    if (!buffer) {
>>> +        return;
>>> +    }
>>> +
>>>       if (!vhost_dev_has_iommu(dev)) {
>>>           cpu_physical_memory_unmap(buffer, len, is_write, access_len);
>>>       }
>>> @@ -1313,33 +1317,33 @@ int vhost_virtqueue_start(struct vhost_dev *dev,
>>>       vq->desc = vhost_memory_map(dev, a, l, false);
>>>       if (!vq->desc) {
>>>           r = -ENOMEM;
>>> -        goto fail_alloc_desc;
>>> +        goto fail;
>>>       }
>>>       vq->avail_size = l = virtio_queue_get_avail_size(vdev, idx);
>>>       vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
>>>       vq->avail = vhost_memory_map(dev, a, l, false);
>>>       if (!vq->avail) {
>>>           r = -ENOMEM;
>>> -        goto fail_alloc_avail;
>>> +        goto fail;
>>>       }
>>>       vq->used_size = l = virtio_queue_get_used_size(vdev, idx);
>>>       vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
>>>       vq->used = vhost_memory_map(dev, a, l, true);
>>>       if (!vq->used) {
>>>           r = -ENOMEM;
>>> -        goto fail_alloc_used;
>>> +        goto fail;
>>>       }
>>>
>>>       r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
>>>       if (r < 0) {
>>> -        goto fail_alloc;
>>> +        goto fail;
>>>       }
>>>
>>>       file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
>>>       r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
>>>       if (r) {
>>>           VHOST_OPS_DEBUG(r, "vhost_set_vring_kick failed");
>>> -        goto fail_kick;
>>> +        goto fail;
>>>       }
>>>
>>>       /* Clear and discard previous events if any. */
>>> @@ -1359,24 +1363,19 @@ int vhost_virtqueue_start(struct vhost_dev *dev,
>>>           file.fd = -1;
>>>           r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
>>>           if (r) {
>>> -            goto fail_vector;
>>> +            goto fail;
>>>           }
>>>       }
>>>
>>>       return 0;
>>>
>>> -fail_vector:
>>> -fail_kick:
>>> -fail_alloc:
>>> +fail:
>>>       vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
>>>                          0, 0);
>>> -fail_alloc_used:
>>>       vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
>>>                          0, 0);
>>> -fail_alloc_avail:
>>>       vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
>>>                          0, 0);
>>> -fail_alloc_desc:
>>>       return r;
>>
>> This assumes that vq->{used, avail, desc} will be nulled out. I’m not
>> totally convinced that will be the case when a device is started and
>> stopped, or at least I don’t see the unmap path doing it.
>>
> 
> Oh, right, good caught. Seems we never zero these fields, and after do_vhost_virtqueue_stop()
> they become invalid. I'll rework it somehow.
> 
> I also notice now, that we do
> 
>      vq->used = vhost_memory_map(dev, a, &l, true);
>      if (!vq->used || l != s) {
>          r = -ENOMEM;
>          goto fail_alloc_used;
>      }
> 
> so, theoretically pre-patch we may leak the mapping in case when vq->used is not NULL but l != s after vhost_memory_map().
> 
> this should be fixed with this commit (of course, if fix also the problem you pointed out)

Oh, I forgot that the previous patch already fixes it.

> 
>>>   }
>>
>>>
>>> -- 
>>> 2.48.1
>>>
>>>
> 
> 


-- 
Best regards,
Vladimir