[RFC v5 5/7] vhost: Forward descriptors to guest via packed vqs

Sahil Siddiq posted 7 patches 1 week ago
[RFC v5 5/7] vhost: Forward descriptors to guest via packed vqs
Posted by Sahil Siddiq 1 week ago
Detect when used descriptors are ready for consumption by the guest via
packed virtqueues and forward them from the device to the guest.

Signed-off-by: Sahil Siddiq <sahilcdq@proton.me>
---
Changes from v4 -> v5:
- New commit.
- vhost-shadow-virtqueue.c:
  (vhost_svq_more_used): Split into vhost_svq_more_used_split and
  vhost_svq_more_used_packed.
  (vhost_svq_enable_notification): Handle split and packed vqs.
  (vhost_svq_disable_notification): Likewise.
  (vhost_svq_get_buf): Split into vhost_svq_get_buf_split and
  vhost_svq_get_buf_packed.
  (vhost_svq_poll): Use new functions.

 hw/virtio/vhost-shadow-virtqueue.c | 121 ++++++++++++++++++++++++++---
 1 file changed, 110 insertions(+), 11 deletions(-)

diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index 126957231d..8430b3c94a 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -463,7 +463,7 @@ static void vhost_handle_guest_kick_notifier(EventNotifier *n)
     vhost_handle_guest_kick(svq);
 }
 
-static bool vhost_svq_more_used(VhostShadowVirtqueue *svq)
+static bool vhost_svq_more_used_split(VhostShadowVirtqueue *svq)
 {
     uint16_t *used_idx = &svq->vring.used->idx;
     if (svq->last_used_idx != svq->shadow_used_idx) {
@@ -475,6 +475,22 @@ static bool vhost_svq_more_used(VhostShadowVirtqueue *svq)
     return svq->last_used_idx != svq->shadow_used_idx;
 }
 
+static bool vhost_svq_more_used_packed(VhostShadowVirtqueue *svq)
+{
+    bool avail_flag, used_flag, used_wrap_counter;
+    uint16_t last_used_idx, last_used, flags;
+
+    last_used_idx = svq->last_used_idx;
+    last_used = last_used_idx & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
+    used_wrap_counter = !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
+
+    flags = le16_to_cpu(svq->vring_packed.vring.desc[last_used].flags);
+    avail_flag = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
+    used_flag = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
+
+    return avail_flag == used_flag && used_flag == used_wrap_counter;
+}
+
 /**
  * Enable vhost device calls after disable them.
  *
@@ -486,16 +502,31 @@ static bool vhost_svq_more_used(VhostShadowVirtqueue *svq)
  */
 static bool vhost_svq_enable_notification(VhostShadowVirtqueue *svq)
 {
+    bool more_used;
     if (virtio_vdev_has_feature(svq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
-        uint16_t *used_event = (uint16_t *)&svq->vring.avail->ring[svq->vring.num];
-        *used_event = cpu_to_le16(svq->shadow_used_idx);
+        if (!svq->is_packed) {
+            uint16_t *used_event = (uint16_t *)&svq->vring.avail->ring[svq->vring.num];
+            *used_event = cpu_to_le16(svq->shadow_used_idx);
+        }
     } else {
-        svq->vring.avail->flags &= ~cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
+        if (svq->is_packed) {
+            /* vq->vring_packed.vring.driver->off_wrap = cpu_to_le16(svq->last_used_idx); */
+            svq->vring_packed.vring.driver->flags =
+                cpu_to_le16(VRING_PACKED_EVENT_FLAG_ENABLE);
+        } else {
+            svq->vring.avail->flags &= ~cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
+        }
     }
 
     /* Make sure the event is enabled before the read of used_idx */
     smp_mb();
-    return !vhost_svq_more_used(svq);
+    if (svq->is_packed) {
+        more_used = !vhost_svq_more_used_packed(svq);
+    } else {
+        more_used = !vhost_svq_more_used_split(svq);
+    }
+
+    return more_used;
 }
 
 static void vhost_svq_disable_notification(VhostShadowVirtqueue *svq)
@@ -505,7 +536,12 @@ static void vhost_svq_disable_notification(VhostShadowVirtqueue *svq)
      * index is already an index too far away.
      */
     if (!virtio_vdev_has_feature(svq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
-        svq->vring.avail->flags |= cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
+        if (svq->is_packed) {
+            svq->vring_packed.vring.driver->flags =
+                cpu_to_le16(VRING_PACKED_EVENT_FLAG_DISABLE);
+        } else {
+            svq->vring.avail->flags |= cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
+        }
     }
 }
 
@@ -519,15 +555,14 @@ static uint16_t vhost_svq_last_desc_of_chain(const VhostShadowVirtqueue *svq,
     return i;
 }
 
-G_GNUC_WARN_UNUSED_RESULT
-static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
-                                           uint32_t *len)
+static VirtQueueElement *vhost_svq_get_buf_split(VhostShadowVirtqueue *svq,
+                                                 uint32_t *len)
 {
     const vring_used_t *used = svq->vring.used;
     vring_used_elem_t used_elem;
     uint16_t last_used, last_used_chain, num;
 
-    if (!vhost_svq_more_used(svq)) {
+    if (!vhost_svq_more_used_split(svq)) {
         return NULL;
     }
 
@@ -562,6 +597,66 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
     return g_steal_pointer(&svq->desc_state[used_elem.id].elem);
 }
 
+static VirtQueueElement *vhost_svq_get_buf_packed(VhostShadowVirtqueue *svq,
+                                                  uint32_t *len)
+{
+    bool used_wrap_counter;
+    uint16_t last_used_idx, last_used, id, num, last_used_chain;
+
+    if (!vhost_svq_more_used_packed(svq)) {
+        return NULL;
+    }
+
+    /* Only get used array entries after they have been exposed by dev */
+    smp_rmb();
+    last_used_idx = svq->last_used_idx;
+    last_used = last_used_idx & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
+    used_wrap_counter = !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
+    id = le32_to_cpu(svq->vring_packed.vring.desc[last_used].id);
+    *len = le32_to_cpu(svq->vring_packed.vring.desc[last_used].len);
+
+    if (unlikely(id >= svq->vring.num)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "Device %s says index %u is used",
+                      svq->vdev->name, id);
+        return NULL;
+    }
+
+    if (unlikely(!svq->desc_state[id].ndescs)) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+            "Device %s says index %u is used, but it was not available",
+            svq->vdev->name, id);
+        return NULL;
+    }
+
+    num = svq->desc_state[id].ndescs;
+    svq->desc_state[id].ndescs = 0;
+    last_used_chain = vhost_svq_last_desc_of_chain(svq, num, id);
+    svq->desc_next[last_used_chain] = svq->free_head;
+    svq->free_head = id;
+    svq->num_free += num;
+
+    last_used += num;
+    if (unlikely(last_used >= svq->vring_packed.vring.num)) {
+        last_used -= svq->vring_packed.vring.num;
+        used_wrap_counter ^= 1;
+    }
+
+    last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
+    svq->last_used_idx = last_used;
+    return g_steal_pointer(&svq->desc_state[id].elem);
+}
+
+G_GNUC_WARN_UNUSED_RESULT
+static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
+                                           uint32_t *len)
+{
+    if (svq->is_packed) {
+        return vhost_svq_get_buf_packed(svq, len);
+    }
+
+    return vhost_svq_get_buf_split(svq, len);
+}
+
 /**
  * Push an element to SVQ, returning it to the guest.
  */
@@ -639,7 +734,11 @@ size_t vhost_svq_poll(VhostShadowVirtqueue *svq, size_t num)
         uint32_t r = 0;
 
         do {
-            if (vhost_svq_more_used(svq)) {
+            if (!svq->is_packed && vhost_svq_more_used_split(svq)) {
+                break;
+            }
+
+            if (svq->is_packed && vhost_svq_more_used_packed(svq)) {
                 break;
             }
 
-- 
2.48.1
Re: [RFC v5 5/7] vhost: Forward descriptors to guest via packed vqs
Posted by Sahil Siddiq 1 week ago
Hi,

I had a few more queries here as well.

On 3/24/25 7:29 PM, Sahil Siddiq wrote:
> Detect when used descriptors are ready for consumption by the guest via
> packed virtqueues and forward them from the device to the guest.
> 
> Signed-off-by: Sahil Siddiq <sahilcdq@proton.me>
> ---
> Changes from v4 -> v5:
> - New commit.
> - vhost-shadow-virtqueue.c:
>    (vhost_svq_more_used): Split into vhost_svq_more_used_split and
>    vhost_svq_more_used_packed.
>    (vhost_svq_enable_notification): Handle split and packed vqs.
>    (vhost_svq_disable_notification): Likewise.
>    (vhost_svq_get_buf): Split into vhost_svq_get_buf_split and
>    vhost_svq_get_buf_packed.
>    (vhost_svq_poll): Use new functions.
> 
>   hw/virtio/vhost-shadow-virtqueue.c | 121 ++++++++++++++++++++++++++---
>   1 file changed, 110 insertions(+), 11 deletions(-)
> 
> diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
> index 126957231d..8430b3c94a 100644
> --- a/hw/virtio/vhost-shadow-virtqueue.c
> +++ b/hw/virtio/vhost-shadow-virtqueue.c
> @@ -463,7 +463,7 @@ static void vhost_handle_guest_kick_notifier(EventNotifier *n)
>       vhost_handle_guest_kick(svq);
>   }
>   
> -static bool vhost_svq_more_used(VhostShadowVirtqueue *svq)
> +static bool vhost_svq_more_used_split(VhostShadowVirtqueue *svq)
>   {
>       uint16_t *used_idx = &svq->vring.used->idx;
>       if (svq->last_used_idx != svq->shadow_used_idx) {
> @@ -475,6 +475,22 @@ static bool vhost_svq_more_used(VhostShadowVirtqueue *svq)
>       return svq->last_used_idx != svq->shadow_used_idx;
>   }
>   
> +static bool vhost_svq_more_used_packed(VhostShadowVirtqueue *svq)
> +{
> +    bool avail_flag, used_flag, used_wrap_counter;
> +    uint16_t last_used_idx, last_used, flags;
> +
> +    last_used_idx = svq->last_used_idx;
> +    last_used = last_used_idx & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);

In the linux kernel, last_used is calculated as:

last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR))

...instead of...

last_used_idx & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR)

Isn't the second option good enough if last_used_idx is uint16_t
and VRING_PACKED_EVENT_F_WRAP_CTR is defined as 15?

> +    used_wrap_counter = !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
> +
> +    flags = le16_to_cpu(svq->vring_packed.vring.desc[last_used].flags);
> +    avail_flag = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
> +    used_flag = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
> +
> +    return avail_flag == used_flag && used_flag == used_wrap_counter;
> +}
> +

Also in the implementation of vhost_svq_more_used_split() [1], I haven't
understood why the following condition:

svq->last_used_idx != svq->shadow_used_idx

is checked before updating the value of "svq->shadow_used_idx":

svq->shadow_used_idx = le16_to_cpu(*(volatile uint16_t *)used_idx)

Thanks,
Sahil

[1] https://gitlab.com/qemu-project/qemu/-/blob/master/hw/virtio/vhost-shadow-virtqueue.c#L387
Re: [RFC v5 5/7] vhost: Forward descriptors to guest via packed vqs
Posted by Eugenio Perez Martin 5 days, 23 hours ago
On Mon, Mar 24, 2025 at 3:34 PM Sahil Siddiq <icegambit91@gmail.com> wrote:
>
> Hi,
>
> I had a few more queries here as well.
>
> On 3/24/25 7:29 PM, Sahil Siddiq wrote:
> > Detect when used descriptors are ready for consumption by the guest via
> > packed virtqueues and forward them from the device to the guest.
> >
> > Signed-off-by: Sahil Siddiq <sahilcdq@proton.me>
> > ---
> > Changes from v4 -> v5:
> > - New commit.
> > - vhost-shadow-virtqueue.c:
> >    (vhost_svq_more_used): Split into vhost_svq_more_used_split and
> >    vhost_svq_more_used_packed.
> >    (vhost_svq_enable_notification): Handle split and packed vqs.
> >    (vhost_svq_disable_notification): Likewise.
> >    (vhost_svq_get_buf): Split into vhost_svq_get_buf_split and
> >    vhost_svq_get_buf_packed.
> >    (vhost_svq_poll): Use new functions.
> >
> >   hw/virtio/vhost-shadow-virtqueue.c | 121 ++++++++++++++++++++++++++---
> >   1 file changed, 110 insertions(+), 11 deletions(-)
> >
> > diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
> > index 126957231d..8430b3c94a 100644
> > --- a/hw/virtio/vhost-shadow-virtqueue.c
> > +++ b/hw/virtio/vhost-shadow-virtqueue.c
> > @@ -463,7 +463,7 @@ static void vhost_handle_guest_kick_notifier(EventNotifier *n)
> >       vhost_handle_guest_kick(svq);
> >   }
> >
> > -static bool vhost_svq_more_used(VhostShadowVirtqueue *svq)
> > +static bool vhost_svq_more_used_split(VhostShadowVirtqueue *svq)
> >   {
> >       uint16_t *used_idx = &svq->vring.used->idx;
> >       if (svq->last_used_idx != svq->shadow_used_idx) {
> > @@ -475,6 +475,22 @@ static bool vhost_svq_more_used(VhostShadowVirtqueue *svq)
> >       return svq->last_used_idx != svq->shadow_used_idx;
> >   }
> >
> > +static bool vhost_svq_more_used_packed(VhostShadowVirtqueue *svq)
> > +{
> > +    bool avail_flag, used_flag, used_wrap_counter;
> > +    uint16_t last_used_idx, last_used, flags;
> > +
> > +    last_used_idx = svq->last_used_idx;
> > +    last_used = last_used_idx & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
>
> In the linux kernel, last_used is calculated as:
>
> last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR))
>
> ...instead of...
>
> last_used_idx & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR)
>
> Isn't the second option good enough if last_used_idx is uint16_t
> and VRING_PACKED_EVENT_F_WRAP_CTR is defined as 15.
>

I think it is good enough with the u16 restrictions but it's just
defensive code.

> > +    used_wrap_counter = !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
> > +
> > +    flags = le16_to_cpu(svq->vring_packed.vring.desc[last_used].flags);
> > +    avail_flag = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
> > +    used_flag = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
> > +
> > +    return avail_flag == used_flag && used_flag == used_wrap_counter;
> > +}
> > +
>
> Also in the implementation of vhost_svq_more_used_split() [1], I haven't
> understood why the following condition:
>
> svq->last_used_idx != svq->shadow_used_idx
>
> is checked before updating the value of "svq->shadow_used_idx":
>
> svq->shadow_used_idx = le16_to_cpu(*(volatile uint16_t *)used_idx)
>

As far as I know this is used to avoid concurrent access to guest's
used_idx, avoiding cache sharing, the memory barrier, and the
potentially costly volatile access.
Re: [RFC v5 5/7] vhost: Forward descriptors to guest via packed vqs
Posted by Sahil Siddiq 4 days, 2 hours ago
Hi,

On 3/26/25 2:04 PM, Eugenio Perez Martin wrote:
> On Mon, Mar 24, 2025 at 3:34 PM Sahil Siddiq <icegambit91@gmail.com> wrote:
>>
>> Hi,
>>
>> I had a few more queries here as well.
>>
>> On 3/24/25 7:29 PM, Sahil Siddiq wrote:
>>> Detect when used descriptors are ready for consumption by the guest via
>>> packed virtqueues and forward them from the device to the guest.
>>>
>>> Signed-off-by: Sahil Siddiq <sahilcdq@proton.me>
>>> ---
>>> Changes from v4 -> v5:
>>> - New commit.
>>> - vhost-shadow-virtqueue.c:
>>>     (vhost_svq_more_used): Split into vhost_svq_more_used_split and
>>>     vhost_svq_more_used_packed.
>>>     (vhost_svq_enable_notification): Handle split and packed vqs.
>>>     (vhost_svq_disable_notification): Likewise.
>>>     (vhost_svq_get_buf): Split into vhost_svq_get_buf_split and
>>>     vhost_svq_get_buf_packed.
>>>     (vhost_svq_poll): Use new functions.
>>>
>>>    hw/virtio/vhost-shadow-virtqueue.c | 121 ++++++++++++++++++++++++++---
>>>    1 file changed, 110 insertions(+), 11 deletions(-)
>>>
>>> diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
>>> index 126957231d..8430b3c94a 100644
>>> --- a/hw/virtio/vhost-shadow-virtqueue.c
>>> +++ b/hw/virtio/vhost-shadow-virtqueue.c
>>> @@ -463,7 +463,7 @@ static void vhost_handle_guest_kick_notifier(EventNotifier *n)
>>>        vhost_handle_guest_kick(svq);
>>>    }
>>>
>>> -static bool vhost_svq_more_used(VhostShadowVirtqueue *svq)
>>> +static bool vhost_svq_more_used_split(VhostShadowVirtqueue *svq)
>>>    {
>>>        uint16_t *used_idx = &svq->vring.used->idx;
>>>        if (svq->last_used_idx != svq->shadow_used_idx) {
>>> @@ -475,6 +475,22 @@ static bool vhost_svq_more_used(VhostShadowVirtqueue *svq)
>>>        return svq->last_used_idx != svq->shadow_used_idx;
>>>    }
>>>
>>> +static bool vhost_svq_more_used_packed(VhostShadowVirtqueue *svq)
>>> +{
>>> +    bool avail_flag, used_flag, used_wrap_counter;
>>> +    uint16_t last_used_idx, last_used, flags;
>>> +
>>> +    last_used_idx = svq->last_used_idx;
>>> +    last_used = last_used_idx & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
>>
>> In the linux kernel, last_used is calculated as:
>>
>> last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR))
>>
>> ...instead of...
>>
>> last_used_idx & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR)
>>
>> Isn't the second option good enough if last_used_idx is uint16_t
>> and VRING_PACKED_EVENT_F_WRAP_CTR is defined as 15.
>>
> 
> I think it is good enough with the u16 restrictions but it's just
> defensive code.
> 

Got it. I think it'll be better, then, to follow the implementation in
the kernel to keep it more robust.

>>> +    used_wrap_counter = !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
>>> +
>>> +    flags = le16_to_cpu(svq->vring_packed.vring.desc[last_used].flags);
>>> +    avail_flag = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
>>> +    used_flag = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
>>> +
>>> +    return avail_flag == used_flag && used_flag == used_wrap_counter;
>>> +}
>>> +
>>
>> Also in the implementation of vhost_svq_more_used_split() [1], I haven't
>> understood why the following condition:
>>
>> svq->last_used_idx != svq->shadow_used_idx
>>
>> is checked before updating the value of "svq->shadow_used_idx":
>>
>> svq->shadow_used_idx = le16_to_cpu(*(volatile uint16_t *)used_idx)
>>
> 
> As far as I know this is used to avoid concurrent access to guest's
> used_idx, avoiding cache sharing, the memory barrier, and the
> potentially costly volatile access.
> 

By concurrent access, do you mean in case one thread has already updated
the value of used_idx?

Thanks,
Sahil

Re: [RFC v5 5/7] vhost: Forward descriptors to guest via packed vqs
Posted by Eugenio Perez Martin 4 days ago
On Fri, Mar 28, 2025 at 6:22 AM Sahil Siddiq <icegambit91@gmail.com> wrote:
>
> Hi,
>
> On 3/26/25 2:04 PM, Eugenio Perez Martin wrote:
> > On Mon, Mar 24, 2025 at 3:34 PM Sahil Siddiq <icegambit91@gmail.com> wrote:
> >>
> >> Hi,
> >>
> >> I had a few more queries here as well.
> >>
> >> On 3/24/25 7:29 PM, Sahil Siddiq wrote:
> >>> Detect when used descriptors are ready for consumption by the guest via
> >>> packed virtqueues and forward them from the device to the guest.
> >>>
> >>> Signed-off-by: Sahil Siddiq <sahilcdq@proton.me>
> >>> ---
> >>> Changes from v4 -> v5:
> >>> - New commit.
> >>> - vhost-shadow-virtqueue.c:
> >>>     (vhost_svq_more_used): Split into vhost_svq_more_used_split and
> >>>     vhost_svq_more_used_packed.
> >>>     (vhost_svq_enable_notification): Handle split and packed vqs.
> >>>     (vhost_svq_disable_notification): Likewise.
> >>>     (vhost_svq_get_buf): Split into vhost_svq_get_buf_split and
> >>>     vhost_svq_get_buf_packed.
> >>>     (vhost_svq_poll): Use new functions.
> >>>
> >>>    hw/virtio/vhost-shadow-virtqueue.c | 121 ++++++++++++++++++++++++++---
> >>>    1 file changed, 110 insertions(+), 11 deletions(-)
> >>>
> >>> diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
> >>> index 126957231d..8430b3c94a 100644
> >>> --- a/hw/virtio/vhost-shadow-virtqueue.c
> >>> +++ b/hw/virtio/vhost-shadow-virtqueue.c
> >>> @@ -463,7 +463,7 @@ static void vhost_handle_guest_kick_notifier(EventNotifier *n)
> >>>        vhost_handle_guest_kick(svq);
> >>>    }
> >>>
> >>> -static bool vhost_svq_more_used(VhostShadowVirtqueue *svq)
> >>> +static bool vhost_svq_more_used_split(VhostShadowVirtqueue *svq)
> >>>    {
> >>>        uint16_t *used_idx = &svq->vring.used->idx;
> >>>        if (svq->last_used_idx != svq->shadow_used_idx) {
> >>> @@ -475,6 +475,22 @@ static bool vhost_svq_more_used(VhostShadowVirtqueue *svq)
> >>>        return svq->last_used_idx != svq->shadow_used_idx;
> >>>    }
> >>>
> >>> +static bool vhost_svq_more_used_packed(VhostShadowVirtqueue *svq)
> >>> +{
> >>> +    bool avail_flag, used_flag, used_wrap_counter;
> >>> +    uint16_t last_used_idx, last_used, flags;
> >>> +
> >>> +    last_used_idx = svq->last_used_idx;
> >>> +    last_used = last_used_idx & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
> >>
> >> In the linux kernel, last_used is calculated as:
> >>
> >> last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR))
> >>
> >> ...instead of...
> >>
> >> last_used_idx & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR)
> >>
> >> Isn't the second option good enough if last_used_idx is uint16_t
> >> and VRING_PACKED_EVENT_F_WRAP_CTR is defined as 15.
> >>
> >
> > I think it is good enough with the u16 restrictions but it's just
> > defensive code.
> >
>
> Got it. I think it'll be better then to follow the implementation in
> the kernel to keep it more robust.
>
> >>> +    used_wrap_counter = !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
> >>> +
> >>> +    flags = le16_to_cpu(svq->vring_packed.vring.desc[last_used].flags);
> >>> +    avail_flag = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
> >>> +    used_flag = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
> >>> +
> >>> +    return avail_flag == used_flag && used_flag == used_wrap_counter;
> >>> +}
> >>> +
> >>
> >> Also in the implementation of vhost_svq_more_used_split() [1], I haven't
> >> understood why the following condition:
> >>
> >> svq->last_used_idx != svq->shadow_used_idx
> >>
> >> is checked before updating the value of "svq->shadow_used_idx":
> >>
> >> svq->shadow_used_idx = le16_to_cpu(*(volatile uint16_t *)used_idx)
> >>
> >
> > As far as I know this is used to avoid concurrent access to guest's
> > used_idx, avoiding cache sharing, the memory barrier, and the
> > potentially costly volatile access.
> >
>
> By concurrent access, do you mean in case one thread has already updated
> the value of used_idx?
>

Yes, concurrent access by the driver and the device. This could be the
case of different threads if the device is virtual in QEMU. The two
CPU threads are accessing the same memory region.