[RFC PATCH 09/12] vdpa: Extract vhost_vdpa_net_svq_add from vhost_vdpa_net_handle_ctrl_avail

Posted by Eugenio Pérez 3 years, 6 months ago
So we can reuse it to inject state messages.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 net/vhost-vdpa.c | 89 +++++++++++++++++++++++++++---------------------
 1 file changed, 51 insertions(+), 38 deletions(-)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 211bd0468b..aaae51a778 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -334,6 +334,54 @@ static bool vhost_vdpa_net_cvq_map_elem(VhostVDPAState *s,
     return true;
 }
 
+static virtio_net_ctrl_ack vhost_vdpa_net_svq_add(VhostShadowVirtqueue *svq,
+                                               const struct iovec *dev_buffers)
+{
+    /* in buffer used for device model */
+    virtio_net_ctrl_ack status;
+    const struct iovec in = {
+        .iov_base = &status,
+        .iov_len = sizeof(status),
+    };
+    size_t dev_written;
+    int r;
+    void *unused = (void *)1;
+
+    r = vhost_svq_add(svq, &dev_buffers[0], 1, &dev_buffers[1], 1, unused);
+    if (unlikely(r != 0)) {
+        if (unlikely(r == -ENOSPC)) {
+            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
+                          __func__);
+        }
+        return VIRTIO_NET_ERR;
+    }
+
+    /*
+     * We can poll here since we've had BQL from the time we sent the
+     * descriptor. Also, we need to take the answer before SVQ pulls by itself,
+     * when BQL is released
+     */
+    dev_written = vhost_svq_poll(svq);
+    if (unlikely(dev_written < sizeof(status))) {
+        error_report("Insufficient written data (%zu)", dev_written);
+        return VIRTIO_NET_ERR;
+    }
+
+    memcpy(&status, dev_buffers[1].iov_base, sizeof(status));
+    if (status != VIRTIO_NET_OK) {
+        return VIRTIO_NET_ERR;
+    }
+
+    status = VIRTIO_NET_ERR;
+    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, dev_buffers, 1);
+    if (status != VIRTIO_NET_OK) {
+        error_report("Bad CVQ processing in model");
+        return VIRTIO_NET_ERR;
+    }
+
+    return VIRTIO_NET_OK;
+}
+
 static int vhost_vdpa_start_control_svq(struct vhost_vdpa *v)
 {
     struct vhost_vring_state state = {
@@ -392,19 +440,13 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                             void *opaque)
 {
     VhostVDPAState *s = opaque;
-    size_t in_len, dev_written;
+    size_t in_len;
     virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
     /* out and in buffers sent to the device */
     struct iovec dev_buffers[2] = {
         { .iov_base = s->cvq_cmd_out_buffer },
         { .iov_base = s->cvq_cmd_in_buffer },
     };
-    /* in buffer used for device model */
-    const struct iovec in = {
-        .iov_base = &status,
-        .iov_len = sizeof(status),
-    };
-    int r;
     bool ok;
 
     ok = vhost_vdpa_net_cvq_map_elem(s, elem, dev_buffers);
@@ -417,36 +459,7 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
         goto out;
     }
 
-    r = vhost_svq_add(svq, &dev_buffers[0], 1, &dev_buffers[1], 1, elem);
-    if (unlikely(r != 0)) {
-        if (unlikely(r == -ENOSPC)) {
-            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
-                          __func__);
-        }
-        goto out;
-    }
-
-    /*
-     * We can poll here since we've had BQL from the time we sent the
-     * descriptor. Also, we need to take the answer before SVQ pulls by itself,
-     * when BQL is released
-     */
-    dev_written = vhost_svq_poll(svq);
-    if (unlikely(dev_written < sizeof(status))) {
-        error_report("Insufficient written data (%zu)", dev_written);
-        goto out;
-    }
-
-    memcpy(&status, dev_buffers[1].iov_base, sizeof(status));
-    if (status != VIRTIO_NET_OK) {
-        goto out;
-    }
-
-    status = VIRTIO_NET_ERR;
-    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, dev_buffers, 1);
-    if (status != VIRTIO_NET_OK) {
-        error_report("Bad CVQ processing in model");
-    }
+    status = vhost_vdpa_net_svq_add(svq, dev_buffers);
 
 out:
     in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
@@ -462,7 +475,7 @@ out:
     if (dev_buffers[1].iov_base) {
         vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, dev_buffers[1].iov_base);
     }
-    return r;
+    return status == VIRTIO_NET_OK ? 0 : 1;
 }
 
 static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
-- 
2.31.1
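
For context, a minimal sketch of the kind of reuse the commit message refers
to: a follow-up could build a CVQ command (here VIRTIO_NET_CTRL_MAC_ADDR_SET)
into cvq_cmd_out_buffer and hand it to vhost_vdpa_net_svq_add(). The wrapper
name vhost_vdpa_net_inject_mac() and its call site are assumptions for
illustration, not part of this series; it also assumes the
cvq_cmd_out_buffer/cvq_cmd_in_buffer pair is already mapped into the device
at this point and relies on the includes already present in net/vhost-vdpa.c.

static virtio_net_ctrl_ack vhost_vdpa_net_inject_mac(VhostVDPAState *s,
                                                     VhostShadowVirtqueue *svq,
                                                     const uint8_t *mac)
{
    /* Control header followed by the 6-byte MAC in the out buffer */
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = VIRTIO_NET_CTRL_MAC,
        .cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET,
    };
    struct iovec dev_buffers[2] = {
        { .iov_base = s->cvq_cmd_out_buffer,
          .iov_len = sizeof(ctrl) + ETH_ALEN },
        { .iov_base = s->cvq_cmd_in_buffer,
          .iov_len = sizeof(virtio_net_ctrl_ack) },
    };

    memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
    memcpy((uint8_t *)s->cvq_cmd_out_buffer + sizeof(ctrl), mac, ETH_ALEN);

    return vhost_vdpa_net_svq_add(svq, dev_buffers);
}

The helper returns VIRTIO_NET_OK only after both the device and the device
model have accepted the command, so injected state stays consistent with
virtio-net's internal view.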


Re: [RFC PATCH 09/12] vdpa: Extract vhost_vdpa_net_svq_add from vhost_vdpa_net_handle_ctrl_avail
Posted by Jason Wang 3 years, 6 months ago
On 2022/7/16 19:34, Eugenio Pérez wrote:
> So we can reuse it to inject state messages.
>
> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> ---
>   net/vhost-vdpa.c | 89 +++++++++++++++++++++++++++---------------------
>   1 file changed, 51 insertions(+), 38 deletions(-)
>
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index 211bd0468b..aaae51a778 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -334,6 +334,54 @@ static bool vhost_vdpa_net_cvq_map_elem(VhostVDPAState *s,
>       return true;
>   }
>   
> +static virtio_net_ctrl_ack vhost_vdpa_net_svq_add(VhostShadowVirtqueue *svq,
> +                                               const struct iovec *dev_buffers)


The name should be tweaked since it is used only for cvq.


> +{
> +    /* in buffer used for device model */
> +    virtio_net_ctrl_ack status;
> +    const struct iovec in = {
> +        .iov_base = &status,
> +        .iov_len = sizeof(status),
> +    };
> +    size_t dev_written;
> +    int r;
> +    void *unused = (void *)1;
> +
> +    r = vhost_svq_add(svq, &dev_buffers[0], 1, &dev_buffers[1], 1, unused);
> +    if (unlikely(r != 0)) {
> +        if (unlikely(r == -ENOSPC)) {
> +            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
> +                          __func__);
> +        }
> +        return VIRTIO_NET_ERR;
> +    }
> +
> +    /*
> +     * We can poll here since we've had BQL from the time we sent the
> +     * descriptor. Also, we need to take the answer before SVQ pulls by itself,
> +     * when BQL is released
> +     */


This reminds me: do we need an upper limit on the polling time here?
(To avoid holding the BQL for too long.)

Thanks


> +    dev_written = vhost_svq_poll(svq);
> +    if (unlikely(dev_written < sizeof(status))) {
> +        error_report("Insufficient written data (%zu)", dev_written);
> +        return VIRTIO_NET_ERR;
> +    }
> +
> +    memcpy(&status, dev_buffers[1].iov_base, sizeof(status));
> +    if (status != VIRTIO_NET_OK) {
> +        return VIRTIO_NET_ERR;
> +    }
> +
> +    status = VIRTIO_NET_ERR;
> +    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, dev_buffers, 1);
> +    if (status != VIRTIO_NET_OK) {
> +        error_report("Bad CVQ processing in model");
> +        return VIRTIO_NET_ERR;
> +    }
> +
> +    return VIRTIO_NET_OK;
> +}
> +
>   static int vhost_vdpa_start_control_svq(struct vhost_vdpa *v)
>   {
>       struct vhost_vring_state state = {
> @@ -392,19 +440,13 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
>                                               void *opaque)
>   {
>       VhostVDPAState *s = opaque;
> -    size_t in_len, dev_written;
> +    size_t in_len;
>       virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
>       /* out and in buffers sent to the device */
>       struct iovec dev_buffers[2] = {
>           { .iov_base = s->cvq_cmd_out_buffer },
>           { .iov_base = s->cvq_cmd_in_buffer },
>       };
> -    /* in buffer used for device model */
> -    const struct iovec in = {
> -        .iov_base = &status,
> -        .iov_len = sizeof(status),
> -    };
> -    int r;
>       bool ok;
>   
>       ok = vhost_vdpa_net_cvq_map_elem(s, elem, dev_buffers);
> @@ -417,36 +459,7 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
>           goto out;
>       }
>   
> -    r = vhost_svq_add(svq, &dev_buffers[0], 1, &dev_buffers[1], 1, elem);
> -    if (unlikely(r != 0)) {
> -        if (unlikely(r == -ENOSPC)) {
> -            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
> -                          __func__);
> -        }
> -        goto out;
> -    }
> -
> -    /*
> -     * We can poll here since we've had BQL from the time we sent the
> -     * descriptor. Also, we need to take the answer before SVQ pulls by itself,
> -     * when BQL is released
> -     */
> -    dev_written = vhost_svq_poll(svq);
> -    if (unlikely(dev_written < sizeof(status))) {
> -        error_report("Insufficient written data (%zu)", dev_written);
> -        goto out;
> -    }
> -
> -    memcpy(&status, dev_buffers[1].iov_base, sizeof(status));
> -    if (status != VIRTIO_NET_OK) {
> -        goto out;
> -    }
> -
> -    status = VIRTIO_NET_ERR;
> -    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, dev_buffers, 1);
> -    if (status != VIRTIO_NET_OK) {
> -        error_report("Bad CVQ processing in model");
> -    }
> +    status = vhost_vdpa_net_svq_add(svq, dev_buffers);
>   
>   out:
>       in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
> @@ -462,7 +475,7 @@ out:
>       if (dev_buffers[1].iov_base) {
>           vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, dev_buffers[1].iov_base);
>       }
> -    return r;
> +    return status == VIRTIO_NET_OK ? 0 : 1;
>   }
>   
>   static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {


Re: [RFC PATCH 09/12] vdpa: Extract vhost_vdpa_net_svq_add from vhost_vdpa_net_handle_ctrl_avail
Posted by Eugenio Perez Martin 3 years, 6 months ago
On Mon, Jul 18, 2022 at 10:53 AM Jason Wang <jasowang@redhat.com> wrote:
>
>
> On 2022/7/16 19:34, Eugenio Pérez wrote:
> > So we can reuse it to inject state messages.
> >
> > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > ---
> >   net/vhost-vdpa.c | 89 +++++++++++++++++++++++++++---------------------
> >   1 file changed, 51 insertions(+), 38 deletions(-)
> >
> > diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> > index 211bd0468b..aaae51a778 100644
> > --- a/net/vhost-vdpa.c
> > +++ b/net/vhost-vdpa.c
> > @@ -334,6 +334,54 @@ static bool vhost_vdpa_net_cvq_map_elem(VhostVDPAState *s,
> >       return true;
> >   }
> >
> > +static virtio_net_ctrl_ack vhost_vdpa_net_svq_add(VhostShadowVirtqueue *svq,
> > +                                               const struct iovec *dev_buffers)
>
>
> The name should be tweaked since it is used only for cvq.
>

Right, I'll change.
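
Something along these lines, just as a sketch (the new name is an assumption
here, not taken from a posted version):

static virtio_net_ctrl_ack
vhost_vdpa_net_cvq_add(VhostShadowVirtqueue *svq,
                       const struct iovec *dev_buffers);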

>
> > +{
> > +    /* in buffer used for device model */
> > +    virtio_net_ctrl_ack status;
> > +    const struct iovec in = {
> > +        .iov_base = &status,
> > +        .iov_len = sizeof(status),
> > +    };
> > +    size_t dev_written;
> > +    int r;
> > +    void *unused = (void *)1;
> > +
> > +    r = vhost_svq_add(svq, &dev_buffers[0], 1, &dev_buffers[1], 1, unused);
> > +    if (unlikely(r != 0)) {
> > +        if (unlikely(r == -ENOSPC)) {
> > +            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
> > +                          __func__);
> > +        }
> > +        return VIRTIO_NET_ERR;
> > +    }
> > +
> > +    /*
> > +     * We can poll here since we've had BQL from the time we sent the
> > +     * descriptor. Also, we need to take the answer before SVQ pulls by itself,
> > +     * when BQL is released
> > +     */
>
>
> This reminds me: do we need an upper limit on the polling time here?
> (To avoid holding the BQL for too long.)
>

I'm sending a new version of the rx filters here.

But we have other parts where we can hold the BQL forever because we trust
the device, like the vring enable syscalls, for example.

Thanks!
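
On the upper bound for the polling: a rough sketch of how vhost_svq_poll()
could be bounded so the BQL is never held indefinitely if the device stops
responding. The helper names vhost_svq_more_used()/vhost_svq_get_buf() and
the 10-second limit are assumptions for illustration; this series does not
implement it.

size_t vhost_svq_poll(VhostShadowVirtqueue *svq)
{
    int64_t start_us = g_get_monotonic_time();
    uint32_t len = 0;

    do {
        /* Stop as soon as the device marks the descriptor as used */
        if (vhost_svq_more_used(svq)) {
            break;
        }

        /* Give up instead of spinning forever under the BQL */
        if (unlikely(g_get_monotonic_time() - start_us > 10 * 1000 * 1000)) {
            return 0;
        }
    } while (true);

    vhost_svq_get_buf(svq, &len);
    return len;
}

A timeout would map naturally onto the existing error path: the caller
already treats anything shorter than sizeof(virtio_net_ctrl_ack) as
"Insufficient written data" and returns VIRTIO_NET_ERR.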