This is needed so the destination vdpa device sees the same state as the
guest set in the source.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
net/vhost-vdpa.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 52 insertions(+), 1 deletion(-)
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 0183fce353..2873be2ba4 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -383,7 +383,7 @@ static virtio_net_ctrl_ack vhost_vdpa_net_svq_add(VhostShadowVirtqueue *svq,
return VIRTIO_NET_OK;
}
-static int vhost_vdpa_start_control_svq(struct vhost_vdpa *v)
+static int vhost_vdpa_enable_control_svq(struct vhost_vdpa *v)
{
struct vhost_vring_state state = {
.index = v->dev->vq_index,
@@ -395,6 +395,57 @@ static int vhost_vdpa_start_control_svq(struct vhost_vdpa *v)
return r < 0 ? -errno : r;
}
+static int vhost_vdpa_start_control_svq(struct vhost_vdpa *v)
+{
+
+ VirtIONet *n = VIRTIO_NET(v->dev->vdev);
+ uint64_t features = v->dev->vdev->host_features;
+ VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, 0);
+ VhostVDPAState *s = container_of(v, VhostVDPAState, vhost_vdpa);
+ int r;
+
+ r = vhost_vdpa_enable_control_svq(v);
+ if (unlikely(r < 0)) {
+ return r;
+ }
+
+ if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) {
+ const struct virtio_net_ctrl_hdr ctrl = {
+ .class = VIRTIO_NET_CTRL_MAC,
+ .cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET,
+ };
+ uint8_t mac[6];
+ const struct iovec out[] = {
+ {
+ .iov_base = (void *)&ctrl,
+ .iov_len = sizeof(ctrl),
+ },{
+ .iov_base = mac,
+ .iov_len = sizeof(mac),
+ },
+ };
+ struct iovec dev_buffers[2] = {
+ { .iov_base = s->cvq_cmd_out_buffer },
+ { .iov_base = s->cvq_cmd_in_buffer },
+ };
+ bool ok;
+ virtio_net_ctrl_ack state;
+
+ ok = vhost_vdpa_net_cvq_map_sg(s, out, ARRAY_SIZE(out), dev_buffers);
+ if (unlikely(!ok)) {
+ return -1;
+ }
+
+ memcpy(mac, n->mac, sizeof(mac));
+ state = vhost_vdpa_net_svq_add(svq, dev_buffers);
+ vhost_vdpa_cvq_unmap_buf(v, dev_buffers[0].iov_base);
+ vhost_vdpa_cvq_unmap_buf(v, dev_buffers[1].iov_base);
+ return state == VIRTIO_NET_OK ? 0 : 1;
+ }
+
+ return 0;
+}
+
/**
* Do not forward commands not supported by SVQ. Otherwise, the device could
* accept it and qemu would not know how to update the device model.
--
2.31.1
在 2022/7/16 19:34, Eugenio Pérez 写道:
> This is needed so the destination vdpa device see the same state a the
> guest set in the source.
>
> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> ---
> net/vhost-vdpa.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 52 insertions(+), 1 deletion(-)
>
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index 0183fce353..2873be2ba4 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -383,7 +383,7 @@ static virtio_net_ctrl_ack vhost_vdpa_net_svq_add(VhostShadowVirtqueue *svq,
> return VIRTIO_NET_OK;
> }
>
> -static int vhost_vdpa_start_control_svq(struct vhost_vdpa *v)
> +static int vhost_vdpa_enable_control_svq(struct vhost_vdpa *v)
> {
> struct vhost_vring_state state = {
> .index = v->dev->vq_index,
> @@ -395,6 +395,57 @@ static int vhost_vdpa_start_control_svq(struct vhost_vdpa *v)
> return r < 0 ? -errno : r;
> }
>
> +static int vhost_vdpa_start_control_svq(struct vhost_vdpa *v)
> +{
> +
> + VirtIONet *n = VIRTIO_NET(v->dev->vdev);
> + uint64_t features = v->dev->vdev->host_features;
> + VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, 0);
> + VhostVDPAState *s = container_of(v, VhostVDPAState, vhost_vdpa);
> + int r;
> +
> + r = vhost_vdpa_enable_control_svq(v);
> + if (unlikely(r < 0)) {
> + return r;
> + }
> +
> + if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) {
> + const struct virtio_net_ctrl_hdr ctrl = {
> + .class = VIRTIO_NET_CTRL_MAC,
> + .cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET,
> + };
> + uint8_t mac[6];
> + const struct iovec out[] = {
> + {
> + .iov_base = (void *)&ctrl,
> + .iov_len = sizeof(ctrl),
> + },{
> + .iov_base = mac,
> + .iov_len = sizeof(mac),
> + },
> + };
> + struct iovec dev_buffers[2] = {
> + { .iov_base = s->cvq_cmd_out_buffer },
> + { .iov_base = s->cvq_cmd_in_buffer },
> + };
> + bool ok;
> + virtio_net_ctrl_ack state;
> +
> + ok = vhost_vdpa_net_cvq_map_sg(s, out, ARRAY_SIZE(out), dev_buffers);
> + if (unlikely(!ok)) {
> + return -1;
> + }
> +
> + memcpy(mac, n->mac, sizeof(mac));
> + state = vhost_vdpa_net_svq_add(svq, dev_buffers);
> + vhost_vdpa_cvq_unmap_buf(v, dev_buffers[0].iov_base);
> + vhost_vdpa_cvq_unmap_buf(v, dev_buffers[1].iov_base);
Any reason we do per buffer unmap instead of the sg unmap here?
Thanks
> + return state == VIRTIO_NET_OK ? 0 : 1;
> + }
> +
> + return 0;
> +}
> +
> /**
> * Do not forward commands not supported by SVQ. Otherwise, the device could
> * accept it and qemu would not know how to update the device model.
On Mon, Jul 18, 2022 at 10:55 AM Jason Wang <jasowang@redhat.com> wrote:
>
>
> 在 2022/7/16 19:34, Eugenio Pérez 写道:
> > This is needed so the destination vdpa device see the same state a the
> > guest set in the source.
> >
> > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > ---
> > net/vhost-vdpa.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++-
> > 1 file changed, 52 insertions(+), 1 deletion(-)
> >
> > diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> > index 0183fce353..2873be2ba4 100644
> > --- a/net/vhost-vdpa.c
> > +++ b/net/vhost-vdpa.c
> > @@ -383,7 +383,7 @@ static virtio_net_ctrl_ack vhost_vdpa_net_svq_add(VhostShadowVirtqueue *svq,
> > return VIRTIO_NET_OK;
> > }
> >
> > -static int vhost_vdpa_start_control_svq(struct vhost_vdpa *v)
> > +static int vhost_vdpa_enable_control_svq(struct vhost_vdpa *v)
> > {
> > struct vhost_vring_state state = {
> > .index = v->dev->vq_index,
> > @@ -395,6 +395,57 @@ static int vhost_vdpa_start_control_svq(struct vhost_vdpa *v)
> > return r < 0 ? -errno : r;
> > }
> >
> > +static int vhost_vdpa_start_control_svq(struct vhost_vdpa *v)
> > +{
> > +
> > + VirtIONet *n = VIRTIO_NET(v->dev->vdev);
> > + uint64_t features = v->dev->vdev->host_features;
> > + VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, 0);
> > + VhostVDPAState *s = container_of(v, VhostVDPAState, vhost_vdpa);
> > + int r;
> > +
> > + r = vhost_vdpa_enable_control_svq(v);
> > + if (unlikely(r < 0)) {
> > + return r;
> > + }
> > +
> > + if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) {
> > + const struct virtio_net_ctrl_hdr ctrl = {
> > + .class = VIRTIO_NET_CTRL_MAC,
> > + .cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET,
> > + };
> > + uint8_t mac[6];
> > + const struct iovec out[] = {
> > + {
> > + .iov_base = (void *)&ctrl,
> > + .iov_len = sizeof(ctrl),
> > + },{
> > + .iov_base = mac,
> > + .iov_len = sizeof(mac),
> > + },
> > + };
> > + struct iovec dev_buffers[2] = {
> > + { .iov_base = s->cvq_cmd_out_buffer },
> > + { .iov_base = s->cvq_cmd_in_buffer },
> > + };
> > + bool ok;
> > + virtio_net_ctrl_ack state;
> > +
> > + ok = vhost_vdpa_net_cvq_map_sg(s, out, ARRAY_SIZE(out), dev_buffers);
> > + if (unlikely(!ok)) {
> > + return -1;
> > + }
> > +
> > + memcpy(mac, n->mac, sizeof(mac));
> > + state = vhost_vdpa_net_svq_add(svq, dev_buffers);
> > + vhost_vdpa_cvq_unmap_buf(v, dev_buffers[0].iov_base);
> > + vhost_vdpa_cvq_unmap_buf(v, dev_buffers[1].iov_base);
>
>
> Any reason we do per buffer unmap instead of the sg unmap here?
>
I think I don't get this comment.
vhost_vdpa_net_handle_ctrl_avail also unmaps each buffer individually,
and I need a function to unmap one of them at a time. I could create a
function to unmap a whole sg, but I'm not sure how much value it adds.
Thanks!
> Thanks
>
>
> > + return state == VIRTIO_NET_OK ? 0 : 1;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > /**
> > * Do not forward commands not supported by SVQ. Otherwise, the device could
> > * accept it and qemu would not know how to update the device model.
>
© 2016 - 2026 Red Hat, Inc.