This is needed so the destination vdpa device sees the same state the
guest set in the source.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
v6:
* Map and unmap command buffers at the start and end of device usage.
v5:
* Rename s/start/load/
* Use independent NetClientInfo to only add load callback on cvq.
---
net/vhost-vdpa.c | 43 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 43 insertions(+)
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 10843e6d97..4f1524c2e9 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -363,11 +363,54 @@ static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
return vhost_svq_poll(svq);
}
+static int vhost_vdpa_net_load(NetClientState *nc)
+{
+ VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+ struct vhost_vdpa *v = &s->vhost_vdpa;
+ VirtIONet *n;
+ uint64_t features;
+
+ assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+
+ if (!v->shadow_vqs_enabled) {
+ return 0;
+ }
+
+ n = VIRTIO_NET(v->dev->vdev);
+ features = v->dev->vdev->host_features;
+ if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) {
+ const struct virtio_net_ctrl_hdr ctrl = {
+ .class = VIRTIO_NET_CTRL_MAC,
+ .cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET,
+ };
+ char *cursor = s->cvq_cmd_out_buffer;
+ ssize_t dev_written;
+ virtio_net_ctrl_ack state;
+
+ memcpy(cursor, &ctrl, sizeof(ctrl));
+ cursor += sizeof(ctrl);
+ memcpy(cursor, n->mac, sizeof(n->mac));
+ cursor += sizeof(n->mac);
+
+ dev_written = vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + sizeof(n->mac),
+ sizeof(state));
+ if (unlikely(dev_written < 0)) {
+ return dev_written;
+ }
+
+ memcpy(&state, s->cvq_cmd_in_buffer, sizeof(state));
+ return state == VIRTIO_NET_OK ? 0 : -1;
+ }
+
+ return 0;
+}
+
static NetClientInfo net_vhost_vdpa_cvq_info = {
.type = NET_CLIENT_DRIVER_VHOST_VDPA,
.size = sizeof(VhostVDPAState),
.receive = vhost_vdpa_receive,
.prepare = vhost_vdpa_net_cvq_prepare,
+ .load = vhost_vdpa_net_load,
.stop = vhost_vdpa_net_cvq_stop,
.cleanup = vhost_vdpa_cleanup,
.has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
--
2.31.1
On Fri, Aug 5, 2022 at 2:29 AM Eugenio Pérez <eperezma@redhat.com> wrote:
>
> [...]
>
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index 10843e6d97..4f1524c2e9 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -363,11 +363,54 @@ static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
> return vhost_svq_poll(svq);
> }
>
> +static int vhost_vdpa_net_load(NetClientState *nc)
> +{
> + VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
> + struct vhost_vdpa *v = &s->vhost_vdpa;
> + VirtIONet *n;
> + uint64_t features;
> +
> + assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
> +
> + if (!v->shadow_vqs_enabled) {
> + return 0;
> + }
> +
> + n = VIRTIO_NET(v->dev->vdev);
> + features = v->dev->vdev->host_features;
> + if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) {
> + const struct virtio_net_ctrl_hdr ctrl = {
> + .class = VIRTIO_NET_CTRL_MAC,
> + .cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET,
> + };
Can we build this directly from the cmd_out_buffer?
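Something like this, perhaps (an untested sketch; it assumes
cvq_cmd_out_buffer is still plain writable memory of at least
sizeof(*hdr) + sizeof(n->mac) bytes, which the current memcpy chain
already requires):

    struct virtio_net_ctrl_hdr *hdr = (void *)s->cvq_cmd_out_buffer;

    /* Build the header in place instead of memcpy()ing a stack copy */
    hdr->class = VIRTIO_NET_CTRL_MAC;
    hdr->cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
    /* The MAC payload starts right after the two-byte header */
    memcpy(hdr + 1, n->mac, sizeof(n->mac));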
> + char *cursor = s->cvq_cmd_out_buffer;
> + ssize_t dev_written;
> + virtio_net_ctrl_ack state;
I think we can read the status directly from the cmd_in_buffer.
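For instance (again an untested sketch; virtio_net_ctrl_ack is a single
byte, so alignment is not a concern):

    /* Read the ack straight out of the in buffer, no bounce copy */
    virtio_net_ctrl_ack state = *(virtio_net_ctrl_ack *)s->cvq_cmd_in_buffer;

    return state == VIRTIO_NET_OK ? 0 : -1;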
Thanks
On Tue, Aug 9, 2022 at 9:16 AM Jason Wang <jasowang@redhat.com> wrote:
>
> On Fri, Aug 5, 2022 at 2:29 AM Eugenio Pérez <eperezma@redhat.com> wrote:
> > [...]
> > + if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) {
> > + const struct virtio_net_ctrl_hdr ctrl = {
> > + .class = VIRTIO_NET_CTRL_MAC,
> > + .cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET,
> > + };
>
> Can we build this directly from the cmd_out_buffer?
>
> > + char *cursor = s->cvq_cmd_out_buffer;
> > + ssize_t dev_written;
> > + virtio_net_ctrl_ack state;
>
> I think we can read the status directly from the cmd_in_buffer.
>
Directly casting it to virtio_net_ctrl_ack? Sure.
Thanks!
On Tue, Aug 9, 2022 at 3:36 PM Eugenio Perez Martin <eperezma@redhat.com> wrote:
>
> On Tue, Aug 9, 2022 at 9:16 AM Jason Wang <jasowang@redhat.com> wrote:
> >
> > On Fri, Aug 5, 2022 at 2:29 AM Eugenio Pérez <eperezma@redhat.com> wrote:
> > > [...]
> > > + if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) {
> > > + const struct virtio_net_ctrl_hdr ctrl = {
> > > + .class = VIRTIO_NET_CTRL_MAC,
> > > + .cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET,
> > > + };
> >
> > Can we build this directly from the cmd_out_buffer?
> >
> > > + char *cursor = s->cvq_cmd_out_buffer;
> > > + ssize_t dev_written;
> > > + virtio_net_ctrl_ack state;
> >
> > I think we can read the status directly from the cmd_in_buffer.
> >
>
> Directly casting it to virtio_net_ctrl_ack? Sure.
Yes.
Thanks