Finally offer the possibility to enable SVQ from the command line.
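
For example, SVQ can then be enabled when creating the vhost-vdpa
netdev (a sketch: /dev/vhost-vdpa-0 stands in for whatever vhost-vdpa
character device the host exposes, and svq defaults to off when
omitted):

  -netdev vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0,svq=on \
  -device virtio-net-pci,netdev=vdpa0

The same boolean should also be reachable through QMP's netdev_add,
along these lines:

  { "execute": "netdev_add",
    "arguments": { "type": "vhost-vdpa", "id": "vdpa0",
                   "vhostdev": "/dev/vhost-vdpa-0", "svq": true } }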
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
qapi/net.json | 8 +++++++-
net/vhost-vdpa.c | 48 ++++++++++++++++++++++++++++++++++++++++--------
2 files changed, 47 insertions(+), 9 deletions(-)

diff --git a/qapi/net.json b/qapi/net.json
index 7fab2e7cd8..d626fa441c 100644
--- a/qapi/net.json
+++ b/qapi/net.json
@@ -445,12 +445,18 @@
# @queues: number of queues to be created for multiqueue vhost-vdpa
# (default: 1)
#
+# @svq: Start device with (experimental) shadow virtqueue. (Since 7.0)
+#
+# Features:
+# @unstable: Member @svq is experimental.
+#
# Since: 5.1
##
{ 'struct': 'NetdevVhostVDPAOptions',
'data': {
'*vhostdev': 'str',
- '*queues': 'int' } }
+ '*queues': 'int',
+ '*svq': {'type': 'bool', 'features' : [ 'unstable'] } } }

##
# @NetClientDriver:
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 1e9fe47c03..c827921654 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -127,7 +127,11 @@ err_init:
static void vhost_vdpa_cleanup(NetClientState *nc)
{
VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+ struct vhost_dev *dev = s->vhost_vdpa.dev;

+ if (dev && dev->vq_index + dev->nvqs == dev->vq_index_end) {
+ g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
+ }
if (s->vhost_net) {
vhost_net_cleanup(s->vhost_net);
g_free(s->vhost_net);
@@ -187,13 +191,23 @@ static NetClientInfo net_vhost_vdpa_info = {
.check_peer_type = vhost_vdpa_check_peer_type,
};

+static int vhost_vdpa_get_iova_range(int fd,
+ struct vhost_vdpa_iova_range *iova_range)
+{
+ int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);
+
+ return ret < 0 ? -errno : 0;
+}
+
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
- const char *device,
- const char *name,
- int vdpa_device_fd,
- int queue_pair_index,
- int nvqs,
- bool is_datapath)
+ const char *device,
+ const char *name,
+ int vdpa_device_fd,
+ int queue_pair_index,
+ int nvqs,
+ bool is_datapath,
+ bool svq,
+ VhostIOVATree *iova_tree)
{
NetClientState *nc = NULL;
VhostVDPAState *s;
@@ -211,6 +225,8 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,

s->vhost_vdpa.device_fd = vdpa_device_fd;
s->vhost_vdpa.index = queue_pair_index;
+ s->vhost_vdpa.shadow_vqs_enabled = svq;
+ s->vhost_vdpa.iova_tree = iova_tree;
ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
if (ret) {
qemu_del_net_client(nc);
@@ -266,6 +282,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
g_autofree NetClientState **ncs = NULL;
NetClientState *nc;
int queue_pairs, i, has_cvq = 0;
+ g_autoptr(VhostIOVATree) iova_tree = NULL;

assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
opts = &netdev->u.vhost_vdpa;
@@ -285,29 +302,44 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
qemu_close(vdpa_device_fd);
return queue_pairs;
}
+ if (opts->svq) {
+ struct vhost_vdpa_iova_range iova_range;
+
+ if (has_cvq) {
+ error_setg(errp, "vdpa svq does not work with cvq");
+ goto err_svq;
+ }
+ vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
+ iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
+ }

ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

for (i = 0; i < queue_pairs; i++) {
ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
- vdpa_device_fd, i, 2, true);
+ vdpa_device_fd, i, 2, true, opts->svq,
+ iova_tree);
if (!ncs[i])
goto err;
}

if (has_cvq) {
nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
- vdpa_device_fd, i, 1, false);
+ vdpa_device_fd, i, 1, false, opts->svq,
+ iova_tree);
if (!nc)
goto err;
}

+ iova_tree = NULL;
return 0;

err:
if (i) {
qemu_del_net_client(ncs[0]);
}
+
+err_svq:
qemu_close(vdpa_device_fd);

return -1;
--
2.27.0

On Mon, Mar 07, 2022 at 04:33:34PM +0100, Eugenio Pérez wrote:
> [...]
> +# @svq: Start device with (experimental) shadow virtqueue. (Since 7.0)
> +#
> +# Features:
> +# @unstable: Member @svq is experimental.
> +#
> # Since: 5.1
> ##
> { 'struct': 'NetdevVhostVDPAOptions',
> 'data': {
> '*vhostdev': 'str',
> - '*queues': 'int' } }
> + '*queues': 'int',
> + '*svq': {'type': 'bool', 'features' : [ 'unstable'] } } }
>
> ##
> # @NetClientDriver:

I think this should be x-svq same as other unstable features.

On Tue, Mar 8, 2022 at 8:11 AM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Mon, Mar 07, 2022 at 04:33:34PM +0100, Eugenio Pérez wrote:
> > [...]
> > + '*svq': {'type': 'bool', 'features' : [ 'unstable'] } } }
>
> I think this should be x-svq same as other unstable features.
>

I'm fine with both, but I was pointed in the other direction at [1] and [2].
Thanks!

[1] https://patchwork.kernel.org/project/qemu-devel/patch/20220302203012.3476835-15-eperezma@redhat.com/
[2] https://lore.kernel.org/qemu-devel/20220303185147.3605350-15-eperezma@redhat.com/

On Tue, Mar 8, 2022 at 8:32 AM Eugenio Perez Martin <eperezma@redhat.com> wrote:
>
> On Tue, Mar 8, 2022 at 8:11 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> >
> > On Mon, Mar 07, 2022 at 04:33:34PM +0100, Eugenio Pérez wrote:
> > > [...]
> > > + '*svq': {'type': 'bool', 'features' : [ 'unstable'] } } }
> >
> > I think this should be x-svq same as other unstable features.
> >
>
> I'm fine with both, but I was pointed in the other direction at [1] and [2].
>

(Sorry, I hit "send" too quick.)

What I totally missed was to change the subject of this patch; I could
send a new series with that if you want.

On Tue, Mar 08, 2022 at 08:32:07AM +0100, Eugenio Perez Martin wrote:
> On Tue, Mar 8, 2022 at 8:11 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> >
> > On Mon, Mar 07, 2022 at 04:33:34PM +0100, Eugenio Pérez wrote:
> > > [...]
> > > + '*svq': {'type': 'bool', 'features' : [ 'unstable'] } } }
> >
> > I think this should be x-svq same as other unstable features.
> >
>
> I'm fine with both, but I was pointed in the other direction at [1] and [2].
>
> Thanks!
>
> [1] https://patchwork.kernel.org/project/qemu-devel/patch/20220302203012.3476835-15-eperezma@redhat.com/
> [2] https://lore.kernel.org/qemu-devel/20220303185147.3605350-15-eperezma@redhat.com/

I think what Markus didn't know is that a bunch of changes in
behaviour will occur before we rename it to "svq".
The rename is thus less of a bother, more of a bonus.

On Tue, Mar 8, 2022 at 9:02 AM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Tue, Mar 08, 2022 at 08:32:07AM +0100, Eugenio Perez Martin wrote:
> > On Tue, Mar 8, 2022 at 8:11 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> > >
> > > On Mon, Mar 07, 2022 at 04:33:34PM +0100, Eugenio Pérez wrote:
> > > > [...]
> > > > + '*svq': {'type': 'bool', 'features' : [ 'unstable'] } } }
> > >
> > > I think this should be x-svq same as other unstable features.
> > >
> >
> > I'm fine with both, but I was pointed in the other direction at [1] and [2].
> >
> > Thanks!
> >
> > [1] https://patchwork.kernel.org/project/qemu-devel/patch/20220302203012.3476835-15-eperezma@redhat.com/
> > [2] https://lore.kernel.org/qemu-devel/20220303185147.3605350-15-eperezma@redhat.com/
>
>
> I think what Markus didn't know is that a bunch of changes in
> behaviour will occur before we rename it to "svq".
> The rename is thus less of a bother, more of a bonus.
>

I'm totally fine with going back to x-svq. I'm not sure if it's more
appropriate to have different modes as different parameters (svq=off,
dynamic-svq=on) or as different modes of the same parameter (svq=on vs
svq=on_migration). Or something totally different.
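
Spelled out, those two shapes would be something like the following
(hypothetical option names, purely to illustrate the alternatives):

  -netdev vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0,svq=off,dynamic-svq=on

versus:

  -netdev vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0,svq=on_migration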

My impression is that all of these changes are covered by @unstable,
but I can see the advantage of the x- prefix, since we have not come
to an agreement on it. I think this is the first time it has been
mentioned on the mailing list.

Do you want me to send a new series with the x- prefix?

Thanks!

On Tue, Mar 08, 2022 at 09:24:05AM +0100, Eugenio Perez Martin wrote:
> On Tue, Mar 8, 2022 at 9:02 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> >
> > On Tue, Mar 08, 2022 at 08:32:07AM +0100, Eugenio Perez Martin wrote:
> > > On Tue, Mar 8, 2022 at 8:11 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > >
> > > > On Mon, Mar 07, 2022 at 04:33:34PM +0100, Eugenio Pérez wrote:
> > > > > Finally offering the possibility to enable SVQ from the command line.
> > > > >
> > > > > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > > > > ---
> > > > > qapi/net.json | 8 +++++++-
> > > > > net/vhost-vdpa.c | 48 ++++++++++++++++++++++++++++++++++++++++--------
> > > > > 2 files changed, 47 insertions(+), 9 deletions(-)
> > > > >
> > > > > diff --git a/qapi/net.json b/qapi/net.json
> > > > > index 7fab2e7cd8..d626fa441c 100644
> > > > > --- a/qapi/net.json
> > > > > +++ b/qapi/net.json
> > > > > @@ -445,12 +445,18 @@
> > > > > # @queues: number of queues to be created for multiqueue vhost-vdpa
> > > > > # (default: 1)
> > > > > #
> > > > > +# @svq: Start device with (experimental) shadow virtqueue. (Since 7.0)
> > > > > +#
> > > > > +# Features:
> > > > > +# @unstable: Member @svq is experimental.
> > > > > +#
> > > > > # Since: 5.1
> > > > > ##
> > > > > { 'struct': 'NetdevVhostVDPAOptions',
> > > > > 'data': {
> > > > > '*vhostdev': 'str',
> > > > > - '*queues': 'int' } }
> > > > > + '*queues': 'int',
> > > > > + '*svq': {'type': 'bool', 'features' : [ 'unstable'] } } }
> > > > >
> > > > > ##
> > > > > # @NetClientDriver:
> > > >
> > > > I think this should be x-svq same as other unstable features.
> > > >
> > >
> > > I'm fine with both, but I was pointed in the other direction at [1] and [2].
> > >
> > > Thanks!
> > >
> > > [1] https://patchwork.kernel.org/project/qemu-devel/patch/20220302203012.3476835-15-eperezma@redhat.com/
> > > [2] https://lore.kernel.org/qemu-devel/20220303185147.3605350-15-eperezma@redhat.com/
> >
> >
> > I think what Markus didn't know is that a bunch of changes in
> > behaviour will occur before we rename it to "svq".
> > The rename is thus less of a bother, more of a bonus.
> >
>
> [...]
> Do you want me to send a new series with the x- prefix?

Sure, I think it's a prudent thing to do, simply because, as you say,
the semantics of the flag are still likely to change.
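
(With that rename, the command line sketched in the commit message
would presumably read:

  -netdev vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0,x-svq=on

with the same semantics while the option remains experimental.)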

Eugenio Pérez <eperezma@redhat.com> writes:
> [...]
> + '*svq': {'type': 'bool', 'features' : [ 'unstable'] } } }
> [...]

QAPI schema:
Acked-by: Markus Armbruster <armbru@redhat.com>