[Qemu-devel] [PATCH RFC] virtio-net: enable configurable tx queue size

Wei Wang posted 1 patch 6 years, 11 months ago
Patches applied successfully (tree, apply log)
git fetch https://github.com/patchew-project/qemu tags/patchew/1495161139-28757-1-git-send-email-wei.w.wang@intel.com
Test checkpatch passed
Test docker passed
Test s390x passed
There is a newer version of this series
hw/net/virtio-net.c                         | 71 +++++++++++++++++++++++++++--
include/hw/virtio/virtio-net.h              |  1 +
include/standard-headers/linux/virtio_net.h |  3 ++
3 files changed, 71 insertions(+), 4 deletions(-)
[Qemu-devel] [PATCH RFC] virtio-net: enable configurable tx queue size
Posted by Wei Wang 6 years, 11 months ago
This patch makes the virtio-net tx queue size configurable by the user,
between 256 (the default queue size) and 1024. The queue size specified
by the user must be a power of 2.

Setting the tx queue size to 1024 requires the guest driver to support
the VIRTIO_NET_F_MAX_CHAIN_SIZE feature. This feature keeps the guest
driver from chaining 1024 or more vring descriptors, which could
otherwise cause the device side implementation to pass more than 1024
iovs (i.e. more than IOV_MAX) to writev. Currently, the max chain size
allowed for the guest driver is set to 1023.

If the tx queue size is set to 1024 but the VIRTIO_NET_F_MAX_CHAIN_SIZE
feature is not supported by the guest driver, the default tx queue size
(256) is used instead.
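
For example, with this patch a larger tx queue can be requested on the
QEMU command line (the netdev name below is only illustrative):

  -device virtio-net-pci,netdev=net0,tx_queue_size=1024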

Signed-off-by: Wei Wang <wei.w.wang@intel.com>
---
 hw/net/virtio-net.c                         | 71 +++++++++++++++++++++++++++--
 include/hw/virtio/virtio-net.h              |  1 +
 include/standard-headers/linux/virtio_net.h |  3 ++
 3 files changed, 71 insertions(+), 4 deletions(-)

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 7d091c9..ef38cb1 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -33,8 +33,12 @@
 
 /* previously fixed value */
 #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
+#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256
 /* for now, only allow larger queues; with virtio-1, guest can downsize */
 #define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
+#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
+
+#define VIRTIO_NET_MAX_CHAIN_SIZE 1023
 
 /*
  * Calculate the number of bytes up to and including the given 'field' of
@@ -57,6 +61,8 @@ static VirtIOFeature feature_sizes[] = {
      .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
     {.flags = 1 << VIRTIO_NET_F_MTU,
      .end = endof(struct virtio_net_config, mtu)},
+    {.flags = 1 << VIRTIO_NET_F_MAX_CHAIN_SIZE,
+     .end = endof(struct virtio_net_config, max_chain_size)},
     {}
 };
 
@@ -84,6 +90,7 @@ static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
     virtio_stw_p(vdev, &netcfg.status, n->status);
     virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
     virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
+    virtio_stw_p(vdev, &netcfg.max_chain_size, VIRTIO_NET_MAX_CHAIN_SIZE);
     memcpy(netcfg.mac, n->mac, ETH_ALEN);
     memcpy(config, &netcfg, n->config_size);
 }
@@ -568,6 +575,7 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
     features |= n->host_features;
 
     virtio_add_feature(&features, VIRTIO_NET_F_MAC);
+    virtio_add_feature(&features, VIRTIO_NET_F_MAX_CHAIN_SIZE);
 
     if (!peer_has_vnet_hdr(n)) {
         virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
@@ -603,6 +611,7 @@ static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
     virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
     virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
     virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);
+    virtio_add_feature(&features, VIRTIO_NET_F_MAX_CHAIN_SIZE);
 
     return features;
 }
@@ -635,6 +644,27 @@ static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
     return virtio_net_guest_offloads_by_features(vdev->guest_features);
 }
 
+static bool is_tx(int queue_index)
+{
+    return queue_index % 2 == 1;
+}
+
+static void virtio_net_change_tx_queue_size(VirtIONet *n)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(n);
+    int i, num_queues = virtio_get_num_queues(vdev);
+
+    if (n->net_conf.tx_queue_size == VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE) {
+        return;
+    }
+
+    for (i = 0; i < num_queues; i++) {
+        if (is_tx(i)) {
+            virtio_queue_set_num(vdev, i, n->net_conf.tx_queue_size);
+        }
+    }
+}
+
 static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
 {
     VirtIONet *n = VIRTIO_NET(vdev);
@@ -649,6 +679,16 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
                                virtio_has_feature(features,
                                                   VIRTIO_F_VERSION_1));
 
+    /*
+     * Change the tx queue size if the guest supports
+     * VIRTIO_NET_F_MAX_CHAIN_SIZE. The feature keeps the guest from sending
+     * a very long chain of vring descriptors (e.g. 1024), which could make
+     * the device pass more than IOV_MAX (1024) iovs to writev.
+     */
+    if (virtio_has_feature(features, VIRTIO_NET_F_MAX_CHAIN_SIZE)) {
+        virtio_net_change_tx_queue_size(n);
+    }
+
     if (n->has_vnet_hdr) {
         n->curr_guest_offloads =
             virtio_net_guest_offloads_by_features(features);
@@ -1297,8 +1337,8 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
 
         out_num = elem->out_num;
         out_sg = elem->out_sg;
-        if (out_num < 1) {
-            virtio_error(vdev, "virtio-net header not in first element");
+        if (out_num < 1 || out_num > VIRTIO_NET_MAX_CHAIN_SIZE) {
+            virtio_error(vdev, "no packet or too large vring desc chain");
             virtqueue_detach_element(q->tx_vq, elem, 0);
             g_free(elem);
             return -EINVAL;
@@ -1491,18 +1531,27 @@ static void virtio_net_tx_bh(void *opaque)
 static void virtio_net_add_queue(VirtIONet *n, int index)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(n);
+    /*
+     * If the user-specified tx queue size is less than IOV_MAX (e.g. 512),
+     * it is safe to use the specified queue size here. Otherwise, use the
+     * default queue size here, and change it when the guest confirms that
+     * it supports the VIRTIO_NET_F_MAX_CHAIN_SIZE feature.
+     */
+    uint16_t tx_queue_size = n->net_conf.tx_queue_size < IOV_MAX ?
+                             n->net_conf.tx_queue_size :
+                             VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
 
     n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
                                            virtio_net_handle_rx);
     if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
         n->vqs[index].tx_vq =
-            virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
+            virtio_add_queue(vdev, tx_queue_size, virtio_net_handle_tx_timer);
         n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                               virtio_net_tx_timer,
                                               &n->vqs[index]);
     } else {
         n->vqs[index].tx_vq =
-            virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
+            virtio_add_queue(vdev, tx_queue_size, virtio_net_handle_tx_bh);
         n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
     }
 
@@ -1857,6 +1906,7 @@ static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
 {
     int i, config_size = 0;
     virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
+    virtio_add_feature(&host_features, VIRTIO_NET_F_MAX_CHAIN_SIZE);
 
     for (i = 0; feature_sizes[i].flags != 0; i++) {
         if (host_features & feature_sizes[i].flags) {
@@ -1910,6 +1960,17 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
         return;
     }
 
+    if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
+        n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
+        (n->net_conf.tx_queue_size & (n->net_conf.tx_queue_size - 1))) {
+        error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
+                   "must be a power of 2 between %d and %d.",
+                   n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
+                   VIRTQUEUE_MAX_SIZE);
+        virtio_cleanup(vdev);
+        return;
+    }
+
     n->max_queues = MAX(n->nic_conf.peers.queues, 1);
     if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
         error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
@@ -2089,6 +2150,8 @@ static Property virtio_net_properties[] = {
     DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
     DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
                        VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
+    DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
+                       VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
     DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
     DEFINE_PROP_END_OF_LIST(),
 };
diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
index 1eec9a2..fd944ba 100644
--- a/include/hw/virtio/virtio-net.h
+++ b/include/hw/virtio/virtio-net.h
@@ -36,6 +36,7 @@ typedef struct virtio_net_conf
     int32_t txburst;
     char *tx;
     uint16_t rx_queue_size;
+    uint16_t tx_queue_size;
     uint16_t mtu;
 } virtio_net_conf;
 
diff --git a/include/standard-headers/linux/virtio_net.h b/include/standard-headers/linux/virtio_net.h
index 30ff249..0bc1c52 100644
--- a/include/standard-headers/linux/virtio_net.h
+++ b/include/standard-headers/linux/virtio_net.h
@@ -56,6 +56,7 @@
 #define VIRTIO_NET_F_MQ	22	/* Device supports Receive Flow
 					 * Steering */
 #define VIRTIO_NET_F_CTRL_MAC_ADDR 23	/* Set MAC address */
+#define VIRTIO_NET_F_MAX_CHAIN_SIZE 25	/* Guest chains desc within a limit */
 
 #ifndef VIRTIO_NET_NO_LEGACY
 #define VIRTIO_NET_F_GSO	6	/* Host handles pkts w/ any GSO type */
@@ -76,6 +77,8 @@ struct virtio_net_config {
 	uint16_t max_virtqueue_pairs;
 	/* Default maximum transmit unit advice */
 	uint16_t mtu;
+        /* Maximum number of vring descriptors that can be chained */
+	uint16_t max_chain_size;
 } QEMU_PACKED;
 
 /*
-- 
2.7.4


Re: [Qemu-devel] [PATCH RFC] virtio-net: enable configurable tx queue size
Posted by Michael S. Tsirkin 6 years, 11 months ago
On Fri, May 19, 2017 at 10:32:19AM +0800, Wei Wang wrote:
> This patch enables the virtio-net tx queue size to be configurable
> between 256 (the default queue size) and 1024 by the user. The queue
> size specified by the user should be power of 2.
> 
> Setting the tx queue size to be 1024 requires the guest driver to
> support the VIRTIO_NET_F_MAX_CHAIN_SIZE feature.

This should be a generic ring feature, not one specific to virtio net.

> This feature restricts
> the guest driver from chaining 1024 vring descriptors, which may cause
> the device side implementation to send more than 1024 iov to writev.
> Currently, the max chain size allowed for the guest driver is set to
> 1023.
> 
> In the case that the tx queue size is set to 1024 and the
> VIRTIO_NET_F_MAX_CHAIN_SIZE feature is not supported by the guest driver,
> the default tx queue size (256) will be used.
> 
> Signed-off-by: Wei Wang <wei.w.wang@intel.com>
> ---
>  hw/net/virtio-net.c                         | 71 +++++++++++++++++++++++++++--
>  include/hw/virtio/virtio-net.h              |  1 +
>  include/standard-headers/linux/virtio_net.h |  3 ++
>  3 files changed, 71 insertions(+), 4 deletions(-)
> 
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index 7d091c9..ef38cb1 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -33,8 +33,12 @@
>  
>  /* previously fixed value */
>  #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
> +#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256
>  /* for now, only allow larger queues; with virtio-1, guest can downsize */
>  #define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
> +#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
> +
> +#define VIRTIO_NET_MAX_CHAIN_SIZE 1023
>  
>  /*
>   * Calculate the number of bytes up to and including the given 'field' of
> @@ -57,6 +61,8 @@ static VirtIOFeature feature_sizes[] = {
>       .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
>      {.flags = 1 << VIRTIO_NET_F_MTU,
>       .end = endof(struct virtio_net_config, mtu)},
> +    {.flags = 1 << VIRTIO_NET_F_MAX_CHAIN_SIZE,
> +     .end = endof(struct virtio_net_config, max_chain_size)},
>      {}
>  };
>  
> @@ -84,6 +90,7 @@ static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
>      virtio_stw_p(vdev, &netcfg.status, n->status);
>      virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
>      virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
> +    virtio_stw_p(vdev, &netcfg.max_chain_size, VIRTIO_NET_MAX_CHAIN_SIZE);
>      memcpy(netcfg.mac, n->mac, ETH_ALEN);
>      memcpy(config, &netcfg, n->config_size);
>  }
> @@ -568,6 +575,7 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
>      features |= n->host_features;
>  
>      virtio_add_feature(&features, VIRTIO_NET_F_MAC);
> +    virtio_add_feature(&features, VIRTIO_NET_F_MAX_CHAIN_SIZE);
>  
>      if (!peer_has_vnet_hdr(n)) {
>          virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
> @@ -603,6 +611,7 @@ static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
>      virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
>      virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
>      virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);
> +    virtio_add_feature(&features, VIRTIO_NET_F_MAX_CHAIN_SIZE);
>  
>      return features;
>  }
> @@ -635,6 +644,27 @@ static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
>      return virtio_net_guest_offloads_by_features(vdev->guest_features);
>  }
>  
> +static bool is_tx(int queue_index)
> +{
> +    return queue_index % 2 == 1;
> +}
> +
> +static void virtio_net_change_tx_queue_size(VirtIONet *n)
> +{
> +    VirtIODevice *vdev = VIRTIO_DEVICE(n);
> +    int i, num_queues = virtio_get_num_queues(vdev);
> +
> +    if (n->net_conf.tx_queue_size == VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE) {
> +        return;
> +    }
> +
> +    for (i = 0; i < num_queues; i++) {
> +        if (is_tx(i)) {
> +            virtio_queue_set_num(vdev, i, n->net_conf.tx_queue_size);
> +        }
> +    }
> +}
> +
>  static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
>  {
>      VirtIONet *n = VIRTIO_NET(vdev);
> @@ -649,6 +679,16 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
>                                 virtio_has_feature(features,
>                                                    VIRTIO_F_VERSION_1));
>  
> +    /*
> +     * Change the tx queue size if the guest supports
> +     * VIRTIO_NET_F_MAX_CHAIN_SIZE. This will restrict the guest from sending
> +     * a very large chain of vring descriptors (e.g. 1024), which may cause
> +     * 1025 iov to be written to writev.
> +     */
> +    if (virtio_has_feature(features, VIRTIO_NET_F_MAX_CHAIN_SIZE)) {
> +        virtio_net_change_tx_queue_size(n);
> +    }
> +
>      if (n->has_vnet_hdr) {
>          n->curr_guest_offloads =
>              virtio_net_guest_offloads_by_features(features);
> @@ -1297,8 +1337,8 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
>  
>          out_num = elem->out_num;
>          out_sg = elem->out_sg;
> -        if (out_num < 1) {
> -            virtio_error(vdev, "virtio-net header not in first element");
> +        if (out_num < 1 || out_num > VIRTIO_NET_F_MAX_CHAIN_SIZE) {
> +            virtio_error(vdev, "no packet or too large vring desc chain");
>              virtqueue_detach_element(q->tx_vq, elem, 0);
>              g_free(elem);
>              return -EINVAL;
> @@ -1491,18 +1531,27 @@ static void virtio_net_tx_bh(void *opaque)
>  static void virtio_net_add_queue(VirtIONet *n, int index)
>  {
>      VirtIODevice *vdev = VIRTIO_DEVICE(n);
> +    /*
> +     * If the user specified tx queue size is less than IOV_MAX (e.g. 512),
> +     * it is safe to use the specified queue size here. Otherwise, use the
> +     * default queue size here, and change it when the guest confirms that
> +     * it supports the VIRTIO_NET_F_MAX_CHAIN_SIZE feature.
> +     */
> +    uint16_t tx_queue_size = n->net_conf.tx_queue_size < IOV_MAX ?
> +                                       n->net_conf.tx_queue_size :
> +                                 VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
>  
>      n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
>                                             virtio_net_handle_rx);
>      if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
>          n->vqs[index].tx_vq =
> -            virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
> +            virtio_add_queue(vdev, tx_queue_size, virtio_net_handle_tx_timer);
>          n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
>                                                virtio_net_tx_timer,
>                                                &n->vqs[index]);
>      } else {
>          n->vqs[index].tx_vq =
> -            virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
> +            virtio_add_queue(vdev, tx_queue_size, virtio_net_handle_tx_bh);
>          n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
>      }
>  
> @@ -1857,6 +1906,7 @@ static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
>  {
>      int i, config_size = 0;
>      virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
> +    virtio_add_feature(&host_features, VIRTIO_NET_F_MAX_CHAIN_SIZE);


You can't really change the default features - this will
break migration to/from old QEMU versions.
You need a flag for this and fix it up with the compat machinery.
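
Roughly something like this (untested sketch; the property name and the
compat placement are just examples): expose the feature bit as a device
property and turn it off for old machine types:

    /* hw/net/virtio-net.c: make the new feature a property bit */
    DEFINE_PROP_BIT("max_chain_size", VirtIONet, host_features,
                    VIRTIO_NET_F_MAX_CHAIN_SIZE, true),

    /* include/hw/compat.h: entry added to the HW_COMPAT_2_9 macro so
     * that pre-2.10 machine types keep the old feature set */
    {
        .driver   = "virtio-net-pci",
        .property = "max_chain_size",
        .value    = "off",
    },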

>  
>      for (i = 0; feature_sizes[i].flags != 0; i++) {
>          if (host_features & feature_sizes[i].flags) {
> @@ -1910,6 +1960,17 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>          return;
>      }
>  
> +    if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
> +        n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
> +        (n->net_conf.tx_queue_size & (n->net_conf.tx_queue_size - 1))) {
> +        error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
> +                   "must be a power of 2 between %d and %d.",
> +                   n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
> +                   VIRTQUEUE_MAX_SIZE);
> +        virtio_cleanup(vdev);
> +        return;
> +    }
> +
>      n->max_queues = MAX(n->nic_conf.peers.queues, 1);
>      if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
>          error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
> @@ -2089,6 +2150,8 @@ static Property virtio_net_properties[] = {
>      DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
>      DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
>                         VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
> +    DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
> +                       VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
>      DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
>      DEFINE_PROP_END_OF_LIST(),
>  };
> diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
> index 1eec9a2..fd944ba 100644
> --- a/include/hw/virtio/virtio-net.h
> +++ b/include/hw/virtio/virtio-net.h
> @@ -36,6 +36,7 @@ typedef struct virtio_net_conf
>      int32_t txburst;
>      char *tx;
>      uint16_t rx_queue_size;
> +    uint16_t tx_queue_size;
>      uint16_t mtu;
>  } virtio_net_conf;
>  
> diff --git a/include/standard-headers/linux/virtio_net.h b/include/standard-headers/linux/virtio_net.h
> index 30ff249..0bc1c52 100644
> --- a/include/standard-headers/linux/virtio_net.h
> +++ b/include/standard-headers/linux/virtio_net.h
> @@ -56,6 +56,7 @@
>  #define VIRTIO_NET_F_MQ	22	/* Device supports Receive Flow
>  					 * Steering */
>  #define VIRTIO_NET_F_CTRL_MAC_ADDR 23	/* Set MAC address */
> +#define VIRTIO_NET_F_MAX_CHAIN_SIZE 25	/* Guest chains desc within a limit */
>  
>  #ifndef VIRTIO_NET_NO_LEGACY
>  #define VIRTIO_NET_F_GSO	6	/* Host handles pkts w/ any GSO type */
> @@ -76,6 +77,8 @@ struct virtio_net_config {
>  	uint16_t max_virtqueue_pairs;
>  	/* Default maximum transmit unit advice */
>  	uint16_t mtu;
> +        /* Maximum number of vring descriptors that can be chained */
> +	uint16_t max_chain_size;
>  } QEMU_PACKED;
>  
>  /*
> -- 
> 2.7.4

Re: [Qemu-devel] [virtio-dev] Re: [PATCH RFC] virtio-net: enable configurable tx queue size
Posted by Wei Wang 6 years, 11 months ago
On 05/20/2017 04:42 AM, Michael S. Tsirkin wrote:
> On Fri, May 19, 2017 at 10:32:19AM +0800, Wei Wang wrote:
>> This patch enables the virtio-net tx queue size to be configurable
>> between 256 (the default queue size) and 1024 by the user. The queue
>> size specified by the user should be power of 2.
>>
>> Setting the tx queue size to be 1024 requires the guest driver to
>> support the VIRTIO_NET_F_MAX_CHAIN_SIZE feature.
> This should be a generic ring feature, not one specific to virtio net.
OK. How about making two more changes below:

1) Make the default tx queue size 1024 (instead of 256).
We can reduce the size back to 256 if the MAX_CHAIN_SIZE feature
is not supported by the guest (a rough sketch of this fallback is below).
In this way, people who apply the QEMU patch can directly use the
largest queue size (1024) without adding a command line option.

2) The vhost backend does not use writev, so I think when the vhost
backend is used, a 1024 queue size should not depend on the
MAX_CHAIN_SIZE feature.
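
For 1), the device side could roughly do the following when the guest
does not ack the feature (untested sketch, reusing the helpers from this
RFC):

    /* Sketch: shrink the tx queues back to the old default (256) when
     * the guest did not ack VIRTIO_NET_F_MAX_CHAIN_SIZE. */
    static void virtio_net_shrink_tx_queue_size(VirtIONet *n)
    {
        VirtIODevice *vdev = VIRTIO_DEVICE(n);
        int i, num_queues = virtio_get_num_queues(vdev);

        for (i = 0; i < num_queues; i++) {
            if (is_tx(i)) {
                virtio_queue_set_num(vdev, i,
                                     VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE);
            }
        }
    }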


Best,
Wei

Re: [Qemu-devel] [virtio-dev] Re: [PATCH RFC] virtio-net: enable configurable tx queue size
Posted by Jason Wang 6 years, 11 months ago

On 2017-05-22 19:52, Wei Wang wrote:
> On 05/20/2017 04:42 AM, Michael S. Tsirkin wrote:
>> On Fri, May 19, 2017 at 10:32:19AM +0800, Wei Wang wrote:
>>> This patch enables the virtio-net tx queue size to be configurable
>>> between 256 (the default queue size) and 1024 by the user. The queue
>>> size specified by the user should be power of 2.
>>>
>>> Setting the tx queue size to be 1024 requires the guest driver to
>>> support the VIRTIO_NET_F_MAX_CHAIN_SIZE feature.
>> This should be a generic ring feature, not one specific to virtio net.
> OK. How about making two more changes below:
>
> 1) make the default tx queue size = 1024 (instead of 256).

As has been pointed out, you need compat handling for the default value too in this case.

> We can reduce the size (to 256) if the MAX_CHAIN_SIZE feature
> is not supported by the guest.
> In this way, people who apply the QEMU patch can directly use the
> largest queue size(1024) without adding the booting command line.
>
> 2) The vhost backend does not use writev, so I think when the vhost
> backed is used, using 1024 queue size should not depend on the
> MAX_CHAIN_SIZE feature.

But do we need to consider an even larger queue size now?

Btw, I think it's better to draft a spec patch.

Thanks

>
>
> Best,
> Wei


Re: [Qemu-devel] [virtio-dev] Re: [PATCH RFC] virtio-net: enable configurable tx queue size
Posted by Wei Wang 6 years, 11 months ago
On 05/23/2017 10:04 AM, Jason Wang wrote:
>
>
> On 2017年05月22日 19:52, Wei Wang wrote:
>> On 05/20/2017 04:42 AM, Michael S. Tsirkin wrote:
>>> On Fri, May 19, 2017 at 10:32:19AM +0800, Wei Wang wrote:
>>>> This patch enables the virtio-net tx queue size to be configurable
>>>> between 256 (the default queue size) and 1024 by the user. The queue
>>>> size specified by the user should be power of 2.
>>>>
>>>> Setting the tx queue size to be 1024 requires the guest driver to
>>>> support the VIRTIO_NET_F_MAX_CHAIN_SIZE feature.
>>> This should be a generic ring feature, not one specific to virtio net.
>> OK. How about making two more changes below:
>>
>> 1) make the default tx queue size = 1024 (instead of 256).
>
> As has been pointed out, you need compat the default value too in this 
> case.

The driver gets the size info from the device, so would it cause any
compatibility issue if we change the default ring size to 1024 in the
vhost case? In other words, does any software (i.e. any virtio-net
driver) function based on the assumption of a 256 queue size?

For live migration, the queue size that is being used will also be
transferred to the destination.

>
>> We can reduce the size (to 256) if the MAX_CHAIN_SIZE feature
>> is not supported by the guest.
>> In this way, people who apply the QEMU patch can directly use the
>> largest queue size(1024) without adding the booting command line.
>>
>> 2) The vhost backend does not use writev, so I think when the vhost
>> backed is used, using 1024 queue size should not depend on the
>> MAX_CHAIN_SIZE feature.
>
> But do we need to consider even larger queue size now?

We need Michael's feedback on this. Meanwhile, I'll get the next version
of the code ready and check whether a larger queue size would cause any
corner cases.

>
> Btw, I think it's better to draft a spec patch.
>

I think it should be easier to draft the spec patch when the code is
almost done.


Best,
Wei





Re: [Qemu-devel] [virtio-dev] Re: [PATCH RFC] virtio-net: enable configurable tx queue size
Posted by Jason Wang 6 years, 11 months ago

On 2017-05-23 13:15, Wei Wang wrote:
> On 05/23/2017 10:04 AM, Jason Wang wrote:
>>
>>
>> On 2017年05月22日 19:52, Wei Wang wrote:
>>> On 05/20/2017 04:42 AM, Michael S. Tsirkin wrote:
>>>> On Fri, May 19, 2017 at 10:32:19AM +0800, Wei Wang wrote:
>>>>> This patch enables the virtio-net tx queue size to be configurable
>>>>> between 256 (the default queue size) and 1024 by the user. The queue
>>>>> size specified by the user should be power of 2.
>>>>>
>>>>> Setting the tx queue size to be 1024 requires the guest driver to
>>>>> support the VIRTIO_NET_F_MAX_CHAIN_SIZE feature.
>>>> This should be a generic ring feature, not one specific to virtio net.
>>> OK. How about making two more changes below:
>>>
>>> 1) make the default tx queue size = 1024 (instead of 256).
>>
>> As has been pointed out, you need compat the default value too in 
>> this case.
>
> The driver gets the size info from the device, then would it cause any
> compatibility issue if we change the default ring size to 1024 in the
> vhost case? In other words, is there any software (i.e. any virtio-net 
> driver)
> functions based on the assumption of 256 queue size?

I don't know. But is it safe if, e.g., we migrate from 1024 to an older
QEMU with 256 as its queue size?

>
> For live migration, the queue size that is being used will also be 
> transferred
> to the destination.
>
>>
>>> We can reduce the size (to 256) if the MAX_CHAIN_SIZE feature
>>> is not supported by the guest.
>>> In this way, people who apply the QEMU patch can directly use the
>>> largest queue size(1024) without adding the booting command line.
>>>
>>> 2) The vhost backend does not use writev, so I think when the vhost
>>> backed is used, using 1024 queue size should not depend on the
>>> MAX_CHAIN_SIZE feature.
>>
>> But do we need to consider even larger queue size now?
>
> Need Michael's feedback on this. Meanwhile, I'll get the next version of
> code ready and check if larger queue size would cause any corner case.

The problem is, do we really need a new config field for this? Or is just
introducing a flag which means "I support up to 1024 sgs" sufficient?

>
>>
>> Btw, I think it's better to draft a spec patch.
>>
>
> I think it should be easier to draft the spec patch when the code is
> almost done.

Maybe both.

Thanks

>
>
> Best,
> Wei
>
>
>
>


Re: [Qemu-devel] [virtio-dev] Re: [PATCH RFC] virtio-net: enable configurable tx queue size
Posted by Wei Wang 6 years, 11 months ago
On 05/23/2017 02:24 PM, Jason Wang wrote:
>
>
> On 2017年05月23日 13:15, Wei Wang wrote:
>> On 05/23/2017 10:04 AM, Jason Wang wrote:
>>>
>>>
>>> On 2017年05月22日 19:52, Wei Wang wrote:
>>>> On 05/20/2017 04:42 AM, Michael S. Tsirkin wrote:
>>>>> On Fri, May 19, 2017 at 10:32:19AM +0800, Wei Wang wrote:
>>>>>> This patch enables the virtio-net tx queue size to be configurable
>>>>>> between 256 (the default queue size) and 1024 by the user. The queue
>>>>>> size specified by the user should be power of 2.
>>>>>>
>>>>>> Setting the tx queue size to be 1024 requires the guest driver to
>>>>>> support the VIRTIO_NET_F_MAX_CHAIN_SIZE feature.
>>>>> This should be a generic ring feature, not one specific to virtio 
>>>>> net.
>>>> OK. How about making two more changes below:
>>>>
>>>> 1) make the default tx queue size = 1024 (instead of 256).
>>>
>>> As has been pointed out, you need compat the default value too in 
>>> this case.
>>
>> The driver gets the size info from the device, then would it cause any
>> compatibility issue if we change the default ring size to 1024 in the
>> vhost case? In other words, is there any software (i.e. any 
>> virtio-net driver)
>> functions based on the assumption of 256 queue size?
>
> I don't know. But is it safe e.g we migrate from 1024 to an older qemu 
> with 256 as its queue size?

Yes, I think it is safe, because the default queue size is only used when
the device is being set up (e.g. feature negotiation).
During migration (the device has already been running), the destination
machine will load the device state based on the queue size that is being
used (i.e. vring.num).
The default value is not used any more after the setup phase.

>
>>
>> For live migration, the queue size that is being used will also be 
>> transferred
>> to the destination.
>>
>>>
>>>> We can reduce the size (to 256) if the MAX_CHAIN_SIZE feature
>>>> is not supported by the guest.
>>>> In this way, people who apply the QEMU patch can directly use the
>>>> largest queue size(1024) without adding the booting command line.
>>>>
>>>> 2) The vhost backend does not use writev, so I think when the vhost
>>>> backed is used, using 1024 queue size should not depend on the
>>>> MAX_CHAIN_SIZE feature.
>>>
>>> But do we need to consider even larger queue size now?
>>
>> Need Michael's feedback on this. Meanwhile, I'll get the next version of
>> code ready and check if larger queue size would cause any corner case.
>
> The problem is, do we really need a new config filed for this? Or just 
> introduce a flag which means "I support up to 1024 sgs" is sufficient?
>

For now, it also works without the new config field, max_chain_size,
but I would prefer to keep the new config field, because:

Without it, the driver will work on an assumed value, 1023.
If, in the future, QEMU needs to change it to 1022, then how can the
new QEMU tell the old driver, which supports the MAX_CHAIN_SIZE
feature but works with the old hardcoded value 1023?
So, I think using that config value would be good for future updates.

Best,
Wei

Re: [Qemu-devel] [virtio-dev] Re: [PATCH RFC] virtio-net: enable configurable tx queue size
Posted by Jason Wang 6 years, 11 months ago

On 2017-05-23 18:36, Wei Wang wrote:
> On 05/23/2017 02:24 PM, Jason Wang wrote:
>>
>>
>> On 2017年05月23日 13:15, Wei Wang wrote:
>>> On 05/23/2017 10:04 AM, Jason Wang wrote:
>>>>
>>>>
>>>> On 2017年05月22日 19:52, Wei Wang wrote:
>>>>> On 05/20/2017 04:42 AM, Michael S. Tsirkin wrote:
>>>>>> On Fri, May 19, 2017 at 10:32:19AM +0800, Wei Wang wrote:
>>>>>>> This patch enables the virtio-net tx queue size to be configurable
>>>>>>> between 256 (the default queue size) and 1024 by the user. The 
>>>>>>> queue
>>>>>>> size specified by the user should be power of 2.
>>>>>>>
>>>>>>> Setting the tx queue size to be 1024 requires the guest driver to
>>>>>>> support the VIRTIO_NET_F_MAX_CHAIN_SIZE feature.
>>>>>> This should be a generic ring feature, not one specific to virtio 
>>>>>> net.
>>>>> OK. How about making two more changes below:
>>>>>
>>>>> 1) make the default tx queue size = 1024 (instead of 256).
>>>>
>>>> As has been pointed out, you need compat the default value too in 
>>>> this case.
>>>
>>> The driver gets the size info from the device, then would it cause any
>>> compatibility issue if we change the default ring size to 1024 in the
>>> vhost case? In other words, is there any software (i.e. any 
>>> virtio-net driver)
>>> functions based on the assumption of 256 queue size?
>>
>> I don't know. But is it safe e.g we migrate from 1024 to an older 
>> qemu with 256 as its queue size?
>
> Yes, I think it is safe, because the default queue size is used when 
> the device is being
> set up (e.g. feature negotiation).
> During migration (the device has already been running), the 
> destination machine will
> load the device state based on the the queue size that is being used 
> (i.e. vring.num).
> The default value is not used any more after the setup phase.

I haven't checked all cases, but there are two obvious things:

- After migration and after a reset, it will go back to 256 on the dst.
- The ABI is changed, e.g. -M pc-q35-2.10 returns 1024 on 2.11.

>
>>
>>>
>>> For live migration, the queue size that is being used will also be 
>>> transferred
>>> to the destination.
>>>
>>>>
>>>>> We can reduce the size (to 256) if the MAX_CHAIN_SIZE feature
>>>>> is not supported by the guest.
>>>>> In this way, people who apply the QEMU patch can directly use the
>>>>> largest queue size(1024) without adding the booting command line.
>>>>>
>>>>> 2) The vhost backend does not use writev, so I think when the vhost
>>>>> backed is used, using 1024 queue size should not depend on the
>>>>> MAX_CHAIN_SIZE feature.
>>>>
>>>> But do we need to consider even larger queue size now?
>>>
>>> Need Michael's feedback on this. Meanwhile, I'll get the next 
>>> version of
>>> code ready and check if larger queue size would cause any corner case.
>>
>> The problem is, do we really need a new config filed for this? Or 
>> just introduce a flag which means "I support up to 1024 sgs" is 
>> sufficient?
>>
>
> For now, it also works without the new config field, max_chain_size,
> But I would prefer to keep the new config field, because:
>
> Without that, the driver will work on  an assumed value, 1023.

That is the current situation, and it's too late to change legacy drivers.

> If the future, QEMU needs to change it to 1022, then how can the
> new QEMU tell the old driver, which supports the MAX_CHAIN_SIZE
> feature but works with the old hardcode value 1023?

Can a config field help in this case? The problem is similar to
ANY_HEADER_SG; the only thing we can do is clarify the limitation for
new drivers.

Thanks


> So, I think using that config value would be good for future updates.
>
> Best,
> Wei
>
>


Re: [Qemu-devel] [virtio-dev] Re: [PATCH RFC] virtio-net: enable configurable tx queue size
Posted by Wei Wang 6 years, 11 months ago
On 05/24/2017 11:19 AM, Jason Wang wrote:
>
>
> On 2017年05月23日 18:36, Wei Wang wrote:
>> On 05/23/2017 02:24 PM, Jason Wang wrote:
>>>
>>>
>>> On 2017年05月23日 13:15, Wei Wang wrote:
>>>> On 05/23/2017 10:04 AM, Jason Wang wrote:
>>>>>
>>>>>
>>>>> On 2017年05月22日 19:52, Wei Wang wrote:
>>>>>> On 05/20/2017 04:42 AM, Michael S. Tsirkin wrote:
>>>>>>> On Fri, May 19, 2017 at 10:32:19AM +0800, Wei Wang wrote:
>>>>>>>> This patch enables the virtio-net tx queue size to be configurable
>>>>>>>> between 256 (the default queue size) and 1024 by the user. The 
>>>>>>>> queue
>>>>>>>> size specified by the user should be power of 2.
>>>>>>>>
>>>>>>>> Setting the tx queue size to be 1024 requires the guest driver to
>>>>>>>> support the VIRTIO_NET_F_MAX_CHAIN_SIZE feature.
>>>>>>> This should be a generic ring feature, not one specific to 
>>>>>>> virtio net.
>>>>>> OK. How about making two more changes below:
>>>>>>
>>>>>> 1) make the default tx queue size = 1024 (instead of 256).
>>>>>
>>>>> As has been pointed out, you need compat the default value too in 
>>>>> this case.
>>>>
>>>> The driver gets the size info from the device, then would it cause any
>>>> compatibility issue if we change the default ring size to 1024 in the
>>>> vhost case? In other words, is there any software (i.e. any 
>>>> virtio-net driver)
>>>> functions based on the assumption of 256 queue size?
>>>
>>> I don't know. But is it safe e.g we migrate from 1024 to an older 
>>> qemu with 256 as its queue size?
>>
>> Yes, I think it is safe, because the default queue size is used when 
>> the device is being
>> set up (e.g. feature negotiation).
>> During migration (the device has already been running), the 
>> destination machine will
>> load the device state based on the the queue size that is being used 
>> (i.e. vring.num).
>> The default value is not used any more after the setup phase.
>
> I haven't checked all cases, but there's two obvious things:
>
> - After migration and after a reset, it will go back to 256 on dst.

Please let me clarify what we want first: when QEMU boots and it realizes
the virtio-net device, if tx_queue_size is not given on the command line,
we want to use 1024 as the queue size, that is, virtio_add_queue(,1024,),
which sets vring.num=1024 and vring.num_default=1024.

When migration happens, the vring.num variable (which has been 1024) is
sent to the destination machine, where virtio_load() will assign the
destination side vring.num to that value (1024). So, vring.num=1024
continues to work on the destination machine with the old QEMU. I don't
see an issue here.

If a reset happens, I think the device and driver will re-do the
initialization steps. So, if they are running with the old QEMU, then it
uses the old QEMU realize() function to do virtio_add_queue(,256,), and
the driver will re-do the probe() steps and take vring.num=256; then
everything works fine.


> - ABI is changed, e.g -M pc-q35-2.10 returns 1024 on 2.11
>
I didn't get this. Could you please explain more? Which ABI would be
changed, and why does it affect q35?


>>
>>>
>>>>
>>>> For live migration, the queue size that is being used will also be 
>>>> transferred
>>>> to the destination.
>>>>
>>>>>
>>>>>> We can reduce the size (to 256) if the MAX_CHAIN_SIZE feature
>>>>>> is not supported by the guest.
>>>>>> In this way, people who apply the QEMU patch can directly use the
>>>>>> largest queue size(1024) without adding the booting command line.
>>>>>>
>>>>>> 2) The vhost backend does not use writev, so I think when the vhost
>>>>>> backed is used, using 1024 queue size should not depend on the
>>>>>> MAX_CHAIN_SIZE feature.
>>>>>
>>>>> But do we need to consider even larger queue size now?
>>>>
>>>> Need Michael's feedback on this. Meanwhile, I'll get the next 
>>>> version of
>>>> code ready and check if larger queue size would cause any corner case.
>>>
>>> The problem is, do we really need a new config filed for this? Or 
>>> just introduce a flag which means "I support up to 1024 sgs" is 
>>> sufficient?
>>>
>>
>> For now, it also works without the new config field, max_chain_size,
>> But I would prefer to keep the new config field, because:
>>
>> Without that, the driver will work on  an assumed value, 1023.
>
> This is the fact, and it's too late to change legacy driver.
>
>> If the future, QEMU needs to change it to 1022, then how can the
>> new QEMU tell the old driver, which supports the MAX_CHAIN_SIZE
>> feature but works with the old hardcode value 1023?
>
> Can config filed help in this case? The problem is similar to 
> ANY_HEADER_SG, the only thing we can is to clarify the limitation for 
> new drivers.
>

I think it helps, because the driver will do

    virtio_cread_feature(vdev, VIRTIO_NET_F_MAX_CHAIN_SIZE,
                         struct virtio_net_config, max_chain_size,
                         &chain_size);

to get max_chain_size from the device. So when a new QEMU has a new value
of max_chain_size, an old driver will get the new value.

Best,
Wei






Re: [Qemu-devel] [virtio-dev] Re: [PATCH RFC] virtio-net: enable configurable tx queue size
Posted by Jason Wang 6 years, 11 months ago

On 2017-05-24 16:18, Wei Wang wrote:
> On 05/24/2017 11:19 AM, Jason Wang wrote:
>>
>>
>> On 2017年05月23日 18:36, Wei Wang wrote:
>>> On 05/23/2017 02:24 PM, Jason Wang wrote:
>>>>
>>>>
>>>> On 2017年05月23日 13:15, Wei Wang wrote:
>>>>> On 05/23/2017 10:04 AM, Jason Wang wrote:
>>>>>>
>>>>>>
>>>>>> On 2017年05月22日 19:52, Wei Wang wrote:
>>>>>>> On 05/20/2017 04:42 AM, Michael S. Tsirkin wrote:
>>>>>>>> On Fri, May 19, 2017 at 10:32:19AM +0800, Wei Wang wrote:
>>>>>>>>> This patch enables the virtio-net tx queue size to be 
>>>>>>>>> configurable
>>>>>>>>> between 256 (the default queue size) and 1024 by the user. The 
>>>>>>>>> queue
>>>>>>>>> size specified by the user should be power of 2.
>>>>>>>>>
>>>>>>>>> Setting the tx queue size to be 1024 requires the guest driver to
>>>>>>>>> support the VIRTIO_NET_F_MAX_CHAIN_SIZE feature.
>>>>>>>> This should be a generic ring feature, not one specific to 
>>>>>>>> virtio net.
>>>>>>> OK. How about making two more changes below:
>>>>>>>
>>>>>>> 1) make the default tx queue size = 1024 (instead of 256).
>>>>>>
>>>>>> As has been pointed out, you need compat the default value too in 
>>>>>> this case.
>>>>>
>>>>> The driver gets the size info from the device, then would it cause 
>>>>> any
>>>>> compatibility issue if we change the default ring size to 1024 in the
>>>>> vhost case? In other words, is there any software (i.e. any 
>>>>> virtio-net driver)
>>>>> functions based on the assumption of 256 queue size?
>>>>
>>>> I don't know. But is it safe e.g we migrate from 1024 to an older 
>>>> qemu with 256 as its queue size?
>>>
>>> Yes, I think it is safe, because the default queue size is used when 
>>> the device is being
>>> set up (e.g. feature negotiation).
>>> During migration (the device has already been running), the 
>>> destination machine will
>>> load the device state based on the the queue size that is being used 
>>> (i.e. vring.num).
>>> The default value is not used any more after the setup phase.
>>
>> I haven't checked all cases, but there's two obvious things:
>>
>> - After migration and after a reset, it will go back to 256 on dst.
>
> Please let me clarify what we want first: when QEMU boots and it 
> realizes the
> virtio-net device, if the tx_queue_size is not given by the command 
> line, we want
> to use 1024 as the queue size, that is, virtio_add_queue(,1024,), 
> which sets
> vring.num=1024 and vring.num_default=1024.
>
> When migration happens, the vring.num variable (has been 1024) is sent to
> the destination machine, where virtio_load() will assign the 
> destination side vring.num
> to that value (1024). So, vring.num=1024 continues to work on the 
> destination machine
> with old QEMU. I don't see an issue here.
>
> If reset happens, I think the device and driver will re-do the 
> initialization steps. So, if they are
> with the old QEMU, then they use the old qemu realize() function to do 
> virtio_add_queue(,256,),
> and the driver will re-do the probe() steps and take vring.num=256, 
> then everything works fine.

It probably works fine, but the size is 256 forever after migration.
Instead of using 1024, which works just once and may be risky, isn't it
better to just use 256 for old machine types?

>
>
>> - ABI is changed, e.g -M pc-q35-2.10 returns 1024 on 2.11
>>
> Didn't get this. Could you please explain more? which ABI would be 
> changed, and why it affects q35?
>

Nothing specific to q35, just pointing out the 2.10 machine type.

E.g. on 2.10, with -M pc-q35-2.10, vring.num is 256; on 2.11, with
-M pc-q35-2.10, vring.num is 1024.

>
>>>
>>>>
>>>>>
>>>>> For live migration, the queue size that is being used will also be 
>>>>> transferred
>>>>> to the destination.
>>>>>
>>>>>>
>>>>>>> We can reduce the size (to 256) if the MAX_CHAIN_SIZE feature
>>>>>>> is not supported by the guest.
>>>>>>> In this way, people who apply the QEMU patch can directly use the
>>>>>>> largest queue size(1024) without adding the booting command line.
>>>>>>>
>>>>>>> 2) The vhost backend does not use writev, so I think when the vhost
>>>>>>> backed is used, using 1024 queue size should not depend on the
>>>>>>> MAX_CHAIN_SIZE feature.
>>>>>>
>>>>>> But do we need to consider even larger queue size now?
>>>>>
>>>>> Need Michael's feedback on this. Meanwhile, I'll get the next 
>>>>> version of
>>>>> code ready and check if larger queue size would cause any corner 
>>>>> case.
>>>>
>>>> The problem is, do we really need a new config filed for this? Or 
>>>> just introduce a flag which means "I support up to 1024 sgs" is 
>>>> sufficient?
>>>>
>>>
>>> For now, it also works without the new config field, max_chain_size,
>>> But I would prefer to keep the new config field, because:
>>>
>>> Without that, the driver will work on  an assumed value, 1023.
>>
>> This is the fact, and it's too late to change legacy driver.
>>
>>> If the future, QEMU needs to change it to 1022, then how can the
>>> new QEMU tell the old driver, which supports the MAX_CHAIN_SIZE
>>> feature but works with the old hardcode value 1023?
>>
>> Can config filed help in this case? The problem is similar to 
>> ANY_HEADER_SG, the only thing we can is to clarify the limitation for 
>> new drivers.
>>
>
> I think it helps, because the driver will do
> virtio_cread_feature(vdev, VIRTIO_NET_F_MAX_CHAIN_SIZE,
>                                   struct virtio_net_config, 
> max_chain_size, &chain_size);
> to get the max_chain_size from the device. So when new QEMU has a new 
> value of max_chain_size, old driver
> will get the new value.

I think we're talking about old drivers, so this won't work. It can only 
work with assumptions (implicitly).

Thanks

>
> Best,
> Wei
>
>
>
>
>
>


Re: [Qemu-devel] [virtio-dev] Re: [PATCH RFC] virtio-net: enable configurable tx queue size
Posted by Wei Wang 6 years, 11 months ago
On 05/25/2017 03:49 PM, Jason Wang wrote:
>
>
> On 2017年05月24日 16:18, Wei Wang wrote:
>> On 05/24/2017 11:19 AM, Jason Wang wrote:
>>>
>>>
>>> On 2017年05月23日 18:36, Wei Wang wrote:
>>>> On 05/23/2017 02:24 PM, Jason Wang wrote:
>>>>>
>>>>>
>>>>> On 2017年05月23日 13:15, Wei Wang wrote:
>>>>>> On 05/23/2017 10:04 AM, Jason Wang wrote:
>>>>>>>
>>>>>>>
>>>>>>> On 2017年05月22日 19:52, Wei Wang wrote:
>>>>>>>> On 05/20/2017 04:42 AM, Michael S. Tsirkin wrote:
>>>>>>>>> On Fri, May 19, 2017 at 10:32:19AM +0800, Wei Wang wrote:
>>>>>>>>>> This patch enables the virtio-net tx queue size to be 
>>>>>>>>>> configurable
>>>>>>>>>> between 256 (the default queue size) and 1024 by the user. 
>>>>>>>>>> The queue
>>>>>>>>>> size specified by the user should be power of 2.
>>>>>>>>>>
>>>>>>>>>> Setting the tx queue size to be 1024 requires the guest 
>>>>>>>>>> driver to
>>>>>>>>>> support the VIRTIO_NET_F_MAX_CHAIN_SIZE feature.
>>>>>>>>> This should be a generic ring feature, not one specific to 
>>>>>>>>> virtio net.
>>>>>>>> OK. How about making two more changes below:
>>>>>>>>
>>>>>>>> 1) make the default tx queue size = 1024 (instead of 256).
>>>>>>>
>>>>>>> As has been pointed out, you need compat the default value too 
>>>>>>> in this case.
>>>>>>
>>>>>> The driver gets the size info from the device, then would it 
>>>>>> cause any
>>>>>> compatibility issue if we change the default ring size to 1024 in 
>>>>>> the
>>>>>> vhost case? In other words, is there any software (i.e. any 
>>>>>> virtio-net driver)
>>>>>> functions based on the assumption of 256 queue size?
>>>>>
>>>>> I don't know. But is it safe e.g we migrate from 1024 to an older 
>>>>> qemu with 256 as its queue size?
>>>>
>>>> Yes, I think it is safe, because the default queue size is used 
>>>> when the device is being
>>>> set up (e.g. feature negotiation).
>>>> During migration (the device has already been running), the 
>>>> destination machine will
>>>> load the device state based on the the queue size that is being 
>>>> used (i.e. vring.num).
>>>> The default value is not used any more after the setup phase.
>>>
>>> I haven't checked all cases, but there's two obvious things:
>>>
>>> - After migration and after a reset, it will go back to 256 on dst.
>>
>> Please let me clarify what we want first: when QEMU boots and it 
>> realizes the
>> virtio-net device, if the tx_queue_size is not given by the command 
>> line, we want
>> to use 1024 as the queue size, that is, virtio_add_queue(,1024,), 
>> which sets
>> vring.num=1024 and vring.num_default=1024.
>>
>> When migration happens, the vring.num variable (has been 1024) is 
>> sent to
>> the destination machine, where virtio_load() will assign the 
>> destination side vring.num
>> to that value (1024). So, vring.num=1024 continues to work on the 
>> destination machine
>> with old QEMU. I don't see an issue here.
>>
>> If reset happens, I think the device and driver will re-do the 
>> initialization steps. So, if they are
>> with the old QEMU, then they use the old qemu realize() function to 
>> do virtio_add_queue(,256,),
>> and the driver will re-do the probe() steps and take vring.num=256, 
>> then everything works fine.
>
> Probably works fine but the size is 256 forever after migration. 
> Instead of using 1024 which work just one time and maybe risky, isn't 
> it better to just use 256 for old machine types?
>

If it migrates to the old QEMU, then I think everything should work in
the old QEMU style after reset (not just in our virtio-net case). I think
this should be natural and reasonable.

Why would the change depend on machine types?

>>
>>
>>> - ABI is changed, e.g -M pc-q35-2.10 returns 1024 on 2.11
>>>
>> Didn't get this. Could you please explain more? which ABI would be 
>> changed, and why it affects q35?
>>
>
> Nothing specific to q35, just to point out the machine type of 2.10.
>
> E.g on 2.10, -M pc-q35-2.10, vring.num is 256; On 2.11 -M pc-q35-2.10 
> vring.num is 1024.
>

I think it's not related to the machine type.

Probably we can use the QEMU version to discuss here.
Suppose this change is made in the next version, QEMU 2.10. Then with
QEMU 2.10, when people create a virtio-net device as usual:
-device virtio-net-pci,netdev=net1,mac=52:54:00:00:00:01
it will create a device with queue size = 1024.
If they use QEMU 2.9, then the queue size = 256.

What ABI change did you mean?

>>
>>>>
>>>>>
>>>>>>
>>>>>> For live migration, the queue size that is being used will also 
>>>>>> be transferred
>>>>>> to the destination.
>>>>>>
>>>>>>>
>>>>>>>> We can reduce the size (to 256) if the MAX_CHAIN_SIZE feature
>>>>>>>> is not supported by the guest.
>>>>>>>> In this way, people who apply the QEMU patch can directly use the
>>>>>>>> largest queue size(1024) without adding the booting command line.
>>>>>>>>
>>>>>>>> 2) The vhost backend does not use writev, so I think when the 
>>>>>>>> vhost
>>>>>>>> backed is used, using 1024 queue size should not depend on the
>>>>>>>> MAX_CHAIN_SIZE feature.
>>>>>>>
>>>>>>> But do we need to consider even larger queue size now?
>>>>>>
>>>>>> Need Michael's feedback on this. Meanwhile, I'll get the next 
>>>>>> version of
>>>>>> code ready and check if larger queue size would cause any corner 
>>>>>> case.
>>>>>
>>>>> The problem is, do we really need a new config filed for this? Or 
>>>>> just introduce a flag which means "I support up to 1024 sgs" is 
>>>>> sufficient?
>>>>>
>>>>
>>>> For now, it also works without the new config field, max_chain_size,
>>>> But I would prefer to keep the new config field, because:
>>>>
>>>> Without that, the driver will work on  an assumed value, 1023.
>>>
>>> This is the fact, and it's too late to change legacy driver.
>>>
>>>> If the future, QEMU needs to change it to 1022, then how can the
>>>> new QEMU tell the old driver, which supports the MAX_CHAIN_SIZE
>>>> feature but works with the old hardcode value 1023?
>>>
>>> Can config filed help in this case? The problem is similar to 
>>> ANY_HEADER_SG, the only thing we can is to clarify the limitation 
>>> for new drivers.
>>>
>>
>> I think it helps, because the driver will do
>> virtio_cread_feature(vdev, VIRTIO_NET_F_MAX_CHAIN_SIZE,
>>                                   struct virtio_net_config, 
>> max_chain_size, &chain_size);
>> to get the max_chain_size from the device. So when new QEMU has a new 
>> value of max_chain_size, old driver
>> will get the new value.
>
> I think we're talking about old drivers, so this won't work. It can 
> only work with assumptions (implicitly).
>
> Thanks
>
That's not the case. Old drivers which don't support the
VIRTIO_NET_F_MAX_CHAIN_SIZE feature will not allow the device to use a
1024 queue size. So, in that case, the device will use the old queue
size, 256.

The point of using the config field here is: when tomorrow's device is
released with a requirement for the driver to use max_chain_size=1022
(not today's 1023), today's driver will naturally support tomorrow's
device without any modification, since it reads max_chain_size from the
config field, which is filled by the device (either today's device or
tomorrow's device with a different value).
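
For example, the driver side could do something like this at probe time
(hypothetical sketch, not actual virtio_net driver code):

    u16 chain_size = 1023;  /* assumed limit if the feature is absent */

    if (virtio_has_feature(vdev, VIRTIO_NET_F_MAX_CHAIN_SIZE))
        virtio_cread_feature(vdev, VIRTIO_NET_F_MAX_CHAIN_SIZE,
                             struct virtio_net_config, max_chain_size,
                             &chain_size);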

Best,
Wei






Re: [Qemu-devel] [virtio-dev] Re: [virtio-dev] Re: [PATCH RFC] virtio-net: enable configurable tx queue size
Posted by Jason Wang 6 years, 11 months ago

On 2017-05-25 19:50, Wei Wang wrote:
> On 05/25/2017 03:49 PM, Jason Wang wrote:
>>
>>
>> On 2017年05月24日 16:18, Wei Wang wrote:
>>> On 05/24/2017 11:19 AM, Jason Wang wrote:
>>>>
>>>>
>>>> On 2017年05月23日 18:36, Wei Wang wrote:
>>>>> On 05/23/2017 02:24 PM, Jason Wang wrote:
>>>>>>
>>>>>>
>>>>>> On 2017年05月23日 13:15, Wei Wang wrote:
>>>>>>> On 05/23/2017 10:04 AM, Jason Wang wrote:
>>>>>>>>
>>>>>>>>
>>>>>>>> On 2017年05月22日 19:52, Wei Wang wrote:
>>>>>>>>> On 05/20/2017 04:42 AM, Michael S. Tsirkin wrote:
>>>>>>>>>> On Fri, May 19, 2017 at 10:32:19AM +0800, Wei Wang wrote:
>>>>>>>>>>> This patch enables the virtio-net tx queue size to be 
>>>>>>>>>>> configurable
>>>>>>>>>>> between 256 (the default queue size) and 1024 by the user. 
>>>>>>>>>>> The queue
>>>>>>>>>>> size specified by the user should be power of 2.
>>>>>>>>>>>
>>>>>>>>>>> Setting the tx queue size to be 1024 requires the guest 
>>>>>>>>>>> driver to
>>>>>>>>>>> support the VIRTIO_NET_F_MAX_CHAIN_SIZE feature.
>>>>>>>>>> This should be a generic ring feature, not one specific to 
>>>>>>>>>> virtio net.
>>>>>>>>> OK. How about making two more changes below:
>>>>>>>>>
>>>>>>>>> 1) make the default tx queue size = 1024 (instead of 256).
>>>>>>>>
>>>>>>>> As has been pointed out, you need compat the default value too 
>>>>>>>> in this case.
>>>>>>>
>>>>>>> The driver gets the size info from the device, then would it 
>>>>>>> cause any
>>>>>>> compatibility issue if we change the default ring size to 1024 
>>>>>>> in the
>>>>>>> vhost case? In other words, is there any software (i.e. any 
>>>>>>> virtio-net driver)
>>>>>>> functions based on the assumption of 256 queue size?
>>>>>>
>>>>>> I don't know. But is it safe e.g we migrate from 1024 to an older 
>>>>>> qemu with 256 as its queue size?
>>>>>
>>>>> Yes, I think it is safe, because the default queue size is used 
>>>>> when the device is being
>>>>> set up (e.g. feature negotiation).
>>>>> During migration (the device has already been running), the 
>>>>> destination machine will
>>>>> load the device state based on the the queue size that is being 
>>>>> used (i.e. vring.num).
>>>>> The default value is not used any more after the setup phase.
>>>>
>>>> I haven't checked all cases, but there's two obvious things:
>>>>
>>>> - After migration and after a reset, it will go back to 256 on dst.
>>>
>>> Please let me clarify what we want first: when QEMU boots and it 
>>> realizes the
>>> virtio-net device, if the tx_queue_size is not given by the command 
>>> line, we want
>>> to use 1024 as the queue size, that is, virtio_add_queue(,1024,), 
>>> which sets
>>> vring.num=1024 and vring.num_default=1024.
>>>
>>> When migration happens, the vring.num variable (has been 1024) is 
>>> sent to
>>> the destination machine, where virtio_load() will assign the 
>>> destination side vring.num
>>> to that value (1024). So, vring.num=1024 continues to work on the 
>>> destination machine
>>> with old QEMU. I don't see an issue here.
>>>
>>> If reset happens, I think the device and driver will re-do the 
>>> initialization steps. So, if they are
>>> with the old QEMU, then they use the old qemu realize() function to 
>>> do virtio_add_queue(,256,),
>>> and the driver will re-do the probe() steps and take vring.num=256, 
>>> then everything works fine.
>>
>> Probably works fine but the size is 256 forever after migration. 
>> Instead of using 1024 which work just one time and maybe risky, isn't 
>> it better to just use 256 for old machine types?
>>
>
> If it migrates to the old QEMU, then I think everything should work in 
> the old QEMU style after
> reset (not just our virtio-net case). I think this should be something 
> natural and reasonable.

The point is that it should behave exactly the same, not only after reset
but also before.

>
> Why would the change depends on machine types?
>
>>>
>>>
>>>> - ABI is changed, e.g -M pc-q35-2.10 returns 1024 on 2.11
>>>>
>>> Didn't get this. Could you please explain more? which ABI would be 
>>> changed, and why it affects q35?
>>>
>>
>> Nothing specific to q35, just to point out the machine type of 2.10.
>>
>> E.g on 2.10, -M pc-q35-2.10, vring.num is 256; On 2.11 -M pc-q35-2.10 
>> vring.num is 1024.
>>
>
> I think it's not related to the machine type.
>
> Probably we can use QEMU version to discuss here.
> Suppose this change will be made to the next version, QEMU 2.10. Then 
> with QEMU 2.10, when
> people create a virtio-net device as usual:
> -device virtio-net-pci,netdev=net1,mac=52:54:00:00:00:01
> it will creates a device with queue size = 1024.
> If they use QEMU 2.9, then the queue size = 256.
>
> What ABI change did you mean?

See https://fedoraproject.org/wiki/Features/KVM_Stable_Guest_ABI.

>
>>>
>>>>>
>>>>>>
>>>>>>>
>>>>>>> For live migration, the queue size that is being used will also 
>>>>>>> be transferred
>>>>>>> to the destination.
>>>>>>>
>>>>>>>>
>>>>>>>>> We can reduce the size (to 256) if the MAX_CHAIN_SIZE feature
>>>>>>>>> is not supported by the guest.
>>>>>>>>> In this way, people who apply the QEMU patch can directly use the
>>>>>>>>> largest queue size(1024) without adding the booting command line.
>>>>>>>>>
>>>>>>>>> 2) The vhost backend does not use writev, so I think when the 
>>>>>>>>> vhost
>>>>>>>>> backed is used, using 1024 queue size should not depend on the
>>>>>>>>> MAX_CHAIN_SIZE feature.
>>>>>>>>
>>>>>>>> But do we need to consider even larger queue size now?
>>>>>>>
>>>>>>> Need Michael's feedback on this. Meanwhile, I'll get the next 
>>>>>>> version of
>>>>>>> code ready and check if larger queue size would cause any corner 
>>>>>>> case.
>>>>>>
>>>>>> The problem is, do we really need a new config filed for this? Or 
>>>>>> just introduce a flag which means "I support up to 1024 sgs" is 
>>>>>> sufficient?
>>>>>>
>>>>>
>>>>> For now, it also works without the new config field, max_chain_size,
>>>>> But I would prefer to keep the new config field, because:
>>>>>
>>>>> Without that, the driver will work on  an assumed value, 1023.
>>>>
>>>> This is the fact, and it's too late to change legacy driver.
>>>>
>>>>> If the future, QEMU needs to change it to 1022, then how can the
>>>>> new QEMU tell the old driver, which supports the MAX_CHAIN_SIZE
>>>>> feature but works with the old hardcode value 1023?
>>>>
>>>> Can config filed help in this case? The problem is similar to 
>>>> ANY_HEADER_SG, the only thing we can is to clarify the limitation 
>>>> for new drivers.
>>>>
>>>
>>> I think it helps, because the driver will do
>>> virtio_cread_feature(vdev, VIRTIO_NET_F_MAX_CHAIN_SIZE,
>>>                                   struct virtio_net_config, 
>>> max_chain_size, &chain_size);
>>> to get the max_chain_size from the device. So when new QEMU has a 
>>> new value of max_chain_size, old driver
>>> will get the new value.
>>
>> I think we're talking about old drivers, so this won't work. It can 
>> only work with assumptions (implicitly).
>>
>> Thanks
>>
> Not the case. Old drivers which don't have VIRTIO_NET_F_MAX_CHAIN_SIZE 
> feature support, will not allow
> the device to use 1024 queue size. So, in that case, the device will 
> use the old, 256, queue size.

I think it's better not to tie the #sgs to the queue size.

>
> The point of using the config field here is, when tomorrow's device is 
> released with a requirement for the driver
> to use max_chain_size=1022 (not today's 1023), today's driver will 
> naturally support tomorrow's device without
> any modification, since it reads the max_chain_size from the config 
> field which is filled by the device (either today's
> device or tomorrow's device with different values).

I'm not saying anything is wrong with the config field you introduced.
But you should answer the following question:

Is it useful to support more than 1024? If yes, why? If not, introducing
a VIRTIO_F_SG_1024 is more than enough, I think.
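
On the QEMU side, something roughly like this would be enough (untested
sketch; the bit number is only a placeholder until the spec reserves one):

    /* Hypothetical transport feature bit meaning "up to 1024 sgs are
     * supported", with no per-device config field. */
    #define VIRTIO_F_SG_1024  36   /* placeholder bit number */

    /* in virtio_net_set_features(): */
    if (virtio_has_feature(features, VIRTIO_F_SG_1024)) {
        virtio_net_change_tx_queue_size(n);
    }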

Thanks

>
> Best,
> Wei
>
>
>
>
>
>
>


Re: [Qemu-devel] [virtio-dev] Re: [virtio-dev] Re: [PATCH RFC] virtio-net: enable configurable tx queue size
Posted by Michael S. Tsirkin 6 years, 11 months ago
On Thu, May 25, 2017 at 08:13:31PM +0800, Jason Wang wrote:
> > 
> > The point of using the config field here is, when tomorrow's device is
> > released with a requirement for the driver
> > to use max_chain_size=1022 (not today's 1023), today's driver will
> > naturally support tomorrow's device without
> > any modification, since it reads the max_chain_size from the config
> > field which is filled by the device (either today's
> > device or tomorrow's device with different values).
> 
> I'm not saying anything wrong with the config filed you introduced. But you
> should answer the following question:
> 
> Is it useful to support more than 1024? If yes, why? If not, introduce a
> VIRTIO_F_SG_1024 is more than enough I think.
> 
> Thanks

I think it's useful to limit it to below the queue size too.

-- 
MST