[Qemu-devel] [PATCH] virtio-blk: enable multiple vectors when using multiple I/O queues

Changpeng Liu posted 1 patch 6 years, 2 months ago
Patches applied successfully
git fetch https://github.com/patchew-project/qemu tags/patchew/1517464293-7012-1-git-send-email-changpeng.liu@intel.com
Test checkpatch passed
Test docker-build@min-glib passed
Test docker-mingw@fedora passed
Test docker-quick@centos6 passed
Test ppc passed
Test s390x passed
There is a newer version of this series
hw/virtio/virtio-pci.c | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
[Qemu-devel] [PATCH] virtio-blk: enable multiple vectors when using multiple I/O queues
Posted by Changpeng Liu 6 years, 2 months ago
Currently the virtio-pci driver hard-codes 2 vectors for the virtio-blk
device, so when multiple I/O queues are configured, all of the queues
share a single interrupt vector. Enable multiple vectors instead, with
the default derived from the number of I/O queues (one per queue plus
one for configuration changes).

Signed-off-by: Changpeng Liu <changpeng.liu@intel.com>
---
 hw/virtio/virtio-pci.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 9ae10f0..379b00c 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -1932,7 +1932,7 @@ static Property virtio_blk_pci_properties[] = {
     DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
     DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                     VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
-    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
+    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, DEV_NVECTORS_UNSPECIFIED),
     DEFINE_PROP_END_OF_LIST(),
 };
 
@@ -1941,6 +1941,10 @@ static void virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
     VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(vpci_dev);
     DeviceState *vdev = DEVICE(&dev->vdev);
 
+    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
+        vpci_dev->nvectors = dev->vdev.conf.num_queues + 1;
+    }
+
     qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
     object_property_set_bool(OBJECT(vdev), true, "realized", errp);
 }
@@ -1983,7 +1987,7 @@ static const TypeInfo virtio_blk_pci_info = {
 
 static Property vhost_user_blk_pci_properties[] = {
     DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
-    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
+    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, DEV_NVECTORS_UNSPECIFIED),
     DEFINE_PROP_END_OF_LIST(),
 };
 
@@ -1992,6 +1996,10 @@ static void vhost_user_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
     VHostUserBlkPCI *dev = VHOST_USER_BLK_PCI(vpci_dev);
     DeviceState *vdev = DEVICE(&dev->vdev);
 
+    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
+        vpci_dev->nvectors = dev->vdev.num_queues + 1;
+    }
+
     qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
     object_property_set_bool(OBJECT(vdev), true, "realized", errp);
 }
-- 
1.9.3
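
For illustration, the default-selection logic above boils down to the
following standalone sketch (not QEMU code; DEV_NVECTORS_UNSPECIFIED is
assumed here to be the usual (uint32_t)-1 sentinel QEMU uses for "no
vectors= property was given"):

    /* Standalone sketch (not QEMU code) of how the new default resolves.
     * Assumption: DEV_NVECTORS_UNSPECIFIED is the (uint32_t)-1 sentinel
     * meaning the user did not set the vectors= property. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DEV_NVECTORS_UNSPECIFIED ((uint32_t)-1)

    static uint32_t resolve_nvectors(uint32_t nvectors, uint32_t num_queues)
    {
        if (nvectors == DEV_NVECTORS_UNSPECIFIED) {
            /* one vector per I/O queue, plus one for config changes */
            nvectors = num_queues + 1;
        }
        return nvectors;
    }

    int main(void)
    {
        /* e.g. -device virtio-blk-pci,num-queues=4 -> 5 vectors */
        printf("%" PRIu32 "\n", resolve_nvectors(DEV_NVECTORS_UNSPECIFIED, 4));
        /* an explicit vectors= property still takes precedence */
        printf("%" PRIu32 "\n", resolve_nvectors(2, 4));
        return 0;
    }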


Re: [Qemu-devel] [PATCH] virtio-blk: enable multiple vectors when using multiple I/O queues
Posted by Paolo Bonzini 6 years, 2 months ago
On 01/02/2018 06:51, Changpeng Liu wrote:
> Currently the virtio-pci driver hard-codes 2 vectors for the virtio-blk
> device, so when multiple I/O queues are configured, all of the queues
> share a single interrupt vector. Enable multiple vectors instead, with
> the default derived from the number of I/O queues (one per queue plus
> one for configuration changes).
> 
> Signed-off-by: Changpeng Liu <changpeng.liu@intel.com>

This is a change in the device's interface to the guest ("guest ABI"),
so it has to be protected with migration compatibility: just add the
following to HW_COMPAT_2_11 in include/hw/compat.h:

    {\
        .driver   = "virtio-blk-pci",\
        .property = "vectors",\
        .value    = "2",\
    },\
    {\
        .driver   = "vhost-user-blk-pci",\
        .property = "vectors",\
        .value    = "2",\
    },\

Otherwise, it looks good.

Thanks,

Paolo
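
For context, each HW_COMPAT_2_11 entry pins a property default whenever
a 2.11-or-older machine type is selected, so a guest started (or
migrated) with, say, -machine pc-i440fx-2.11 keeps seeing the old
default of 2 vectors. A sketch of how the entries would sit inside the
macro (the existing 2.11 entries are elided):

    /* include/hw/compat.h -- sketch only; the other HW_COMPAT_2_11
     * entries are elided. Each entry forces the named property's
     * default on 2.11-and-older machine types. */
    #define HW_COMPAT_2_11 \
        {\
            .driver   = "virtio-blk-pci",\
            .property = "vectors",\
            .value    = "2",\
        },\
        {\
            .driver   = "vhost-user-blk-pci",\
            .property = "vectors",\
            .value    = "2",\
        },\
        /* ...existing 2.11 entries follow... */

Newer machine types are unaffected and get the num-queues + 1 default.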
