Add an element to configure the thread pool size:
...
<binary>
<thread_pool size='16'/>
</binary>
...
https://bugzilla.redhat.com/show_bug.cgi?id=2072905
Signed-off-by: Ján Tomko <jtomko@redhat.com>
---
docs/formatdomain.rst | 6 ++++++
src/conf/domain_conf.c | 14 ++++++++++++++
src/conf/domain_conf.h | 1 +
src/conf/schemas/domaincommon.rng | 9 +++++++++
tests/qemuxml2argvdata/vhost-user-fs-fd-memory.xml | 1 +
5 files changed, 31 insertions(+)
diff --git a/docs/formatdomain.rst b/docs/formatdomain.rst
index 7da625380c..e8bff7632a 100644
--- a/docs/formatdomain.rst
+++ b/docs/formatdomain.rst
@@ -3329,6 +3329,7 @@ A directory on the host that can be accessed directly from the guest.
<cache mode='always'/>
<sandbox mode='namespace'/>
<lock posix='on' flock='on'/>
+ <thread_pool size='16'/>
</binary>
<source dir='/path'/>
<target dir='mount_tag'/>
@@ -3462,6 +3463,11 @@ A directory on the host that can be accessed directly from the guest.
``chroot``, see the
`virtiofsd documentation <https://qemu.readthedocs.io/en/latest/tools/virtiofsd.html>`__
for more details. ( :since:`Since 7.2.0` )
+ Element ``thread_pool`` accepts one attribute ``size`` which defines the
+ maximum thread pool size. A value of "0" disables the pool.
+ The thread pool helps increase the number of requests in flight when used with
+ storage that has a higher latency. However, it has an overhead, and so for
+ fast, low latency filesystems, it may be best to turn it off. ( :since:`Since 8.5.0` )
``source``
The resource on the host that is being accessed in the guest. The ``name``
attribute must be used with ``type='template'``, and the ``dir`` attribute
diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index 761c3f4d87..05fac7b39c 100644
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -2476,6 +2476,8 @@ virDomainFSDefNew(virDomainXMLOption *xmlopt)
ret->src = virStorageSourceNew();
+ ret->thread_pool_size = -1;
+
if (xmlopt &&
xmlopt->privateData.fsNew &&
!(ret->privateData = xmlopt->privateData.fsNew()))
@@ -9914,6 +9916,7 @@ virDomainFSDefParseXML(virDomainXMLOption *xmlopt,
if (def->fsdriver == VIR_DOMAIN_FS_DRIVER_TYPE_VIRTIOFS) {
g_autofree char *queue_size = virXPathString("string(./driver/@queue)", ctxt);
g_autofree char *binary = virXPathString("string(./binary/@path)", ctxt);
+ g_autofree char *thread_pool_size = virXPathString("string(./binary/thread_pool/@size)", ctxt);
xmlNodePtr binary_node = virXPathNode("./binary", ctxt);
xmlNodePtr binary_lock_node = virXPathNode("./binary/lock", ctxt);
xmlNodePtr binary_cache_node = virXPathNode("./binary/cache", ctxt);
@@ -9926,6 +9929,13 @@ virDomainFSDefParseXML(virDomainXMLOption *xmlopt,
goto error;
}
+ if (thread_pool_size && virStrToLong_i(thread_pool_size, NULL, 10, &def->thread_pool_size) < 0) {
+ virReportError(VIR_ERR_XML_ERROR,
+ _("cannot parse thread pool size '%s' for virtiofs"),
+                       thread_pool_size);
+ goto error;
+ }
+
if (binary)
def->binary = virFileSanitizePath(binary);
@@ -24211,6 +24221,10 @@ virDomainFSDefFormat(virBuffer *buf,
}
virXMLFormatElement(&binaryBuf, "lock", &lockAttrBuf, NULL);
+
+ if (def->thread_pool_size >= 0)
+ virBufferAsprintf(&binaryBuf, "<thread_pool size='%i'/>\n", def->thread_pool_size);
+
}
virDomainVirtioOptionsFormat(&driverAttrBuf, def->virtio);
diff --git a/src/conf/domain_conf.h b/src/conf/domain_conf.h
index da9e281214..8e0ccff543 100644
--- a/src/conf/domain_conf.h
+++ b/src/conf/domain_conf.h
@@ -906,6 +906,7 @@ struct _virDomainFSDef {
virTristateSwitch posix_lock;
virTristateSwitch flock;
virDomainFSSandboxMode sandbox;
+ int thread_pool_size;
virDomainVirtioOptions *virtio;
virObject *privateData;
};
diff --git a/src/conf/schemas/domaincommon.rng b/src/conf/schemas/domaincommon.rng
index efd49cfa01..ddbd9fa07d 100644
--- a/src/conf/schemas/domaincommon.rng
+++ b/src/conf/schemas/domaincommon.rng
@@ -3130,6 +3130,15 @@
</optional>
</element>
</optional>
+ <optional>
+ <element name="thread_pool">
+ <optional>
+ <attribute name="size">
+ <data type="integer"/>
+ </attribute>
+ </optional>
+ </element>
+ </optional>
</interleave>
</element>
</define>
diff --git a/tests/qemuxml2argvdata/vhost-user-fs-fd-memory.xml b/tests/qemuxml2argvdata/vhost-user-fs-fd-memory.xml
index abddf0870b..81de8c0dd7 100644
--- a/tests/qemuxml2argvdata/vhost-user-fs-fd-memory.xml
+++ b/tests/qemuxml2argvdata/vhost-user-fs-fd-memory.xml
@@ -32,6 +32,7 @@
<cache mode='always'/>
<sandbox mode='chroot'/>
<lock posix='off' flock='off'/>
+ <thread_pool size='16'/>
</binary>
<source dir='/path'/>
<target dir='mount_tag'/>
--
2.35.1
On 6/10/22 15:25, Ján Tomko wrote:
> Add an element to configure the thread pool size:
>
> ...
> <binary>
> <thread_pool size='16'/>
> </binary>
> ...
>
> https://bugzilla.redhat.com/show_bug.cgi?id=2072905
>
> Signed-off-by: Ján Tomko <jtomko@redhat.com>
> ---
> docs/formatdomain.rst | 6 ++++++
> src/conf/domain_conf.c | 14 ++++++++++++++
> src/conf/domain_conf.h | 1 +
> src/conf/schemas/domaincommon.rng | 9 +++++++++
> tests/qemuxml2argvdata/vhost-user-fs-fd-memory.xml | 1 +
> 5 files changed, 31 insertions(+)
>
> diff --git a/docs/formatdomain.rst b/docs/formatdomain.rst
> index 7da625380c..e8bff7632a 100644
> --- a/docs/formatdomain.rst
> +++ b/docs/formatdomain.rst
> @@ -3329,6 +3329,7 @@ A directory on the host that can be accessed directly from the guest.
> <cache mode='always'/>
> <sandbox mode='namespace'/>
> <lock posix='on' flock='on'/>
> + <thread_pool size='16'/>
I'm not sure this is extensible. I mean <thread_pool/> is pretty
specific. <thread pool_size='16'/> might look better, but then what is
<thread/>? I don't have any better idea, sorry.
> </binary>
> <source dir='/path'/>
> <target dir='mount_tag'/>
> @@ -3462,6 +3463,11 @@ A directory on the host that can be accessed directly from the guest.
> ``chroot``, see the
> `virtiofsd documentation <https://qemu.readthedocs.io/en/latest/tools/virtiofsd.html>`__
> for more details. ( :since:`Since 7.2.0` )
> + Element ``thread_pool`` accepts one attribute ``size`` which defines the
> + maximum thread pool size. A value of "0" disables the pool.
> + The thread pool helps increase the number of requests in flight when used with
> + storage that has a higher latency. However, it has an overhead, and so for
> + fast, low latency filesystems, it may be best to turn it off. ( :since:`Since 8.5.0` )
> ``source``
> The resource on the host that is being accessed in the guest. The ``name``
> attribute must be used with ``type='template'``, and the ``dir`` attribute
> diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
> index 761c3f4d87..05fac7b39c 100644
> --- a/src/conf/domain_conf.c
> +++ b/src/conf/domain_conf.c
> @@ -2476,6 +2476,8 @@ virDomainFSDefNew(virDomainXMLOption *xmlopt)
>
> ret->src = virStorageSourceNew();
>
> + ret->thread_pool_size = -1;
> +
> if (xmlopt &&
> xmlopt->privateData.fsNew &&
> !(ret->privateData = xmlopt->privateData.fsNew()))
> @@ -9914,6 +9916,7 @@ virDomainFSDefParseXML(virDomainXMLOption *xmlopt,
> if (def->fsdriver == VIR_DOMAIN_FS_DRIVER_TYPE_VIRTIOFS) {
> g_autofree char *queue_size = virXPathString("string(./driver/@queue)", ctxt);
> g_autofree char *binary = virXPathString("string(./binary/@path)", ctxt);
> + g_autofree char *thread_pool_size = virXPathString("string(./binary/thread_pool/@size)", ctxt);
> xmlNodePtr binary_node = virXPathNode("./binary", ctxt);
> xmlNodePtr binary_lock_node = virXPathNode("./binary/lock", ctxt);
> xmlNodePtr binary_cache_node = virXPathNode("./binary/cache", ctxt);
> @@ -9926,6 +9929,13 @@ virDomainFSDefParseXML(virDomainXMLOption *xmlopt,
> goto error;
> }
>
> + if (thread_pool_size && virStrToLong_i(thread_pool_size, NULL, 10, &def->thread_pool_size) < 0) {
Very long line.
> + virReportError(VIR_ERR_XML_ERROR,
> + _("cannot parse thread pool size '%s' for virtiofs"),
> + queue_size);
> + goto error;
> + }
> +
> if (binary)
> def->binary = virFileSanitizePath(binary);
>
> @@ -24211,6 +24221,10 @@ virDomainFSDefFormat(virBuffer *buf,
> }
>
> virXMLFormatElement(&binaryBuf, "lock", &lockAttrBuf, NULL);
> +
> + if (def->thread_pool_size >= 0)
> + virBufferAsprintf(&binaryBuf, "<thread_pool size='%i'/>\n", def->thread_pool_size);
I believe %d is preferred.
> +
> }
>
Michal
© 2016 - 2026 Red Hat, Inc.