qemu_job_wait_time, a timeout for QEMU jobs, was defined
as a fixed value (30 seconds). It is useful if the user
can change this value arbitrarily for debugging or tuning
purposes. So we move this value to the configuration file
so that it can be controlled by the user.
Signed-off-by: MIKI Nobuhiro <nmiki@yahoo-corp.jp>
---
src/qemu/libvirtd_qemu.aug | 1 +
src/qemu/qemu.conf | 3 +++
src/qemu/qemu_conf.c | 3 +++
src/qemu/qemu_conf.h | 2 ++
src/qemu/qemu_domain.c | 7 ++-----
src/qemu/test_libvirtd_qemu.aug.in | 1 +
6 files changed, 12 insertions(+), 5 deletions(-)
diff --git a/src/qemu/libvirtd_qemu.aug b/src/qemu/libvirtd_qemu.aug
index 404498b..76c896e 100644
--- a/src/qemu/libvirtd_qemu.aug
+++ b/src/qemu/libvirtd_qemu.aug
@@ -97,6 +97,7 @@ module Libvirtd_qemu =
| bool_entry "dump_guest_core"
| str_entry "stdio_handler"
| int_entry "max_threads_per_process"
+ | int_entry "qemu_job_wait_time"
let device_entry = bool_entry "mac_filter"
| bool_entry "relaxed_acs_check"
diff --git a/src/qemu/qemu.conf b/src/qemu/qemu.conf
index abdbf07..a05aaa5 100644
--- a/src/qemu/qemu.conf
+++ b/src/qemu/qemu.conf
@@ -612,6 +612,9 @@
#
#max_threads_per_process = 0
+# How many seconds the new job waits for the existing job to finish.
+#qemu_job_wait_time = 30
+
# If max_core is set to a non-zero integer, then QEMU will be
# permitted to create core dumps when it crashes, provided its
# RAM size is smaller than the limit set.
diff --git a/src/qemu/qemu_conf.c b/src/qemu/qemu_conf.c
index 2d5e527..4607879 100644
--- a/src/qemu/qemu_conf.c
+++ b/src/qemu/qemu_conf.c
@@ -272,6 +272,7 @@ virQEMUDriverConfigPtr virQEMUDriverConfigNew(bool privileged,
cfg->keepAliveInterval = 5;
cfg->keepAliveCount = 5;
cfg->seccompSandbox = -1;
+ cfg->qemuJobWaitTime = 30;
cfg->logTimestamp = true;
cfg->glusterDebugLevel = 4;
@@ -655,6 +656,8 @@ virQEMUDriverConfigLoadProcessEntry(virQEMUDriverConfigPtr cfg,
return -1;
if (virConfGetValueUInt(conf, "max_threads_per_process", &cfg->maxThreadsPerProc) < 0)
return -1;
+ if (virConfGetValueUInt(conf, "qemu_job_wait_time", &cfg->qemuJobWaitTime) < 0)
+ return -1;
if (virConfGetValueType(conf, "max_core") == VIR_CONF_STRING) {
if (virConfGetValueString(conf, "max_core", &corestr) < 0)
diff --git a/src/qemu/qemu_conf.h b/src/qemu/qemu_conf.h
index b9ef455..49dd7b6 100644
--- a/src/qemu/qemu_conf.h
+++ b/src/qemu/qemu_conf.h
@@ -218,6 +218,8 @@ struct _virQEMUDriverConfig {
gid_t swtpm_group;
char **capabilityfilters;
+
+ unsigned int qemuJobWaitTime;
};
G_DEFINE_AUTOPTR_CLEANUP_FUNC(virQEMUDriverConfig, virObjectUnref);
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 9c629c3..23622d9 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -6212,9 +6212,6 @@ qemuDomainObjCanSetJob(qemuDomainObjPrivatePtr priv,
priv->job.agentActive == QEMU_AGENT_JOB_NONE));
}
-/* Give up waiting for mutex after 30 seconds */
-#define QEMU_JOB_WAIT_TIME (1000ull * 30)
-
/**
* qemuDomainObjBeginJobInternal:
* @driver: qemu driver
@@ -6225,7 +6222,7 @@ qemuDomainObjCanSetJob(qemuDomainObjPrivatePtr priv,
*
* Acquires job for a domain object which must be locked before
* calling. If there's already a job running waits up to
- * QEMU_JOB_WAIT_TIME after which the functions fails reporting
+ * cfg->qemuJobWaitTime after which the functions fails reporting
* an error unless @nowait is set.
*
* If @nowait is true this function tries to acquire job and if
@@ -6272,7 +6269,7 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver,
return -1;
priv->jobs_queued++;
- then = now + QEMU_JOB_WAIT_TIME;
+ then = now + 1000ull * cfg->qemuJobWaitTime;
retry:
if ((!async && job != QEMU_JOB_DESTROY) &&
diff --git a/src/qemu/test_libvirtd_qemu.aug.in b/src/qemu/test_libvirtd_qemu.aug.in
index 19da591..b2886df 100644
--- a/src/qemu/test_libvirtd_qemu.aug.in
+++ b/src/qemu/test_libvirtd_qemu.aug.in
@@ -76,6 +76,7 @@ module Test_libvirtd_qemu =
{ "max_processes" = "0" }
{ "max_files" = "0" }
{ "max_threads_per_process" = "0" }
+{ "qemu_job_wait_time" = "30" }
{ "max_core" = "unlimited" }
{ "dump_guest_core" = "1" }
{ "mac_filter" = "1" }
--
1.8.3.1
On 5/7/20 11:41 AM, MIKI Nobuhiro wrote: > qemu_job_wait_time, a timeout for QEMU jobs, was defined > as a fixed value (30 seconds). It is useful if the user > can change this value arbitrarily for debugging or tuning > purposes. So we move this value to the configuration file > so that it can be controlled by the user. > > Signed-off-by: MIKI Nobuhiro <nmiki@yahoo-corp.jp> > --- > src/qemu/libvirtd_qemu.aug | 1 + > src/qemu/qemu.conf | 3 +++ > src/qemu/qemu_conf.c | 3 +++ > src/qemu/qemu_conf.h | 2 ++ > src/qemu/qemu_domain.c | 7 ++----- > src/qemu/test_libvirtd_qemu.aug.in | 1 + > 6 files changed, 12 insertions(+), 5 deletions(-) > > diff --git a/src/qemu/libvirtd_qemu.aug b/src/qemu/libvirtd_qemu.aug > index 404498b..76c896e 100644 > --- a/src/qemu/libvirtd_qemu.aug > +++ b/src/qemu/libvirtd_qemu.aug > @@ -97,6 +97,7 @@ module Libvirtd_qemu = > | bool_entry "dump_guest_core" > | str_entry "stdio_handler" > | int_entry "max_threads_per_process" > + | int_entry "qemu_job_wait_time" > > let device_entry = bool_entry "mac_filter" > | bool_entry "relaxed_acs_check" > diff --git a/src/qemu/qemu.conf b/src/qemu/qemu.conf > index abdbf07..a05aaa5 100644 > --- a/src/qemu/qemu.conf > +++ b/src/qemu/qemu.conf > @@ -612,6 +612,9 @@ > # > #max_threads_per_process = 0 > > +# How many seconds the new job waits for the existing job to finish. > +#qemu_job_wait_time = 30 This is rather sparse documentation of the knob. If I were a regular sysadmin I probably wouldn't know if I want to tweak this or not. On the other hand, we don't want regular sysadmins to touch this :-) > + > # If max_core is set to a non-zero integer, then QEMU will be > # permitted to create core dumps when it crashes, provided its > # RAM size is smaller than the limit set. 
> diff --git a/src/qemu/qemu_conf.c b/src/qemu/qemu_conf.c > index 2d5e527..4607879 100644 > --- a/src/qemu/qemu_conf.c > +++ b/src/qemu/qemu_conf.c > @@ -272,6 +272,7 @@ virQEMUDriverConfigPtr virQEMUDriverConfigNew(bool privileged, > cfg->keepAliveInterval = 5; > cfg->keepAliveCount = 5; > cfg->seccompSandbox = -1; > + cfg->qemuJobWaitTime = 30; > > cfg->logTimestamp = true; > cfg->glusterDebugLevel = 4; > @@ -655,6 +656,8 @@ virQEMUDriverConfigLoadProcessEntry(virQEMUDriverConfigPtr cfg, > return -1; > if (virConfGetValueUInt(conf, "max_threads_per_process", &cfg->maxThreadsPerProc) < 0) > return -1; > + if (virConfGetValueUInt(conf, "qemu_job_wait_time", &cfg->qemuJobWaitTime) < 0) > + return -1; > > if (virConfGetValueType(conf, "max_core") == VIR_CONF_STRING) { > if (virConfGetValueString(conf, "max_core", &corestr) < 0) > diff --git a/src/qemu/qemu_conf.h b/src/qemu/qemu_conf.h > index b9ef455..49dd7b6 100644 > --- a/src/qemu/qemu_conf.h > +++ b/src/qemu/qemu_conf.h > @@ -218,6 +218,8 @@ struct _virQEMUDriverConfig { > gid_t swtpm_group; > > char **capabilityfilters; > + > + unsigned int qemuJobWaitTime; > }; > > G_DEFINE_AUTOPTR_CLEANUP_FUNC(virQEMUDriverConfig, virObjectUnref); > diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c > index 9c629c3..23622d9 100644 > --- a/src/qemu/qemu_domain.c > +++ b/src/qemu/qemu_domain.c > @@ -6212,9 +6212,6 @@ qemuDomainObjCanSetJob(qemuDomainObjPrivatePtr priv, > priv->job.agentActive == QEMU_AGENT_JOB_NONE)); > } > > -/* Give up waiting for mutex after 30 seconds */ > -#define QEMU_JOB_WAIT_TIME (1000ull * 30) > - > /** > * qemuDomainObjBeginJobInternal: > * @driver: qemu driver > @@ -6225,7 +6222,7 @@ qemuDomainObjCanSetJob(qemuDomainObjPrivatePtr priv, > * > * Acquires job for a domain object which must be locked before > * calling. 
If there's already a job running waits up to > - * QEMU_JOB_WAIT_TIME after which the functions fails reporting > + * cfg->qemuJobWaitTime after which the functions fails reporting > * an error unless @nowait is set. > * > * If @nowait is true this function tries to acquire job and if > @@ -6272,7 +6269,7 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, > return -1; > > priv->jobs_queued++; > - then = now + QEMU_JOB_WAIT_TIME; > + then = now + 1000ull * cfg->qemuJobWaitTime; So this will set the timeout globally. Meaning, if I start a domain it will by default use 30 seconds timeout. But then, if I'd change the config and restart the daemon the domain would use the new value. On the other hand, that might not be necessary. So if you send the expanded comment for the knob then I can merge these. Michal
On Mon, May 18, 2020 at 01:06:36PM +0200, Michal Privoznik wrote: > This is rather sparse documentation of the knob. If I were a regular > sysadmin I probably wouldn't know if I want to tweak this or not. On the > other hand, we don't want regular sysadmins to touch this :-) I've experienced just a few timeout-related issues in the past on the nodes I manage. At that time, I adjusted this knob and temporarily worked around the problem. Note that the essential problem appears to have been fixed in the master branch. > So this wil set the timeout globally. Meaning, if I start a domain it will > by default use 30 seconds timeout. But then, if I'd change the config and > restart the daemon the domain would use the new value. > On the other hand, that might not be necessary. So if you send the expanded > comment for the knob then I can merge these. Yes, this knob is applied to all domains when the libvirtd process is restarted, as you said. However, I don't understand exactly how this knob will affect the entire system. So, this knob worked well in my environment, but may not be needed in other users' environments.
© 2016 - 2024 Red Hat, Inc.