To activate the periodic CPU throttling feature, introduce
the cpu-periodic-throttle parameter.

To control the frequency of throttling, introduce the
cpu-periodic-throttle-interval parameter.
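
For example, the new parameters could be set at run time via QMP or HMP
(the interval value below is illustrative only):

  (QMP) { "execute": "migrate-set-parameters",
          "arguments": { "cpu-periodic-throttle": true,
                         "cpu-periodic-throttle-interval": 5 } }

  (HMP) migrate_set_parameter cpu-periodic-throttle on
        migrate_set_parameter cpu-periodic-throttle-interval 5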
Signed-off-by: Hyman Huang <yong.huang@smartx.com>
---
migration/migration-hmp-cmds.c | 17 +++++++++++
migration/options.c | 54 ++++++++++++++++++++++++++++++++++
migration/options.h | 2 ++
qapi/migration.json | 25 +++++++++++++++-
4 files changed, 97 insertions(+), 1 deletion(-)
diff --git a/migration/migration-hmp-cmds.c b/migration/migration-hmp-cmds.c
index 7d608d26e1..f7b8e06bb4 100644
--- a/migration/migration-hmp-cmds.c
+++ b/migration/migration-hmp-cmds.c
@@ -264,6 +264,15 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict)
monitor_printf(mon, "%s: %s\n",
MigrationParameter_str(MIGRATION_PARAMETER_CPU_THROTTLE_TAILSLOW),
params->cpu_throttle_tailslow ? "on" : "off");
+ assert(params->has_cpu_periodic_throttle);
+ monitor_printf(mon, "%s: %s\n",
+ MigrationParameter_str(MIGRATION_PARAMETER_CPU_PERIODIC_THROTTLE),
+ params->cpu_periodic_throttle ? "on" : "off");
+ assert(params->has_cpu_periodic_throttle_interval);
+ monitor_printf(mon, "%s: %u\n",
+ MigrationParameter_str(
+ MIGRATION_PARAMETER_CPU_PERIODIC_THROTTLE_INTERVAL),
+ params->cpu_periodic_throttle_interval);
assert(params->has_max_cpu_throttle);
monitor_printf(mon, "%s: %u\n",
MigrationParameter_str(MIGRATION_PARAMETER_MAX_CPU_THROTTLE),
@@ -512,6 +521,14 @@ void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict)
p->has_cpu_throttle_tailslow = true;
visit_type_bool(v, param, &p->cpu_throttle_tailslow, &err);
break;
+ case MIGRATION_PARAMETER_CPU_PERIODIC_THROTTLE:
+ p->has_cpu_periodic_throttle = true;
+ visit_type_bool(v, param, &p->cpu_periodic_throttle, &err);
+ break;
+ case MIGRATION_PARAMETER_CPU_PERIODIC_THROTTLE_INTERVAL:
+ p->has_cpu_periodic_throttle_interval = true;
+ visit_type_uint8(v, param, &p->cpu_periodic_throttle_interval, &err);
+ break;
case MIGRATION_PARAMETER_MAX_CPU_THROTTLE:
p->has_max_cpu_throttle = true;
visit_type_uint8(v, param, &p->max_cpu_throttle, &err);
diff --git a/migration/options.c b/migration/options.c
index 645f55003d..2dbe275ba0 100644
--- a/migration/options.c
+++ b/migration/options.c
@@ -44,6 +44,7 @@
#define DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD 50
#define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20
#define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10
+#define DEFAULT_MIGRATE_CPU_PERIODIC_THROTTLE_INTERVAL 5
#define DEFAULT_MIGRATE_MAX_CPU_THROTTLE 99
/* Migration XBZRLE default cache size */
@@ -104,6 +105,11 @@ Property migration_properties[] = {
DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT),
DEFINE_PROP_BOOL("x-cpu-throttle-tailslow", MigrationState,
parameters.cpu_throttle_tailslow, false),
+ DEFINE_PROP_BOOL("x-cpu-periodic-throttle", MigrationState,
+ parameters.cpu_periodic_throttle, false),
+ DEFINE_PROP_UINT8("x-cpu-periodic-throttle-interval", MigrationState,
+ parameters.cpu_periodic_throttle_interval,
+ DEFAULT_MIGRATE_CPU_PERIODIC_THROTTLE_INTERVAL),
DEFINE_PROP_SIZE("x-max-bandwidth", MigrationState,
parameters.max_bandwidth, MAX_THROTTLE),
DEFINE_PROP_SIZE("avail-switchover-bandwidth", MigrationState,
@@ -695,6 +701,20 @@ uint8_t migrate_cpu_throttle_initial(void)
return s->parameters.cpu_throttle_initial;
}
+uint8_t migrate_periodic_throttle_interval(void)
+{
+ MigrationState *s = migrate_get_current();
+
+ return s->parameters.cpu_periodic_throttle_interval;
+}
+
+bool migrate_periodic_throttle(void)
+{
+ MigrationState *s = migrate_get_current();
+
+ return s->parameters.cpu_periodic_throttle;
+}
+
bool migrate_cpu_throttle_tailslow(void)
{
MigrationState *s = migrate_get_current();
@@ -874,6 +894,11 @@ MigrationParameters *qmp_query_migrate_parameters(Error **errp)
params->cpu_throttle_increment = s->parameters.cpu_throttle_increment;
params->has_cpu_throttle_tailslow = true;
params->cpu_throttle_tailslow = s->parameters.cpu_throttle_tailslow;
+ params->has_cpu_periodic_throttle = true;
+ params->cpu_periodic_throttle = s->parameters.cpu_periodic_throttle;
+ params->has_cpu_periodic_throttle_interval = true;
+ params->cpu_periodic_throttle_interval =
+ s->parameters.cpu_periodic_throttle_interval;
params->tls_creds = g_strdup(s->parameters.tls_creds);
params->tls_hostname = g_strdup(s->parameters.tls_hostname);
params->tls_authz = g_strdup(s->parameters.tls_authz ?
@@ -940,6 +965,8 @@ void migrate_params_init(MigrationParameters *params)
params->has_cpu_throttle_initial = true;
params->has_cpu_throttle_increment = true;
params->has_cpu_throttle_tailslow = true;
+ params->has_cpu_periodic_throttle = true;
+ params->has_cpu_periodic_throttle_interval = true;
params->has_max_bandwidth = true;
params->has_downtime_limit = true;
params->has_x_checkpoint_delay = true;
@@ -996,6 +1023,15 @@ bool migrate_params_check(MigrationParameters *params, Error **errp)
return false;
}
+ if (params->has_cpu_periodic_throttle_interval &&
+ (params->cpu_periodic_throttle_interval < 2 ||
+ params->cpu_periodic_throttle_interval > 10)) {
+ error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
+ "cpu_periodic_throttle_interval",
+ "an integer in the range of 2 to 10");
+ return false;
+ }
+
if (params->has_max_bandwidth && (params->max_bandwidth > SIZE_MAX)) {
error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
"max_bandwidth",
@@ -1163,6 +1199,15 @@ static void migrate_params_test_apply(MigrateSetParameters *params,
dest->cpu_throttle_tailslow = params->cpu_throttle_tailslow;
}
+ if (params->has_cpu_periodic_throttle) {
+ dest->cpu_periodic_throttle = params->cpu_periodic_throttle;
+ }
+
+ if (params->has_cpu_periodic_throttle_interval) {
+ dest->cpu_periodic_throttle_interval =
+ params->cpu_periodic_throttle_interval;
+ }
+
if (params->tls_creds) {
assert(params->tls_creds->type == QTYPE_QSTRING);
dest->tls_creds = params->tls_creds->u.s;
@@ -1271,6 +1316,15 @@ static void migrate_params_apply(MigrateSetParameters *params, Error **errp)
s->parameters.cpu_throttle_tailslow = params->cpu_throttle_tailslow;
}
+ if (params->has_cpu_periodic_throttle) {
+ s->parameters.cpu_periodic_throttle = params->cpu_periodic_throttle;
+ }
+
+ if (params->has_cpu_periodic_throttle_interval) {
+ s->parameters.cpu_periodic_throttle_interval =
+ params->cpu_periodic_throttle_interval;
+ }
+
if (params->tls_creds) {
g_free(s->parameters.tls_creds);
assert(params->tls_creds->type == QTYPE_QSTRING);
diff --git a/migration/options.h b/migration/options.h
index a2397026db..efeac01470 100644
--- a/migration/options.h
+++ b/migration/options.h
@@ -68,6 +68,8 @@ bool migrate_has_block_bitmap_mapping(void);
uint32_t migrate_checkpoint_delay(void);
uint8_t migrate_cpu_throttle_increment(void);
uint8_t migrate_cpu_throttle_initial(void);
+uint8_t migrate_periodic_throttle_interval(void);
+bool migrate_periodic_throttle(void);
bool migrate_cpu_throttle_tailslow(void);
bool migrate_direct_io(void);
uint64_t migrate_downtime_limit(void);
diff --git a/qapi/migration.json b/qapi/migration.json
index 7324571e92..8281d4a83b 100644
--- a/qapi/migration.json
+++ b/qapi/migration.json
@@ -724,6 +724,12 @@
# be excessive at tail stage. The default value is false. (Since
# 5.1)
#
+# @cpu-periodic-throttle: Throttle the CPU periodically.
+#     (Since 9.1)
+#
+# @cpu-periodic-throttle-interval: Interval between periodic CPU
+#     throttling operations.  (Since 9.1)
+#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
# for establishing a TLS connection over the migration data
# channel. On the outgoing side of the migration, the credentials
@@ -844,7 +850,8 @@
'announce-rounds', 'announce-step',
'throttle-trigger-threshold',
'cpu-throttle-initial', 'cpu-throttle-increment',
- 'cpu-throttle-tailslow',
+ 'cpu-throttle-tailslow', 'cpu-periodic-throttle',
+ 'cpu-periodic-throttle-interval',
'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth',
'avail-switchover-bandwidth', 'downtime-limit',
{ 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] },
@@ -899,6 +906,12 @@
# be excessive at tail stage. The default value is false. (Since
# 5.1)
#
+# @cpu-periodic-throttle: Throttle the CPU periodically.
+#     (Since 9.1)
+#
+# @cpu-periodic-throttle-interval: Interval between periodic CPU
+#     throttling operations.  (Since 9.1)
+#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
# for establishing a TLS connection over the migration data
# channel. On the outgoing side of the migration, the credentials
@@ -1026,6 +1039,8 @@
'*cpu-throttle-initial': 'uint8',
'*cpu-throttle-increment': 'uint8',
'*cpu-throttle-tailslow': 'bool',
+ '*cpu-periodic-throttle': 'bool',
+ '*cpu-periodic-throttle-interval': 'uint8',
'*tls-creds': 'StrOrNull',
'*tls-hostname': 'StrOrNull',
'*tls-authz': 'StrOrNull',
@@ -1107,6 +1122,12 @@
# be excessive at tail stage. The default value is false. (Since
# 5.1)
#
+# @cpu-periodic-throttle: Throttle the CPU periodically.
+#     (Since 9.1)
+#
+# @cpu-periodic-throttle-interval: Interval between periodic CPU
+#     throttling operations.  (Since 9.1)
+#
# @tls-creds: ID of the 'tls-creds' object that provides credentials
# for establishing a TLS connection over the migration data
# channel. On the outgoing side of the migration, the credentials
@@ -1227,6 +1248,8 @@
'*cpu-throttle-initial': 'uint8',
'*cpu-throttle-increment': 'uint8',
'*cpu-throttle-tailslow': 'bool',
+ '*cpu-periodic-throttle': 'bool',
+ '*cpu-periodic-throttle-interval': 'uint8',
'*tls-creds': 'str',
'*tls-hostname': 'str',
'*tls-authz': 'str',
--
2.39.1
On Mon, Sep 09, 2024 at 10:25:36PM +0800, Hyman Huang wrote:
> To activate the periodic CPU throttling feature, introduce
> the cpu-periodic-throttle parameter.
>
> To control the frequency of throttling, introduce the
> cpu-periodic-throttle-interval parameter.
>
> Signed-off-by: Hyman Huang <yong.huang@smartx.com>

Considering that I would still suggest postcopy over auto-converge, IMO we
should be cautious about adding more QMP interfaces on top of auto-converge,
because that means more maintenance burden everywhere.. and it's against
our goal to provide, hopefully, one solution for the long term for
convergence issues.

Postcopy has a major issue with VFIO, but auto-converge isn't any better
in that regard.. as we can't yet throttle a device so far anyway.
Throttling of DMA probably means DMA faults, then postcopy might be doable
too. Meanwhile we're looking at working out 1G postcopy at some point.

So I wonder whether we can make any further optimizations for auto-converge
(if we still really want that..) to be at least transparent, so that they
get auto-enabled on new machine types. If we really want some knobs to
control, we can still expose them via -global migration.x-* parameters, but
then they'll all be debug tunables only; perhaps that can at least reduce
the burden on QMP maintainers and the Libvirt side.

Thanks,

--
Peter Xu
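
(As an illustration of the -global route mentioned above: the experimental
properties this patch already defines could be set on the command line
roughly as below; the interval value is only an example.)

  qemu-system-x86_64 ... \
      -global migration.x-cpu-periodic-throttle=on \
      -global migration.x-cpu-periodic-throttle-interval=5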
On Tue, Sep 10, 2024 at 5:30 AM Peter Xu <peterx@redhat.com> wrote:

> On Mon, Sep 09, 2024 at 10:25:36PM +0800, Hyman Huang wrote:
> > To activate the periodic CPU throttling feature, introduce
> > the cpu-periodic-throttle parameter.
> >
> > To control the frequency of throttling, introduce the
> > cpu-periodic-throttle-interval parameter.
> >
> > Signed-off-by: Hyman Huang <yong.huang@smartx.com>
>
> Considering that I would still suggest postcopy over auto-converge, IMO we
>

We are considering the hybrid of precopy and postcopy in fact, and I
entirely agree with what you are saying: postcopy migration is an
alternative solution to deal with migrations that refuse to converge, or
take too long to converge. But enabling this feature may not be easy in
production, since the recovery requires the upper apps to interface, and
the hugepage and spdk/dpdk scenarios also need to be considered and
re-tested.

Considering that auto-converge is the main policy in the industry, the
optimization may still make sense. We would like to try to optimize
auto-converge in the huge VM case and, IMHO, it doesn't conflict with
postcopy.

> should be cautious about adding more QMP interfaces on top of auto-converge,
> because that means more maintenance burden everywhere.. and it's against
> our goal to provide, hopefully, one solution for the long term for
> convergence issues.
>
> Postcopy has a major issue with VFIO, but auto-converge isn't any better
> in that regard.. as we can't yet throttle a device so far anyway.
> Throttling of DMA probably means DMA faults, then postcopy might be doable
> too. Meanwhile we're looking at working out 1G postcopy at some point.
>
> So I wonder whether we can make any further optimizations for auto-converge
> (if we still really want that..) to be at least transparent, so that they
>

Thanks for the advice, and of course yes.
So, at first, we'll try to avoid adding the new periodic throttle parameter
and make it transparent?

> get auto-enabled on new machine types. If we really want some knobs to
> control, we can still expose them via -global migration.x-* parameters, but
> then they'll all be debug tunables only; perhaps that can at least reduce
> the burden on QMP maintainers and the Libvirt side.
>
> Thanks,
>
> --
> Peter Xu
>

Yong

--
Best regards
On Tue, Sep 10, 2024 at 01:47:04PM +0800, Yong Huang wrote:
> On Tue, Sep 10, 2024 at 5:30 AM Peter Xu <peterx@redhat.com> wrote:
>
> > On Mon, Sep 09, 2024 at 10:25:36PM +0800, Hyman Huang wrote:
> > > To activate the periodic CPU throttling feature, introduce
> > > the cpu-periodic-throttle parameter.
> > >
> > > To control the frequency of throttling, introduce the
> > > cpu-periodic-throttle-interval parameter.
> > >
> > > Signed-off-by: Hyman Huang <yong.huang@smartx.com>
> >
> > Considering that I would still suggest postcopy over auto-converge, IMO we
>
> We are considering the hybrid of precopy and postcopy in fact, and I
> entirely agree with what you are saying: postcopy migration is an
> alternative solution to deal with migrations that refuse to converge, or
> take too long to converge. But enabling this feature may not be easy in
> production, since the recovery requires the upper apps to interface, and
> the hugepage and spdk/dpdk scenarios also need to be considered and
> re-tested.

Libvirt should support recovery, while vhost-user should also be supported
in general by both qemu/libvirt.

Huge page is indeed still the issue, though.

> Considering that auto-converge is the main policy in the industry, the
> optimization may still make sense. We would like to try to optimize
> auto-converge in the huge VM case and, IMHO, it doesn't conflict with
> postcopy.

Yeah, that's OK.

> > should be cautious about adding more QMP interfaces on top of auto-converge,
> > because that means more maintenance burden everywhere.. and it's against
> > our goal to provide, hopefully, one solution for the long term for
> > convergence issues.
> >
> > Postcopy has a major issue with VFIO, but auto-converge isn't any better
> > in that regard.. as we can't yet throttle a device so far anyway.
> > Throttling of DMA probably means DMA faults, then postcopy might be doable
> > too. Meanwhile we're looking at working out 1G postcopy at some point.
> >
> > So I wonder whether we can make any further optimizations for auto-converge
> > (if we still really want that..) to be at least transparent, so that they
>
> Thanks for the advice, and of course yes.
> So, at first, we'll try to avoid adding the new periodic throttle parameter
> and make it transparent?

That'll be my take on this, so we can keep relatively focused, hopefully for
all migration developers around QEMU in the near future. I wonder whether
this could be a good measure so that we at least try to reduce part of the
burden. I don't think it's a published rule, it's just something I thought
about when glancing at your series. So feel free to share your thoughts.

Btw, I'll not be able to read into the details in the next few days due to a
flooded inbox.. sorry for that. But I'll come back after I flush the rest.

Thanks,

--
Peter Xu