Add a helper migration_get_switchover_bw() to return an estimate of
switchover bandwidth. Use it to simplify the current code.
This will be used later to remove expected_downtime.
While at it, remove two qatomic_read() calls to shrink the lines, because atomic
ops are not needed when it is always the same thread that does the updates.
Reviewed-by: Juraj Marcin <jmarcin@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
---
migration/migration.c | 48 +++++++++++++++++++++----------------------
1 file changed, 24 insertions(+), 24 deletions(-)
diff --git a/migration/migration.c b/migration/migration.c
index 57d91ad9e3..a1a02e3a9f 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -984,6 +984,21 @@ void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value)
migrate_send_rp_message(mis, MIG_RP_MSG_RESUME_ACK, sizeof(buf), &buf);
}
+/*
+ * Returns the estimated switchover bandwidth (unit: bytes / seconds)
+ */
+static double migration_get_switchover_bw(MigrationState *s)
+{
+ uint64_t switchover_bw = migrate_avail_switchover_bandwidth();
+
+ if (switchover_bw) {
+ /* If user specified, prioritize this value and don't estimate */
+ return (double)switchover_bw;
+ }
+
+ return s->mbps / 8 * 1000 * 1000;
+}
+
bool migration_is_running(void)
{
MigrationState *s = current_migration;
@@ -3126,37 +3141,22 @@ static void migration_update_counters(MigrationState *s,
{
uint64_t transferred, transferred_pages, time_spent;
uint64_t current_bytes; /* bytes transferred since the beginning */
- uint64_t switchover_bw;
- /* Expected bandwidth when switching over to destination QEMU */
- double expected_bw_per_ms;
- double bandwidth;
+ double switchover_bw_per_ms;
if (current_time < s->iteration_start_time + BUFFER_DELAY) {
return;
}
- switchover_bw = migrate_avail_switchover_bandwidth();
current_bytes = migration_transferred_bytes();
transferred = current_bytes - s->iteration_initial_bytes;
time_spent = current_time - s->iteration_start_time;
- bandwidth = (double)transferred / time_spent;
-
- if (switchover_bw) {
- /*
- * If the user specified a switchover bandwidth, let's trust the
- * user so that can be more accurate than what we estimated.
- */
- expected_bw_per_ms = (double)switchover_bw / 1000;
- } else {
- /* If the user doesn't specify bandwidth, we use the estimated */
- expected_bw_per_ms = bandwidth;
- }
-
- s->threshold_size = expected_bw_per_ms * migrate_downtime_limit();
-
s->mbps = (((double) transferred * 8.0) /
((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
+ /* NOTE: only update this after bandwidth (s->mbps) updated */
+ switchover_bw_per_ms = migration_get_switchover_bw(s) / 1000;
+ s->threshold_size = switchover_bw_per_ms * migrate_downtime_limit();
+
transferred_pages = ram_get_total_transferred_pages() -
s->iteration_initial_pages;
s->pages_per_second = (double) transferred_pages /
@@ -3166,10 +3166,9 @@ static void migration_update_counters(MigrationState *s,
* if we haven't sent anything, we don't want to
* recalculate. 10000 is a small enough number for our purposes
*/
- if (qatomic_read(&mig_stats.dirty_pages_rate) &&
- transferred > 10000) {
+ if (mig_stats.dirty_pages_rate && transferred > 10000) {
s->expected_downtime =
- qatomic_read(&mig_stats.dirty_bytes_last_sync) / expected_bw_per_ms;
+ mig_stats.dirty_bytes_last_sync / switchover_bw_per_ms;
}
migration_rate_reset();
@@ -3178,7 +3177,8 @@ static void migration_update_counters(MigrationState *s,
trace_migrate_transferred(transferred, time_spent,
/* Both in unit bytes/ms */
- bandwidth, switchover_bw / 1000,
+            (uint64_t)s->mbps / 8 * 1000,
+ (uint64_t)switchover_bw_per_ms,
s->threshold_size);
}
--
2.53.0