We can migrate some dirty pages during the gaps between checkpoints;
this way, we can reduce the amount of RAM migrated during checkpointing.
Signed-off-by: Hailiang Zhang <zhang.zhanghailiang@huawei.com>
---
migration/colo.c | 69 +++++++++++++++++++++++++++++++++++++++---
migration/migration.h | 1 +
migration/trace-events | 1 +
qapi/migration.json | 4 ++-
4 files changed, 70 insertions(+), 5 deletions(-)
diff --git a/migration/colo.c b/migration/colo.c
index 93c5a452fb..d30c6bc4ad 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -46,6 +46,13 @@ static COLOMode last_colo_mode;
#define COLO_BUFFER_BASE_SIZE (4 * 1024 * 1024)
+#define DEFAULT_RAM_PENDING_CHECK 1000
+
+/* should be calculated by bandwidth and max downtime ? */
+#define THRESHOLD_PENDING_SIZE (100 * 1024 * 1024UL)
+
+static int checkpoint_request;
+
bool migration_in_colo_state(void)
{
MigrationState *s = migrate_get_current();
@@ -516,6 +523,20 @@ static void colo_compare_notify_checkpoint(Notifier *notifier, void *data)
colo_checkpoint_notify(data);
}
+static bool colo_need_migrate_ram_background(MigrationState *s)
+{
+ uint64_t pending_size, pend_pre, pend_compat, pend_post;
+ int64_t max_size = THRESHOLD_PENDING_SIZE;
+
+ qemu_savevm_state_pending(s->to_dst_file, max_size, &pend_pre,
+ &pend_compat, &pend_post);
+ pending_size = pend_pre + pend_compat + pend_post;
+
+ trace_colo_need_migrate_ram_background(pending_size);
+ return (pending_size >= max_size);
+}
+
+
static void colo_process_checkpoint(MigrationState *s)
{
QIOChannelBuffer *bioc;
@@ -571,6 +592,8 @@ static void colo_process_checkpoint(MigrationState *s)
timer_mod(s->colo_delay_timer,
current_time + s->parameters.x_checkpoint_delay);
+ timer_mod(s->pending_ram_check_timer,
+ current_time + DEFAULT_RAM_PENDING_CHECK);
while (s->state == MIGRATION_STATUS_COLO) {
if (failover_get_state() != FAILOVER_STATUS_NONE) {
@@ -583,10 +606,25 @@ static void colo_process_checkpoint(MigrationState *s)
if (s->state != MIGRATION_STATUS_COLO) {
goto out;
}
- ret = colo_do_checkpoint_transaction(s, bioc, fb);
- if (ret < 0) {
- goto out;
- }
+ if (atomic_xchg(&checkpoint_request, 0)) {
+ /* start a colo checkpoint */
+ ret = colo_do_checkpoint_transaction(s, bioc, fb);
+ if (ret < 0) {
+ goto out;
+ }
+ } else {
+ if (colo_need_migrate_ram_background(s)) {
+ colo_send_message(s->to_dst_file,
+ COLO_MESSAGE_MIGRATE_RAM_BACKGROUND,
+ &local_err);
+ if (local_err) {
+ goto out;
+ }
+
+ qemu_savevm_state_iterate(s->to_dst_file, false);
+ qemu_put_byte(s->to_dst_file, QEMU_VM_EOF);
+ }
+ }
}
out:
@@ -626,6 +664,8 @@ out:
colo_compare_unregister_notifier(&packets_compare_notifier);
timer_del(s->colo_delay_timer);
timer_free(s->colo_delay_timer);
+ timer_del(s->pending_ram_check_timer);
+ timer_free(s->pending_ram_check_timer);
qemu_sem_destroy(&s->colo_checkpoint_sem);
/*
@@ -643,6 +683,7 @@ void colo_checkpoint_notify(void *opaque)
MigrationState *s = opaque;
int64_t next_notify_time;
+ atomic_inc(&checkpoint_request);
qemu_sem_post(&s->colo_checkpoint_sem);
s->colo_checkpoint_time = qemu_clock_get_ms(QEMU_CLOCK_HOST);
next_notify_time = s->colo_checkpoint_time +
@@ -650,6 +691,19 @@ void colo_checkpoint_notify(void *opaque)
timer_mod(s->colo_delay_timer, next_notify_time);
}
+static void colo_pending_ram_check_notify(void *opaque)
+{
+ int64_t next_notify_time;
+ MigrationState *s = opaque;
+
+ if (migration_in_colo_state()) {
+ next_notify_time = DEFAULT_RAM_PENDING_CHECK +
+ qemu_clock_get_ms(QEMU_CLOCK_HOST);
+ timer_mod(s->pending_ram_check_timer, next_notify_time);
+ qemu_sem_post(&s->colo_checkpoint_sem);
+ }
+}
+
void migrate_start_colo_process(MigrationState *s)
{
qemu_mutex_unlock_iothread();
@@ -657,6 +711,8 @@ void migrate_start_colo_process(MigrationState *s)
s->colo_delay_timer = timer_new_ms(QEMU_CLOCK_HOST,
colo_checkpoint_notify, s);
+ s->pending_ram_check_timer = timer_new_ms(QEMU_CLOCK_HOST,
+ colo_pending_ram_check_notify, s);
qemu_sem_init(&s->colo_exit_sem, 0);
migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
MIGRATION_STATUS_COLO);
@@ -805,6 +861,11 @@ static void colo_wait_handle_message(MigrationIncomingState *mis,
case COLO_MESSAGE_CHECKPOINT_REQUEST:
colo_incoming_process_checkpoint(mis, fb, bioc, errp);
break;
+ case COLO_MESSAGE_MIGRATE_RAM_BACKGROUND:
+ if (qemu_loadvm_state_main(mis->from_src_file, mis) < 0) {
+ error_setg(errp, "Load ram background failed");
+ }
+ break;
default:
error_setg(errp, "Got unknown COLO message: %d", msg);
break;
diff --git a/migration/migration.h b/migration/migration.h
index 8473ddfc88..5355259789 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -219,6 +219,7 @@ struct MigrationState
QemuSemaphore colo_checkpoint_sem;
int64_t colo_checkpoint_time;
QEMUTimer *colo_delay_timer;
+ QEMUTimer *pending_ram_check_timer;
/* The first error that has occurred.
We used the mutex to be able to return the 1st error message */
diff --git a/migration/trace-events b/migration/trace-events
index 4ab0a503d2..f2ed0c8645 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -295,6 +295,7 @@ migration_tls_incoming_handshake_complete(void) ""
colo_vm_state_change(const char *old, const char *new) "Change '%s' => '%s'"
colo_send_message(const char *msg) "Send '%s' message"
colo_receive_message(const char *msg) "Receive '%s' message"
+colo_need_migrate_ram_background(uint64_t pending_size) "Pending 0x%" PRIx64 " dirty ram"
# colo-failover.c
colo_failover_set_state(const char *new_state) "new state %s"
diff --git a/qapi/migration.json b/qapi/migration.json
index b7348d0c8b..ff7a4f18b0 100644
--- a/qapi/migration.json
+++ b/qapi/migration.json
@@ -977,12 +977,14 @@
#
# @vmstate-loaded: VM's state has been loaded by SVM.
#
+# @migrate-ram-background: Send some dirty pages during the gap of COLO checkpoint
+#
# Since: 2.8
##
{ 'enum': 'COLOMessage',
'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply',
'vmstate-send', 'vmstate-size', 'vmstate-received',
- 'vmstate-loaded' ] }
+ 'vmstate-loaded', 'migrate-ram-background' ] }
##
# @COLOMode:
--
2.21.0
* Hailiang Zhang (zhang.zhanghailiang@huawei.com) wrote:
> We can migrate some dirty pages during the gaps between checkpoints;
> this way, we can reduce the amount of RAM migrated during checkpointing.
>
> Signed-off-by: Hailiang Zhang <zhang.zhanghailiang@huawei.com>
> ---
> migration/colo.c | 69 +++++++++++++++++++++++++++++++++++++++---
> migration/migration.h | 1 +
> migration/trace-events | 1 +
> qapi/migration.json | 4 ++-
> 4 files changed, 70 insertions(+), 5 deletions(-)
>
> diff --git a/migration/colo.c b/migration/colo.c
> index 93c5a452fb..d30c6bc4ad 100644
> --- a/migration/colo.c
> +++ b/migration/colo.c
> @@ -46,6 +46,13 @@ static COLOMode last_colo_mode;
>
> #define COLO_BUFFER_BASE_SIZE (4 * 1024 * 1024)
>
> +#define DEFAULT_RAM_PENDING_CHECK 1000
> +
> +/* should be calculated by bandwidth and max downtime ? */
> +#define THRESHOLD_PENDING_SIZE (100 * 1024 * 1024UL)
Turn both of these magic constants into parameters.
> +static int checkpoint_request;
> +
> bool migration_in_colo_state(void)
> {
> MigrationState *s = migrate_get_current();
> @@ -516,6 +523,20 @@ static void colo_compare_notify_checkpoint(Notifier *notifier, void *data)
> colo_checkpoint_notify(data);
> }
>
> +static bool colo_need_migrate_ram_background(MigrationState *s)
> +{
> + uint64_t pending_size, pend_pre, pend_compat, pend_post;
> + int64_t max_size = THRESHOLD_PENDING_SIZE;
> +
> + qemu_savevm_state_pending(s->to_dst_file, max_size, &pend_pre,
> + &pend_compat, &pend_post);
> + pending_size = pend_pre + pend_compat + pend_post;
> +
> + trace_colo_need_migrate_ram_background(pending_size);
> + return (pending_size >= max_size);
> +}
> +
> +
> static void colo_process_checkpoint(MigrationState *s)
> {
> QIOChannelBuffer *bioc;
> @@ -571,6 +592,8 @@ static void colo_process_checkpoint(MigrationState *s)
>
> timer_mod(s->colo_delay_timer,
> current_time + s->parameters.x_checkpoint_delay);
> + timer_mod(s->pending_ram_check_timer,
> + current_time + DEFAULT_RAM_PENDING_CHECK);
What happens if the iterate takes a while and this triggers in the
middle of the iterate?
> while (s->state == MIGRATION_STATUS_COLO) {
> if (failover_get_state() != FAILOVER_STATUS_NONE) {
> @@ -583,10 +606,25 @@ static void colo_process_checkpoint(MigrationState *s)
> if (s->state != MIGRATION_STATUS_COLO) {
> goto out;
> }
> - ret = colo_do_checkpoint_transaction(s, bioc, fb);
> - if (ret < 0) {
> - goto out;
> - }
> + if (atomic_xchg(&checkpoint_request, 0)) {
> + /* start a colo checkpoint */
> + ret = colo_do_checkpoint_transaction(s, bioc, fb);
> + if (ret < 0) {
> + goto out;
> + }
> + } else {
> + if (colo_need_migrate_ram_background(s)) {
> + colo_send_message(s->to_dst_file,
> + COLO_MESSAGE_MIGRATE_RAM_BACKGROUND,
> + &local_err);
> + if (local_err) {
> + goto out;
> + }
> +
> + qemu_savevm_state_iterate(s->to_dst_file, false);
> + qemu_put_byte(s->to_dst_file, QEMU_VM_EOF);
Maybe you should do a qemu_file_get_error(..) at this point to check
it's OK.
> + }
> + }
> }
>
> out:
> @@ -626,6 +664,8 @@ out:
> colo_compare_unregister_notifier(&packets_compare_notifier);
> timer_del(s->colo_delay_timer);
> timer_free(s->colo_delay_timer);
> + timer_del(s->pending_ram_check_timer);
> + timer_free(s->pending_ram_check_timer);
> qemu_sem_destroy(&s->colo_checkpoint_sem);
>
> /*
> @@ -643,6 +683,7 @@ void colo_checkpoint_notify(void *opaque)
> MigrationState *s = opaque;
> int64_t next_notify_time;
>
> + atomic_inc(&checkpoint_request);
Can you explain what you've changed about this atomic in this patch,
I don't quite see what you're doing.
> qemu_sem_post(&s->colo_checkpoint_sem);
> s->colo_checkpoint_time = qemu_clock_get_ms(QEMU_CLOCK_HOST);
> next_notify_time = s->colo_checkpoint_time +
> @@ -650,6 +691,19 @@ void colo_checkpoint_notify(void *opaque)
> timer_mod(s->colo_delay_timer, next_notify_time);
> }
>
> +static void colo_pending_ram_check_notify(void *opaque)
> +{
> + int64_t next_notify_time;
> + MigrationState *s = opaque;
> +
> + if (migration_in_colo_state()) {
> + next_notify_time = DEFAULT_RAM_PENDING_CHECK +
> + qemu_clock_get_ms(QEMU_CLOCK_HOST);
> + timer_mod(s->pending_ram_check_timer, next_notify_time);
> + qemu_sem_post(&s->colo_checkpoint_sem);
> + }
> +}
> +
> void migrate_start_colo_process(MigrationState *s)
> {
> qemu_mutex_unlock_iothread();
> @@ -657,6 +711,8 @@ void migrate_start_colo_process(MigrationState *s)
> s->colo_delay_timer = timer_new_ms(QEMU_CLOCK_HOST,
> colo_checkpoint_notify, s);
>
> + s->pending_ram_check_timer = timer_new_ms(QEMU_CLOCK_HOST,
> + colo_pending_ram_check_notify, s);
> qemu_sem_init(&s->colo_exit_sem, 0);
> migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
> MIGRATION_STATUS_COLO);
> @@ -805,6 +861,11 @@ static void colo_wait_handle_message(MigrationIncomingState *mis,
> case COLO_MESSAGE_CHECKPOINT_REQUEST:
> colo_incoming_process_checkpoint(mis, fb, bioc, errp);
> break;
> + case COLO_MESSAGE_MIGRATE_RAM_BACKGROUND:
> + if (qemu_loadvm_state_main(mis->from_src_file, mis) < 0) {
> + error_setg(errp, "Load ram background failed");
> + }
OK, that's a little dangerous, in that it relies on the source being good
and only sending iterative data; stuff would break badly if it actually
sent you devices.
> + break;
> default:
> error_setg(errp, "Got unknown COLO message: %d", msg);
> break;
> diff --git a/migration/migration.h b/migration/migration.h
> index 8473ddfc88..5355259789 100644
> --- a/migration/migration.h
> +++ b/migration/migration.h
> @@ -219,6 +219,7 @@ struct MigrationState
> QemuSemaphore colo_checkpoint_sem;
> int64_t colo_checkpoint_time;
> QEMUTimer *colo_delay_timer;
> + QEMUTimer *pending_ram_check_timer;
>
> /* The first error that has occurred.
> We used the mutex to be able to return the 1st error message */
> diff --git a/migration/trace-events b/migration/trace-events
> index 4ab0a503d2..f2ed0c8645 100644
> --- a/migration/trace-events
> +++ b/migration/trace-events
> @@ -295,6 +295,7 @@ migration_tls_incoming_handshake_complete(void) ""
> colo_vm_state_change(const char *old, const char *new) "Change '%s' => '%s'"
> colo_send_message(const char *msg) "Send '%s' message"
> colo_receive_message(const char *msg) "Receive '%s' message"
> +colo_need_migrate_ram_background(uint64_t pending_size) "Pending 0x%" PRIx64 " dirty ram"
>
> # colo-failover.c
> colo_failover_set_state(const char *new_state) "new state %s"
> diff --git a/qapi/migration.json b/qapi/migration.json
> index b7348d0c8b..ff7a4f18b0 100644
> --- a/qapi/migration.json
> +++ b/qapi/migration.json
> @@ -977,12 +977,14 @@
> #
> # @vmstate-loaded: VM's state has been loaded by SVM.
> #
> +# @migrate-ram-background: Send some dirty pages during the gap of COLO checkpoint
> +#
> # Since: 2.8
> ##
> { 'enum': 'COLOMessage',
> 'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply',
> 'vmstate-send', 'vmstate-size', 'vmstate-received',
> - 'vmstate-loaded' ] }
> + 'vmstate-loaded', 'migrate-ram-background' ] }
>
> ##
> # @COLOMode:
> --
> 2.21.0
>
>
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
> -----Original Message-----
> From: Dr. David Alan Gilbert [mailto:dgilbert@redhat.com]
> Sent: Thursday, February 20, 2020 2:51 AM
> To: Zhanghailiang <zhang.zhanghailiang@huawei.com>
> Cc: qemu-devel@nongnu.org; quintela@redhat.com; chen.zhang@intel.com;
> danielcho@qnap.com
> Subject: Re: [PATCH 2/3] COLO: Migrate dirty pages during the gap of checkpointing
>
> * Hailiang Zhang (zhang.zhanghailiang@huawei.com) wrote:
> > We can migrate some dirty pages during the gaps between checkpoints;
> > this way, we can reduce the amount of RAM migrated during checkpointing.
> >
> > Signed-off-by: Hailiang Zhang <zhang.zhanghailiang@huawei.com>
> > ---
> > migration/colo.c | 69 +++++++++++++++++++++++++++++++++++++++---
> > migration/migration.h | 1 +
> > migration/trace-events | 1 +
> > qapi/migration.json | 4 ++-
> > 4 files changed, 70 insertions(+), 5 deletions(-)
> >
> > diff --git a/migration/colo.c b/migration/colo.c
> > index 93c5a452fb..d30c6bc4ad 100644
> > --- a/migration/colo.c
> > +++ b/migration/colo.c
> > @@ -46,6 +46,13 @@ static COLOMode last_colo_mode;
> >
> > #define COLO_BUFFER_BASE_SIZE (4 * 1024 * 1024)
> >
> > +#define DEFAULT_RAM_PENDING_CHECK 1000
> > +
> > +/* should be calculated by bandwidth and max downtime ? */
> > +#define THRESHOLD_PENDING_SIZE (100 * 1024 * 1024UL)
>
> Turn both of these magic constants into parameters.
>
Good idea, will do this in later patches.
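
For reference, a rough sketch of what that could look like once wired
through MigrationParameters (the x-colo-* field names below are
hypothetical, not something this patch adds):

static inline uint64_t colo_pending_threshold(MigrationState *s)
{
    /* hypothetical parameter; fall back to today's constant */
    return s->parameters.x_colo_pending_threshold ?:
           THRESHOLD_PENDING_SIZE;
}

and likewise for the check interval:

    timer_mod(s->pending_ram_check_timer,
              current_time + s->parameters.x_colo_ram_pending_check);
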
> > +static int checkpoint_request;
> > +
> > bool migration_in_colo_state(void)
> > {
> > MigrationState *s = migrate_get_current();
> > @@ -516,6 +523,20 @@ static void colo_compare_notify_checkpoint(Notifier *notifier, void *data)
> > colo_checkpoint_notify(data);
> > }
> >
> > +static bool colo_need_migrate_ram_background(MigrationState *s)
> > +{
> > + uint64_t pending_size, pend_pre, pend_compat, pend_post;
> > + int64_t max_size = THRESHOLD_PENDING_SIZE;
> > +
> > + qemu_savevm_state_pending(s->to_dst_file, max_size, &pend_pre,
> > + &pend_compat, &pend_post);
> > + pending_size = pend_pre + pend_compat + pend_post;
> > +
> > + trace_colo_need_migrate_ram_background(pending_size);
> > + return (pending_size >= max_size);
> > +}
> > +
> > +
> > static void colo_process_checkpoint(MigrationState *s)
> > {
> > QIOChannelBuffer *bioc;
> > @@ -571,6 +592,8 @@ static void colo_process_checkpoint(MigrationState *s)
> >
> > timer_mod(s->colo_delay_timer,
> > current_time + s->parameters.x_checkpoint_delay);
> > + timer_mod(s->pending_ram_check_timer,
> > + current_time + DEFAULT_RAM_PENDING_CHECK);
>
> What happens if the iterate takes a while and this triggers in the
> middle of the iterate?
>
It will trigger another iteration after this one has finished. The timer
handler only posts colo_checkpoint_sem, so a timeout that fires in the
middle of an iterate simply means the main loop runs one more background
pass once the current call returns.
> > while (s->state == MIGRATION_STATUS_COLO) {
> > if (failover_get_state() != FAILOVER_STATUS_NONE) {
> > @@ -583,10 +606,25 @@ static void colo_process_checkpoint(MigrationState *s)
> > if (s->state != MIGRATION_STATUS_COLO) {
> > goto out;
> > }
> > - ret = colo_do_checkpoint_transaction(s, bioc, fb);
> > - if (ret < 0) {
> > - goto out;
> > - }
> > + if (atomic_xchg(&checkpoint_request, 0)) {
> > + /* start a colo checkpoint */
> > + ret = colo_do_checkpoint_transaction(s, bioc, fb);
> > + if (ret < 0) {
> > + goto out;
> > + }
> > + } else {
> > + if (colo_need_migrate_ram_background(s)) {
> > + colo_send_message(s->to_dst_file,
> > + COLO_MESSAGE_MIGRATE_RAM_BACKGROUND,
> > + &local_err);
> > + if (local_err) {
> > + goto out;
> > + }
> > +
> > + qemu_savevm_state_iterate(s->to_dst_file, false);
> > + qemu_put_byte(s->to_dst_file, QEMU_VM_EOF);
>
> Maybe you should do a qemu_file_get_error(..) at this point to check
> it's OK.
Agreed, we should check it.
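
Something like the following, I think (just a sketch; the error message
wording is made up):

    qemu_savevm_state_iterate(s->to_dst_file, false);
    qemu_put_byte(s->to_dst_file, QEMU_VM_EOF);
    ret = qemu_file_get_error(s->to_dst_file);
    if (ret < 0) {
        error_report("COLO: background ram migration stream error %d", ret);
        goto out;
    }
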
>
> > + }
> > + }
> > }
> >
> > out:
> > @@ -626,6 +664,8 @@ out:
> > colo_compare_unregister_notifier(&packets_compare_notifier);
> > timer_del(s->colo_delay_timer);
> > timer_free(s->colo_delay_timer);
> > + timer_del(s->pending_ram_check_timer);
> > + timer_free(s->pending_ram_check_timer);
> > qemu_sem_destroy(&s->colo_checkpoint_sem);
> >
> > /*
> > @@ -643,6 +683,7 @@ void colo_checkpoint_notify(void *opaque)
> > MigrationState *s = opaque;
> > int64_t next_notify_time;
> >
> > + atomic_inc(&checkpoint_request);
>
> Can you explain what you've changed about this atomic in this patch,
> I don't quite see what you're doing.
>
We use this to check what woke it from waiting on colo_checkpoint_sem:
a background migration request or a checkpoint request.
If the value is zero, it was woken by a background migration request;
otherwise, it was woken by a checkpoint request.
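
In other words, the semaphore only says "wake up" and the atomic carries
the reason, roughly (paraphrasing the loop in this patch):

    qemu_sem_wait(&s->colo_checkpoint_sem);   /* posted by either timer */
    if (atomic_xchg(&checkpoint_request, 0)) {
        /* colo_checkpoint_notify() bumped it: do a full checkpoint */
    } else {
        /* only colo_pending_ram_check_notify() posted: background pass */
    }
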
> > qemu_sem_post(&s->colo_checkpoint_sem);
> > s->colo_checkpoint_time = qemu_clock_get_ms(QEMU_CLOCK_HOST);
> > next_notify_time = s->colo_checkpoint_time +
> > @@ -650,6 +691,19 @@ void colo_checkpoint_notify(void *opaque)
> > timer_mod(s->colo_delay_timer, next_notify_time);
> > }
> >
> > +static void colo_pending_ram_check_notify(void *opaque)
> > +{
> > + int64_t next_notify_time;
> > + MigrationState *s = opaque;
> > +
> > + if (migration_in_colo_state()) {
> > + next_notify_time = DEFAULT_RAM_PENDING_CHECK +
> > + qemu_clock_get_ms(QEMU_CLOCK_HOST);
> > + timer_mod(s->pending_ram_check_timer, next_notify_time);
> > + qemu_sem_post(&s->colo_checkpoint_sem);
> > + }
> > +}
> > +
> > void migrate_start_colo_process(MigrationState *s)
> > {
> > qemu_mutex_unlock_iothread();
> > @@ -657,6 +711,8 @@ void migrate_start_colo_process(MigrationState *s)
> > s->colo_delay_timer = timer_new_ms(QEMU_CLOCK_HOST,
> > colo_checkpoint_notify, s);
> >
> > + s->pending_ram_check_timer = timer_new_ms(QEMU_CLOCK_HOST,
> > + colo_pending_ram_check_notify, s);
> > qemu_sem_init(&s->colo_exit_sem, 0);
> > migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
> > MIGRATION_STATUS_COLO);
> > @@ -805,6 +861,11 @@ static void colo_wait_handle_message(MigrationIncomingState *mis,
> > case COLO_MESSAGE_CHECKPOINT_REQUEST:
> > colo_incoming_process_checkpoint(mis, fb, bioc, errp);
> > break;
> > + case COLO_MESSAGE_MIGRATE_RAM_BACKGROUND:
> > + if (qemu_loadvm_state_main(mis->from_src_file, mis) < 0) {
> > + error_setg(errp, "Load ram background failed");
> > + }
>
> OK, that's a little dangerous, in that it relies on the source being good
> and only sending iterative data; stuff would break badly if it actually
> sent you devices.
>
You are right, we should check it; besides, we should have a timeout
mechanism to avoid getting stuck here.
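
One possible shape for such a guard on the destination side; this is only
a sketch (qemu_loadvm_section_part_end() is static in savevm.c today, so
it would need exporting, and the timeout part is still missing):

static int colo_load_ram_background(QEMUFile *f, MigrationIncomingState *mis)
{
    uint8_t section_type;

    /* Only iterative (SECTION_PART/END) data is allowed in the
     * background stream; anything else means the source misbehaved. */
    while ((section_type = qemu_get_byte(f)) != QEMU_VM_EOF) {
        if (section_type != QEMU_VM_SECTION_PART &&
            section_type != QEMU_VM_SECTION_END) {
            return -EINVAL;
        }
        if (qemu_loadvm_section_part_end(f, mis) < 0) {
            return -EINVAL;
        }
    }
    return 0;
}
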
> > + break;
> > default:
> > error_setg(errp, "Got unknown COLO message: %d", msg);
> > break;
> > diff --git a/migration/migration.h b/migration/migration.h
> > index 8473ddfc88..5355259789 100644
> > --- a/migration/migration.h
> > +++ b/migration/migration.h
> > @@ -219,6 +219,7 @@ struct MigrationState
> > QemuSemaphore colo_checkpoint_sem;
> > int64_t colo_checkpoint_time;
> > QEMUTimer *colo_delay_timer;
> > + QEMUTimer *pending_ram_check_timer;
> >
> > /* The first error that has occurred.
> > We used the mutex to be able to return the 1st error message */
> > diff --git a/migration/trace-events b/migration/trace-events
> > index 4ab0a503d2..f2ed0c8645 100644
> > --- a/migration/trace-events
> > +++ b/migration/trace-events
> > @@ -295,6 +295,7 @@ migration_tls_incoming_handshake_complete(void) ""
> > colo_vm_state_change(const char *old, const char *new) "Change '%s' => '%s'"
> > colo_send_message(const char *msg) "Send '%s' message"
> > colo_receive_message(const char *msg) "Receive '%s' message"
> > +colo_need_migrate_ram_background(uint64_t pending_size) "Pending 0x%" PRIx64 " dirty ram"
> >
> > # colo-failover.c
> > colo_failover_set_state(const char *new_state) "new state %s"
> > diff --git a/qapi/migration.json b/qapi/migration.json
> > index b7348d0c8b..ff7a4f18b0 100644
> > --- a/qapi/migration.json
> > +++ b/qapi/migration.json
> > @@ -977,12 +977,14 @@
> > #
> > # @vmstate-loaded: VM's state has been loaded by SVM.
> > #
> > +# @migrate-ram-background: Send some dirty pages during the gap of COLO checkpoint
> > +#
> > # Since: 2.8
> > ##
> > { 'enum': 'COLOMessage',
> > 'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply',
> > 'vmstate-send', 'vmstate-size', 'vmstate-received',
> > - 'vmstate-loaded' ] }
> > + 'vmstate-loaded', 'migrate-ram-background' ] }
> >
> > ##
> > # @COLOMode:
> > --
> > 2.21.0
> >
> >
> --
> Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
On 2/16/20 7:20 PM, Hailiang Zhang wrote:
> We can migrate some dirty pages during the gaps between checkpoints;
> this way, we can reduce the amount of RAM migrated during checkpointing.
>
> Signed-off-by: Hailiang Zhang <zhang.zhanghailiang@huawei.com>
> ---
> +++ b/qapi/migration.json
> @@ -977,12 +977,14 @@
> #
> # @vmstate-loaded: VM's state has been loaded by SVM.
> #
> +# @migrate-ram-background: Send some dirty pages during the gap of COLO checkpoint
> +#
Missing a '(since 5.0)' marker.
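i.e. something like:

+# @migrate-ram-background: Send some dirty pages during the gap of COLO checkpoint
+#                          (since 5.0)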
> # Since: 2.8
> ##
> { 'enum': 'COLOMessage',
> 'data': [ 'checkpoint-ready', 'checkpoint-request', 'checkpoint-reply',
> 'vmstate-send', 'vmstate-size', 'vmstate-received',
> - 'vmstate-loaded' ] }
> + 'vmstate-loaded', 'migrate-ram-background' ] }
>
> ##
> # @COLOMode:
>
--
Eric Blake, Principal Software Engineer
Red Hat, Inc. +1-919-301-3226
Virtualization: qemu.org | libvirt.org