[PATCH] ram: add support for dirty page tracking

Bingsong Si posted 1 patch 3 years, 9 months ago
Test FreeBSD passed
Test docker-quick@centos7 passed
Test checkpatch passed
Test docker-mingw@fedora failed
Patches applied successfully (tree, apply log)
git fetch https://github.com/patchew-project/qemu tags/patchew/20200703092853.1448582-1-owen.si@ucloud.cn
Maintainers: Juan Quintela <quintela@redhat.com>, "Dr. David Alan Gilbert" <dgilbert@redhat.com>
There is a newer version of this series
hmp-commands.hx       | 26 ++++++++++++++
include/monitor/hmp.h |  2 ++
migration/migration.c |  5 +++
migration/ram.c       | 65 +++++++++++++++++++++++++++++++++
migration/ram.h       |  5 +++
migration/savevm.c    | 83 +++++++++++++++++++++++++++++++++++++++++++
migration/savevm.h    |  2 ++
7 files changed, 188 insertions(+)
[PATCH] ram: add support for dirty page tracking
Posted by Bingsong Si 3 years, 9 months ago
In production, a VM with intensive memory activity may fail to migrate,
because the rate of memory change in the VM is greater than the throughput of
the network interface, and we want to identify this before migration.

1. Start dirty tracking:
virsh qemu-monitor-command <domain> --hmp dirty_track

2. After waiting some time, stop dirty tracking:
virsh qemu-monitor-command <domain> --hmp dirty_track_stop
Dirty rate: 607 pages/s

Signed-off-by: Bingsong Si <owen.si@ucloud.cn>
---
 hmp-commands.hx       | 26 ++++++++++++++
 include/monitor/hmp.h |  2 ++
 migration/migration.c |  5 +++
 migration/ram.c       | 65 +++++++++++++++++++++++++++++++++
 migration/ram.h       |  5 +++
 migration/savevm.c    | 83 +++++++++++++++++++++++++++++++++++++++++++
 migration/savevm.h    |  2 ++
 7 files changed, 188 insertions(+)

diff --git a/hmp-commands.hx b/hmp-commands.hx
index 60f395c276..05a688286b 100644
--- a/hmp-commands.hx
+++ b/hmp-commands.hx
@@ -1819,6 +1819,32 @@ SRST
   Set QOM property *property* of object at location *path* to value *value*
 ERST
 
+    {
+        .name       = "dirty_track",
+        .args_type  = "",
+        .params     = "",
+        .help       = "track dirty pages rate",
+        .cmd        = hmp_dirty_track,
+    },
+
+SRST
+``dirty_track``
+  Track dirty pages rate.
+ERST
+
+    {
+        .name       = "dirty_track_stop",
+        .args_type  = "",
+        .params     = "",
+        .help       = "stop current dirty pages track",
+        .cmd        = hmp_dirty_track_stop,
+    },
+
+SRST
+``dirty_track_stop``
+  Stop current dirty pages track.
+ERST
+
     {
         .name       = "info",
         .args_type  = "item:s?",
diff --git a/include/monitor/hmp.h b/include/monitor/hmp.h
index c986cfd28b..c139fe8758 100644
--- a/include/monitor/hmp.h
+++ b/include/monitor/hmp.h
@@ -130,5 +130,7 @@ void hmp_hotpluggable_cpus(Monitor *mon, const QDict *qdict);
 void hmp_info_vm_generation_id(Monitor *mon, const QDict *qdict);
 void hmp_info_memory_size_summary(Monitor *mon, const QDict *qdict);
 void hmp_info_sev(Monitor *mon, const QDict *qdict);
+void hmp_dirty_track(Monitor *mon, const QDict *qdict);
+void hmp_dirty_track_stop(Monitor *mon, const QDict *qdict);
 
 #endif
diff --git a/migration/migration.c b/migration/migration.c
index 481a590f72..5550afafe6 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1964,6 +1964,11 @@ static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc,
 {
     Error *local_err = NULL;
 
+    if (dirty_track_is_running()) {
+        error_setg(errp, "There is a dirty tracking process in progress");
+        return false;
+    }
+
     if (resume) {
         if (s->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
             error_setg(errp, "Cannot resume if there is no "
diff --git a/migration/ram.c b/migration/ram.c
index 069b6e30bc..03a5e44617 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -3780,6 +3780,71 @@ static int ram_resume_prepare(MigrationState *s, void *opaque)
     return 0;
 }
 
+void dirty_track_init(void)
+{
+    RAMBlock *block;
+
+    if (ram_bytes_total()) {
+        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
+            unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
+
+            block->bmap = bitmap_new(pages);
+            bitmap_set(block->bmap, 0, pages);
+        }
+    }
+    ram_state = g_new0(RAMState, 1);
+    ram_state->migration_dirty_pages = 0;
+    memory_global_dirty_log_start();
+}
+
+uint64_t dirty_track_dirty_pages(void)
+{
+    return ram_state->migration_dirty_pages;
+}
+
+void dirty_track_sync(void)
+{
+    RAMBlock *block = NULL;
+    unsigned long offset = 0;
+
+    memory_global_dirty_log_sync();
+    rcu_read_lock();
+    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
+       ramblock_sync_dirty_bitmap(ram_state, block);
+    }
+    rcu_read_unlock();
+
+    rcu_read_lock();
+    block = QLIST_FIRST_RCU(&ram_list.blocks);
+
+    while (block) {
+        offset = migration_bitmap_find_dirty(ram_state, block, offset);
+
+        if (offset << TARGET_PAGE_BITS >= block->used_length) {
+            offset = 0;
+            block = QLIST_NEXT_RCU(block, next);
+        } else {
+            test_and_clear_bit(offset, block->bmap);
+        }
+    }
+
+    rcu_read_unlock();
+}
+
+void dirty_track_cleanup(void)
+{
+    RAMBlock *block;
+
+    memory_global_dirty_log_stop();
+    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
+        g_free(block->bmap);
+        block->bmap = NULL;
+    }
+
+    g_free(ram_state);
+    ram_state = NULL;
+}
+
 static SaveVMHandlers savevm_ram_handlers = {
     .save_setup = ram_save_setup,
     .save_live_iterate = ram_save_iterate,
diff --git a/migration/ram.h b/migration/ram.h
index 2eeaacfa13..104c48285c 100644
--- a/migration/ram.h
+++ b/migration/ram.h
@@ -69,4 +69,9 @@ void colo_flush_ram_cache(void);
 void colo_release_ram_cache(void);
 void colo_incoming_start_dirty_log(void);
 
+void dirty_track_init(void);
+uint64_t dirty_track_dirty_pages(void);
+void dirty_track_sync(void);
+void dirty_track_cleanup(void);
+
 #endif
diff --git a/migration/savevm.c b/migration/savevm.c
index b979ea6e7f..0be70e6528 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -63,6 +63,8 @@
 #include "migration/colo.h"
 #include "qemu/bitmap.h"
 #include "net/announce.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp.h"
 
 const unsigned int postcopy_ram_discard_version = 0;
 
@@ -171,6 +173,15 @@ static QEMUFile *qemu_fopen_bdrv(BlockDriverState *bs, int is_writable)
     return qemu_fopen_ops(bs, &bdrv_read_ops);
 }
 
+static struct DirtyTrackState {
+    QemuThread thread;
+    int dirty_pages_rate;
+    bool quit;
+} current_dirty_track_state = {
+    { .thread = 0 },
+    .dirty_pages_rate = 0,
+    .quit = false,
+};
 
 /* QEMUFile timer support.
  * Not in qemu-file.c to not add qemu-timer.c as dependency to qemu-file.c
@@ -2747,6 +2758,78 @@ int save_snapshot(const char *name, Error **errp)
     return ret;
 }
 
+static void *dirty_track_thread(void *opaque)
+{
+    int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+    struct DirtyTrackState *s = opaque;
+    int64_t current_time;
+    uint64_t time_spent;
+
+    for (;;) {
+        dirty_track_sync();
+        if (s->quit) {
+            current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+            time_spent = current_time - initial_time;
+
+            if (time_spent) {
+                s->dirty_pages_rate = dirty_track_dirty_pages() * 1000 /
+                    time_spent;
+            }
+            break;
+        }
+        usleep(1000 * 100);
+    }
+    return NULL;
+}
+
+bool dirty_track_is_running(void)
+{
+    return !!current_dirty_track_state.thread.thread;
+}
+
+void hmp_dirty_track(Monitor *mon, const QDict *qdict)
+{
+    MigrationState *s = migrate_get_current();
+
+    if (migration_is_running(s->state)) {
+        error_report(QERR_MIGRATION_ACTIVE);
+        return;
+    }
+
+    if (runstate_check(RUN_STATE_INMIGRATE)) {
+        error_report("Guest is waiting for an incoming migration");
+        return;
+    }
+
+    if (dirty_track_is_running()) {
+        error_report("There is a dirty tracking process in progress");
+        return;
+    }
+
+    dirty_track_init();
+    qemu_thread_create(&current_dirty_track_state.thread, "dirty tracking",
+                       dirty_track_thread, &current_dirty_track_state,
+                       QEMU_THREAD_JOINABLE);
+}
+
+void hmp_dirty_track_stop(Monitor *mon, const QDict *qdict)
+{
+    if (current_dirty_track_state.thread.thread == 0) {
+        error_report("There is no dirty tracking process in progress");
+        return;
+    }
+
+    current_dirty_track_state.quit = true;
+    qemu_thread_join(&current_dirty_track_state.thread);
+    monitor_printf(mon, "Dirty rate: %d pages/s\n",
+                   current_dirty_track_state.dirty_pages_rate);
+
+    dirty_track_cleanup();
+    current_dirty_track_state.thread.thread = 0;
+    current_dirty_track_state.dirty_pages_rate = 0;
+    current_dirty_track_state.quit = false;
+}
+
 void qmp_xen_save_devices_state(const char *filename, bool has_live, bool live,
                                 Error **errp)
 {
diff --git a/migration/savevm.h b/migration/savevm.h
index ba64a7e271..216b9b7396 100644
--- a/migration/savevm.h
+++ b/migration/savevm.h
@@ -65,4 +65,6 @@ void qemu_loadvm_state_cleanup(void);
 int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis);
 int qemu_load_device_state(QEMUFile *f);
 
+bool dirty_track_is_running(void);
+
 #endif
-- 
2.18.4


Re: [PATCH] ram: add support for dirty page tracking
Posted by no-reply@patchew.org 3 years, 9 months ago
Patchew URL: https://patchew.org/QEMU/20200703092853.1448582-1-owen.si@ucloud.cn/



Hi,

This series failed the docker-mingw@fedora build test. Please find the testing commands and
their output below. If you have Docker installed, you can probably reproduce it
locally.

=== TEST SCRIPT BEGIN ===
#! /bin/bash
export ARCH=x86_64
make docker-image-fedora V=1 NETWORK=1
time make docker-test-mingw@fedora J=14 NETWORK=1
=== TEST SCRIPT END ===

  CC      ui/vnc-palette.o
  CC      ui/vnc-enc-zrle.o
  CC      ui/vnc-auth-vencrypt.o
/tmp/qemu-test/src/migration/savevm.c:181:8: error: 'QemuThread' {aka 'struct QemuThread'} has no member named 'thread'
  181 |     { .thread = 0 },
      |        ^~~~~~
/tmp/qemu-test/src/migration/savevm.c: In function 'dirty_track_is_running':
/tmp/qemu-test/src/migration/savevm.c:2787:46: error: 'QemuThread' {aka 'struct QemuThread'} has no member named 'thread'
 2787 |     return !!current_dirty_track_state.thread.thread;
      |                                              ^
/tmp/qemu-test/src/migration/savevm.c: In function 'hmp_dirty_track_stop':
/tmp/qemu-test/src/migration/savevm.c:2817:41: error: 'QemuThread' {aka 'struct QemuThread'} has no member named 'thread'
 2817 |     if (current_dirty_track_state.thread.thread == 0) {
      |                                         ^
/tmp/qemu-test/src/migration/savevm.c:2828:37: error: 'QemuThread' {aka 'struct QemuThread'} has no member named 'thread'
 2828 |     current_dirty_track_state.thread.thread = 0;
      |                                     ^
/tmp/qemu-test/src/migration/savevm.c: In function 'dirty_track_is_running':
/tmp/qemu-test/src/migration/savevm.c:2788:1: error: control reaches end of non-void function [-Werror=return-type]
 2788 | }
      | ^
cc1: all warnings being treated as errors
  CC      ui/vnc-ws.o
  CC      ui/vnc-jobs.o
  CC      ui/win32-kbd-hook.o
make: *** [/tmp/qemu-test/src/rules.mak:69: migration/savevm.o] Error 1
make: *** Waiting for unfinished jobs....
Traceback (most recent call last):
  File "./tests/docker/docker.py", line 669, in <module>
---
    raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['sudo', '-n', 'docker', 'run', '--label', 'com.qemu.instance.uuid=5c30b49d565c47b9a718072b03d4d878', '-u', '1001', '--security-opt', 'seccomp=unconfined', '--rm', '-e', 'TARGET_LIST=', '-e', 'EXTRA_CONFIGURE_OPTS=', '-e', 'V=', '-e', 'J=14', '-e', 'DEBUG=', '-e', 'SHOW_ENV=', '-e', 'CCACHE_DIR=/var/tmp/ccache', '-v', '/home/patchew/.cache/qemu-docker-ccache:/var/tmp/ccache:z', '-v', '/var/tmp/patchew-tester-tmp-g0jzomto/src/docker-src.2020-07-03-06.27.44.29989:/var/tmp/qemu:z,ro', 'qemu:fedora', '/var/tmp/qemu/run', 'test-mingw']' returned non-zero exit status 2.
filter=--filter=label=com.qemu.instance.uuid=5c30b49d565c47b9a718072b03d4d878
make[1]: *** [docker-run] Error 1
make[1]: Leaving directory `/var/tmp/patchew-tester-tmp-g0jzomto/src'
make: *** [docker-run-test-mingw@fedora] Error 2

real    2m48.392s
user    0m8.073s


The full log is available at
http://patchew.org/logs/20200703092853.1448582-1-owen.si@ucloud.cn/testing.docker-mingw@fedora/?type=message.
---
Email generated automatically by Patchew [https://patchew.org/].
Please send your feedback to patchew-devel@redhat.com