[Qemu-devel] [PATCH] ram: Split dirty bitmap by RAMBlock

Juan Quintela posted 1 patch 6 years, 11 months ago
Patches applied successfully
git fetch https://github.com/patchew-project/qemu tags/patchew/20170426073247.7441-2-quintela@redhat.com
Test checkpatch failed
Test docker passed
Test s390x passed
[Qemu-devel] [PATCH] ram: Split dirty bitmap by RAMBlock
Posted by Juan Quintela 6 years, 11 months ago
Both the ram bitmap and the unsent bitmap are split by RAMBlock.
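
In practice this means every bitmap access becomes block-relative: instead of
indexing one global bitmap by the page's position in the whole of guest RAM,
each RAMBlock carries its own bitmap indexed by the page offset within that
block. A minimal before/after sketch of the lookup, using names from the
patch (illustrative only, not part of the diff):

    /* Before: one global bitmap covering all of guest RAM */
    unsigned long nr = (rb->offset >> TARGET_PAGE_BITS) + page;
    dirty = test_bit(nr, atomic_rcu_read(&rs->ram_bitmap)->bmap);

    /* After: one bitmap per RAMBlock, index relative to the block */
    dirty = test_bit(page, rb->bmap);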

Signed-off-by: Juan Quintela <quintela@redhat.com>

--

Fix compilation when DEBUG_POSTCOPY is enabled (thanks Hailiang)

Signed-off-by: Juan Quintela <quintela@redhat.com>
---
 include/exec/ram_addr.h          |  13 +-
 include/migration/migration.h    |   3 +-
 include/migration/postcopy-ram.h |   3 -
 migration/postcopy-ram.c         |   5 +-
 migration/ram.c                  | 273 +++++++++++++++------------------------
 5 files changed, 119 insertions(+), 178 deletions(-)

diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 6436a41..c56b35b 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -39,6 +39,14 @@ struct RAMBlock {
     QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
     int fd;
     size_t page_size;
+    /* dirty bitmap used during migration */
+    unsigned long *bmap;
+    /* bitmap of pages that have not been sent even once;
+     * currently only maintained and used in postcopy,
+     * where it is used to send the dirty bitmap at the
+     * start of the postcopy phase
+     */
+    unsigned long *unsentmap;
 };
 
 static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
@@ -360,16 +368,15 @@ static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
 
 
 static inline
-uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
-                                               RAMBlock *rb,
+uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
                                                ram_addr_t start,
                                                ram_addr_t length,
                                                uint64_t *real_dirty_pages)
 {
     ram_addr_t addr;
-    start = rb->offset + start;
     unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
     uint64_t num_dirty = 0;
+    unsigned long *dest = rb->bmap;
 
     /* start address is aligned at the start of a word? */
     if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
diff --git a/include/migration/migration.h b/include/migration/migration.h
index ba1a16c..e29cb01 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -266,7 +266,8 @@ uint64_t xbzrle_mig_pages_cache_miss(void);
 double xbzrle_mig_cache_miss_rate(void);
 
 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size);
-void ram_debug_dump_bitmap(unsigned long *todump, bool expected);
+void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
+                           unsigned long pages);
 /* For outgoing discard bitmap */
 int ram_postcopy_send_discard_bitmap(MigrationState *ms);
 /* For incoming postcopy discard */
diff --git a/include/migration/postcopy-ram.h b/include/migration/postcopy-ram.h
index 8e036b9..4c25f03 100644
--- a/include/migration/postcopy-ram.h
+++ b/include/migration/postcopy-ram.h
@@ -43,12 +43,9 @@ int postcopy_ram_prepare_discard(MigrationIncomingState *mis);
 
 /*
  * Called at the start of each RAMBlock by the bitmap code.
- * 'offset' is the bitmap offset of the named RAMBlock in the migration
- * bitmap.
  * Returns a new PDS
  */
 PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
-                                                 unsigned long offset,
                                                  const char *name);
 
 /*
diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c
index 85fd8d7..e3f4a37 100644
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -33,7 +33,6 @@
 
 struct PostcopyDiscardState {
     const char *ramblock_name;
-    uint64_t offset; /* Bitmap entry for the 1st bit of this RAMBlock */
     uint16_t cur_entry;
     /*
      * Start and length of a discard range (bytes)
@@ -717,14 +716,12 @@ void *postcopy_get_tmp_page(MigrationIncomingState *mis)
  * returns: a new PDS.
  */
 PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
-                                                 unsigned long offset,
                                                  const char *name)
 {
     PostcopyDiscardState *res = g_malloc0(sizeof(PostcopyDiscardState));
 
     if (res) {
         res->ramblock_name = name;
-        res->offset = offset;
     }
 
     return res;
@@ -745,7 +742,7 @@ void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
 {
     size_t tp_size = qemu_target_page_size();
     /* Convert to byte offsets within the RAM block */
-    pds->start_list[pds->cur_entry] = (start - pds->offset) * tp_size;
+    pds->start_list[pds->cur_entry] = start * tp_size;
     pds->length_list[pds->cur_entry] = length * tp_size;
     trace_postcopy_discard_send_range(pds->ramblock_name, start, length);
     pds->cur_entry++;
diff --git a/migration/ram.c b/migration/ram.c
index f48664e..235f400 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -138,19 +138,6 @@ out:
     return ret;
 }
 
-struct RAMBitmap {
-    struct rcu_head rcu;
-    /* Main migration bitmap */
-    unsigned long *bmap;
-    /* bitmap of pages that haven't been sent even once
-     * only maintained and used in postcopy at the moment
-     * where it's used to send the dirtymap at the start
-     * of the postcopy phase
-     */
-    unsigned long *unsentmap;
-};
-typedef struct RAMBitmap RAMBitmap;
-
 /*
  * An outstanding page request, on the source, having been received
  * and queued
@@ -220,8 +207,6 @@ struct RAMState {
     uint64_t postcopy_requests;
     /* protects modification of the bitmap */
     QemuMutex bitmap_mutex;
-    /* Ram Bitmap protected by RCU */
-    RAMBitmap *ram_bitmap;
     /* The RAMBlock used in the last src_page_requests */
     RAMBlock *last_req_rb;
     /* Queue of outstanding page requests from the destination */
@@ -614,22 +599,17 @@ static inline
 unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
                                           unsigned long start)
 {
-    unsigned long base = rb->offset >> TARGET_PAGE_BITS;
-    unsigned long nr = base + start;
-    uint64_t rb_size = rb->used_length;
-    unsigned long size = base + (rb_size >> TARGET_PAGE_BITS);
-    unsigned long *bitmap;
-
+    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
+    unsigned long *bitmap = rb->bmap;
     unsigned long next;
 
-    bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
-    if (rs->ram_bulk_stage && nr > base) {
-        next = nr + 1;
+    if (rs->ram_bulk_stage && start > 0) {
+        next = start + 1;
     } else {
-        next = find_next_bit(bitmap, size, nr);
+        next = find_next_bit(bitmap, size, start);
     }
 
-    return next - base;
+    return next;
 }
 
 static inline bool migration_bitmap_clear_dirty(RAMState *rs,
@@ -637,10 +617,8 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs,
                                                 unsigned long page)
 {
     bool ret;
-    unsigned long *bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
-    unsigned long nr = (rb->offset >> TARGET_PAGE_BITS) + page;
 
-    ret = test_and_clear_bit(nr, bitmap);
+    ret = test_and_clear_bit(page, rb->bmap);
 
     if (ret) {
         rs->migration_dirty_pages--;
@@ -651,10 +629,8 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs,
 static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
                                         ram_addr_t start, ram_addr_t length)
 {
-    unsigned long *bitmap;
-    bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
     rs->migration_dirty_pages +=
-        cpu_physical_memory_sync_dirty_bitmap(bitmap, rb, start, length,
+        cpu_physical_memory_sync_dirty_bitmap(rb, start, length,
                                               &rs->num_dirty_pages_period);
 }
 
@@ -1153,17 +1129,13 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
          * search already sent it.
          */
         if (block) {
-            unsigned long *bitmap;
             unsigned long page;
 
-            bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
-            page = (block->offset + offset) >> TARGET_PAGE_BITS;
-            dirty = test_bit(page, bitmap);
+            page = offset >> TARGET_PAGE_BITS;
+            dirty = test_bit(page, block->bmap);
             if (!dirty) {
                 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
-                    page,
-                    test_bit(page,
-                             atomic_rcu_read(&rs->ram_bitmap)->unsentmap));
+                       page, test_bit(page, block->unsentmap));
             } else {
                 trace_get_queued_page(block->idstr, (uint64_t)offset, page);
             }
@@ -1301,16 +1273,13 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
 
     /* Check the pages is dirty and if it is send it */
     if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
-        unsigned long *unsentmap;
         /*
          * If xbzrle is on, stop using the data compression after first
          * round of migration even if compression is enabled. In theory,
          * xbzrle can do better than compression.
          */
-        unsigned long page =
-            (pss->block->offset >> TARGET_PAGE_BITS) + pss->page;
-        if (migrate_use_compression()
-            && (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
+        if (migrate_use_compression() &&
+            (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
             res = ram_save_compressed_page(rs, pss, last_stage);
         } else {
             res = ram_save_page(rs, pss, last_stage);
@@ -1319,9 +1288,8 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
         if (res < 0) {
             return res;
         }
-        unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
-        if (unsentmap) {
-            clear_bit(page, unsentmap);
+        if (pss->block->unsentmap) {
+            clear_bit(pss->page, pss->block->unsentmap);
         }
     }
 
@@ -1451,25 +1419,20 @@ void free_xbzrle_decoded_buf(void)
     xbzrle_decoded_buf = NULL;
 }
 
-static void migration_bitmap_free(RAMBitmap *bmap)
-{
-    g_free(bmap->bmap);
-    g_free(bmap->unsentmap);
-    g_free(bmap);
-}
-
 static void ram_migration_cleanup(void *opaque)
 {
-    RAMState *rs = opaque;
+    RAMBlock *block;
 
     /* caller have hold iothread lock or is in a bh, so there is
      * no writing race against this migration_bitmap
      */
-    RAMBitmap *bitmap = rs->ram_bitmap;
-    atomic_rcu_set(&rs->ram_bitmap, NULL);
-    if (bitmap) {
-        memory_global_dirty_log_stop();
-        call_rcu(bitmap, migration_bitmap_free, rcu);
+    memory_global_dirty_log_stop();
+
+    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+        g_free(block->bmap);
+        block->bmap = NULL;
+        g_free(block->unsentmap);
+        block->unsentmap = NULL;
     }
 
     XBZRLE_cache_lock();
@@ -1501,27 +1464,22 @@ static void ram_state_reset(RAMState *rs)
  * of; it won't bother printing lines that are all this value.
  * If 'todump' is null the migration bitmap is dumped.
  */
-void ram_debug_dump_bitmap(unsigned long *todump, bool expected)
+void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
+                           unsigned long pages)
 {
-    unsigned long ram_pages = last_ram_page();
-    RAMState *rs = &ram_state;
     int64_t cur;
     int64_t linelen = 128;
     char linebuf[129];
 
-    if (!todump) {
-        todump = atomic_rcu_read(&rs->ram_bitmap)->bmap;
-    }
-
-    for (cur = 0; cur < ram_pages; cur += linelen) {
+    for (cur = 0; cur < pages; cur += linelen) {
         int64_t curb;
         bool found = false;
         /*
          * Last line; catch the case where the line length
          * is longer than remaining ram
          */
-        if (cur + linelen > ram_pages) {
-            linelen = ram_pages - cur;
+        if (cur + linelen > pages) {
+            linelen = pages - cur;
         }
         for (curb = 0; curb < linelen; curb++) {
             bool thisbit = test_bit(cur + curb, todump);
@@ -1539,14 +1497,12 @@ void ram_debug_dump_bitmap(unsigned long *todump, bool expected)
 
 void ram_postcopy_migrated_memory_release(MigrationState *ms)
 {
-    RAMState *rs = &ram_state;
     struct RAMBlock *block;
-    unsigned long *bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
 
     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
-        unsigned long first = block->offset >> TARGET_PAGE_BITS;
-        unsigned long range = first + (block->used_length >> TARGET_PAGE_BITS);
-        unsigned long run_start = find_next_zero_bit(bitmap, range, first);
+        unsigned long *bitmap = block->bmap;
+        unsigned long range = block->used_length >> TARGET_PAGE_BITS;
+        unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
 
         while (run_start < range) {
             unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
@@ -1573,16 +1529,13 @@ void ram_postcopy_migrated_memory_release(MigrationState *ms)
  */
 static int postcopy_send_discard_bm_ram(MigrationState *ms,
                                         PostcopyDiscardState *pds,
-                                        unsigned long start,
-                                        unsigned long length)
+                                        RAMBlock *block)
 {
-    RAMState *rs = &ram_state;
-    unsigned long end = start + length; /* one after the end */
+    unsigned long end = block->used_length >> TARGET_PAGE_BITS;
     unsigned long current;
-    unsigned long *unsentmap;
+    unsigned long *unsentmap = block->unsentmap;
 
-    unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
-    for (current = start; current < end; ) {
+    for (current = 0; current < end; ) {
         unsigned long one = find_next_bit(unsentmap, end, current);
 
         if (one <= end) {
@@ -1625,18 +1578,15 @@ static int postcopy_each_ram_send_discard(MigrationState *ms)
     int ret;
 
     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
-        unsigned long first = block->offset >> TARGET_PAGE_BITS;
-        PostcopyDiscardState *pds = postcopy_discard_send_init(ms,
-                                                               first,
-                                                               block->idstr);
+        PostcopyDiscardState *pds =
+            postcopy_discard_send_init(ms, block->idstr);
 
         /*
          * Postcopy sends chunks of bitmap over the wire, but it
          * just needs indexes at this point, avoids it having
          * target page specific code.
          */
-        ret = postcopy_send_discard_bm_ram(ms, pds, first,
-                                    block->used_length >> TARGET_PAGE_BITS);
+        ret = postcopy_send_discard_bm_ram(ms, pds, block);
         postcopy_discard_send_finish(ms, pds);
         if (ret) {
             return ret;
@@ -1667,12 +1617,10 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
                                           PostcopyDiscardState *pds)
 {
     RAMState *rs = &ram_state;
-    unsigned long *bitmap;
-    unsigned long *unsentmap;
+    unsigned long *bitmap = block->bmap;
+    unsigned long *unsentmap = block->unsentmap;
     unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
-    unsigned long first = block->offset >> TARGET_PAGE_BITS;
-    unsigned long len = block->used_length >> TARGET_PAGE_BITS;
-    unsigned long last = first + (len - 1);
+    unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
     unsigned long run_start;
 
     if (block->page_size == TARGET_PAGE_SIZE) {
@@ -1680,18 +1628,15 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
         return;
     }
 
-    bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
-    unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
-
     if (unsent_pass) {
         /* Find a sent page */
-        run_start = find_next_zero_bit(unsentmap, last + 1, first);
+        run_start = find_next_zero_bit(unsentmap, pages, 0);
     } else {
         /* Find a dirty page */
-        run_start = find_next_bit(bitmap, last + 1, first);
+        run_start = find_next_bit(bitmap, pages, 0);
     }
 
-    while (run_start <= last) {
+    while (run_start < pages) {
         bool do_fixup = false;
         unsigned long fixup_start_addr;
         unsigned long host_offset;
@@ -1711,9 +1656,9 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
             /* Find the end of this run */
             unsigned long run_end;
             if (unsent_pass) {
-                run_end = find_next_bit(unsentmap, last + 1, run_start + 1);
+                run_end = find_next_bit(unsentmap, pages, run_start + 1);
             } else {
-                run_end = find_next_zero_bit(bitmap, last + 1, run_start + 1);
+                run_end = find_next_zero_bit(bitmap, pages, run_start + 1);
             }
             /*
              * If the end isn't at the start of a host page, then the
@@ -1770,11 +1715,10 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
 
         if (unsent_pass) {
             /* Find the next sent page for the next iteration */
-            run_start = find_next_zero_bit(unsentmap, last + 1,
-                                           run_start);
+            run_start = find_next_zero_bit(unsentmap, pages, run_start);
         } else {
             /* Find the next dirty page for the next iteration */
-            run_start = find_next_bit(bitmap, last + 1, run_start);
+            run_start = find_next_bit(bitmap, pages, run_start);
         }
     }
 }
@@ -1791,34 +1735,22 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
  * Returns zero on success
  *
  * @ms: current migration state
+ * @block: block we want to work with
  */
-static int postcopy_chunk_hostpages(MigrationState *ms)
+static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
 {
-    RAMState *rs = &ram_state;
-    struct RAMBlock *block;
-
-    /* Easiest way to make sure we don't resume in the middle of a host-page */
-    rs->last_seen_block = NULL;
-    rs->last_sent_block = NULL;
-    rs->last_page = 0;
-
-    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
-        unsigned long first = block->offset >> TARGET_PAGE_BITS;
-
-        PostcopyDiscardState *pds =
-                         postcopy_discard_send_init(ms, first, block->idstr);
-
-        /* First pass: Discard all partially sent host pages */
-        postcopy_chunk_hostpages_pass(ms, true, block, pds);
-        /*
-         * Second pass: Ensure that all partially dirty host pages are made
-         * fully dirty.
-         */
-        postcopy_chunk_hostpages_pass(ms, false, block, pds);
-
-        postcopy_discard_send_finish(ms, pds);
-    } /* ram_list loop */
-
+    PostcopyDiscardState *pds =
+        postcopy_discard_send_init(ms, block->idstr);
+
+    /* First pass: Discard all partially sent host pages */
+    postcopy_chunk_hostpages_pass(ms, true, block, pds);
+    /*
+     * Second pass: Ensure that all partially dirty host pages are made
+     * fully dirty.
+     */
+    postcopy_chunk_hostpages_pass(ms, false, block, pds);
+
+    postcopy_discard_send_finish(ms, pds);
     return 0;
 }
 
@@ -1836,47 +1768,53 @@ static int postcopy_chunk_hostpages(MigrationState *ms)
  * Hopefully this is pretty sparse
  *
  * @ms: current migration state
- */
+ * @block: block that contains the page we want to canonicalize */
 int ram_postcopy_send_discard_bitmap(MigrationState *ms)
 {
     RAMState *rs = &ram_state;
+    RAMBlock *block;
     int ret;
-    unsigned long *bitmap, *unsentmap;
 
     rcu_read_lock();
 
     /* This should be our last sync, the src is now paused */
     migration_bitmap_sync(rs);
 
-    unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
-    if (!unsentmap) {
-        /* We don't have a safe way to resize the sentmap, so
-         * if the bitmap was resized it will be NULL at this
-         * point.
+    /* Easiest way to make sure we don't resume in the middle of a host-page */
+    rs->last_seen_block = NULL;
+    rs->last_sent_block = NULL;
+    rs->last_page = 0;
+
+    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+        unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
+        unsigned long *bitmap = block->bmap;
+        unsigned long *unsentmap = block->unsentmap;
+
+        if (!unsentmap) {
+            /* We don't have a safe way to resize the sentmap, so
+             * if the bitmap was resized it will be NULL at this
+             * point.
+             */
+            error_report("migration ram resized during precopy phase");
+            rcu_read_unlock();
+            return -EINVAL;
+        }
+        /* Deal with TPS != HPS and huge pages */
+        ret = postcopy_chunk_hostpages(ms, block);
+        if (ret) {
+            rcu_read_unlock();
+            return ret;
+        }
+
+        /*
+         * Update the unsentmap to be unsentmap = unsentmap | dirty
          */
-        error_report("migration ram resized during precopy phase");
-        rcu_read_unlock();
-        return -EINVAL;
-    }
-
-    /* Deal with TPS != HPS and huge pages */
-    ret = postcopy_chunk_hostpages(ms);
-    if (ret) {
-        rcu_read_unlock();
-        return ret;
-    }
-
-    /*
-     * Update the unsentmap to be unsentmap = unsentmap | dirty
-     */
-    bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
-    bitmap_or(unsentmap, unsentmap, bitmap, last_ram_page());
-
-
-    trace_ram_postcopy_send_discard_bitmap();
+        bitmap_or(unsentmap, unsentmap, bitmap, pages);
 #ifdef DEBUG_POSTCOPY
-    ram_debug_dump_bitmap(unsentmap, true);
+    	ram_debug_dump_bitmap(unsentmap, true, pages);
 #endif
+    }
+    trace_ram_postcopy_send_discard_bitmap();
 
     ret = postcopy_each_ram_send_discard(ms);
     rcu_read_unlock();
@@ -1918,8 +1856,6 @@ err:
 
 static int ram_state_init(RAMState *rs)
 {
-    unsigned long ram_bitmap_pages;
-
     memset(rs, 0, sizeof(*rs));
     qemu_mutex_init(&rs->bitmap_mutex);
     qemu_mutex_init(&rs->src_page_req_mutex);
@@ -1961,16 +1897,19 @@ static int ram_state_init(RAMState *rs)
     rcu_read_lock();
     ram_state_reset(rs);
 
-    rs->ram_bitmap = g_new0(RAMBitmap, 1);
     /* Skip setting bitmap if there is no RAM */
     if (ram_bytes_total()) {
-        ram_bitmap_pages = last_ram_page();
-        rs->ram_bitmap->bmap = bitmap_new(ram_bitmap_pages);
-        bitmap_set(rs->ram_bitmap->bmap, 0, ram_bitmap_pages);
+        RAMBlock *block;
 
-        if (migrate_postcopy_ram()) {
-            rs->ram_bitmap->unsentmap = bitmap_new(ram_bitmap_pages);
-            bitmap_set(rs->ram_bitmap->unsentmap, 0, ram_bitmap_pages);
+        QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+            unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
+
+            block->bmap = bitmap_new(pages);
+            bitmap_set(block->bmap, 0, pages);
+            if (migrate_postcopy_ram()) {
+                block->unsentmap = bitmap_new(pages);
+                bitmap_set(block->unsentmap, 0, pages);
+            }
         }
     }
 
-- 
2.9.3


Re: [Qemu-devel] [PATCH] ram: Split dirty bitmap by RAMBlock
Posted by Hailiang Zhang 6 years, 11 months ago
On 2017/4/26 15:32, Juan Quintela wrote:
> [...]
> -static int postcopy_chunk_hostpages(MigrationState *ms)
> +static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
>   {
> [...]
> +    postcopy_discard_send_finish(ms, pds);
>       return 0;
postcopy_chunk_hostpages() always returns 0 here, so change it to void?

>   }
>   
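
A sketch of the suggested change, assuming no caller needs the return value
(same body as in the patch, just a void return):

static void postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
{
    PostcopyDiscardState *pds =
        postcopy_discard_send_init(ms, block->idstr);

    /* First pass: Discard all partially sent host pages */
    postcopy_chunk_hostpages_pass(ms, true, block, pds);
    /*
     * Second pass: Ensure that all partially dirty host pages are made
     * fully dirty.
     */
    postcopy_chunk_hostpages_pass(ms, false, block, pds);

    postcopy_discard_send_finish(ms, pds);
}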
> [...]
> +        /* Deal with TPS != HPS and huge pages */
> +        ret = postcopy_chunk_hostpages(ms, block);
> +        if (ret) {

It will never get here; see above.
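
The call site would then lose the dead error path, e.g.:

        /* Deal with TPS != HPS and huge pages */
        postcopy_chunk_hostpages(ms, block);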

> [...]
> +        bitmap_or(unsentmap, unsentmap, bitmap, pages);
>   #ifdef DEBUG_POSTCOPY
> -    ram_debug_dump_bitmap(unsentmap, true);
> +    	ram_debug_dump_bitmap(unsentmap, true, pages);
Indentation: you used a tab here.
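
i.e., indented with spaces:

    ram_debug_dump_bitmap(unsentmap, true, pages);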

Others look good to me.

Reviewed-by: zhanghailiang <zhang.zhanghailiang@huawei.com>



Re: [Qemu-devel] [PATCH] ram: Split dirty bitmap by RAMBlock
Posted by Peter Xu 6 years, 11 months ago
On Wed, Apr 26, 2017 at 09:32:47AM +0200, Juan Quintela wrote:
> Both the ram bitmap and the unsent bitmap are split by RAMBlock.
> 
> Signed-off-by: Juan Quintela <quintela@redhat.com>

Reviewed-by: Peter Xu <peterx@redhat.com>

-- 
Peter Xu

Re: [Qemu-devel] [PATCH] ram: Split dirty bitmap by RAMBlock
Posted by no-reply@patchew.org 6 years, 11 months ago
Hi,

This series seems to have some coding style problems. See output below for
more information:

Message-id: 20170426073247.7441-2-quintela@redhat.com
Subject: [Qemu-devel] [PATCH] ram: Split dirty bitmap by RAMBlock
Type: series

=== TEST SCRIPT BEGIN ===
#!/bin/bash

BASE=base
n=1
total=$(git log --oneline $BASE.. | wc -l)
failed=0

# Useful git options
git config --local diff.renamelimit 0
git config --local diff.renames True

commits="$(git log --format=%H --reverse $BASE..)"
for c in $commits; do
    echo "Checking PATCH $n/$total: $(git log -n 1 --format=%s $c)..."
    if ! git show $c --format=email | ./scripts/checkpatch.pl --mailback -; then
        failed=1
        echo
    fi
    n=$((n+1))
done

exit $failed
=== TEST SCRIPT END ===

Updating 3c8cf5a9c21ff8782164d1def7f44bd888713384
Switched to a new branch 'test'
4e43913 ram: Split dirty bitmap by RAMBlock

=== OUTPUT BEGIN ===
Checking PATCH 1/1: ram: Split dirty bitmap by RAMBlock...
ERROR: code indent should never use tabs
#575: FILE: migration/ram.c:1814:
+    ^Iram_debug_dump_bitmap(unsentmap, true, pages);$

total: 1 errors, 0 warnings, 551 lines checked

Your patch has style problems, please review.  If any of these errors
are false positives report them to the maintainer, see
CHECKPATCH in MAINTAINERS.

=== OUTPUT END ===

Test command exited with code: 1


---
Email generated automatically by Patchew [http://patchew.org/].
Please send your feedback to patchew-devel@freelists.org