Drop migration_bitmap_sync_precopy() since the dirty bitmap is initialized to
all ones anyway, so there is no need to sync at the start.
While at it, clean the locks up a bit:
- RCU lock is only needed to walk the ramblocks, move it into
ram_list_init_bitmaps().
- The ram_list lock seems to be unnecessary now, drop it.
- The bql should only be needed for memory_global_dirty_log_start(), move it
to only protect that.
Signed-off-by: Peter Xu <peterx@redhat.com>
---
migration/ram.c | 19 +++++++------------
1 file changed, 7 insertions(+), 12 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index 7a43bfd7af..189d6427ac 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -2651,6 +2651,7 @@ static void ram_list_init_bitmaps(void)
shift = CLEAR_BITMAP_SHIFT_MIN;
}
+ RCU_READ_LOCK_GUARD();
RAMBLOCK_FOREACH_NOT_IGNORED(block) {
pages = block->max_length >> TARGET_PAGE_BITS;
/*
@@ -2672,20 +2673,14 @@ static void ram_list_init_bitmaps(void)
static void ram_init_bitmaps(RAMState *rs)
{
- /* For memory_global_dirty_log_start below. */
- qemu_mutex_lock_iothread();
- qemu_mutex_lock_ramlist();
+ ram_list_init_bitmaps();
- WITH_RCU_READ_LOCK_GUARD() {
- ram_list_init_bitmaps();
- /* We don't use dirty log with background snapshots */
- if (!migrate_background_snapshot()) {
- memory_global_dirty_log_start();
- migration_bitmap_sync_precopy(rs);
- }
+ /* We don't use dirty log with background snapshots */
+ if (!migrate_background_snapshot()) {
+ qemu_mutex_lock_iothread();
+ memory_global_dirty_log_start();
+ qemu_mutex_unlock_iothread();
}
- qemu_mutex_unlock_ramlist();
- qemu_mutex_unlock_iothread();
}
static int ram_init_all(RAMState **rsp)
--
2.31.1