Split parse_ramblock_mapped_ram() into two stages:
- parse_ramblock_mapped_ram_header(): reads the MappedRamHeader and
dirty bitmap, storing pages_offset and file_bmap on the RAMBlock
for later use.
- parse_ramblock_mapped_ram_pages(): reads all page data using the
bitmap and pages_offset populated by the header stage.
The original parse_ramblock_mapped_ram() becomes a thin wrapper that
calls both in sequence, preserving the existing behavior.
The bitmap is now stored in block->file_bmap (which already exists
on RAMBlock for the save side) instead of a function-local variable.
The pages function frees the bitmap after loading is complete.
This separation prepares for fast snapshot load, which will call
only the header stage at load time and defer page loading to a
userfaultfd-based demand paging mechanism.
No functional change intended. (Review note: the pages stage derives
num_pages from TARGET_PAGE_SIZE, while the header stage uses
header.page_size; these are expected to match.)
Signed-off-by: Takeru Hayasaka <hayatake396@gmail.com>
---
migration/ram.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++------
1 file changed, 46 insertions(+), 6 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index 979751f61b30..301ef9758e25 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -4152,10 +4152,15 @@ err:
return false;
}
-static void parse_ramblock_mapped_ram(QEMUFile *f, RAMBlock *block,
-                                      ram_addr_t length, Error **errp)
+/*
+ * Read the mapped-ram header and dirty bitmap for a RAM block.
+ * On success, populates block->pages_offset and block->file_bmap for
+ * later page loading (eager or on demand); on failure, sets errp and
+ * leaves block->file_bmap NULL.
+ */
+static void parse_ramblock_mapped_ram_header(QEMUFile *f, RAMBlock *block,
+                                             ram_addr_t length, Error **errp)
 {
-    g_autofree unsigned long *bitmap = NULL;
     MappedRamHeader header;
     size_t bitmap_size;
     long num_pages;
@@ -4187,19 +4192,54 @@ static void parse_ramblock_mapped_ram(QEMUFile *f, RAMBlock *block,
     num_pages = length / header.page_size;
     bitmap_size = BITS_TO_LONGS(num_pages) * sizeof(unsigned long);
-    bitmap = g_malloc0(bitmap_size);
-    if (qemu_get_buffer_at(f, (uint8_t *)bitmap, bitmap_size,
+    block->file_bmap = g_malloc0(bitmap_size);
+    if (qemu_get_buffer_at(f, (uint8_t *)block->file_bmap, bitmap_size,
                            header.bitmap_offset) != bitmap_size) {
         error_setg(errp, "Error reading dirty bitmap");
+        g_free(block->file_bmap);
+        block->file_bmap = NULL;
         return;
     }
+}
+
+/*
+ * Read all page data for a mapped-ram RAM block using the bitmap and
+ * pages_offset set by parse_ramblock_mapped_ram_header(); frees the bitmap.
+ */
+static void parse_ramblock_mapped_ram_pages(QEMUFile *f, RAMBlock *block,
+                                            ram_addr_t length, Error **errp)
+{
+    long num_pages;
-    if (!read_ramblock_mapped_ram(f, block, num_pages, bitmap, errp)) {
+    if (!block->file_bmap) {
+        /* No bitmap: block was skipped by the header stage (e.g. shared) */
         return;
     }
+    num_pages = length / TARGET_PAGE_SIZE;
+    /* NOTE(review): header stage used header.page_size; confirm match */
+    if (!read_ramblock_mapped_ram(f, block, num_pages,
+                                  block->file_bmap, errp)) {
+        goto out;
+    }
+
     /* Skip pages array */
     qemu_set_offset(f, block->pages_offset + length, SEEK_SET);
+
+out:
+    g_free(block->file_bmap);
+    block->file_bmap = NULL;
+}
+
+static void parse_ramblock_mapped_ram(QEMUFile *f, RAMBlock *block,
+                                      ram_addr_t length, Error **errp)
+{
+    ERRP_GUARD(); /* we dereference *errp below; callers may pass NULL */
+    parse_ramblock_mapped_ram_header(f, block, length, errp);
+    if (*errp) {
+        return;
+    }
+    parse_ramblock_mapped_ram_pages(f, block, length, errp);
 }
static int parse_ramblock(QEMUFile *f, RAMBlock *block, ram_addr_t length)
--
2.43.0
© 2016 - 2026 Red Hat, Inc.