Since we use PageSearchStatus to represent a channel, it makes perfect
sense to keep last_sent_block (aka, leverage RAM_SAVE_FLAG_CONTINUE) to be
per-channel rather than global because each channel can be sending
different pages on ramblocks.
Hence move it from RAMState into PageSearchStatus.
Signed-off-by: Peter Xu <peterx@redhat.com>
---
migration/ram.c | 71 ++++++++++++++++++++++++++++---------------------
1 file changed, 41 insertions(+), 30 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index dbe11e1ace..fdcb61a2c8 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -89,6 +89,8 @@ XBZRLECacheStats xbzrle_counters;
struct PageSearchStatus {
/* The migration channel used for a specific host page */
QEMUFile *pss_channel;
+ /* Last block from where we have sent data */
+ RAMBlock *last_sent_block;
/* Current block being searched */
RAMBlock *block;
/* Current page to search from */
@@ -368,8 +370,6 @@ struct RAMState {
int uffdio_fd;
/* Last block that we have visited searching for dirty pages */
RAMBlock *last_seen_block;
- /* Last block from where we have sent data */
- RAMBlock *last_sent_block;
/* Last dirty target page we have sent */
ram_addr_t last_page;
/* last ram version we have seen */
@@ -677,16 +677,17 @@ exit:
*
* Returns the number of bytes written
*
- * @f: QEMUFile where to send the data
+ * @pss: current PSS channel status
* @block: block that contains the page we want to send
* @offset: offset inside the block for the page
* in the lower bits, it contains flags
*/
-static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
+static size_t save_page_header(PageSearchStatus *pss, RAMBlock *block,
ram_addr_t offset)
{
size_t size, len;
- bool same_block = (block == rs->last_sent_block);
+ bool same_block = (block == pss->last_sent_block);
+ QEMUFile *f = pss->pss_channel;
if (same_block) {
offset |= RAM_SAVE_FLAG_CONTINUE;
@@ -699,7 +700,7 @@ static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
qemu_put_byte(f, len);
qemu_put_buffer(f, (uint8_t *)block->idstr, len);
size += 1 + len;
- rs->last_sent_block = block;
+ pss->last_sent_block = block;
}
return size;
}
@@ -783,17 +784,19 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
* -1 means that xbzrle would be longer than normal
*
* @rs: current RAM state
+ * @pss: current PSS channel
* @current_data: pointer to the address of the page contents
* @current_addr: addr of the page
* @block: block that contains the page we want to send
* @offset: offset inside the block for the page
*/
-static int save_xbzrle_page(RAMState *rs, QEMUFile *file,
+static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss,
uint8_t **current_data, ram_addr_t current_addr,
RAMBlock *block, ram_addr_t offset)
{
int encoded_len = 0, bytes_xbzrle;
uint8_t *prev_cached_page;
+ QEMUFile *file = pss->pss_channel;
if (!cache_is_cached(XBZRLE.cache, current_addr,
ram_counters.dirty_sync_count)) {
@@ -858,7 +861,7 @@ static int save_xbzrle_page(RAMState *rs, QEMUFile *file,
}
/* Send XBZRLE based compressed page */
- bytes_xbzrle = save_page_header(rs, file, block,
+ bytes_xbzrle = save_page_header(pss, block,
offset | RAM_SAVE_FLAG_XBZRLE);
qemu_put_byte(file, ENCODING_FLAG_XBZRLE);
qemu_put_be16(file, encoded_len);
@@ -1289,19 +1292,19 @@ static void ram_release_page(const char *rbname, uint64_t offset)
* Returns the size of data written to the file, 0 means the page is not
* a zero page
*
- * @rs: current RAM state
- * @file: the file where the data is saved
+ * @pss: current PSS channel
* @block: block that contains the page we want to send
* @offset: offset inside the block for the page
*/
-static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
+static int save_zero_page_to_file(PageSearchStatus *pss,
RAMBlock *block, ram_addr_t offset)
{
uint8_t *p = block->host + offset;
+ QEMUFile *file = pss->pss_channel;
int len = 0;
if (buffer_is_zero(p, TARGET_PAGE_SIZE)) {
- len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
+ len += save_page_header(pss, block, offset | RAM_SAVE_FLAG_ZERO);
qemu_put_byte(file, 0);
len += 1;
ram_release_page(block->idstr, offset);
@@ -1314,14 +1317,14 @@ static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
*
* Returns the number of pages written.
*
- * @rs: current RAM state
+ * @pss: current PSS channel
* @block: block that contains the page we want to send
* @offset: offset inside the block for the page
*/
-static int save_zero_page(RAMState *rs, QEMUFile *file, RAMBlock *block,
+static int save_zero_page(PageSearchStatus *pss, RAMBlock *block,
ram_addr_t offset)
{
- int len = save_zero_page_to_file(rs, file, block, offset);
+ int len = save_zero_page_to_file(pss, block, offset);
if (len) {
qatomic_inc(&ram_counters.duplicate);
@@ -1374,16 +1377,18 @@ static bool control_save_page(PageSearchStatus *pss, RAMBlock *block,
*
* Returns the number of pages written.
*
- * @rs: current RAM state
+ * @pss: current PSS channel
* @block: block that contains the page we want to send
* @offset: offset inside the block for the page
* @buf: the page to be sent
* @async: send to page asyncly
*/
-static int save_normal_page(RAMState *rs, QEMUFile *file, RAMBlock *block,
+static int save_normal_page(PageSearchStatus *pss, RAMBlock *block,
ram_addr_t offset, uint8_t *buf, bool async)
{
- ram_transferred_add(save_page_header(rs, file, block,
+ QEMUFile *file = pss->pss_channel;
+
+ ram_transferred_add(save_page_header(pss, block,
offset | RAM_SAVE_FLAG_PAGE));
if (async) {
qemu_put_buffer_async(file, buf, TARGET_PAGE_SIZE,
@@ -1423,7 +1428,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss)
XBZRLE_cache_lock();
if (rs->xbzrle_enabled && !migration_in_postcopy()) {
- pages = save_xbzrle_page(rs, pss->pss_channel, &p, current_addr,
+ pages = save_xbzrle_page(rs, pss, &p, current_addr,
block, offset);
if (!rs->last_stage) {
/* Can't send this cached data async, since the cache page
@@ -1435,8 +1440,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss)
/* XBZRLE overflow or normal page */
if (pages == -1) {
- pages = save_normal_page(rs, pss->pss_channel, block, offset,
- p, send_async);
+ pages = save_normal_page(pss, block, offset, p, send_async);
}
XBZRLE_cache_unlock();
@@ -1459,14 +1463,15 @@ static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
ram_addr_t offset, uint8_t *source_buf)
{
RAMState *rs = ram_state;
+ PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY];
uint8_t *p = block->host + offset;
int ret;
- if (save_zero_page_to_file(rs, f, block, offset)) {
+ if (save_zero_page_to_file(pss, block, offset)) {
return true;
}
- save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
+ save_page_header(pss, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
/*
* copy it to a internal buffer to avoid it being modified by VM
@@ -2286,7 +2291,8 @@ static bool save_page_use_compression(RAMState *rs)
* has been properly handled by compression, otherwise needs other
* paths to handle it
*/
-static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
+static bool save_compress_page(RAMState *rs, PageSearchStatus *pss,
+ RAMBlock *block, ram_addr_t offset)
{
if (!save_page_use_compression(rs)) {
return false;
@@ -2302,7 +2308,7 @@ static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
* We post the fist page as normal page as compression will take
* much CPU resource.
*/
- if (block != rs->last_sent_block) {
+ if (block != pss->last_sent_block) {
flush_compressed_data(rs);
return false;
}
@@ -2333,11 +2339,11 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss)
return res;
}
- if (save_compress_page(rs, block, offset)) {
+ if (save_compress_page(rs, pss, block, offset)) {
return 1;
}
- res = save_zero_page(rs, pss->pss_channel, block, offset);
+ res = save_zero_page(pss, block, offset);
if (res > 0) {
/* Must let xbzrle know, otherwise a previous (now 0'd) cached
* page would be stale
@@ -2469,7 +2475,7 @@ static void postcopy_preempt_choose_channel(RAMState *rs, PageSearchStatus *pss)
* If channel switched, reset last_sent_block since the old sent block
* may not be on the same channel.
*/
- rs->last_sent_block = NULL;
+ pss->last_sent_block = NULL;
trace_postcopy_preempt_switch_channel(channel);
}
@@ -2804,8 +2810,13 @@ static void ram_save_cleanup(void *opaque)
static void ram_state_reset(RAMState *rs)
{
+ int i;
+
+ for (i = 0; i < RAM_CHANNEL_MAX; i++) {
+ rs->pss[i].last_sent_block = NULL;
+ }
+
rs->last_seen_block = NULL;
- rs->last_sent_block = NULL;
rs->last_page = 0;
rs->last_version = ram_list.version;
rs->xbzrle_enabled = false;
@@ -2999,8 +3010,8 @@ void ram_postcopy_send_discard_bitmap(MigrationState *ms)
migration_bitmap_sync(rs);
/* Easiest way to make sure we don't resume in the middle of a host-page */
+ rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL;
rs->last_seen_block = NULL;
- rs->last_sent_block = NULL;
rs->last_page = 0;
postcopy_each_ram_send_discard(ms);
--
2.32.0
* Peter Xu (peterx@redhat.com) wrote:
> Since we use PageSearchStatus to represent a channel, it makes perfect
> sense to keep last_sent_block (aka, leverage RAM_SAVE_FLAG_CONTINUE) to be
> per-channel rather than global because each channel can be sending
> different pages on ramblocks.
>
> Hence move it from RAMState into PageSearchStatus.
>
> Signed-off-by: Peter Xu <peterx@redhat.com>
> ---
> migration/ram.c | 71 ++++++++++++++++++++++++++++---------------------
> 1 file changed, 41 insertions(+), 30 deletions(-)
>
> diff --git a/migration/ram.c b/migration/ram.c
> index dbe11e1ace..fdcb61a2c8 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -89,6 +89,8 @@ XBZRLECacheStats xbzrle_counters;
> struct PageSearchStatus {
> /* The migration channel used for a specific host page */
> QEMUFile *pss_channel;
> + /* Last block from where we have sent data */
> + RAMBlock *last_sent_block;
> /* Current block being searched */
> RAMBlock *block;
> /* Current page to search from */
> @@ -368,8 +370,6 @@ struct RAMState {
> int uffdio_fd;
> /* Last block that we have visited searching for dirty pages */
> RAMBlock *last_seen_block;
> - /* Last block from where we have sent data */
> - RAMBlock *last_sent_block;
> /* Last dirty target page we have sent */
> ram_addr_t last_page;
> /* last ram version we have seen */
> @@ -677,16 +677,17 @@ exit:
> *
> * Returns the number of bytes written
> *
> - * @f: QEMUFile where to send the data
> + * @pss: current PSS channel status
> * @block: block that contains the page we want to send
> * @offset: offset inside the block for the page
> * in the lower bits, it contains flags
> */
> -static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
> +static size_t save_page_header(PageSearchStatus *pss, RAMBlock *block,
> ram_addr_t offset)
> {
> size_t size, len;
> - bool same_block = (block == rs->last_sent_block);
> + bool same_block = (block == pss->last_sent_block);
> + QEMUFile *f = pss->pss_channel;
>
> if (same_block) {
> offset |= RAM_SAVE_FLAG_CONTINUE;
> @@ -699,7 +700,7 @@ static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
> qemu_put_byte(f, len);
> qemu_put_buffer(f, (uint8_t *)block->idstr, len);
> size += 1 + len;
> - rs->last_sent_block = block;
> + pss->last_sent_block = block;
> }
> return size;
> }
> @@ -783,17 +784,19 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
> * -1 means that xbzrle would be longer than normal
> *
> * @rs: current RAM state
> + * @pss: current PSS channel
> * @current_data: pointer to the address of the page contents
> * @current_addr: addr of the page
> * @block: block that contains the page we want to send
> * @offset: offset inside the block for the page
> */
> -static int save_xbzrle_page(RAMState *rs, QEMUFile *file,
> +static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss,
> uint8_t **current_data, ram_addr_t current_addr,
> RAMBlock *block, ram_addr_t offset)
> {
> int encoded_len = 0, bytes_xbzrle;
> uint8_t *prev_cached_page;
> + QEMUFile *file = pss->pss_channel;
>
> if (!cache_is_cached(XBZRLE.cache, current_addr,
> ram_counters.dirty_sync_count)) {
> @@ -858,7 +861,7 @@ static int save_xbzrle_page(RAMState *rs, QEMUFile *file,
> }
>
> /* Send XBZRLE based compressed page */
> - bytes_xbzrle = save_page_header(rs, file, block,
> + bytes_xbzrle = save_page_header(pss, block,
> offset | RAM_SAVE_FLAG_XBZRLE);
> qemu_put_byte(file, ENCODING_FLAG_XBZRLE);
> qemu_put_be16(file, encoded_len);
> @@ -1289,19 +1292,19 @@ static void ram_release_page(const char *rbname, uint64_t offset)
> * Returns the size of data written to the file, 0 means the page is not
> * a zero page
> *
> - * @rs: current RAM state
> - * @file: the file where the data is saved
> + * @pss: current PSS channel
> * @block: block that contains the page we want to send
> * @offset: offset inside the block for the page
> */
> -static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
> +static int save_zero_page_to_file(PageSearchStatus *pss,
> RAMBlock *block, ram_addr_t offset)
> {
> uint8_t *p = block->host + offset;
> + QEMUFile *file = pss->pss_channel;
> int len = 0;
>
> if (buffer_is_zero(p, TARGET_PAGE_SIZE)) {
> - len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
> + len += save_page_header(pss, block, offset | RAM_SAVE_FLAG_ZERO);
> qemu_put_byte(file, 0);
> len += 1;
> ram_release_page(block->idstr, offset);
> @@ -1314,14 +1317,14 @@ static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
> *
> * Returns the number of pages written.
> *
> - * @rs: current RAM state
> + * @pss: current PSS channel
> * @block: block that contains the page we want to send
> * @offset: offset inside the block for the page
> */
> -static int save_zero_page(RAMState *rs, QEMUFile *file, RAMBlock *block,
> +static int save_zero_page(PageSearchStatus *pss, RAMBlock *block,
> ram_addr_t offset)
> {
> - int len = save_zero_page_to_file(rs, file, block, offset);
> + int len = save_zero_page_to_file(pss, block, offset);
>
> if (len) {
> qatomic_inc(&ram_counters.duplicate);
> @@ -1374,16 +1377,18 @@ static bool control_save_page(PageSearchStatus *pss, RAMBlock *block,
> *
> * Returns the number of pages written.
> *
> - * @rs: current RAM state
> + * @pss: current PSS channel
> * @block: block that contains the page we want to send
> * @offset: offset inside the block for the page
> * @buf: the page to be sent
> * @async: send to page asyncly
> */
> -static int save_normal_page(RAMState *rs, QEMUFile *file, RAMBlock *block,
> +static int save_normal_page(PageSearchStatus *pss, RAMBlock *block,
> ram_addr_t offset, uint8_t *buf, bool async)
> {
> - ram_transferred_add(save_page_header(rs, file, block,
> + QEMUFile *file = pss->pss_channel;
> +
> + ram_transferred_add(save_page_header(pss, block,
> offset | RAM_SAVE_FLAG_PAGE));
> if (async) {
> qemu_put_buffer_async(file, buf, TARGET_PAGE_SIZE,
> @@ -1423,7 +1428,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss)
>
> XBZRLE_cache_lock();
> if (rs->xbzrle_enabled && !migration_in_postcopy()) {
> - pages = save_xbzrle_page(rs, pss->pss_channel, &p, current_addr,
> + pages = save_xbzrle_page(rs, pss, &p, current_addr,
> block, offset);
> if (!rs->last_stage) {
> /* Can't send this cached data async, since the cache page
> @@ -1435,8 +1440,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss)
>
> /* XBZRLE overflow or normal page */
> if (pages == -1) {
> - pages = save_normal_page(rs, pss->pss_channel, block, offset,
> - p, send_async);
> + pages = save_normal_page(pss, block, offset, p, send_async);
> }
>
> XBZRLE_cache_unlock();
> @@ -1459,14 +1463,15 @@ static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
> ram_addr_t offset, uint8_t *source_buf)
> {
> RAMState *rs = ram_state;
> + PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY];
> uint8_t *p = block->host + offset;
> int ret;
>
> - if (save_zero_page_to_file(rs, f, block, offset)) {
> + if (save_zero_page_to_file(pss, block, offset)) {
> return true;
> }
>
> - save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
> + save_page_header(pss, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
>
> /*
> * copy it to a internal buffer to avoid it being modified by VM
> @@ -2286,7 +2291,8 @@ static bool save_page_use_compression(RAMState *rs)
> * has been properly handled by compression, otherwise needs other
> * paths to handle it
> */
> -static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
> +static bool save_compress_page(RAMState *rs, PageSearchStatus *pss,
> + RAMBlock *block, ram_addr_t offset)
> {
> if (!save_page_use_compression(rs)) {
> return false;
> @@ -2302,7 +2308,7 @@ static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
> * We post the fist page as normal page as compression will take
> * much CPU resource.
> */
> - if (block != rs->last_sent_block) {
> + if (block != pss->last_sent_block) {
> flush_compressed_data(rs);
> return false;
> }
> @@ -2333,11 +2339,11 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss)
> return res;
> }
>
> - if (save_compress_page(rs, block, offset)) {
> + if (save_compress_page(rs, pss, block, offset)) {
> return 1;
> }
>
> - res = save_zero_page(rs, pss->pss_channel, block, offset);
> + res = save_zero_page(pss, block, offset);
> if (res > 0) {
> /* Must let xbzrle know, otherwise a previous (now 0'd) cached
> * page would be stale
> @@ -2469,7 +2475,7 @@ static void postcopy_preempt_choose_channel(RAMState *rs, PageSearchStatus *pss)
> * If channel switched, reset last_sent_block since the old sent block
> * may not be on the same channel.
> */
> - rs->last_sent_block = NULL;
> + pss->last_sent_block = NULL;
>
> trace_postcopy_preempt_switch_channel(channel);
> }
> @@ -2804,8 +2810,13 @@ static void ram_save_cleanup(void *opaque)
>
> static void ram_state_reset(RAMState *rs)
> {
> + int i;
> +
> + for (i = 0; i < RAM_CHANNEL_MAX; i++) {
> + rs->pss[i].last_sent_block = NULL;
> + }
> +
> rs->last_seen_block = NULL;
> - rs->last_sent_block = NULL;
> rs->last_page = 0;
> rs->last_version = ram_list.version;
> rs->xbzrle_enabled = false;
> @@ -2999,8 +3010,8 @@ void ram_postcopy_send_discard_bitmap(MigrationState *ms)
> migration_bitmap_sync(rs);
>
> /* Easiest way to make sure we don't resume in the middle of a host-page */
> + rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL;
Why don't we reset the postcopy one here as well?
Dave
> rs->last_seen_block = NULL;
> - rs->last_sent_block = NULL;
> rs->last_page = 0;
>
> postcopy_each_ram_send_discard(ms);
> --
> 2.32.0
>
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
On Thu, Oct 06, 2022 at 05:59:51PM +0100, Dr. David Alan Gilbert wrote:
> > @@ -2999,8 +3010,8 @@ void ram_postcopy_send_discard_bitmap(MigrationState *ms)
> >      migration_bitmap_sync(rs);
> >  
> >      /* Easiest way to make sure we don't resume in the middle of a host-page */
> > +    rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL;
> 
> Why don't we reset the postcopy one here as well?

Because ram_postcopy_send_discard_bitmap() is only called before postcopy
starts, so the other field should be NULL already.

Thanks,

-- 
Peter Xu
* Peter Xu (peterx@redhat.com) wrote:
> On Thu, Oct 06, 2022 at 05:59:51PM +0100, Dr. David Alan Gilbert wrote:
> > > @@ -2999,8 +3010,8 @@ void ram_postcopy_send_discard_bitmap(MigrationState *ms)
> > >      migration_bitmap_sync(rs);
> > >  
> > >      /* Easiest way to make sure we don't resume in the middle of a host-page */
> > > +    rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL;
> > 
> > Why don't we reset the postcopy one here as well?
> 
> Because ram_postcopy_send_discard_bitmap() is only called before postcopy
> starts, so the other field should be NULL already.  Thanks,

Ah, yes, OK

Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

> -- 
> Peter Xu
> 
-- 
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
© 2016 - 2026 Red Hat, Inc.