From: Prasad Pandit <pjp@fedoraproject.org>
Refactor the ram_save_target_page legacy and multifd
functions into one. Besides simplifying the code, this
avoids reinitializing the 'migration_ops' object when
migration moves from the multifd to the postcopy phase.
Signed-off-by: Prasad Pandit <pjp@fedoraproject.org>
---
migration/ram.c | 54 ++++++++++++++++++++-----------------------------
1 file changed, 22 insertions(+), 32 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index 326ce7eb79..f9a6395d00 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1985,18 +1985,36 @@ int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len,
 }
 
 /**
- * ram_save_target_page_legacy: save one target page
+ * ram_save_target_page_common:
+ * send one target page to multifd workers OR save one target page.
  *
- * Returns the number of pages written
+ * Multifd mode: returns 1 if the page was queued, -1 otherwise.
+ *
+ * Non-multifd mode: returns the number of pages written
  *
  * @rs: current RAM state
  * @pss: data about the page we want to send
  */
-static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss)
+static int ram_save_target_page_common(RAMState *rs, PageSearchStatus *pss)
 {
     ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
     int res;
 
+    if (migrate_multifd()) {
+        RAMBlock *block = pss->block;
+        /*
+         * While using multifd live migration, we still need to handle zero
+         * page checking on the migration main thread.
+         */
+        if (migrate_zero_page_detection() == ZERO_PAGE_DETECTION_LEGACY) {
+            if (save_zero_page(rs, pss, offset)) {
+                return 1;
+            }
+        }
+
+        return ram_save_multifd_page(block, offset);
+    }
+
     if (control_save_page(pss, offset, &res)) {
         return res;
     }
@@ -2008,32 +2026,6 @@ static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss)
     return ram_save_page(rs, pss);
 }
 
-/**
- * ram_save_target_page_multifd: send one target page to multifd workers
- *
- * Returns 1 if the page was queued, -1 otherwise.
- *
- * @rs: current RAM state
- * @pss: data about the page we want to send
- */
-static int ram_save_target_page_multifd(RAMState *rs, PageSearchStatus *pss)
-{
-    RAMBlock *block = pss->block;
-    ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
-
-    /*
-     * While using multifd live migration, we still need to handle zero
-     * page checking on the migration main thread.
-     */
-    if (migrate_zero_page_detection() == ZERO_PAGE_DETECTION_LEGACY) {
-        if (save_zero_page(rs, pss, offset)) {
-            return 1;
-        }
-    }
-
-    return ram_save_multifd_page(block, offset);
-}
-
 /* Should be called before sending a host page */
 static void pss_host_page_prepare(PageSearchStatus *pss)
 {
@@ -3055,12 +3047,10 @@ static int ram_save_setup(QEMUFile *f, void *opaque, Error **errp)
     }
 
     migration_ops = g_malloc0(sizeof(MigrationOps));
+    migration_ops->ram_save_target_page = ram_save_target_page_common;
 
     if (migrate_multifd()) {
         multifd_ram_save_setup();
-        migration_ops->ram_save_target_page = ram_save_target_page_multifd;
-    } else {
-        migration_ops->ram_save_target_page = ram_save_target_page_legacy;
     }
 
     bql_unlock();
--
2.47.0
On Tue, Oct 29, 2024 at 08:39:07PM +0530, Prasad Pandit wrote:
> From: Prasad Pandit <pjp@fedoraproject.org>
>
> Refactor the ram_save_target_page legacy and multifd
> functions into one. Besides simplifying the code, this
> avoids reinitializing the 'migration_ops' object when
> migration moves from the multifd to the postcopy phase.
>
> Signed-off-by: Prasad Pandit <pjp@fedoraproject.org>

[...]

> +    if (migrate_multifd()) {
> +        RAMBlock *block = pss->block;
> +        /*
> +         * While using multifd live migration, we still need to handle zero
> +         * page checking on the migration main thread.
> +         */
> +        if (migrate_zero_page_detection() == ZERO_PAGE_DETECTION_LEGACY) {
> +            if (save_zero_page(rs, pss, offset)) {

There's one more save_zero_page() below. Please consider properly
merging them.

> +                return 1;
> +            }
> +        }
> +
> +        return ram_save_multifd_page(block, offset);
> +    }

[...]

>      migration_ops = g_malloc0(sizeof(MigrationOps));
> +    migration_ops->ram_save_target_page = ram_save_target_page_common;

If we want to merge the hooks, we should drop the hook in one shot, then
call the new function directly.

>
>      if (migrate_multifd()) {
>          multifd_ram_save_setup();
> -        migration_ops->ram_save_target_page = ram_save_target_page_multifd;
> -    } else {
> -        migration_ops->ram_save_target_page = ram_save_target_page_legacy;
>      }

-- 
Peter Xu
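For illustration, a merged function with a single save_zero_page() call
could look roughly like the sketch below. It is only a sketch: it
assumes control_save_page() stays a no-op when RDMA is not in use and
so may also run in multifd mode, and the actual follow-up patch may
order these steps differently.

    static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss)
    {
        ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
        int res;

        /* Hand over to RDMA first, if it is in use. */
        if (control_save_page(pss, offset, &res)) {
            return res;
        }

        /*
         * One zero-page check on the migration main thread: always in
         * non-multifd mode, and in multifd mode only with LEGACY
         * detection.
         */
        if (!migrate_multifd() ||
            migrate_zero_page_detection() == ZERO_PAGE_DETECTION_LEGACY) {
            if (save_zero_page(rs, pss, offset)) {
                return 1;
            }
        }

        if (migrate_multifd()) {
            return ram_save_multifd_page(pss->block, offset);
        }

        return ram_save_page(rs, pss);
    }

This tests migrate_multifd() twice, but save_zero_page() appears only
once, which is the merge being asked for above.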
On Fri, 1 Nov 2024 at 20:09, Peter Xu <peterx@redhat.com> wrote:
> > +        if (migrate_zero_page_detection() == ZERO_PAGE_DETECTION_LEGACY) {
> > +            if (save_zero_page(rs, pss, offset)) {
> > +                return 1;
> > +            }
> > +        }
>
> There's one more save_zero_page() below. Please consider properly
> merging them.

    if (save_zero_page(rs, pss, offset)) {
        return 1;
    }

* The first is called in migrate_multifd() mode, the second (above) is
  called in non-multifd mode. Will check how/if we can conflate them.

> >      migration_ops = g_malloc0(sizeof(MigrationOps));
> > +    migration_ops->ram_save_target_page = ram_save_target_page_common;
>
> If we want to merge the hooks, we should drop the hook in one shot, then
> call the new function directly.

* I.e. drop the 'migration_ops' object altogether? And call
  ram_save_target_page() as it used to be before multifd mode?

Thank you.
---
  - Prasad
On Mon, Nov 04, 2024 at 05:26:45PM +0530, Prasad Pandit wrote:
> On Fri, 1 Nov 2024 at 20:09, Peter Xu <peterx@redhat.com> wrote:
> > There's one more save_zero_page() below. Please consider properly
> > merging them.
>
>     if (save_zero_page(rs, pss, offset)) {
>         return 1;
>     }
>
> * The first is called in migrate_multifd() mode, the second (above) is
>   called in non-multifd mode. Will check how/if we can conflate them.

Yes, IMHO it's better when merged.

One more note here: even with ZERO_PAGE_DETECTION_MULTIFD, QEMU would in
reality fall back to LEGACY when !multifd. We need to keep that
behavior.

> > If we want to merge the hooks, we should drop the hook in one shot, then
> > call the new function directly.
>
> * I.e. drop the 'migration_ops' object altogether? And call
>   ram_save_target_page() as it used to be before multifd mode?

Yes.

Thanks,

-- 
Peter Xu
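Concretely, dropping the hook would reduce the call site in
ram_save_host_page() to a direct call, roughly as below (a sketch; the
caller and variable names are taken from current ram.c as best
understood here, and may differ in the final patch):

    -        tmppages = migration_ops->ram_save_target_page(rs, pss);
    +        tmppages = ram_save_target_page(rs, pss);

together with removing the MigrationOps struct, the migration_ops
global, and the g_malloc0()/g_free() pair that manages it in
ram_save_setup() and ram_save_cleanup().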
On Mon, 4 Nov 2024 at 22:30, Peter Xu <peterx@redhat.com> wrote:
> Yes, IMHO it's better when merged.
>
> One more note here: even with ZERO_PAGE_DETECTION_MULTIFD, QEMU would in
> reality fall back to LEGACY when !multifd. We need to keep that
> behavior.

* Where does this fallback happen? In ram_save_target_page()?

> > * I.e. drop the 'migration_ops' object altogether? And call
> > ram_save_target_page() as it used to be before multifd mode?
>
> Yes.

Okay, thank you.
---
  - Prasad
On Tue, Nov 05, 2024 at 03:31:19PM +0530, Prasad Pandit wrote:
> On Mon, 4 Nov 2024 at 22:30, Peter Xu <peterx@redhat.com> wrote:
> > Yes, IMHO it's better when merged.
> >
> > One more note here: even with ZERO_PAGE_DETECTION_MULTIFD, QEMU would
> > in reality fall back to LEGACY when !multifd. We need to keep that
> > behavior.
>
> * Where does this fallback happen? In ram_save_target_page()?

When ZERO_PAGE_DETECTION_MULTIFD is set but the multifd capability is
off, QEMU uses the legacy zero-page path even though the parameter says
MULTIFD. We don't change the parameter's value; the fallback still
happens.

-- 
Peter Xu
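For context, the fallback is implicit rather than a separate code path:
as best understood from current ram.c, save_zero_page() returns early
only for ZERO_PAGE_DETECTION_NONE, so when the parameter is MULTIFD but
the multifd capability is off, the main-thread check still runs and
behaves exactly like LEGACY:

    /* Inside save_zero_page() (sketch of the existing guard): only
     * NONE disables detection; MULTIFD without the multifd capability
     * therefore falls through to the legacy check below. */
    if (migrate_zero_page_detection() == ZERO_PAGE_DETECTION_NONE) {
        return 0;
    }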