[PATCH 3/4] migration: Refactor incoming cleanup into migration_incoming_finish()

Juraj Marcin posted 4 patches 1 week, 6 days ago
Maintainers: Peter Xu <peterx@redhat.com>, Fabiano Rosas <farosas@suse.de>, Eric Blake <eblake@redhat.com>, Markus Armbruster <armbru@redhat.com>, Laurent Vivier <lvivier@redhat.com>, Paolo Bonzini <pbonzini@redhat.com>
[PATCH 3/4] migration: Refactor incoming cleanup into migration_incoming_finish()
Posted by Juraj Marcin 1 week, 6 days ago
From: Juraj Marcin <jmarcin@redhat.com>

Currently, there are two functions that are responsible for cleanup of
the incoming migration state. With successful precopy, it's the main
thread and with successful postcopy it's the listen thread. However, if
postcopy fails during the device load, both functions will try to do
the cleanup. Moreover, when the exit-on-error parameter was added, it was
applied only to precopy.

This patch refactors common cleanup and exiting on error into a helper
function that can be started either from precopy or postcopy, reducing
the duplication. If the listen thread has been started (the postcopy
state is at least LISTENING), the listen thread is responsible for all
cleanup and exiting, otherwise it's the main thread's responsibility.

Signed-off-by: Juraj Marcin <jmarcin@redhat.com>
---
 migration/migration.c | 64 ++++++++++++++++++++++++-------------------
 migration/migration.h |  1 +
 migration/savevm.c    | 48 +++++++++++---------------------
 3 files changed, 53 insertions(+), 60 deletions(-)

diff --git a/migration/migration.c b/migration/migration.c
index 2c0b3a7229..7222e3de13 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -442,9 +442,19 @@ void migration_incoming_transport_cleanup(MigrationIncomingState *mis)
 void migration_incoming_state_destroy(void)
 {
     struct MigrationIncomingState *mis = migration_incoming_get_current();
+    PostcopyState ps = postcopy_state_get();
 
     multifd_recv_cleanup();
 
+    if (mis->have_listen_thread) {
+        qemu_thread_join(&mis->listen_thread);
+        mis->have_listen_thread = false;
+    }
+
+    if (ps != POSTCOPY_INCOMING_NONE) {
+        postcopy_ram_incoming_cleanup(mis);
+    }
+
     /*
      * RAM state cleanup needs to happen after multifd cleanup, because
      * multifd threads can use some of its states (receivedmap).
@@ -809,6 +819,23 @@ static void qemu_start_incoming_migration(const char *uri, bool has_channels,
     cpr_state_close();
 }
 
+void migration_incoming_finish(void)
+{
+    MigrationState *s = migrate_get_current();
+    MigrationIncomingState *mis = migration_incoming_get_current();
+
+    migration_incoming_state_destroy();
+
+    if (migration_has_failed(mis->state) && mis->exit_on_error) {
+        WITH_QEMU_LOCK_GUARD(&s->error_mutex) {
+            error_report_err(s->error);
+            s->error = NULL;
+        }
+
+        exit(EXIT_FAILURE);
+    }
+}
+
 static void process_incoming_migration_bh(void *opaque)
 {
     MigrationIncomingState *mis = opaque;
@@ -861,7 +888,7 @@ static void process_incoming_migration_bh(void *opaque)
      */
     migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                       MIGRATION_STATUS_COMPLETED);
-    migration_incoming_state_destroy();
+    migration_incoming_finish();
 }
 
 static void coroutine_fn
@@ -888,23 +915,13 @@ process_incoming_migration_co(void *opaque)
 
     ps = postcopy_state_get();
     trace_process_incoming_migration_co_end(ret, ps);
-    if (ps != POSTCOPY_INCOMING_NONE) {
-        if (ps == POSTCOPY_INCOMING_ADVISE) {
-            /*
-             * Where a migration had postcopy enabled (and thus went to advise)
-             * but managed to complete within the precopy period, we can use
-             * the normal exit.
-             */
-            postcopy_ram_incoming_cleanup(mis);
-        } else if (ret >= 0) {
-            /*
-             * Postcopy was started, cleanup should happen at the end of the
-             * postcopy thread.
-             */
-            trace_process_incoming_migration_co_postcopy_end_main();
-            goto out;
-        }
-        /* Else if something went wrong then just fall out of the normal exit */
+    if (ps >= POSTCOPY_INCOMING_LISTENING) {
+        /*
+         * Postcopy was started, cleanup should happen at the end of the
+         * postcopy thread.
+         */
+        trace_process_incoming_migration_co_postcopy_end_main();
+        goto out;
     }
 
     if (ret < 0) {
@@ -926,16 +943,7 @@ fail:
     migrate_set_error(s, local_err);
     error_free(local_err);
 
-    migration_incoming_state_destroy();
-
-    if (mis->exit_on_error) {
-        WITH_QEMU_LOCK_GUARD(&s->error_mutex) {
-            error_report_err(s->error);
-            s->error = NULL;
-        }
-
-        exit(EXIT_FAILURE);
-    }
+    migration_incoming_finish();
 out:
     /* Pairs with the refcount taken in qmp_migrate_incoming() */
     migrate_incoming_unref_outgoing_state();
diff --git a/migration/migration.h b/migration/migration.h
index 2c2331f40d..67e3318467 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -518,6 +518,7 @@ void migrate_set_state(MigrationStatus *state, MigrationStatus old_state,
 void migration_fd_process_incoming(QEMUFile *f);
 void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp);
 void migration_incoming_process(void);
+void migration_incoming_finish(void);
 
 bool  migration_has_all_channels(void);
 
diff --git a/migration/savevm.c b/migration/savevm.c
index fabbeb296a..d7eb416d48 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -2069,6 +2069,11 @@ static int loadvm_postcopy_ram_handle_discard(MigrationIncomingState *mis,
     return 0;
 }
 
+static void postcopy_ram_listen_thread_bh(void *opaque)
+{
+    migration_incoming_finish();
+}
+
 /*
  * Triggered by a postcopy_listen command; this thread takes over reading
  * the input stream, leaving the main thread free to carry on loading the rest
@@ -2122,52 +2127,31 @@ static void *postcopy_ram_listen_thread(void *opaque)
                          "bitmaps may be lost, and present migrated dirty "
                          "bitmaps are correctly migrated and valid.",
                          __func__, load_res);
-            load_res = 0; /* prevent further exit() */
         } else {
             error_report("%s: loadvm failed: %d", __func__, load_res);
             migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                                            MIGRATION_STATUS_FAILED);
+            goto out;
         }
     }
-    if (load_res >= 0) {
-        /*
-         * This looks good, but it's possible that the device loading in the
-         * main thread hasn't finished yet, and so we might not be in 'RUN'
-         * state yet; wait for the end of the main thread.
-         */
-        qemu_event_wait(&mis->main_thread_load_event);
-    }
-    postcopy_ram_incoming_cleanup(mis);
-
-    if (load_res < 0) {
-        /*
-         * If something went wrong then we have a bad state so exit;
-         * depending how far we got it might be possible at this point
-         * to leave the guest running and fire MCEs for pages that never
-         * arrived as a desperate recovery step.
-         */
-        rcu_unregister_thread();
-        exit(EXIT_FAILURE);
-    }
+    /*
+     * This looks good, but it's possible that the device loading in the
+     * main thread hasn't finished yet, and so we might not be in 'RUN'
+     * state yet; wait for the end of the main thread.
+     */
+    qemu_event_wait(&mis->main_thread_load_event);
 
     migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                                    MIGRATION_STATUS_COMPLETED);
-    /*
-     * If everything has worked fine, then the main thread has waited
-     * for us to start, and we're the last use of the mis.
-     * (If something broke then qemu will have to exit anyway since it's
-     * got a bad migration state).
-     */
-    bql_lock();
-    migration_incoming_state_destroy();
-    bql_unlock();
 
+out:
     rcu_unregister_thread();
-    mis->have_listen_thread = false;
     postcopy_state_set(POSTCOPY_INCOMING_END);
 
     object_unref(OBJECT(migr));
 
+    migration_bh_schedule(postcopy_ram_listen_thread_bh, NULL);
+
     return NULL;
 }
 
@@ -2217,7 +2201,7 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
     mis->have_listen_thread = true;
     postcopy_thread_create(mis, &mis->listen_thread,
                            MIGRATION_THREAD_DST_LISTEN,
-                           postcopy_ram_listen_thread, QEMU_THREAD_DETACHED);
+                           postcopy_ram_listen_thread, QEMU_THREAD_JOINABLE);
     trace_loadvm_postcopy_handle_listen("return");
 
     return 0;
-- 
2.51.0
Re: [PATCH 3/4] migration: Refactor incoming cleanup into migration_incoming_finish()
Posted by Fabiano Rosas 1 week, 1 day ago
Juraj Marcin <jmarcin@redhat.com> writes:

Hi Juraj,

Good patch, nice use of migrate_has_failed()

> From: Juraj Marcin <jmarcin@redhat.com>
>
> Currently, there are two functions that are responsible for cleanup of
> the incoming migration state. With successful precopy, it's the main
> thread and with successful postcopy it's the listen thread. However, if
> postcopy fails during in the device load, both functions will try to do
> the cleanup. Moreover, when exit-on-error parameter was added, it was
> applied only to precopy.
>

Someone could be relying on postcopy always exiting on error while
explicitly setting exit-on-error=false for precopy and this patch would
change the behavior incompatibly. Is this an issue? I'm willing to
ignore it, but you guys know more about postcopy.

> This patch refactors common cleanup and exiting on error into a helper
> function that can be started either from precopy or postcopy, reducing
> the duplication. If the listen thread has been started (the postcopy
> state is at least LISTENING), the listen thread is responsible for all
> cleanup and exiting, otherwise it's the main thread's responsibility.

Don't the BHs also run in the main thread? I'm not sure this sentence is
accurate.

>
> Signed-off-by: Juraj Marcin <jmarcin@redhat.com>
> ---
>  migration/migration.c | 64 ++++++++++++++++++++++++-------------------
>  migration/migration.h |  1 +
>  migration/savevm.c    | 48 +++++++++++---------------------

Could someone act on the TODOs and move postcopy code into postcopy-ram?
It's never too late to make things consistent.

>  3 files changed, 53 insertions(+), 60 deletions(-)
>
> diff --git a/migration/migration.c b/migration/migration.c
> index 2c0b3a7229..7222e3de13 100644
> --- a/migration/migration.c
> +++ b/migration/migration.c
> @@ -442,9 +442,19 @@ void migration_incoming_transport_cleanup(MigrationIncomingState *mis)
>  void migration_incoming_state_destroy(void)
>  {
>      struct MigrationIncomingState *mis = migration_incoming_get_current();
> +    PostcopyState ps = postcopy_state_get();
>  
>      multifd_recv_cleanup();
>  
> +    if (mis->have_listen_thread) {
> +        qemu_thread_join(&mis->listen_thread);
> +        mis->have_listen_thread = false;
> +    }
> +
> +    if (ps != POSTCOPY_INCOMING_NONE) {
> +        postcopy_ram_incoming_cleanup(mis);
> +    }
> +
>      /*
>       * RAM state cleanup needs to happen after multifd cleanup, because
>       * multifd threads can use some of its states (receivedmap).
> @@ -809,6 +819,23 @@ static void qemu_start_incoming_migration(const char *uri, bool has_channels,
>      cpr_state_close();
>  }
>  
> +void migration_incoming_finish(void)
> +{
> +    MigrationState *s = migrate_get_current();
> +    MigrationIncomingState *mis = migration_incoming_get_current();
> +
> +    migration_incoming_state_destroy();
> +
> +    if (migration_has_failed(mis->state) && mis->exit_on_error) {
> +        WITH_QEMU_LOCK_GUARD(&s->error_mutex) {
> +            error_report_err(s->error);
> +            s->error = NULL;
> +        }
> +
> +        exit(EXIT_FAILURE);
> +    }
> +}
> +
>  static void process_incoming_migration_bh(void *opaque)
>  {
>      MigrationIncomingState *mis = opaque;
> @@ -861,7 +888,7 @@ static void process_incoming_migration_bh(void *opaque)
>       */
>      migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
>                        MIGRATION_STATUS_COMPLETED);
> -    migration_incoming_state_destroy();
> +    migration_incoming_finish();
>  }
>  
>  static void coroutine_fn
> @@ -888,23 +915,13 @@ process_incoming_migration_co(void *opaque)
>  
>      ps = postcopy_state_get();
>      trace_process_incoming_migration_co_end(ret, ps);
> -    if (ps != POSTCOPY_INCOMING_NONE) {
> -        if (ps == POSTCOPY_INCOMING_ADVISE) {
> -            /*
> -             * Where a migration had postcopy enabled (and thus went to advise)
> -             * but managed to complete within the precopy period, we can use
> -             * the normal exit.
> -             */
> -            postcopy_ram_incoming_cleanup(mis);
> -        } else if (ret >= 0) {
> -            /*
> -             * Postcopy was started, cleanup should happen at the end of the
> -             * postcopy thread.
> -             */
> -            trace_process_incoming_migration_co_postcopy_end_main();
> -            goto out;
> -        }
> -        /* Else if something went wrong then just fall out of the normal exit */
> +    if (ps >= POSTCOPY_INCOMING_LISTENING) {
> +        /*
> +         * Postcopy was started, cleanup should happen at the end of the
> +         * postcopy thread.
> +         */
> +        trace_process_incoming_migration_co_postcopy_end_main();
> +        goto out;
>      }
>  
>      if (ret < 0) {
> @@ -926,16 +943,7 @@ fail:
>      migrate_set_error(s, local_err);
>      error_free(local_err);
>  
> -    migration_incoming_state_destroy();
> -
> -    if (mis->exit_on_error) {
> -        WITH_QEMU_LOCK_GUARD(&s->error_mutex) {
> -            error_report_err(s->error);
> -            s->error = NULL;
> -        }
> -
> -        exit(EXIT_FAILURE);
> -    }
> +    migration_incoming_finish();
>  out:
>      /* Pairs with the refcount taken in qmp_migrate_incoming() */
>      migrate_incoming_unref_outgoing_state();
> diff --git a/migration/migration.h b/migration/migration.h
> index 2c2331f40d..67e3318467 100644
> --- a/migration/migration.h
> +++ b/migration/migration.h
> @@ -518,6 +518,7 @@ void migrate_set_state(MigrationStatus *state, MigrationStatus old_state,
>  void migration_fd_process_incoming(QEMUFile *f);
>  void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp);
>  void migration_incoming_process(void);
> +void migration_incoming_finish(void);

What about merging migration_incoming_state_destroy and
migration_incoming_finish and pair all of this with migration_cleanup?

static void migration_cleanup_bh(void *opaque)
{
    migration_cleanup(opaque);
}

static void migration_incoming_cleanup_bh(void *opaque)
{
    migration_incoming_cleanup(opaque);
}

>  
>  bool  migration_has_all_channels(void);
>  
> diff --git a/migration/savevm.c b/migration/savevm.c
> index fabbeb296a..d7eb416d48 100644
> --- a/migration/savevm.c
> +++ b/migration/savevm.c
> @@ -2069,6 +2069,11 @@ static int loadvm_postcopy_ram_handle_discard(MigrationIncomingState *mis,
>      return 0;
>  }
>  
> +static void postcopy_ram_listen_thread_bh(void *opaque)
> +{
> +    migration_incoming_finish();
> +}
> +
>  /*
>   * Triggered by a postcopy_listen command; this thread takes over reading
>   * the input stream, leaving the main thread free to carry on loading the rest
> @@ -2122,52 +2127,31 @@ static void *postcopy_ram_listen_thread(void *opaque)
>                           "bitmaps may be lost, and present migrated dirty "
>                           "bitmaps are correctly migrated and valid.",
>                           __func__, load_res);
> -            load_res = 0; /* prevent further exit() */
>          } else {
>              error_report("%s: loadvm failed: %d", __func__, load_res);
>              migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
>                                             MIGRATION_STATUS_FAILED);
> +            goto out;
>          }
>      }
> -    if (load_res >= 0) {
> -        /*
> -         * This looks good, but it's possible that the device loading in the
> -         * main thread hasn't finished yet, and so we might not be in 'RUN'
> -         * state yet; wait for the end of the main thread.
> -         */
> -        qemu_event_wait(&mis->main_thread_load_event);
> -    }
> -    postcopy_ram_incoming_cleanup(mis);
> -
> -    if (load_res < 0) {
> -        /*
> -         * If something went wrong then we have a bad state so exit;
> -         * depending how far we got it might be possible at this point
> -         * to leave the guest running and fire MCEs for pages that never
> -         * arrived as a desperate recovery step.
> -         */
> -        rcu_unregister_thread();
> -        exit(EXIT_FAILURE);
> -    }
> +    /*
> +     * This looks good, but it's possible that the device loading in the
> +     * main thread hasn't finished yet, and so we might not be in 'RUN'
> +     * state yet; wait for the end of the main thread.
> +     */
> +    qemu_event_wait(&mis->main_thread_load_event);
>  
>      migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
>                                     MIGRATION_STATUS_COMPLETED);
> -    /*
> -     * If everything has worked fine, then the main thread has waited
> -     * for us to start, and we're the last use of the mis.
> -     * (If something broke then qemu will have to exit anyway since it's
> -     * got a bad migration state).
> -     */
> -    bql_lock();
> -    migration_incoming_state_destroy();
> -    bql_unlock();
>  
> +out:
>      rcu_unregister_thread();
> -    mis->have_listen_thread = false;
>      postcopy_state_set(POSTCOPY_INCOMING_END);
>  
>      object_unref(OBJECT(migr));
>  
> +    migration_bh_schedule(postcopy_ram_listen_thread_bh, NULL);

Better to schedule before the object_unref to ensure there's always
someone holding a reference?

> +
>      return NULL;
>  }
>  
> @@ -2217,7 +2201,7 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
>      mis->have_listen_thread = true;
>      postcopy_thread_create(mis, &mis->listen_thread,
>                             MIGRATION_THREAD_DST_LISTEN,
> -                           postcopy_ram_listen_thread, QEMU_THREAD_DETACHED);
> +                           postcopy_ram_listen_thread, QEMU_THREAD_JOINABLE);
>      trace_loadvm_postcopy_handle_listen("return");
>  
>      return 0;
Re: [PATCH 3/4] migration: Refactor incoming cleanup into migration_incoming_finish()
Posted by Juraj Marcin 6 days, 3 hours ago
Hi Fabiano,

On 2025-09-19 13:46, Fabiano Rosas wrote:
> Juraj Marcin <jmarcin@redhat.com> writes:
> 
> Hi Juraj,
> 
> Good patch, nice use of migrate_has_failed()

Thanks!

> 
> > From: Juraj Marcin <jmarcin@redhat.com>
> >
> > Currently, there are two functions that are responsible for cleanup of
> > the incoming migration state. With successful precopy, it's the main
> > thread and with successful postcopy it's the listen thread. However, if
> > postcopy fails during in the device load, both functions will try to do
> > the cleanup. Moreover, when exit-on-error parameter was added, it was
> > applied only to precopy.
> >
> 
> Someone could be relying in postcopy always exiting on error while
> explicitly setting exit-on-error=false for precopy and this patch would
> change the behavior incompatibly. Is this an issue? I'm willing to
> ignore it, but you guys know more about postcopy.

Good question. When going through older patches where postcopy listen
thread and then where exit-on-error were implemented, it seemed more
like an oversight rather than an intentional omission. However, it might be better
to not break any potential users of this, we could add another option,
"exit-on-postcopy-error" that would allow such handling if postscopy
failed unrecoverably. I've already talked about such option with
@jdenemar and he agreed with it.

> 
> > This patch refactors common cleanup and exiting on error into a helper
> > function that can be started either from precopy or postcopy, reducing
> > the duplication. If the listen thread has been started (the postcopy
> > state is at least LISTENING), the listen thread is responsible for all
> > cleanup and exiting, otherwise it's the main thread's responsibility.
> 
> Don't the BHs also run in the main thread? I'm not sure this sentence is
> accurate.

Yeah, it is a bit inaccurate now that you mention it, BHs are indeed run
in the main thread. More accurate would be to replace the main thread with
process incoming migration coroutine, that would precisely describe who
has the responsibility to call the cleanup.

> 
> >
> > Signed-off-by: Juraj Marcin <jmarcin@redhat.com>
> > ---
> >  migration/migration.c | 64 ++++++++++++++++++++++++-------------------
> >  migration/migration.h |  1 +
> >  migration/savevm.c    | 48 +++++++++++---------------------
> 
> Could someone act on the TODOs and move postcopy code into postcopy-ram?
> It's never too late to make things consistent.

I can take a look.

> 
> >  3 files changed, 53 insertions(+), 60 deletions(-)
> >
> > diff --git a/migration/migration.c b/migration/migration.c
> > index 2c0b3a7229..7222e3de13 100644
> > --- a/migration/migration.c
> > +++ b/migration/migration.c
> > @@ -442,9 +442,19 @@ void migration_incoming_transport_cleanup(MigrationIncomingState *mis)
> >  void migration_incoming_state_destroy(void)
> >  {
> >      struct MigrationIncomingState *mis = migration_incoming_get_current();
> > +    PostcopyState ps = postcopy_state_get();
> >  
> >      multifd_recv_cleanup();
> >  
> > +    if (mis->have_listen_thread) {
> > +        qemu_thread_join(&mis->listen_thread);
> > +        mis->have_listen_thread = false;
> > +    }
> > +
> > +    if (ps != POSTCOPY_INCOMING_NONE) {
> > +        postcopy_ram_incoming_cleanup(mis);
> > +    }
> > +
> >      /*
> >       * RAM state cleanup needs to happen after multifd cleanup, because
> >       * multifd threads can use some of its states (receivedmap).
> > @@ -809,6 +819,23 @@ static void qemu_start_incoming_migration(const char *uri, bool has_channels,
> >      cpr_state_close();
> >  }
> >  
> > +void migration_incoming_finish(void)
> > +{
> > +    MigrationState *s = migrate_get_current();
> > +    MigrationIncomingState *mis = migration_incoming_get_current();
> > +
> > +    migration_incoming_state_destroy();
> > +
> > +    if (migration_has_failed(mis->state) && mis->exit_on_error) {
> > +        WITH_QEMU_LOCK_GUARD(&s->error_mutex) {
> > +            error_report_err(s->error);
> > +            s->error = NULL;
> > +        }
> > +
> > +        exit(EXIT_FAILURE);
> > +    }
> > +}
> > +
> >  static void process_incoming_migration_bh(void *opaque)
> >  {
> >      MigrationIncomingState *mis = opaque;
> > @@ -861,7 +888,7 @@ static void process_incoming_migration_bh(void *opaque)
> >       */
> >      migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
> >                        MIGRATION_STATUS_COMPLETED);
> > -    migration_incoming_state_destroy();
> > +    migration_incoming_finish();
> >  }
> >  
> >  static void coroutine_fn
> > @@ -888,23 +915,13 @@ process_incoming_migration_co(void *opaque)
> >  
> >      ps = postcopy_state_get();
> >      trace_process_incoming_migration_co_end(ret, ps);
> > -    if (ps != POSTCOPY_INCOMING_NONE) {
> > -        if (ps == POSTCOPY_INCOMING_ADVISE) {
> > -            /*
> > -             * Where a migration had postcopy enabled (and thus went to advise)
> > -             * but managed to complete within the precopy period, we can use
> > -             * the normal exit.
> > -             */
> > -            postcopy_ram_incoming_cleanup(mis);
> > -        } else if (ret >= 0) {
> > -            /*
> > -             * Postcopy was started, cleanup should happen at the end of the
> > -             * postcopy thread.
> > -             */
> > -            trace_process_incoming_migration_co_postcopy_end_main();
> > -            goto out;
> > -        }
> > -        /* Else if something went wrong then just fall out of the normal exit */
> > +    if (ps >= POSTCOPY_INCOMING_LISTENING) {
> > +        /*
> > +         * Postcopy was started, cleanup should happen at the end of the
> > +         * postcopy thread.
> > +         */
> > +        trace_process_incoming_migration_co_postcopy_end_main();
> > +        goto out;
> >      }
> >  
> >      if (ret < 0) {
> > @@ -926,16 +943,7 @@ fail:
> >      migrate_set_error(s, local_err);
> >      error_free(local_err);
> >  
> > -    migration_incoming_state_destroy();
> > -
> > -    if (mis->exit_on_error) {
> > -        WITH_QEMU_LOCK_GUARD(&s->error_mutex) {
> > -            error_report_err(s->error);
> > -            s->error = NULL;
> > -        }
> > -
> > -        exit(EXIT_FAILURE);
> > -    }
> > +    migration_incoming_finish();
> >  out:
> >      /* Pairs with the refcount taken in qmp_migrate_incoming() */
> >      migrate_incoming_unref_outgoing_state();
> > diff --git a/migration/migration.h b/migration/migration.h
> > index 2c2331f40d..67e3318467 100644
> > --- a/migration/migration.h
> > +++ b/migration/migration.h
> > @@ -518,6 +518,7 @@ void migrate_set_state(MigrationStatus *state, MigrationStatus old_state,
> >  void migration_fd_process_incoming(QEMUFile *f);
> >  void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp);
> >  void migration_incoming_process(void);
> > +void migration_incoming_finish(void);
> 
> What about merging migration_incoming_state_destroy and
> migration_incoming_finish and pair all of this with migration_cleanup?
> 
> static void migration_cleanup_bh(void *opaque)
> {
>     migration_cleanup(opaque);
> }
> 
> static void migration_incoming_cleanup_bh(void *opaque)
> {
>     migration_incoming_cleanup(opaque);
> }

Yes, it would pair well. I guess it would also solve Peter's nitpick
about the change in process_incoming_migration_bh() above.

> >  
> >  bool  migration_has_all_channels(void);
> >  
> > diff --git a/migration/savevm.c b/migration/savevm.c
> > index fabbeb296a..d7eb416d48 100644
> > --- a/migration/savevm.c
> > +++ b/migration/savevm.c
> > @@ -2069,6 +2069,11 @@ static int loadvm_postcopy_ram_handle_discard(MigrationIncomingState *mis,
> >      return 0;
> >  }
> >  
> > +static void postcopy_ram_listen_thread_bh(void *opaque)
> > +{
> > +    migration_incoming_finish();
> > +}
> > +
> >  /*
> >   * Triggered by a postcopy_listen command; this thread takes over reading
> >   * the input stream, leaving the main thread free to carry on loading the rest
> > @@ -2122,52 +2127,31 @@ static void *postcopy_ram_listen_thread(void *opaque)
> >                           "bitmaps may be lost, and present migrated dirty "
> >                           "bitmaps are correctly migrated and valid.",
> >                           __func__, load_res);
> > -            load_res = 0; /* prevent further exit() */
> >          } else {
> >              error_report("%s: loadvm failed: %d", __func__, load_res);
> >              migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
> >                                             MIGRATION_STATUS_FAILED);
> > +            goto out;
> >          }
> >      }
> > -    if (load_res >= 0) {
> > -        /*
> > -         * This looks good, but it's possible that the device loading in the
> > -         * main thread hasn't finished yet, and so we might not be in 'RUN'
> > -         * state yet; wait for the end of the main thread.
> > -         */
> > -        qemu_event_wait(&mis->main_thread_load_event);
> > -    }
> > -    postcopy_ram_incoming_cleanup(mis);
> > -
> > -    if (load_res < 0) {
> > -        /*
> > -         * If something went wrong then we have a bad state so exit;
> > -         * depending how far we got it might be possible at this point
> > -         * to leave the guest running and fire MCEs for pages that never
> > -         * arrived as a desperate recovery step.
> > -         */
> > -        rcu_unregister_thread();
> > -        exit(EXIT_FAILURE);
> > -    }
> > +    /*
> > +     * This looks good, but it's possible that the device loading in the
> > +     * main thread hasn't finished yet, and so we might not be in 'RUN'
> > +     * state yet; wait for the end of the main thread.
> > +     */
> > +    qemu_event_wait(&mis->main_thread_load_event);
> >  
> >      migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
> >                                     MIGRATION_STATUS_COMPLETED);
> > -    /*
> > -     * If everything has worked fine, then the main thread has waited
> > -     * for us to start, and we're the last use of the mis.
> > -     * (If something broke then qemu will have to exit anyway since it's
> > -     * got a bad migration state).
> > -     */
> > -    bql_lock();
> > -    migration_incoming_state_destroy();
> > -    bql_unlock();
> >  
> > +out:
> >      rcu_unregister_thread();
> > -    mis->have_listen_thread = false;
> >      postcopy_state_set(POSTCOPY_INCOMING_END);
> >  
> >      object_unref(OBJECT(migr));
> >  
> > +    migration_bh_schedule(postcopy_ram_listen_thread_bh, NULL);
> 
> Better to schedule before the object_unref to ensure there's always
> someone holding a reference?

True, I'll move it.

> 
> > +
> >      return NULL;
> >  }
> >  
> > @@ -2217,7 +2201,7 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
> >      mis->have_listen_thread = true;
> >      postcopy_thread_create(mis, &mis->listen_thread,
> >                             MIGRATION_THREAD_DST_LISTEN,
> > -                           postcopy_ram_listen_thread, QEMU_THREAD_DETACHED);
> > +                           postcopy_ram_listen_thread, QEMU_THREAD_JOINABLE);
> >      trace_loadvm_postcopy_handle_listen("return");
> >  
> >      return 0;
>
Re: [PATCH 3/4] migration: Refactor incoming cleanup into migration_incoming_finish()
Posted by Peter Xu 6 days ago
On Mon, Sep 22, 2025 at 02:58:38PM +0200, Juraj Marcin wrote:
> Hi Fabiano,
> 
> On 2025-09-19 13:46, Fabiano Rosas wrote:
> > Juraj Marcin <jmarcin@redhat.com> writes:
> > 
> > Hi Juraj,
> > 
> > Good patch, nice use of migrate_has_failed()
> 
> Thanks!
> 
> > 
> > > From: Juraj Marcin <jmarcin@redhat.com>
> > >
> > > Currently, there are two functions that are responsible for cleanup of
> > > the incoming migration state. With successful precopy, it's the main
> > > thread and with successful postcopy it's the listen thread. However, if
> > > postcopy fails during in the device load, both functions will try to do
> > > the cleanup. Moreover, when exit-on-error parameter was added, it was
> > > applied only to precopy.
> > >
> > 
> > Someone could be relying in postcopy always exiting on error while
> > explicitly setting exit-on-error=false for precopy and this patch would
> > change the behavior incompatibly. Is this an issue? I'm willing to
> > ignore it, but you guys know more about postcopy.
> 
> Good question. When going through older patches where postcopy listen
> thread and then where exit-on-error were implemented, it seemed more
> like an overlook than intentional omission. However, it might be better
> to not break any potential users of this, we could add another option,
> "exit-on-postcopy-error" that would allow such handling if postscopy
> failed unrecoverably. I've already talked about such option with
> @jdenemar and he agreed with it.

The idea for postcopy ram is, it should never fail.. as failing should
never be better than a pause.  Block dirty bitmap might be different,
though, when enabled separately.

For postcopy-ram, qemu_loadvm_state_main() will in reality only receive RAM
updates. It'll almost always trigger the postcopy_pause_incoming() path
when anything fails.

For pure block-dirty-bitmap-only styled postcopy: for this exit-on-error, I
also don't think we should really "exit on errors", even if the flag is
set.  IIUC, it's not fatal to the VM if that failed, as described in:

commit ee64722514fabcad2430982ade86180208f5be4f
Author: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Date:   Mon Jul 27 22:42:32 2020 +0300

    migration/savevm: don't worry if bitmap migration postcopy failed

    ...

    And anyway, bitmaps postcopy is not prepared to be somehow recovered.
    The original idea instead is that if bitmaps postcopy failed, we just
    lose some bitmaps, which is not critical. So, on failure we just need
    to remove unfinished bitmaps and guest should continue execution on
    destination.

Hence, exiting here might be overkill.. need block developers to double
check, though..

> 
> > 
> > > This patch refactors common cleanup and exiting on error into a helper
> > > function that can be started either from precopy or postcopy, reducing
> > > the duplication. If the listen thread has been started (the postcopy
> > > state is at least LISTENING), the listen thread is responsible for all
> > > cleanup and exiting, otherwise it's the main thread's responsibility.
> > 
> > Don't the BHs also run in the main thread? I'm not sure this sentence is
> > accurate.
> 
> Yeah, it is a bit inaccurate now that you mention it, BHs are indeed run
> in the main thread. More accurate would to replace the main thread with
> process incoming migration coroutine, that would precisely describe who
> has the responsibility to call the cleanup.
> 
> > 
> > >
> > > Signed-off-by: Juraj Marcin <jmarcin@redhat.com>
> > > ---
> > >  migration/migration.c | 64 ++++++++++++++++++++++++-------------------
> > >  migration/migration.h |  1 +
> > >  migration/savevm.c    | 48 +++++++++++---------------------
> > 
> > Could someone act on the TODOs and move postcopy code into postcopy-ram?
> > It's never too late to make things consistent.
> 
> I can take a look.
> 
> > 
> > >  3 files changed, 53 insertions(+), 60 deletions(-)
> > >
> > > diff --git a/migration/migration.c b/migration/migration.c
> > > index 2c0b3a7229..7222e3de13 100644
> > > --- a/migration/migration.c
> > > +++ b/migration/migration.c
> > > @@ -442,9 +442,19 @@ void migration_incoming_transport_cleanup(MigrationIncomingState *mis)
> > >  void migration_incoming_state_destroy(void)
> > >  {
> > >      struct MigrationIncomingState *mis = migration_incoming_get_current();
> > > +    PostcopyState ps = postcopy_state_get();
> > >  
> > >      multifd_recv_cleanup();
> > >  
> > > +    if (mis->have_listen_thread) {
> > > +        qemu_thread_join(&mis->listen_thread);
> > > +        mis->have_listen_thread = false;
> > > +    }
> > > +
> > > +    if (ps != POSTCOPY_INCOMING_NONE) {
> > > +        postcopy_ram_incoming_cleanup(mis);
> > > +    }
> > > +
> > >      /*
> > >       * RAM state cleanup needs to happen after multifd cleanup, because
> > >       * multifd threads can use some of its states (receivedmap).
> > > @@ -809,6 +819,23 @@ static void qemu_start_incoming_migration(const char *uri, bool has_channels,
> > >      cpr_state_close();
> > >  }
> > >  
> > > +void migration_incoming_finish(void)
> > > +{
> > > +    MigrationState *s = migrate_get_current();
> > > +    MigrationIncomingState *mis = migration_incoming_get_current();
> > > +
> > > +    migration_incoming_state_destroy();
> > > +
> > > +    if (migration_has_failed(mis->state) && mis->exit_on_error) {
> > > +        WITH_QEMU_LOCK_GUARD(&s->error_mutex) {
> > > +            error_report_err(s->error);
> > > +            s->error = NULL;
> > > +        }
> > > +
> > > +        exit(EXIT_FAILURE);
> > > +    }
> > > +}
> > > +
> > >  static void process_incoming_migration_bh(void *opaque)
> > >  {
> > >      MigrationIncomingState *mis = opaque;
> > > @@ -861,7 +888,7 @@ static void process_incoming_migration_bh(void *opaque)
> > >       */
> > >      migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
> > >                        MIGRATION_STATUS_COMPLETED);
> > > -    migration_incoming_state_destroy();
> > > +    migration_incoming_finish();
> > >  }
> > >  
> > >  static void coroutine_fn
> > > @@ -888,23 +915,13 @@ process_incoming_migration_co(void *opaque)
> > >  
> > >      ps = postcopy_state_get();
> > >      trace_process_incoming_migration_co_end(ret, ps);
> > > -    if (ps != POSTCOPY_INCOMING_NONE) {
> > > -        if (ps == POSTCOPY_INCOMING_ADVISE) {
> > > -            /*
> > > -             * Where a migration had postcopy enabled (and thus went to advise)
> > > -             * but managed to complete within the precopy period, we can use
> > > -             * the normal exit.
> > > -             */
> > > -            postcopy_ram_incoming_cleanup(mis);
> > > -        } else if (ret >= 0) {
> > > -            /*
> > > -             * Postcopy was started, cleanup should happen at the end of the
> > > -             * postcopy thread.
> > > -             */
> > > -            trace_process_incoming_migration_co_postcopy_end_main();
> > > -            goto out;
> > > -        }
> > > -        /* Else if something went wrong then just fall out of the normal exit */
> > > +    if (ps >= POSTCOPY_INCOMING_LISTENING) {
> > > +        /*
> > > +         * Postcopy was started, cleanup should happen at the end of the
> > > +         * postcopy thread.
> > > +         */
> > > +        trace_process_incoming_migration_co_postcopy_end_main();
> > > +        goto out;
> > >      }
> > >  
> > >      if (ret < 0) {
> > > @@ -926,16 +943,7 @@ fail:
> > >      migrate_set_error(s, local_err);
> > >      error_free(local_err);
> > >  
> > > -    migration_incoming_state_destroy();
> > > -
> > > -    if (mis->exit_on_error) {
> > > -        WITH_QEMU_LOCK_GUARD(&s->error_mutex) {
> > > -            error_report_err(s->error);
> > > -            s->error = NULL;
> > > -        }
> > > -
> > > -        exit(EXIT_FAILURE);
> > > -    }
> > > +    migration_incoming_finish();
> > >  out:
> > >      /* Pairs with the refcount taken in qmp_migrate_incoming() */
> > >      migrate_incoming_unref_outgoing_state();
> > > diff --git a/migration/migration.h b/migration/migration.h
> > > index 2c2331f40d..67e3318467 100644
> > > --- a/migration/migration.h
> > > +++ b/migration/migration.h
> > > @@ -518,6 +518,7 @@ void migrate_set_state(MigrationStatus *state, MigrationStatus old_state,
> > >  void migration_fd_process_incoming(QEMUFile *f);
> > >  void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp);
> > >  void migration_incoming_process(void);
> > > +void migration_incoming_finish(void);
> > 
> > What about merging migration_incoming_state_destroy and
> > migration_incoming_finish and pair all of this with migration_cleanup?
> > 
> > static void migration_cleanup_bh(void *opaque)
> > {
> >     migration_cleanup(opaque);
> > }
> > 
> > static void migration_incoming_cleanup_bh(void *opaque)
> > {
> >     migration_incoming_cleanup(opaque);
> > }
> 
> Yes, it would pair well. I guess it would also solve Peter's nitpick
> about the change in process_incoming_migration_bh() above.
> 
> > >  
> > >  bool  migration_has_all_channels(void);
> > >  
> > > diff --git a/migration/savevm.c b/migration/savevm.c
> > > index fabbeb296a..d7eb416d48 100644
> > > --- a/migration/savevm.c
> > > +++ b/migration/savevm.c
> > > @@ -2069,6 +2069,11 @@ static int loadvm_postcopy_ram_handle_discard(MigrationIncomingState *mis,
> > >      return 0;
> > >  }
> > >  
> > > +static void postcopy_ram_listen_thread_bh(void *opaque)
> > > +{
> > > +    migration_incoming_finish();
> > > +}
> > > +
> > >  /*
> > >   * Triggered by a postcopy_listen command; this thread takes over reading
> > >   * the input stream, leaving the main thread free to carry on loading the rest
> > > @@ -2122,52 +2127,31 @@ static void *postcopy_ram_listen_thread(void *opaque)
> > >                           "bitmaps may be lost, and present migrated dirty "
> > >                           "bitmaps are correctly migrated and valid.",
> > >                           __func__, load_res);
> > > -            load_res = 0; /* prevent further exit() */
> > >          } else {
> > >              error_report("%s: loadvm failed: %d", __func__, load_res);
> > >              migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
> > >                                             MIGRATION_STATUS_FAILED);
> > > +            goto out;
> > >          }
> > >      }
> > > -    if (load_res >= 0) {
> > > -        /*
> > > -         * This looks good, but it's possible that the device loading in the
> > > -         * main thread hasn't finished yet, and so we might not be in 'RUN'
> > > -         * state yet; wait for the end of the main thread.
> > > -         */
> > > -        qemu_event_wait(&mis->main_thread_load_event);
> > > -    }
> > > -    postcopy_ram_incoming_cleanup(mis);
> > > -
> > > -    if (load_res < 0) {
> > > -        /*
> > > -         * If something went wrong then we have a bad state so exit;
> > > -         * depending how far we got it might be possible at this point
> > > -         * to leave the guest running and fire MCEs for pages that never
> > > -         * arrived as a desperate recovery step.
> > > -         */
> > > -        rcu_unregister_thread();
> > > -        exit(EXIT_FAILURE);
> > > -    }
> > > +    /*
> > > +     * This looks good, but it's possible that the device loading in the
> > > +     * main thread hasn't finished yet, and so we might not be in 'RUN'
> > > +     * state yet; wait for the end of the main thread.
> > > +     */
> > > +    qemu_event_wait(&mis->main_thread_load_event);

PS: I didn't notice this change; it looks like it may be better as a
separate patch when moving it out of the if.  Meanwhile, I don't think we set
it right either, in qemu_loadvm_state():

    qemu_event_set(&mis->main_thread_load_event);

The problem is e.g. load_snapshot / qmp_xen_load_devices_state also set
that event, even if there'll be no one to consume it.. not a huge deal, but
maybe while moving it out of the if, we can also clean up the set() side
too, by moving the set() up into process_incoming_migration_co().

> > >  
> > >      migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
> > >                                     MIGRATION_STATUS_COMPLETED);
> > > -    /*
> > > -     * If everything has worked fine, then the main thread has waited
> > > -     * for us to start, and we're the last use of the mis.
> > > -     * (If something broke then qemu will have to exit anyway since it's
> > > -     * got a bad migration state).
> > > -     */
> > > -    bql_lock();
> > > -    migration_incoming_state_destroy();
> > > -    bql_unlock();
> > >  
> > > +out:
> > >      rcu_unregister_thread();
> > > -    mis->have_listen_thread = false;
> > >      postcopy_state_set(POSTCOPY_INCOMING_END);
> > >  
> > >      object_unref(OBJECT(migr));
> > >  
> > > +    migration_bh_schedule(postcopy_ram_listen_thread_bh, NULL);
> > 
> > Better to schedule before the object_unref to ensure there's always
> > someone holding a reference?
> 
> True, I'll move it.

Good point.  Though I'm not sure moving it up would help, because it'll
be the BH that references the MigrationState*..  So maybe we could unref at
the end of postcopy_ram_listen_thread_bh().  If so, we should add a comment
on ref() / unref() saying how they're paired.

> 
> > 
> > > +
> > >      return NULL;
> > >  }
> > >  
> > > @@ -2217,7 +2201,7 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
> > >      mis->have_listen_thread = true;
> > >      postcopy_thread_create(mis, &mis->listen_thread,
> > >                             MIGRATION_THREAD_DST_LISTEN,
> > > -                           postcopy_ram_listen_thread, QEMU_THREAD_DETACHED);
> > > +                           postcopy_ram_listen_thread, QEMU_THREAD_JOINABLE);
> > >      trace_loadvm_postcopy_handle_listen("return");
> > >  
> > >      return 0;
> > 
> 

-- 
Peter Xu
Re: [PATCH 3/4] migration: Refactor incoming cleanup into migration_incoming_finish()
Posted by Juraj Marcin 5 days, 1 hour ago
On 2025-09-22 11:51, Peter Xu wrote:
> On Mon, Sep 22, 2025 at 02:58:38PM +0200, Juraj Marcin wrote:
> > Hi Fabiano,
> > 
> > On 2025-09-19 13:46, Fabiano Rosas wrote:
> > > Juraj Marcin <jmarcin@redhat.com> writes:
> > > 
> > > Hi Juraj,
> > > 
> > > Good patch, nice use of migrate_has_failed()
> > 
> > Thanks!
> > 
> > > 
> > > > From: Juraj Marcin <jmarcin@redhat.com>
> > > >
> > > > Currently, there are two functions that are responsible for cleanup of
> > > > the incoming migration state. With successful precopy, it's the main
> > > > thread and with successful postcopy it's the listen thread. However, if
> > > > postcopy fails during in the device load, both functions will try to do
> > > > the cleanup. Moreover, when exit-on-error parameter was added, it was
> > > > applied only to precopy.
> > > >
> > > 
> > > Someone could be relying in postcopy always exiting on error while
> > > explicitly setting exit-on-error=false for precopy and this patch would
> > > change the behavior incompatibly. Is this an issue? I'm willing to
> > > ignore it, but you guys know more about postcopy.
> > 
> > Good question. When going through older patches where postcopy listen
> > thread and then where exit-on-error were implemented, it seemed more
> > like an overlook than intentional omission. However, it might be better
> > to not break any potential users of this, we could add another option,
> > "exit-on-postcopy-error" that would allow such handling if postscopy
> > failed unrecoverably. I've already talked about such option with
> > @jdenemar and he agreed with it.
> 
> The idea for postcopy ram is, it should never fail.. as failing should
> never be better than a pause.  Block dirty bitmap might be different,
> though, when enabled separately.
> 
> For postcopy-ram, qemu_loadvm_state_main() will in reality only receive RAM
> updates. It'll almost always trigger the postcopy_pause_incoming() path
> when anything fails.
> 
> For pure block-dirty-bitmap-only styled postcopy: for this exit-on-error, I
> also don't think we should really "exit on errors", even if the flag is
> set.  IIUC, it's not fatal to the VM if that failed, as described in:

I agree, however, this patch doesn't add any new cases in which the
destination QEMU would exit. If there is an error in block dirty bitmaps,
it is only reported to the console, and then it continues waiting for
main_thread_load_event, marks the migration as COMPLETED and does the
cleanup, same as before. [1] I can add a comment similar to "prevent
further exit" as there was before.

However, if there is some other error, due to which the postcopy cannot
pause (for example, there was a failure in the main thread loading the
device state before the machine started), the migration status changes to
FAILED and execution jumps right to cleanup, which then checks exit-on-error
and optionally exits QEMU; previously it would always exit in such a case [2]:

[1]: https://gitlab.com/qemu-project/qemu/-/blob/ab8008b231e758e03c87c1c483c03afdd9c02e19/migration/savevm.c#L2120
[2]: https://gitlab.com/qemu-project/qemu/-/blob/ab8008b231e758e03c87c1c483c03afdd9c02e19/migration/savevm.c#L2150

> 
> commit ee64722514fabcad2430982ade86180208f5be4f
> Author: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
> Date:   Mon Jul 27 22:42:32 2020 +0300
> 
>     migration/savevm: don't worry if bitmap migration postcopy failed
> 
>     ...
> 
>     And anyway, bitmaps postcopy is not prepared to be somehow recovered.
>     The original idea instead is that if bitmaps postcopy failed, we just
>     lose some bitmaps, which is not critical. So, on failure we just need
>     to remove unfinished bitmaps and guest should continue execution on
>     destination.
> 
> Hence, exit here might be an overkill.. need block developers to double
> check, though..
> 

/* snip */

> > > > diff --git a/migration/savevm.c b/migration/savevm.c
> > > > index fabbeb296a..d7eb416d48 100644
> > > > --- a/migration/savevm.c
> > > > +++ b/migration/savevm.c
> > > > @@ -2069,6 +2069,11 @@ static int loadvm_postcopy_ram_handle_discard(MigrationIncomingState *mis,
> > > >      return 0;
> > > >  }
> > > >  
> > > > +static void postcopy_ram_listen_thread_bh(void *opaque)
> > > > +{
> > > > +    migration_incoming_finish();
> > > > +}
> > > > +
> > > >  /*
> > > >   * Triggered by a postcopy_listen command; this thread takes over reading
> > > >   * the input stream, leaving the main thread free to carry on loading the rest
> > > > @@ -2122,52 +2127,31 @@ static void *postcopy_ram_listen_thread(void *opaque)
> > > >                           "bitmaps may be lost, and present migrated dirty "
> > > >                           "bitmaps are correctly migrated and valid.",
> > > >                           __func__, load_res);
> > > > -            load_res = 0; /* prevent further exit() */
> > > >          } else {
> > > >              error_report("%s: loadvm failed: %d", __func__, load_res);
> > > >              migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
> > > >                                             MIGRATION_STATUS_FAILED);
> > > > +            goto out;
> > > >          }
> > > >      }
> > > > -    if (load_res >= 0) {
> > > > -        /*
> > > > -         * This looks good, but it's possible that the device loading in the
> > > > -         * main thread hasn't finished yet, and so we might not be in 'RUN'
> > > > -         * state yet; wait for the end of the main thread.
> > > > -         */
> > > > -        qemu_event_wait(&mis->main_thread_load_event);
> > > > -    }
> > > > -    postcopy_ram_incoming_cleanup(mis);
> > > > -
> > > > -    if (load_res < 0) {
> > > > -        /*
> > > > -         * If something went wrong then we have a bad state so exit;
> > > > -         * depending how far we got it might be possible at this point
> > > > -         * to leave the guest running and fire MCEs for pages that never
> > > > -         * arrived as a desperate recovery step.
> > > > -         */
> > > > -        rcu_unregister_thread();
> > > > -        exit(EXIT_FAILURE);
> > > > -    }
> > > > +    /*
> > > > +     * This looks good, but it's possible that the device loading in the
> > > > +     * main thread hasn't finished yet, and so we might not be in 'RUN'
> > > > +     * state yet; wait for the end of the main thread.
> > > > +     */
> > > > +    qemu_event_wait(&mis->main_thread_load_event);
> 
> PS: I didn't notice this change, looks like this may be better to be a
> separate patch when moving out of the if.  Meanwhile, I don't think we set
> it right either, in qemu_loadvm_state():
> 
>     qemu_event_set(&mis->main_thread_load_event);
> 
> The problem is e.g. load_snapshot / qmp_xen_load_devices_state also set
> that event, even if there'll be no one to consume it.. not a huge deal, but
> maybe while moving it out of the if, we can also cleanup the set() side
> too, by moving the set() upper into process_incoming_migration_co().

While I have moved it out of the condition, it is still only waited on in
the success path. If there is an error that would previously cause the
condition to be false, the execution now jumps directly to the cleanup
section (the out label), skipping both this wait and the setting of the
migration state to COMPLETED (it's set to FAILED before the jump). But I
can still look into moving the set() up.

> 
> > > >  
> > > >      migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
> > > >                                     MIGRATION_STATUS_COMPLETED);
> > > > -    /*
> > > > -     * If everything has worked fine, then the main thread has waited
> > > > -     * for us to start, and we're the last use of the mis.
> > > > -     * (If something broke then qemu will have to exit anyway since it's
> > > > -     * got a bad migration state).
> > > > -     */
> > > > -    bql_lock();
> > > > -    migration_incoming_state_destroy();
> > > > -    bql_unlock();
> > > >  
> > > > +out:
> > > >      rcu_unregister_thread();
> > > > -    mis->have_listen_thread = false;
> > > >      postcopy_state_set(POSTCOPY_INCOMING_END);
> > > >  
> > > >      object_unref(OBJECT(migr));
> > > >  
> > > > +    migration_bh_schedule(postcopy_ram_listen_thread_bh, NULL);
> > > 
> > > Better to schedule before the object_unref to ensure there's always
> > > someone holding a reference?
> > 
> > True, I'll move it.
> 
> Good point.  Though I'm not sure moving it upper would help, because it'll
> be the BH that references the MigrationState*..  So maybe we could unref at
> the end of postcopy_ram_listen_thread_bh().  If so, we should add a comment
> on ref() / unref() saying how they're paired.
> 
> > 
> > > 
> > > > +
> > > >      return NULL;
> > > >  }
> > > >  
> > > > @@ -2217,7 +2201,7 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
> > > >      mis->have_listen_thread = true;
> > > >      postcopy_thread_create(mis, &mis->listen_thread,
> > > >                             MIGRATION_THREAD_DST_LISTEN,
> > > > -                           postcopy_ram_listen_thread, QEMU_THREAD_DETACHED);
> > > > +                           postcopy_ram_listen_thread, QEMU_THREAD_JOINABLE);
> > > >      trace_loadvm_postcopy_handle_listen("return");
> > > >  
> > > >      return 0;
> > > 
> > 
> 
> -- 
> Peter Xu
>
Re: [PATCH 3/4] migration: Refactor incoming cleanup into migration_incoming_finish()
Posted by Peter Xu 5 days ago
On Tue, Sep 23, 2025 at 04:58:15PM +0200, Juraj Marcin wrote:
> On 2025-09-22 11:51, Peter Xu wrote:
> > On Mon, Sep 22, 2025 at 02:58:38PM +0200, Juraj Marcin wrote:
> > > Hi Fabiano,
> > > 
> > > On 2025-09-19 13:46, Fabiano Rosas wrote:
> > > > Juraj Marcin <jmarcin@redhat.com> writes:
> > > > 
> > > > Hi Juraj,
> > > > 
> > > > Good patch, nice use of migrate_has_failed()
> > > 
> > > Thanks!
> > > 
> > > > 
> > > > > From: Juraj Marcin <jmarcin@redhat.com>
> > > > >
> > > > > Currently, there are two functions that are responsible for cleanup of
> > > > > the incoming migration state. With successful precopy, it's the main
> > > > > thread and with successful postcopy it's the listen thread. However, if
> > > > > postcopy fails during in the device load, both functions will try to do
> > > > > the cleanup. Moreover, when exit-on-error parameter was added, it was
> > > > > applied only to precopy.
> > > > >
> > > > 
> > > > Someone could be relying in postcopy always exiting on error while
> > > > explicitly setting exit-on-error=false for precopy and this patch would
> > > > change the behavior incompatibly. Is this an issue? I'm willing to
> > > > ignore it, but you guys know more about postcopy.
> > > 
> > > Good question. When going through older patches where postcopy listen
> > > thread and then where exit-on-error were implemented, it seemed more
> > > like an overlook than intentional omission. However, it might be better
> > > to not break any potential users of this, we could add another option,
> > > "exit-on-postcopy-error" that would allow such handling if postscopy
> > > failed unrecoverably. I've already talked about such option with
> > > @jdenemar and he agreed with it.
> > 
> > The idea for postcopy ram is, it should never fail.. as failing should
> > never be better than a pause.  Block dirty bitmap might be different,
> > though, when enabled separately.
> > 
> > For postcopy-ram, qemu_loadvm_state_main() will in reality only receive RAM
> > updates. It'll almost always trigger the postcopy_pause_incoming() path
> > when anything fails.
> > 
> > For pure block-dirty-bitmap-only styled postcopy: for this exit-on-error, I
> > also don't think we should really "exit on errors", even if the flag is
> > set.  IIUC, it's not fatal to the VM if that failed, as described in:
> 
> I agree, however, this patch doesn't add any new cases in which the
> destination QEMU would exit. If there is an error in block dirty bitmaps
> it is only reported to the console, and then it continues to waiting for
> main_thread_load_event, marks the migration as COMPLETED and does the
> cleanup, same as before. [1] I can add a comment similar to "prevent
> further exit" as there was before.
> 
> However, if there is other error, in which the postcopy cannot pause
> (for example there was a failure in the main thread loading the device
> state before the machine started), the migration status changes to
> FAILED and jumps right to cleanup which then checks exit-on-error and
> optionally exits the QEMU, before it would always exit in such case [2]:
> 
> [1]: https://gitlab.com/qemu-project/qemu/-/blob/ab8008b231e758e03c87c1c483c03afdd9c02e19/migration/savevm.c#L2120
> [2]: https://gitlab.com/qemu-project/qemu/-/blob/ab8008b231e758e03c87c1c483c03afdd9c02e19/migration/savevm.c#L2150

Ah, so you're saying a failure within qemu_loadvm_state_main() but during
POSTCOPY_INCOMING_LISTENING..  I think you're right.  It's possible.

In that case, exit-on-postcopy-error still sounds like overkill to me.  I
vote that we go ahead and reuse exit-on-error for postcopy too. IIUC that's
what this current patch does as-is.

-- 
Peter Xu
Re: [PATCH 3/4] migration: Refactor incoming cleanup into migration_incoming_finish()
Posted by Fabiano Rosas 5 days, 22 hours ago
Peter Xu <peterx@redhat.com> writes:

> On Mon, Sep 22, 2025 at 02:58:38PM +0200, Juraj Marcin wrote:
>> Hi Fabiano,
>> 
>> On 2025-09-19 13:46, Fabiano Rosas wrote:
>> > Juraj Marcin <jmarcin@redhat.com> writes:
>> > 
>> > Hi Juraj,
>> > 
>> > Good patch, nice use of migrate_has_failed()
>> 
>> Thanks!
>> 
>> > 
>> > > From: Juraj Marcin <jmarcin@redhat.com>
>> > >
>> > > Currently, there are two functions that are responsible for cleanup of
>> > > the incoming migration state. With successful precopy, it's the main
>> > > thread and with successful postcopy it's the listen thread. However, if
>> > > postcopy fails during in the device load, both functions will try to do
>> > > the cleanup. Moreover, when exit-on-error parameter was added, it was
>> > > applied only to precopy.
>> > >
>> > 
>> > Someone could be relying in postcopy always exiting on error while
>> > explicitly setting exit-on-error=false for precopy and this patch would
>> > change the behavior incompatibly. Is this an issue? I'm willing to
>> > ignore it, but you guys know more about postcopy.
>> 
>> Good question. When going through older patches where postcopy listen
>> thread and then where exit-on-error were implemented, it seemed more
>> like an overlook than intentional omission. However, it might be better
>> to not break any potential users of this, we could add another option,
>> "exit-on-postcopy-error" that would allow such handling if postscopy
>> failed unrecoverably. I've already talked about such option with
>> @jdenemar and he agreed with it.
>
> The idea for postcopy ram is, it should never fail.. as failing should
> never be better than a pause.  Block dirty bitmap might be different,
> though, when enabled separately.
>
> For postcopy-ram, qemu_loadvm_state_main() will in reality only receive RAM
> updates. It'll almost always trigger the postcopy_pause_incoming() path
> when anything fails.
>
> For pure block-dirty-bitmap-only styled postcopy: for this exit-on-error, I
> also don't think we should really "exit on errors", even if the flag is
> set.  IIUC, it's not fatal to the VM if that failed, as described in:
>
> commit ee64722514fabcad2430982ade86180208f5be4f
> Author: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
> Date:   Mon Jul 27 22:42:32 2020 +0300
>
>     migration/savevm: don't worry if bitmap migration postcopy failed
>
>     ...
>
>     And anyway, bitmaps postcopy is not prepared to be somehow recovered.
>     The original idea instead is that if bitmaps postcopy failed, we just
>     lose some bitmaps, which is not critical. So, on failure we just need
>     to remove unfinished bitmaps and guest should continue execution on
>     destination.
>
> Hence, exit here might be an overkill.. need block developers to double
> check, though..
>
>> 
>> > 
>> > > This patch refactors common cleanup and exiting on error into a helper
>> > > function that can be started either from precopy or postcopy, reducing
>> > > the duplication. If the listen thread has been started (the postcopy
>> > > state is at least LISTENING), the listen thread is responsible for all
>> > > cleanup and exiting, otherwise it's the main thread's responsibility.
>> > 
>> > Don't the BHs also run in the main thread? I'm not sure this sentence is
>> > accurate.
>> 
>> Yeah, it is a bit inaccurate now that you mention it, BHs are indeed run
>> in the main thread. More accurate would to replace the main thread with
>> process incoming migration coroutine, that would precisely describe who
>> has the responsibility to call the cleanup.
>> 
>> > 
>> > >
>> > > Signed-off-by: Juraj Marcin <jmarcin@redhat.com>
>> > > ---
>> > >  migration/migration.c | 64 ++++++++++++++++++++++++-------------------
>> > >  migration/migration.h |  1 +
>> > >  migration/savevm.c    | 48 +++++++++++---------------------
>> > 
>> > Could someone act on the TODOs and move postcopy code into postcopy-ram?
>> > It's never too late to make things consistent.
>> 
>> I can take a look.
>> 
>> > 
>> > >  3 files changed, 53 insertions(+), 60 deletions(-)
>> > >
>> > > diff --git a/migration/migration.c b/migration/migration.c
>> > > index 2c0b3a7229..7222e3de13 100644
>> > > --- a/migration/migration.c
>> > > +++ b/migration/migration.c
>> > > @@ -442,9 +442,19 @@ void migration_incoming_transport_cleanup(MigrationIncomingState *mis)
>> > >  void migration_incoming_state_destroy(void)
>> > >  {
>> > >      struct MigrationIncomingState *mis = migration_incoming_get_current();
>> > > +    PostcopyState ps = postcopy_state_get();
>> > >  
>> > >      multifd_recv_cleanup();
>> > >  
>> > > +    if (mis->have_listen_thread) {
>> > > +        qemu_thread_join(&mis->listen_thread);
>> > > +        mis->have_listen_thread = false;
>> > > +    }
>> > > +
>> > > +    if (ps != POSTCOPY_INCOMING_NONE) {
>> > > +        postcopy_ram_incoming_cleanup(mis);
>> > > +    }
>> > > +
>> > >      /*
>> > >       * RAM state cleanup needs to happen after multifd cleanup, because
>> > >       * multifd threads can use some of its states (receivedmap).
>> > > @@ -809,6 +819,23 @@ static void qemu_start_incoming_migration(const char *uri, bool has_channels,
>> > >      cpr_state_close();
>> > >  }
>> > >  
>> > > +void migration_incoming_finish(void)
>> > > +{
>> > > +    MigrationState *s = migrate_get_current();
>> > > +    MigrationIncomingState *mis = migration_incoming_get_current();
>> > > +
>> > > +    migration_incoming_state_destroy();
>> > > +
>> > > +    if (migration_has_failed(mis->state) && mis->exit_on_error) {
>> > > +        WITH_QEMU_LOCK_GUARD(&s->error_mutex) {
>> > > +            error_report_err(s->error);
>> > > +            s->error = NULL;
>> > > +        }
>> > > +
>> > > +        exit(EXIT_FAILURE);
>> > > +    }
>> > > +}
>> > > +
>> > >  static void process_incoming_migration_bh(void *opaque)
>> > >  {
>> > >      MigrationIncomingState *mis = opaque;
>> > > @@ -861,7 +888,7 @@ static void process_incoming_migration_bh(void *opaque)
>> > >       */
>> > >      migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
>> > >                        MIGRATION_STATUS_COMPLETED);
>> > > -    migration_incoming_state_destroy();
>> > > +    migration_incoming_finish();
>> > >  }
>> > >  
>> > >  static void coroutine_fn
>> > > @@ -888,23 +915,13 @@ process_incoming_migration_co(void *opaque)
>> > >  
>> > >      ps = postcopy_state_get();
>> > >      trace_process_incoming_migration_co_end(ret, ps);
>> > > -    if (ps != POSTCOPY_INCOMING_NONE) {
>> > > -        if (ps == POSTCOPY_INCOMING_ADVISE) {
>> > > -            /*
>> > > -             * Where a migration had postcopy enabled (and thus went to advise)
>> > > -             * but managed to complete within the precopy period, we can use
>> > > -             * the normal exit.
>> > > -             */
>> > > -            postcopy_ram_incoming_cleanup(mis);
>> > > -        } else if (ret >= 0) {
>> > > -            /*
>> > > -             * Postcopy was started, cleanup should happen at the end of the
>> > > -             * postcopy thread.
>> > > -             */
>> > > -            trace_process_incoming_migration_co_postcopy_end_main();
>> > > -            goto out;
>> > > -        }
>> > > -        /* Else if something went wrong then just fall out of the normal exit */
>> > > +    if (ps >= POSTCOPY_INCOMING_LISTENING) {
>> > > +        /*
>> > > +         * Postcopy was started, cleanup should happen at the end of the
>> > > +         * postcopy thread.
>> > > +         */
>> > > +        trace_process_incoming_migration_co_postcopy_end_main();
>> > > +        goto out;
>> > >      }
>> > >  
>> > >      if (ret < 0) {
>> > > @@ -926,16 +943,7 @@ fail:
>> > >      migrate_set_error(s, local_err);
>> > >      error_free(local_err);
>> > >  
>> > > -    migration_incoming_state_destroy();
>> > > -
>> > > -    if (mis->exit_on_error) {
>> > > -        WITH_QEMU_LOCK_GUARD(&s->error_mutex) {
>> > > -            error_report_err(s->error);
>> > > -            s->error = NULL;
>> > > -        }
>> > > -
>> > > -        exit(EXIT_FAILURE);
>> > > -    }
>> > > +    migration_incoming_finish();
>> > >  out:
>> > >      /* Pairs with the refcount taken in qmp_migrate_incoming() */
>> > >      migrate_incoming_unref_outgoing_state();
>> > > diff --git a/migration/migration.h b/migration/migration.h
>> > > index 2c2331f40d..67e3318467 100644
>> > > --- a/migration/migration.h
>> > > +++ b/migration/migration.h
>> > > @@ -518,6 +518,7 @@ void migrate_set_state(MigrationStatus *state, MigrationStatus old_state,
>> > >  void migration_fd_process_incoming(QEMUFile *f);
>> > >  void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp);
>> > >  void migration_incoming_process(void);
>> > > +void migration_incoming_finish(void);
>> > 
>> > What about merging migration_incoming_state_destroy and
>> > migration_incoming_finish and pair all of this with migration_cleanup?
>> > 
>> > static void migration_cleanup_bh(void *opaque)
>> > {
>> >     migration_cleanup(opaque);
>> > }
>> > 
>> > static void migration_incoming_cleanup_bh(void *opaque)
>> > {
>> >     migration_incoming_cleanup(opaque);
>> > }
>> 
>> Yes, it would pair well. I guess it would also solve Peter's nitpick
>> about the change in process_incoming_migration_bh() above.
>> 
>> > >  
>> > >  bool  migration_has_all_channels(void);
>> > >  
>> > > diff --git a/migration/savevm.c b/migration/savevm.c
>> > > index fabbeb296a..d7eb416d48 100644
>> > > --- a/migration/savevm.c
>> > > +++ b/migration/savevm.c
>> > > @@ -2069,6 +2069,11 @@ static int loadvm_postcopy_ram_handle_discard(MigrationIncomingState *mis,
>> > >      return 0;
>> > >  }
>> > >  
>> > > +static void postcopy_ram_listen_thread_bh(void *opaque)
>> > > +{
>> > > +    migration_incoming_finish();
>> > > +}
>> > > +
>> > >  /*
>> > >   * Triggered by a postcopy_listen command; this thread takes over reading
>> > >   * the input stream, leaving the main thread free to carry on loading the rest
>> > > @@ -2122,52 +2127,31 @@ static void *postcopy_ram_listen_thread(void *opaque)
>> > >                           "bitmaps may be lost, and present migrated dirty "
>> > >                           "bitmaps are correctly migrated and valid.",
>> > >                           __func__, load_res);
>> > > -            load_res = 0; /* prevent further exit() */
>> > >          } else {
>> > >              error_report("%s: loadvm failed: %d", __func__, load_res);
>> > >              migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
>> > >                                             MIGRATION_STATUS_FAILED);
>> > > +            goto out;
>> > >          }
>> > >      }
>> > > -    if (load_res >= 0) {
>> > > -        /*
>> > > -         * This looks good, but it's possible that the device loading in the
>> > > -         * main thread hasn't finished yet, and so we might not be in 'RUN'
>> > > -         * state yet; wait for the end of the main thread.
>> > > -         */
>> > > -        qemu_event_wait(&mis->main_thread_load_event);
>> > > -    }
>> > > -    postcopy_ram_incoming_cleanup(mis);
>> > > -
>> > > -    if (load_res < 0) {
>> > > -        /*
>> > > -         * If something went wrong then we have a bad state so exit;
>> > > -         * depending how far we got it might be possible at this point
>> > > -         * to leave the guest running and fire MCEs for pages that never
>> > > -         * arrived as a desperate recovery step.
>> > > -         */
>> > > -        rcu_unregister_thread();
>> > > -        exit(EXIT_FAILURE);
>> > > -    }
>> > > +    /*
>> > > +     * This looks good, but it's possible that the device loading in the
>> > > +     * main thread hasn't finished yet, and so we might not be in 'RUN'
>> > > +     * state yet; wait for the end of the main thread.
>> > > +     */
>> > > +    qemu_event_wait(&mis->main_thread_load_event);
>
> PS: I didn't notice this change, looks like this may be better to be a
> separate patch when moving out of the if.  Meanwhile, I don't think we set
> it right either, in qemu_loadvm_state():
>
>     qemu_event_set(&mis->main_thread_load_event);
>
> The problem is e.g. load_snapshot / qmp_xen_load_devices_state also set
> that event, even if there'll be no one to consume it.. not a huge deal, but
> maybe while moving it out of the if, we can also cleanup the set() side
> too, by moving the set() upper into process_incoming_migration_co().
>
>> > >  
>> > >      migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
>> > >                                     MIGRATION_STATUS_COMPLETED);
>> > > -    /*
>> > > -     * If everything has worked fine, then the main thread has waited
>> > > -     * for us to start, and we're the last use of the mis.
>> > > -     * (If something broke then qemu will have to exit anyway since it's
>> > > -     * got a bad migration state).
>> > > -     */
>> > > -    bql_lock();
>> > > -    migration_incoming_state_destroy();
>> > > -    bql_unlock();
>> > >  
>> > > +out:
>> > >      rcu_unregister_thread();
>> > > -    mis->have_listen_thread = false;
>> > >      postcopy_state_set(POSTCOPY_INCOMING_END);
>> > >  
>> > >      object_unref(OBJECT(migr));
>> > >  
>> > > +    migration_bh_schedule(postcopy_ram_listen_thread_bh, NULL);
>> > 
>> > Better to schedule before the object_unref to ensure there's always
>> > someone holding a reference?
>> 
>> True, I'll move it.
>
> Good point.  Though I'm not sure moving it upper would help, because it'll
> be the BH that references the MigrationState*..

A while ago we wrapped QEMU BHs with migration_bh_dispatch_bh. The BHs
don't need to hold or drop MigrationState reference anymore because the
dispatch callback does it:

void migration_bh_schedule(QEMUBHFunc *cb, void *opaque)
{
    ...
    object_ref(OBJECT(s));
    qemu_bh_schedule(bh);
}

static void migration_bh_dispatch_bh(void *opaque)
{
    ...        
    migbh->cb(migbh->opaque);
    object_unref(OBJECT(s));
    ...
}

>  So maybe we could unref at
> the end of postcopy_ram_listen_thread_bh().  If so, we should add a comment
> on ref() / unref() saying how they're paired.
>

>> 
>> > 
>> > > +
>> > >      return NULL;
>> > >  }
>> > >  
>> > > @@ -2217,7 +2201,7 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
>> > >      mis->have_listen_thread = true;
>> > >      postcopy_thread_create(mis, &mis->listen_thread,
>> > >                             MIGRATION_THREAD_DST_LISTEN,
>> > > -                           postcopy_ram_listen_thread, QEMU_THREAD_DETACHED);
>> > > +                           postcopy_ram_listen_thread, QEMU_THREAD_JOINABLE);
>> > >      trace_loadvm_postcopy_handle_listen("return");
>> > >  
>> > >      return 0;
>> > 
>>
Re: [PATCH 3/4] migration: Refactor incoming cleanup into migration_incoming_finish()
Posted by Peter Xu 5 days, 22 hours ago
On Mon, Sep 22, 2025 at 02:40:37PM -0300, Fabiano Rosas wrote:
> A while ago we wrapped QEMU BHs with migration_bh_dispatch_bh. The BHs
> don't need to hold or drop MigrationState reference anymore because the
> dispatch callback does it:
> 
> void migration_bh_schedule(QEMUBHFunc *cb, void *opaque)
> {
>     ...
>     object_ref(OBJECT(s));
>     qemu_bh_schedule(bh);
> }
> 
> static void migration_bh_dispatch_bh(void *opaque)
> {
>     ...        
>     migbh->cb(migbh->opaque);
>     object_unref(OBJECT(s));
>     ...
> }

Indeed!  Ignore my comment.. :)

-- 
Peter Xu
Re: [PATCH 3/4] migration: Refactor incoming cleanup into migration_incoming_finish()
Posted by Peter Xu 1 week, 2 days ago
On Mon, Sep 15, 2025 at 01:59:14PM +0200, Juraj Marcin wrote:
> From: Juraj Marcin <jmarcin@redhat.com>
> 
> Currently, there are two functions that are responsible for cleanup of
> the incoming migration state. With successful precopy, it's the main
> thread and with successful postcopy it's the listen thread. However, if
> postcopy fails during the device load, both functions will try to do
> the cleanup. Moreover, when the exit-on-error parameter was added, it was
> applied only to precopy.
> 
> This patch refactors common cleanup and exiting on error into a helper
> function that can be started either from precopy or postcopy, reducing
> the duplication. If the listen thread has been started (the postcopy
> state is at least LISTENING), the listen thread is responsible for all
> cleanup and exiting, otherwise it's the main thread's responsibility.

Looks almost good, thanks!  Only nitpicks below.

> 
> Signed-off-by: Juraj Marcin <jmarcin@redhat.com>
> ---
>  migration/migration.c | 64 ++++++++++++++++++++++++-------------------
>  migration/migration.h |  1 +
>  migration/savevm.c    | 48 +++++++++++---------------------
>  3 files changed, 53 insertions(+), 60 deletions(-)
> 
> diff --git a/migration/migration.c b/migration/migration.c
> index 2c0b3a7229..7222e3de13 100644
> --- a/migration/migration.c
> +++ b/migration/migration.c
> @@ -442,9 +442,19 @@ void migration_incoming_transport_cleanup(MigrationIncomingState *mis)
>  void migration_incoming_state_destroy(void)
>  {
>      struct MigrationIncomingState *mis = migration_incoming_get_current();
> +    PostcopyState ps = postcopy_state_get();
>  
>      multifd_recv_cleanup();
>  
> +    if (mis->have_listen_thread) {
> +        qemu_thread_join(&mis->listen_thread);
> +        mis->have_listen_thread = false;
> +    }

Maybe this fits more to be in postcopy_ram_incoming_cleanup() below?

> +
> +    if (ps != POSTCOPY_INCOMING_NONE) {
> +        postcopy_ram_incoming_cleanup(mis);
> +    }
> +
>      /*
>       * RAM state cleanup needs to happen after multifd cleanup, because
>       * multifd threads can use some of its states (receivedmap).
> @@ -809,6 +819,23 @@ static void qemu_start_incoming_migration(const char *uri, bool has_channels,
>      cpr_state_close();
>  }
>  
> +void migration_incoming_finish(void)
> +{
> +    MigrationState *s = migrate_get_current();
> +    MigrationIncomingState *mis = migration_incoming_get_current();
> +
> +    migration_incoming_state_destroy();
> +
> +    if (migration_has_failed(mis->state) && mis->exit_on_error) {

If you agree on my comment in patch 2, we can keep checking against FAILED.

> +        WITH_QEMU_LOCK_GUARD(&s->error_mutex) {
> +            error_report_err(s->error);
> +            s->error = NULL;
> +        }
> +
> +        exit(EXIT_FAILURE);
> +    }
> +}
> +
>  static void process_incoming_migration_bh(void *opaque)
>  {
>      MigrationIncomingState *mis = opaque;
> @@ -861,7 +888,7 @@ static void process_incoming_migration_bh(void *opaque)
>       */
>      migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
>                        MIGRATION_STATUS_COMPLETED);
> -    migration_incoming_state_destroy();
> +    migration_incoming_finish();

This is exactly the same as before when we know it's succeeding, but I
think I get your point, always using migration_incoming_finish() should be
fine.

>  }
>  
>  static void coroutine_fn
> @@ -888,23 +915,13 @@ process_incoming_migration_co(void *opaque)
>  
>      ps = postcopy_state_get();
>      trace_process_incoming_migration_co_end(ret, ps);
> -    if (ps != POSTCOPY_INCOMING_NONE) {
> -        if (ps == POSTCOPY_INCOMING_ADVISE) {
> -            /*
> -             * Where a migration had postcopy enabled (and thus went to advise)
> -             * but managed to complete within the precopy period, we can use
> -             * the normal exit.
> -             */
> -            postcopy_ram_incoming_cleanup(mis);
> -        } else if (ret >= 0) {
> -            /*
> -             * Postcopy was started, cleanup should happen at the end of the
> -             * postcopy thread.
> -             */
> -            trace_process_incoming_migration_co_postcopy_end_main();
> -            goto out;
> -        }
> -        /* Else if something went wrong then just fall out of the normal exit */
> +    if (ps >= POSTCOPY_INCOMING_LISTENING) {
> +        /*
> +         * Postcopy was started, cleanup should happen at the end of the
> +         * postcopy thread.

Postcopy has >1 threads, better mention "at the end of postcopy ram listen
thread", that helps explain why checking >= POSTCOPY_INCOMING_LISTENING,
because that event creates the ram listen thread.

> +         */
> +        trace_process_incoming_migration_co_postcopy_end_main();
> +        goto out;
>      }
>  
>      if (ret < 0) {
> @@ -926,16 +943,7 @@ fail:
>      migrate_set_error(s, local_err);
>      error_free(local_err);
>  
> -    migration_incoming_state_destroy();
> -
> -    if (mis->exit_on_error) {
> -        WITH_QEMU_LOCK_GUARD(&s->error_mutex) {
> -            error_report_err(s->error);
> -            s->error = NULL;
> -        }
> -
> -        exit(EXIT_FAILURE);
> -    }
> +    migration_incoming_finish();
>  out:
>      /* Pairs with the refcount taken in qmp_migrate_incoming() */
>      migrate_incoming_unref_outgoing_state();
> diff --git a/migration/migration.h b/migration/migration.h
> index 2c2331f40d..67e3318467 100644
> --- a/migration/migration.h
> +++ b/migration/migration.h
> @@ -518,6 +518,7 @@ void migrate_set_state(MigrationStatus *state, MigrationStatus old_state,
>  void migration_fd_process_incoming(QEMUFile *f);
>  void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp);
>  void migration_incoming_process(void);
> +void migration_incoming_finish(void);
>  
>  bool  migration_has_all_channels(void);
>  
> diff --git a/migration/savevm.c b/migration/savevm.c
> index fabbeb296a..d7eb416d48 100644
> --- a/migration/savevm.c
> +++ b/migration/savevm.c
> @@ -2069,6 +2069,11 @@ static int loadvm_postcopy_ram_handle_discard(MigrationIncomingState *mis,
>      return 0;
>  }
>  
> +static void postcopy_ram_listen_thread_bh(void *opaque)
> +{
> +    migration_incoming_finish();
> +}
> +
>  /*
>   * Triggered by a postcopy_listen command; this thread takes over reading
>   * the input stream, leaving the main thread free to carry on loading the rest
> @@ -2122,52 +2127,31 @@ static void *postcopy_ram_listen_thread(void *opaque)
>                           "bitmaps may be lost, and present migrated dirty "
>                           "bitmaps are correctly migrated and valid.",
>                           __func__, load_res);
> -            load_res = 0; /* prevent further exit() */
>          } else {
>              error_report("%s: loadvm failed: %d", __func__, load_res);
>              migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
>                                             MIGRATION_STATUS_FAILED);
> +            goto out;
>          }
>      }
> -    if (load_res >= 0) {
> -        /*
> -         * This looks good, but it's possible that the device loading in the
> -         * main thread hasn't finished yet, and so we might not be in 'RUN'
> -         * state yet; wait for the end of the main thread.
> -         */
> -        qemu_event_wait(&mis->main_thread_load_event);
> -    }
> -    postcopy_ram_incoming_cleanup(mis);
> -
> -    if (load_res < 0) {
> -        /*
> -         * If something went wrong then we have a bad state so exit;
> -         * depending how far we got it might be possible at this point
> -         * to leave the guest running and fire MCEs for pages that never
> -         * arrived as a desperate recovery step.
> -         */
> -        rcu_unregister_thread();
> -        exit(EXIT_FAILURE);
> -    }
> +    /*
> +     * This looks good, but it's possible that the device loading in the
> +     * main thread hasn't finished yet, and so we might not be in 'RUN'
> +     * state yet; wait for the end of the main thread.
> +     */
> +    qemu_event_wait(&mis->main_thread_load_event);
>  
>      migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
>                                     MIGRATION_STATUS_COMPLETED);
> -    /*
> -     * If everything has worked fine, then the main thread has waited
> -     * for us to start, and we're the last use of the mis.
> -     * (If something broke then qemu will have to exit anyway since it's
> -     * got a bad migration state).
> -     */
> -    bql_lock();
> -    migration_incoming_state_destroy();
> -    bql_unlock();
>  
> +out:
>      rcu_unregister_thread();
> -    mis->have_listen_thread = false;
>      postcopy_state_set(POSTCOPY_INCOMING_END);
>  
>      object_unref(OBJECT(migr));
>  
> +    migration_bh_schedule(postcopy_ram_listen_thread_bh, NULL);
> +
>      return NULL;
>  }
>  
> @@ -2217,7 +2201,7 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
>      mis->have_listen_thread = true;
>      postcopy_thread_create(mis, &mis->listen_thread,
>                             MIGRATION_THREAD_DST_LISTEN,
> -                           postcopy_ram_listen_thread, QEMU_THREAD_DETACHED);
> +                           postcopy_ram_listen_thread, QEMU_THREAD_JOINABLE);

Nit once more: better mention this change in commit message.

Thanks!

>      trace_loadvm_postcopy_handle_listen("return");
>  
>      return 0;
> -- 
> 2.51.0
> 

-- 
Peter Xu