[libvirt PATCH 3/3] qemu: Make qemuMigrationSrcCancel optionally synchronous

Jiri Denemark posted 3 patches 3 years, 5 months ago
[libvirt PATCH 3/3] qemu: Make qemuMigrationSrcCancel optionally synchronous
Posted by Jiri Denemark 3 years, 5 months ago
We have always considered the "migrate_cancel" QMP command to return after
successfully cancelling the migration. But this is no longer true (to be
honest I'm not sure it ever was) as it just changes the migration state
to "cancelling". In most cases the migration is canceled pretty quickly
and we don't really notice anything, but sometimes it takes so long we
even get to clearing migration capabilities before the migration is
actually canceled, which fails as capabilities can only be changed when
no migration is running. So to avoid this issue, we can wait for the
migration to be really canceled after sending migrate_cancel. The only
place where we don't need synchronous behavior is when we're cancelling
migration at the user's request while it is actively watched by another
thread.

https://bugzilla.redhat.com/show_bug.cgi?id=2114866

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
 src/qemu/qemu_driver.c    |  2 +-
 src/qemu/qemu_migration.c | 45 +++++++++++++++++++++++++++++++++++----
 src/qemu/qemu_migration.h |  3 ++-
 src/qemu/qemu_process.c   |  2 +-
 4 files changed, 45 insertions(+), 7 deletions(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index a86efc769a..71a1de19b8 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -12811,7 +12811,7 @@ qemuDomainAbortJobMigration(virDomainObj *vm)
     VIR_DEBUG("Cancelling migration job at client request");
 
     qemuDomainObjAbortAsyncJob(vm);
-    return qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_NONE);
+    return qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_NONE, false);
 }
 
 
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 5845dfdb9c..83c3ca4dcf 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -4611,9 +4611,37 @@ qemuMigrationSrcStart(virDomainObj *vm,
 }
 
 
+static bool
+qemuMigrationSrcIsCanceled(virDomainObj *vm)
+{
+    qemuDomainObjPrivate *priv = vm->privateData;
+    virDomainJobData *jobData = priv->job.current;
+
+    qemuMigrationUpdateJobType(jobData);
+    switch (jobData->status) {
+    case VIR_DOMAIN_JOB_STATUS_FAILED:
+    case VIR_DOMAIN_JOB_STATUS_CANCELED:
+    case VIR_DOMAIN_JOB_STATUS_COMPLETED:
+    case VIR_DOMAIN_JOB_STATUS_NONE:
+        return true;
+
+    case VIR_DOMAIN_JOB_STATUS_MIGRATING:
+    case VIR_DOMAIN_JOB_STATUS_POSTCOPY:
+    case VIR_DOMAIN_JOB_STATUS_PAUSED:
+    case VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED:
+    case VIR_DOMAIN_JOB_STATUS_POSTCOPY_PAUSED:
+    case VIR_DOMAIN_JOB_STATUS_ACTIVE:
+        break;
+    }
+
+    return false;
+}
+
+
 int
 qemuMigrationSrcCancel(virDomainObj *vm,
-                       virDomainAsyncJob asyncJob)
+                       virDomainAsyncJob asyncJob,
+                       bool wait)
 {
     qemuDomainObjPrivate *priv = vm->privateData;
 
@@ -4625,6 +4653,15 @@ qemuMigrationSrcCancel(virDomainObj *vm,
     qemuMonitorMigrateCancel(priv->mon);
     qemuDomainObjExitMonitor(vm);
 
+    if (virDomainObjIsActive(vm) && wait) {
+        VIR_DEBUG("Waiting for migration to be canceled");
+
+        while (!qemuMigrationSrcIsCanceled(vm)) {
+            if (qemuDomainObjWait(vm) < 0)
+                return -1;
+        }
+    }
+
     return 0;
 }
 
@@ -4971,7 +5008,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
 
         if (cancel &&
             priv->job.current->status != VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED)
-            qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_MIGRATION_OUT);
+            qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_MIGRATION_OUT, true);
 
         /* cancel any outstanding NBD jobs */
         if (mig && mig->nbd)
@@ -6916,7 +6953,7 @@ qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm,
             virErrorPreserveLast(&orig_err);
             virCommandAbort(compressor);
             if (virDomainObjIsActive(vm))
-                qemuMigrationSrcCancel(vm, asyncJob);
+                qemuMigrationSrcCancel(vm, asyncJob, true);
         }
         goto cleanup;
     }
@@ -6963,7 +7000,7 @@ qemuMigrationSrcCancelUnattended(virDomainObj *vm)
     VIR_DEBUG("Canceling unfinished outgoing migration of domain %s",
               vm->def->name);
 
-    qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_NONE);
+    qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_NONE, true);
 
     for (i = 0; i < vm->def->ndisks; i++) {
         virDomainDiskDef *disk = vm->def->disks[i];
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
index 31a5547399..fbea45ad4e 100644
--- a/src/qemu/qemu_migration.h
+++ b/src/qemu/qemu_migration.h
@@ -245,7 +245,8 @@ qemuMigrationSrcCancelUnattended(virDomainObj *vm);
 
 int
 qemuMigrationSrcCancel(virDomainObj *vm,
-                       virDomainAsyncJob asyncJob);
+                       virDomainAsyncJob asyncJob,
+                       bool wait);
 
 int
 qemuMigrationAnyFetchStats(virDomainObj *vm,
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 4465fa89e9..08eefd0fba 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -3696,7 +3696,7 @@ qemuProcessRecoverJob(virQEMUDriver *driver,
     case VIR_ASYNC_JOB_SAVE:
     case VIR_ASYNC_JOB_DUMP:
     case VIR_ASYNC_JOB_SNAPSHOT:
-        qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_NONE);
+        qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_NONE, true);
         /* resume the domain but only if it was paused as a result of
          * running a migration-to-file operation.  Although we are
          * recovering an async job, this function is run at startup
-- 
2.37.2
Re: [libvirt PATCH 3/3] qemu: Make qemuMigrationSrcCancel optionally synchronous
Posted by Peter Krempa 3 years, 5 months ago
On Thu, Sep 01, 2022 at 14:47:41 +0200, Jiri Denemark wrote:
> We have always considered "migrate_cancel" QMP command to return after
> successfully cancelling the migration. But this is no longer true (to be
> honest I'm not sure it ever was) as it just changes the migration state
> to "cancelling". In most cases the migration is canceled pretty quickly
> and we don't really notice anything, but sometimes it takes so long we
> even get to clearing migration capabilities before the migration is
> actually canceled, which fails as capabilities can only be changed when
> no migration is running. So to avoid this issue, we can wait for the
> migration to be really canceled after sending migrate_cancel. The only
> place where we don't need synchronous behavior is when we're cancelling
> migration on user's request while it is actively watched by another
> thread.
> 
> https://bugzilla.redhat.com/show_bug.cgi?id=2114866
> 
> Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
> ---
>  src/qemu/qemu_driver.c    |  2 +-
>  src/qemu/qemu_migration.c | 45 +++++++++++++++++++++++++++++++++++----
>  src/qemu/qemu_migration.h |  3 ++-
>  src/qemu/qemu_process.c   |  2 +-
>  4 files changed, 45 insertions(+), 7 deletions(-)

[...]

>  int
>  qemuMigrationSrcCancel(virDomainObj *vm,
> -                       virDomainAsyncJob asyncJob)
> +                       virDomainAsyncJob asyncJob,
> +                       bool wait)
>  {
>      qemuDomainObjPrivate *priv = vm->privateData;
>  
> @@ -4625,6 +4653,15 @@ qemuMigrationSrcCancel(virDomainObj *vm,
>      qemuMonitorMigrateCancel(priv->mon);
>      qemuDomainObjExitMonitor(vm);
>  
> +    if (virDomainObjIsActive(vm) && wait) {

Is the call to virDomainObjIsActive() necessary here? IIUC the domain
shutdown code is always executed in a way to make sure that waiting
threads are always woken.


> +        VIR_DEBUG("Waiting for migration to be canceled");
> +
> +        while (!qemuMigrationSrcIsCanceled(vm)) {
> +            if (qemuDomainObjWait(vm) < 0)

So here if the VM would crash before we wait we'd report success and if
it crashed during our wait we'll report failure, which seems weird too.

> +                return -1;
> +        }

The rest of the patch looks okay.
Re: [libvirt PATCH 3/3] qemu: Make qemuMigrationSrcCancel optionally synchronous
Posted by Jiri Denemark 3 years, 5 months ago
On Thu, Sep 01, 2022 at 16:51:34 +0200, Peter Krempa wrote:
> On Thu, Sep 01, 2022 at 14:47:41 +0200, Jiri Denemark wrote:
> > We have always considered "migrate_cancel" QMP command to return after
> > successfully cancelling the migration. But this is no longer true (to be
> > honest I'm not sure it ever was) as it just changes the migration state
> > to "cancelling". In most cases the migration is canceled pretty quickly
> > and we don't really notice anything, but sometimes it takes so long we
> > even get to clearing migration capabilities before the migration is
> > actually canceled, which fails as capabilities can only be changed when
> > no migration is running. So to avoid this issue, we can wait for the
> > migration to be really canceled after sending migrate_cancel. The only
> > place where we don't need synchronous behavior is when we're cancelling
> > migration on user's request while it is actively watched by another
> > thread.
> > 
> > https://bugzilla.redhat.com/show_bug.cgi?id=2114866
> > 
> > Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
> > ---
> >  src/qemu/qemu_driver.c    |  2 +-
> >  src/qemu/qemu_migration.c | 45 +++++++++++++++++++++++++++++++++++----
> >  src/qemu/qemu_migration.h |  3 ++-
> >  src/qemu/qemu_process.c   |  2 +-
> >  4 files changed, 45 insertions(+), 7 deletions(-)
> 
> [...]
> 
> >  int
> >  qemuMigrationSrcCancel(virDomainObj *vm,
> > -                       virDomainAsyncJob asyncJob)
> > +                       virDomainAsyncJob asyncJob,
> > +                       bool wait)
> >  {
> >      qemuDomainObjPrivate *priv = vm->privateData;
> >  
> > @@ -4625,6 +4653,15 @@ qemuMigrationSrcCancel(virDomainObj *vm,
> >      qemuMonitorMigrateCancel(priv->mon);
> >      qemuDomainObjExitMonitor(vm);
> >  
> > +    if (virDomainObjIsActive(vm) && wait) {
> 
> Is the call to virDomainObjIsActive() necessary here? IIUC the domain
> shutdown code is always executed in a way to make sure that waiting
> threads are always woken.
> 
> 
> > +        VIR_DEBUG("Waiting for migration to be canceled");
> > +
> > +        while (!qemuMigrationSrcIsCanceled(vm)) {
> > +            if (qemuDomainObjWait(vm) < 0)
> 
> So here if the VM would crash before we wait we'd report success and if
> it crashed during our wait we'll report failure, which seems weird too.

Oh right, qemuDomainObjWait already checks for virDomainObjIsActive so
we don't have to do it explicitly here. Just

        if (wait) {
            ...
        }

is enough.

Jirka
Re: [libvirt PATCH 3/3] qemu: Make qemuMigrationSrcCancel optionally synchronous
Posted by Peter Krempa 3 years, 5 months ago
On Thu, Sep 01, 2022 at 17:18:55 +0200, Jiri Denemark wrote:
> On Thu, Sep 01, 2022 at 16:51:34 +0200, Peter Krempa wrote:
> > On Thu, Sep 01, 2022 at 14:47:41 +0200, Jiri Denemark wrote:
> > > We have always considered "migrate_cancel" QMP command to return after
> > > successfully cancelling the migration. But this is no longer true (to be
> > > honest I'm not sure it ever was) as it just changes the migration state
> > > to "cancelling". In most cases the migration is canceled pretty quickly
> > > and we don't really notice anything, but sometimes it takes so long we
> > > even get to clearing migration capabilities before the migration is
> > > actually canceled, which fails as capabilities can only be changed when
> > > no migration is running. So to avoid this issue, we can wait for the
> > > migration to be really canceled after sending migrate_cancel. The only
> > > place where we don't need synchronous behavior is when we're cancelling
> > > migration on user's request while it is actively watched by another
> > > thread.
> > > 
> > > https://bugzilla.redhat.com/show_bug.cgi?id=2114866
> > > 
> > > Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
> > > ---
> > >  src/qemu/qemu_driver.c    |  2 +-
> > >  src/qemu/qemu_migration.c | 45 +++++++++++++++++++++++++++++++++++----
> > >  src/qemu/qemu_migration.h |  3 ++-
> > >  src/qemu/qemu_process.c   |  2 +-
> > >  4 files changed, 45 insertions(+), 7 deletions(-)
> > 
> > [...]
> > 
> > >  int
> > >  qemuMigrationSrcCancel(virDomainObj *vm,
> > > -                       virDomainAsyncJob asyncJob)
> > > +                       virDomainAsyncJob asyncJob,
> > > +                       bool wait)
> > >  {
> > >      qemuDomainObjPrivate *priv = vm->privateData;
> > >  
> > > @@ -4625,6 +4653,15 @@ qemuMigrationSrcCancel(virDomainObj *vm,
> > >      qemuMonitorMigrateCancel(priv->mon);
> > >      qemuDomainObjExitMonitor(vm);
> > >  
> > > +    if (virDomainObjIsActive(vm) && wait) {
> > 
> > Is the call to virDomainObjIsActive() necessary here? IIUC the domain
> > shutdown code is always executed in a way to make sure that waiting
> > threads are always woken.
> > 
> > 
> > > +        VIR_DEBUG("Waiting for migration to be canceled");
> > > +
> > > +        while (!qemuMigrationSrcIsCanceled(vm)) {
> > > +            if (qemuDomainObjWait(vm) < 0)
> > 
> > So here if the VM would crash before we wait we'd report success and if
> > it crashed during our wait we'll report failure, which seems weird too.
> 
> Oh right, qemuDomainObjWait already checks for virDomainObjIsActive so
> we don't have to do it explicitly here. Just
> 
>         if (wait) {
>             ...
>         }
> 
> is enough.

Reviewed-by: Peter Krempa <pkrempa@redhat.com>