[PATCH 3/4] qemu snapshot: use QMP snapshot-save/delete for internal snapshots

Posted by Nikolai Barybin via Devel 2 months ago
The usage of HMP commands is highly discouraged by QEMU. Moreover, the
current snapshot creation routine does not provide flexibility in
choosing the target device for the VM state snapshot.

This patch makes use of the QMP commands snapshot-save/snapshot-delete
and by default chooses the first writable disk (if present) as the
target for the VM state, falling back to the NVRAM image otherwise.

Signed-off-by: Nikolai Barybin <nikolai.barybin@virtuozzo.com>
---
 src/qemu/qemu_snapshot.c | 158 ++++++++++++++++++++++++++++++++++++---
 1 file changed, 148 insertions(+), 10 deletions(-)

diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
index f5260c4a22..83949a9a27 100644
--- a/src/qemu/qemu_snapshot.c
+++ b/src/qemu/qemu_snapshot.c
@@ -308,6 +308,96 @@ qemuSnapshotCreateInactiveExternal(virQEMUDriver *driver,
     return ret;
 }
 
+static int
+qemuSnapshotActiveInternalGetWrdevListHelper(virDomainObj *vm,
+                                             char **wrdevs)
+{
+    size_t wrdevCount = 0;
+    size_t i = 0;
+
+    for (i = 0; i < vm->def->ndisks; i++) {
+        virDomainDiskDef *disk = vm->def->disks[i];
+        if (!disk->src->readonly) {
+            wrdevs[wrdevCount] = g_strdup(disk->src->nodenameformat);
+            wrdevCount++;
+        }
+    }
+
+    if (wrdevCount == 0) {
+        if (vm->def->os.loader->nvram) {
+            wrdevs[0] = g_strdup(vm->def->os.loader->nvram->nodenameformat);
+        } else {
+            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                           _("no writable device for internal snapshot creation/deletion"));
+            return -1;
+        }
+    }
+
+    return 0;
+}
+
+
+static int
+qemuSnapshotCreateActiveInternalDone(virDomainObj *vm)
+{
+    qemuBlockJobData *job = NULL;
+    qemuDomainObjPrivate *priv = vm->privateData;
+
+    if (!(job = virHashLookup(priv->blockjobs, g_strdup_printf("snapsave%d", vm->def->id)))) {
+        virReportError(VIR_ERR_INTERNAL_ERROR,
+                       _("failed to lookup blockjob 'snapsave%1$d'"), vm->def->id);
+        return -1;
+    }
+
+    qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
+    if (job->state == VIR_DOMAIN_BLOCK_JOB_FAILED) {
+        virReportError(VIR_ERR_INTERNAL_ERROR,
+                       _("snapshot-save job failed: %1$s"), NULLSTR(job->errmsg));
+        return -1;
+    }
+
+    return job->state == VIR_DOMAIN_BLOCK_JOB_COMPLETED ? 1 : 0;
+}
+
+
+static int
+qemuSnapshotCreateActiveInternalStart(virDomainObj *vm,
+                                      const char *name)
+{
+    qemuBlockJobData *job = NULL;
+    g_autofree char** wrdevs = NULL;
+    int ret = -1;
+    int rc = 0;
+
+    wrdevs = g_new0(char *, vm->def->ndisks + 1);
+    if (qemuSnapshotActiveInternalGetWrdevListHelper(vm, wrdevs) < 0)
+        return -1;
+
+    if (!(job = qemuBlockJobDiskNew(vm, NULL, QEMU_BLOCKJOB_TYPE_SNAPSHOT_SAVE,
+                                    g_strdup_printf("snapsave%d", vm->def->id)))) {
+        virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("failed to create new blockjob"));
+        return -1;
+    }
+
+    qemuBlockJobSyncBegin(job);
+    if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0) {
+        ret = -1;
+        goto cleanup;
+    }
+
+    rc = qemuMonitorSnapshotSave(qemuDomainGetMonitor(vm), job->name,
+                                 name, wrdevs[0], wrdevs);
+    qemuDomainObjExitMonitor(vm);
+    if (rc == 0) {
+        qemuBlockJobStarted(job, vm);
+        ret = 0;
+    }
+
+ cleanup:
+    qemuBlockJobStartupFinalize(vm, job);
+    return ret;
+}
+
 
 /* The domain is expected to be locked and active. */
 static int
@@ -316,11 +406,11 @@ qemuSnapshotCreateActiveInternal(virQEMUDriver *driver,
                                  virDomainMomentObj *snap,
                                  unsigned int flags)
 {
-    qemuDomainObjPrivate *priv = vm->privateData;
     virObjectEvent *event = NULL;
     bool resume = false;
     virDomainSnapshotDef *snapdef = virDomainSnapshotObjGetDef(snap);
     int ret = -1;
+    int rv = 0;
 
     if (!qemuMigrationSrcIsAllowed(vm, false, VIR_ASYNC_JOB_SNAPSHOT, 0))
         goto cleanup;
@@ -342,15 +432,17 @@ qemuSnapshotCreateActiveInternal(virQEMUDriver *driver,
         }
     }
 
-    if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0) {
+    if ((ret = qemuSnapshotCreateActiveInternalStart(vm, snap->def->name)) < 0) {
         resume = false;
         goto cleanup;
     }
 
-    ret = qemuMonitorCreateSnapshot(priv->mon, snap->def->name);
-    qemuDomainObjExitMonitor(vm);
-    if (ret < 0)
-        goto cleanup;
+    while ((rv = qemuSnapshotCreateActiveInternalDone(vm)) != 1) {
+        if (rv < 0 || qemuDomainObjWait(vm) < 0) {
+            ret = -1;
+            goto cleanup;
+        }
+    }
 
     if (!(snapdef->cookie = (virObject *) qemuDomainSaveCookieNew(vm)))
         goto cleanup;
@@ -3603,6 +3695,55 @@ qemuSnapshotDiscardMetadata(virDomainObj *vm,
 }
 
 
+static int
+qemuSnapshotDiscardActiveInternal(virDomainObj *vm,
+                                  const char *name)
+{
+    qemuBlockJobData *job = NULL;
+    g_autofree char** wrdevs = NULL;
+    int ret = -1;
+    int rc = 0;
+
+    wrdevs = g_new0(char *, vm->def->ndisks + 1);
+    if (qemuSnapshotActiveInternalGetWrdevListHelper(vm, wrdevs) < 0)
+        return -1;
+
+    if (!(job = qemuBlockJobDiskNew(vm, NULL, QEMU_BLOCKJOB_TYPE_SNAPSHOT_DELETE,
+                                    g_strdup_printf("snapdelete%d", vm->def->id)))) {
+        virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("failed to create new blockjob"));
+        return -1;
+    }
+
+    qemuBlockJobSyncBegin(job);
+    if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0)
+        goto cleanup;
+
+    rc = qemuMonitorSnapshotDelete(qemuDomainGetMonitor(vm), job->name, name, wrdevs);
+    qemuDomainObjExitMonitor(vm);
+    if (rc == 0) {
+        qemuBlockJobStarted(job, vm);
+        ret = 0;
+    } else {
+        goto cleanup;
+    }
+
+    while (job->state != VIR_DOMAIN_BLOCK_JOB_COMPLETED) {
+        qemuDomainObjWait(vm);
+        qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
+        if (job->state == VIR_DOMAIN_BLOCK_JOB_FAILED) {
+            virReportError(VIR_ERR_INTERNAL_ERROR,
+                           _("snapshot-delete job failed: %1$s"), NULLSTR(job->errmsg));
+            ret = -1;
+            break;
+        }
+    }
+
+ cleanup:
+    qemuBlockJobStartupFinalize(vm, job);
+    return ret;
+}
+
+
 /* Discard one snapshot (or its metadata), without reparenting any children.  */
 static int
 qemuSnapshotDiscardImpl(virQEMUDriver *driver,
@@ -3645,11 +3786,8 @@ qemuSnapshotDiscardImpl(virQEMUDriver *driver,
                 /* Similarly as internal snapshot creation we would use a regular job
                  * here so set a mask to forbid any other job. */
                 qemuDomainObjSetAsyncJobMask(vm, VIR_JOB_NONE);
-                if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0)
-                    return -1;
                 /* we continue on even in the face of error */
-                qemuMonitorDeleteSnapshot(qemuDomainGetMonitor(vm), snap->def->name);
-                qemuDomainObjExitMonitor(vm);
+                qemuSnapshotDiscardActiveInternal(vm, snap->def->name);
                 qemuDomainObjSetAsyncJobMask(vm, VIR_JOB_DEFAULT_MASK);
             }
         }
-- 
2.43.5
Re: [PATCH 3/4] qemu snapshot: use QMP snapshot-save/delete for internal snapshots
Posted by Daniel P. Berrangé 2 months ago
On Tue, Jul 16, 2024 at 01:42:27AM +0300, Nikolai Barybin via Devel wrote:
> The usage of HMP commands are highly discouraged by qemu. Moreover,
> current snapshot creation routine does not provide flexibility in
> choosing target device for VM state snapshot.
> 
> This patch makes use of QMP commands snapshot-save/delete and by
> default chooses first writable disk (if present) as target for VM
> state, NVRAM - otherwise.
> 
> Signed-off-by: Nikolai Barybin <nikolai.barybin@virtuozzo.com>
> ---
>  src/qemu/qemu_snapshot.c | 158 ++++++++++++++++++++++++++++++++++++---
>  1 file changed, 148 insertions(+), 10 deletions(-)
> 
> diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
> index f5260c4a22..83949a9a27 100644
> --- a/src/qemu/qemu_snapshot.c
> +++ b/src/qemu/qemu_snapshot.c
> @@ -308,6 +308,96 @@ qemuSnapshotCreateInactiveExternal(virQEMUDriver *driver,
>      return ret;
>  }
>  
> +static int
> +qemuSnapshotActiveInternalGetWrdevListHelper(virDomainObj *vm,
> +                                             char **wrdevs)
> +{
> +    size_t wrdevCount = 0;
> +    size_t i = 0;
> +
> +    for (i = 0; i < vm->def->ndisks; i++) {
> +        virDomainDiskDef *disk = vm->def->disks[i];
> +        if (!disk->src->readonly) {
> +            wrdevs[wrdevCount] = g_strdup(disk->src->nodenameformat);
> +            wrdevCount++;
> +        }
> +    }

Being !readonly is not a sufficient criterion for allowing use of internal
snapshots. It also must be !shareable, and it must be of a format that
permits snapshots, i.e. effectively only qcow2.

We should likely deny snapshots entirely if there are any shareable disks,
since I can't see it being conceptually sensible to snapshot a VM while
excluding shared writable disks.

Likewise deny snapshots if any writable disk is non-qcow2, as snapshots
need to be consistent across the entire VM writable storage set.
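
For illustration, a per-disk check along those lines might look roughly like
the sketch below (the function name is made up, and the readonly/shared/format
fields on virStorageSource are assumptions, not a drop-in implementation):

static int
qemuSnapshotInternalCheckDisk(virDomainDiskDef *disk)
{
    /* read-only disks are simply skipped */
    if (disk->src->readonly)
        return 0;

    /* a shared writable disk can't be part of a consistent VM snapshot */
    if (disk->src->shared) {
        virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
                       _("internal snapshot is not supported with shareable disk '%1$s'"),
                       disk->dst);
        return -1;
    }

    /* only qcow2 images can hold an internal snapshot */
    if (disk->src->format != VIR_STORAGE_FILE_QCOW2) {
        virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
                       _("internal snapshot requires qcow2 image for disk '%1$s'"),
                       disk->dst);
        return -1;
    }

    /* writable and eligible: include in the device list */
    return 1;
}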

> +
> +    if (wrdevCount == 0) {
> +        if (vm->def->os.loader->nvram) {
> +            wrdevs[0] = g_strdup(vm->def->os.loader->nvram->nodenameformat);

This doesn't make sense IMHO.

If there are no writable disks, we shouldn't be trying to snapshot at
all.

> +        } else {
> +            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> +                           _("no writable device for internal snapshot creation/deletion"));
> +            return -1;
> +        }
> +    }
> +
> +    return 0;
> +}
> +
> +
> +static int
> +qemuSnapshotCreateActiveInternalDone(virDomainObj *vm)
> +{
> +    qemuBlockJobData *job = NULL;
> +    qemuDomainObjPrivate *priv = vm->privateData;
> +
> +    if (!(job = virHashLookup(priv->blockjobs, g_strdup_printf("snapsave%d", vm->def->id)))) {
> +        virReportError(VIR_ERR_INTERNAL_ERROR,
> +                       _("failed to lookup blockjob 'snapsave%1$d'"), vm->def->id);
> +        return -1;
> +    }
> +
> +    qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
> +    if (job->state == VIR_DOMAIN_BLOCK_JOB_FAILED) {
> +        virReportError(VIR_ERR_INTERNAL_ERROR,
> +                       _("snapshot-save job failed: %1$s"), NULLSTR(job->errmsg));
> +        return -1;
> +    }
> +
> +    return job->state == VIR_DOMAIN_BLOCK_JOB_COMPLETED ? 1 : 0;
> +}
> +
> +
> +static int
> +qemuSnapshotCreateActiveInternalStart(virDomainObj *vm,
> +                                      const char *name)
> +{
> +    qemuBlockJobData *job = NULL;
> +    g_autofree char** wrdevs = NULL;
> +    int ret = -1;
> +    int rc = 0;
> +
> +    wrdevs = g_new0(char *, vm->def->ndisks + 1);

IMHO the qemuSnapshotActiveInternalGetWrdevListHelper method should be
allocating this, to the correct size it needs.
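
i.e. something like the following sketch, where the helper owns the
allocation and simply errors out when nothing is writable (per the comments
above, the NVRAM fallback is dropped; this is an illustration only):

static int
qemuSnapshotActiveInternalGetWrdevListHelper(virDomainObj *vm,
                                             char ***wrdevs)
{
    g_auto(GStrv) devs = NULL;
    size_t wrdevCount = 0;
    size_t i;

    /* worst case every disk is writable, plus the NULL terminator */
    devs = g_new0(char *, vm->def->ndisks + 1);

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDef *disk = vm->def->disks[i];

        if (disk->src->readonly)
            continue;

        devs[wrdevCount++] = g_strdup(disk->src->nodenameformat);
    }

    if (wrdevCount == 0) {
        virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                       _("no writable device for internal snapshot creation/deletion"));
        return -1;
    }

    *wrdevs = g_steal_pointer(&devs);
    return 0;
}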

> +    if (qemuSnapshotActiveInternalGetWrdevListHelper(vm, wrdevs) < 0)
> +        return -1;
> +
> +    if (!(job = qemuBlockJobDiskNew(vm, NULL, QEMU_BLOCKJOB_TYPE_SNAPSHOT_SAVE,
> +                                    g_strdup_printf("snapsave%d", vm->def->id)))) {
> +        virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("failed to create new blockjob"));
> +        return -1;
> +    }
> +
> +    qemuBlockJobSyncBegin(job);
> +    if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0) {
> +        ret = -1;
> +        goto cleanup;
> +    }
> +
> +    rc = qemuMonitorSnapshotSave(qemuDomainGetMonitor(vm), job->name,
> +                                 name, wrdevs[0], wrdevs);
> +    qemuDomainObjExitMonitor(vm);
> +    if (rc == 0) {
> +        qemuBlockJobStarted(job, vm);
> +        ret = 0;
> +    }
> +
> + cleanup:
> +    qemuBlockJobStartupFinalize(vm, job);
> +    return ret;
> +}
> +

With regards,
Daniel
-- 
|: https://berrange.com      -o-    https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org         -o-            https://fstop138.berrange.com :|
|: https://entangle-photo.org    -o-    https://www.instagram.com/dberrange :|
Re: [PATCH 3/4] qemu snapshot: use QMP snapshot-save/delete for internal snapshots
Posted by Denis V. Lunev via Devel 2 months ago
On 7/16/24 00:42, Nikolai Barybin wrote:
> The usage of HMP commands are highly discouraged by qemu. Moreover,
> current snapshot creation routine does not provide flexibility in
> choosing target device for VM state snapshot.
>
> This patch makes use of QMP commands snapshot-save/delete and by
> default chooses first writable disk (if present) as target for VM
> state, NVRAM - otherwise.
>
> Signed-off-by: Nikolai Barybin <nikolai.barybin@virtuozzo.com>
> ---
>   src/qemu/qemu_snapshot.c | 158 ++++++++++++++++++++++++++++++++++++---
>   1 file changed, 148 insertions(+), 10 deletions(-)
>
> diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
> index f5260c4a22..83949a9a27 100644
> --- a/src/qemu/qemu_snapshot.c
> +++ b/src/qemu/qemu_snapshot.c
> @@ -308,6 +308,96 @@ qemuSnapshotCreateInactiveExternal(virQEMUDriver *driver,
>       return ret;
>   }
>   
> +static int
> +qemuSnapshotActiveInternalGetWrdevListHelper(virDomainObj *vm,
> +                                             char **wrdevs)
> +{
> +    size_t wrdevCount = 0;
> +    size_t i = 0;
> +
> +    for (i = 0; i < vm->def->ndisks; i++) {
> +        virDomainDiskDef *disk = vm->def->disks[i];
> +        if (!disk->src->readonly) {
> +            wrdevs[wrdevCount] = g_strdup(disk->src->nodenameformat);
> +            wrdevCount++;
> +        }
> +    }
> +
> +    if (wrdevCount == 0) {
> +        if (vm->def->os.loader->nvram) {
> +            wrdevs[0] = g_strdup(vm->def->os.loader->nvram->nodenameformat);
> +        } else {
> +            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> +                           _("no writable device for internal snapshot creation/deletion"));
> +            return -1;
> +        }
> +    }
> +
> +    return 0;
> +}
> +
> +
> +static int
> +qemuSnapshotCreateActiveInternalDone(virDomainObj *vm)
> +{
> +    qemuBlockJobData *job = NULL;
> +    qemuDomainObjPrivate *priv = vm->privateData;
> +
> +    if (!(job = virHashLookup(priv->blockjobs, g_strdup_printf("snapsave%d", vm->def->id)))) {
> +        virReportError(VIR_ERR_INTERNAL_ERROR,
> +                       _("failed to lookup blockjob 'snapsave%1$d'"), vm->def->id);
> +        return -1;
> +    }
> +
> +    qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
> +    if (job->state == VIR_DOMAIN_BLOCK_JOB_FAILED) {
> +        virReportError(VIR_ERR_INTERNAL_ERROR,
> +                       _("snapshot-save job failed: %1$s"), NULLSTR(job->errmsg));
> +        return -1;
> +    }
> +
> +    return job->state == VIR_DOMAIN_BLOCK_JOB_COMPLETED ? 1 : 0;
> +}
> +
> +
> +static int
> +qemuSnapshotCreateActiveInternalStart(virDomainObj *vm,
> +                                      const char *name)
> +{
> +    qemuBlockJobData *job = NULL;
> +    g_autofree char** wrdevs = NULL;
> +    int ret = -1;
> +    int rc = 0;
> +
> +    wrdevs = g_new0(char *, vm->def->ndisks + 1);
> +    if (qemuSnapshotActiveInternalGetWrdevListHelper(vm, wrdevs) < 0)
> +        return -1;
> +
> +    if (!(job = qemuBlockJobDiskNew(vm, NULL, QEMU_BLOCKJOB_TYPE_SNAPSHOT_SAVE,
> +                                    g_strdup_printf("snapsave%d", vm->def->id)))) {
> +        virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("failed to create new blockjob"));
> +        return -1;
> +    }
> +
> +    qemuBlockJobSyncBegin(job);
> +    if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0) {
> +        ret = -1;
> +        goto cleanup;
> +    }
> +
> +    rc = qemuMonitorSnapshotSave(qemuDomainGetMonitor(vm), job->name,
> +                                 name, wrdevs[0], wrdevs);
> +    qemuDomainObjExitMonitor(vm);
> +    if (rc == 0) {
> +        qemuBlockJobStarted(job, vm);
> +        ret = 0;
> +    }
> +
> + cleanup:
> +    qemuBlockJobStartupFinalize(vm, job);
> +    return ret;
> +}
> +
>   
>   /* The domain is expected to be locked and active. */
>   static int
> @@ -316,11 +406,11 @@ qemuSnapshotCreateActiveInternal(virQEMUDriver *driver,
>                                    virDomainMomentObj *snap,
>                                    unsigned int flags)
>   {
> -    qemuDomainObjPrivate *priv = vm->privateData;
>       virObjectEvent *event = NULL;
>       bool resume = false;
>       virDomainSnapshotDef *snapdef = virDomainSnapshotObjGetDef(snap);
>       int ret = -1;
> +    int rv = 0;
>   
>       if (!qemuMigrationSrcIsAllowed(vm, false, VIR_ASYNC_JOB_SNAPSHOT, 0))
>           goto cleanup;
> @@ -342,15 +432,17 @@ qemuSnapshotCreateActiveInternal(virQEMUDriver *driver,
>           }
>       }
>   
> -    if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0) {
> +    if ((ret = qemuSnapshotCreateActiveInternalStart(vm, snap->def->name)) < 0) {
>           resume = false;
>           goto cleanup;
>       }
>   
> -    ret = qemuMonitorCreateSnapshot(priv->mon, snap->def->name);
> -    qemuDomainObjExitMonitor(vm);
> -    if (ret < 0)
> -        goto cleanup;

'snapshot-save' was added in QEMU 6.0.
Right now we are at 8.2.

Should we still support pre-6.0 versions, i.e. check the availability of the
command via a proper capability?

Den
Re: [PATCH 3/4] qemu snapshot: use QMP snapshot-save/delete for internal snapshots
Posted by Peter Krempa 2 months ago
On Tue, Jul 16, 2024 at 10:27:29 +0200, Denis V. Lunev via Devel wrote:
> On 7/16/24 00:42, Nikolai Barybin wrote:
> > The usage of HMP commands are highly discouraged by qemu. Moreover,
> > current snapshot creation routine does not provide flexibility in
> > choosing target device for VM state snapshot.
> > 
> > This patch makes use of QMP commands snapshot-save/delete and by
> > default chooses first writable disk (if present) as target for VM
> > state, NVRAM - otherwise.
> > 
> > Signed-off-by: Nikolai Barybin <nikolai.barybin@virtuozzo.com>
> > ---
> >   src/qemu/qemu_snapshot.c | 158 ++++++++++++++++++++++++++++++++++++---
> >   1 file changed, 148 insertions(+), 10 deletions(-)

[...]

> > @@ -342,15 +432,17 @@ qemuSnapshotCreateActiveInternal(virQEMUDriver *driver,
> >           }
> >       }
> > -    if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0) {
> > +    if ((ret = qemuSnapshotCreateActiveInternalStart(vm, snap->def->name)) < 0) {
> >           resume = false;
> >           goto cleanup;
> >       }
> > -    ret = qemuMonitorCreateSnapshot(priv->mon, snap->def->name);
> > -    qemuDomainObjExitMonitor(vm);
> > -    if (ret < 0)
> > -        goto cleanup;
> 
> 'snapshot-save' has been added in QEMU 6.0.
> Right now we are at 8.2
> 
> Should we still support pre 6.0 versions, i.e. check the availability of the
> command via proper capability?

Libvirt currently supports qemu-5.2 as the oldest version, and since
that does not yet support the new commands we will need to use
capabilities and retain the old code.

This also means that patch 4/4 must be dropped, as the old code is
required for that one release, unfortunately.

qemu-5.2 currently seems to be the version in Debian 11 and openSUSE Leap 15.3, per commit 073bf167843ca2e788cbc581781c729ff26c80f5,
which bumped the minimum version recently.
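
Roughly, something like the following in the caller would keep the legacy
path alive (a sketch only; QEMU_CAPS_SNAPSHOT_SAVE is a made-up capability
name here and would have to be added and probed from the QMP schema):

    qemuDomainObjPrivate *priv = vm->privateData;

    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_SNAPSHOT_SAVE)) {
        /* qemu >= 6.0: QMP snapshot-save block job */
        if (qemuSnapshotCreateActiveInternalStart(vm, snap->def->name) < 0)
            goto cleanup;
    } else {
        /* qemu 5.2: fall back to the legacy HMP 'savevm' wrapper */
        if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0)
            goto cleanup;
        ret = qemuMonitorCreateSnapshot(priv->mon, snap->def->name);
        qemuDomainObjExitMonitor(vm);
        if (ret < 0)
            goto cleanup;
    }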
Re: [PATCH 3/4] qemu snapshot: use QMP snapshot-save/delete for internal snapshots
Posted by Denis V. Lunev via Devel 2 months ago
On 7/16/24 00:42, Nikolai Barybin wrote:
> The usage of HMP commands are highly discouraged by qemu. Moreover,
> current snapshot creation routine does not provide flexibility in
> choosing target device for VM state snapshot.
>
> This patch makes use of QMP commands snapshot-save/delete and by
> default chooses first writable disk (if present) as target for VM
> state, NVRAM - otherwise.
>
> Signed-off-by: Nikolai Barybin <nikolai.barybin@virtuozzo.com>
> ---
>   src/qemu/qemu_snapshot.c | 158 ++++++++++++++++++++++++++++++++++++---
>   1 file changed, 148 insertions(+), 10 deletions(-)
>
> diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
> index f5260c4a22..83949a9a27 100644
> --- a/src/qemu/qemu_snapshot.c
> +++ b/src/qemu/qemu_snapshot.c
> @@ -308,6 +308,96 @@ qemuSnapshotCreateInactiveExternal(virQEMUDriver *driver,
>       return ret;
>   }
>   
> +static int
> +qemuSnapshotActiveInternalGetWrdevListHelper(virDomainObj *vm,
> +                                             char **wrdevs)
> +{
> +    size_t wrdevCount = 0;
> +    size_t i = 0;
> +
> +    for (i = 0; i < vm->def->ndisks; i++) {
> +        virDomainDiskDef *disk = vm->def->disks[i];
> +        if (!disk->src->readonly) {
> +            wrdevs[wrdevCount] = g_strdup(disk->src->nodenameformat);
> +            wrdevCount++;
> +        }
> +    }
> +
> +    if (wrdevCount == 0) {
> +        if (vm->def->os.loader->nvram) {
> +            wrdevs[0] = g_strdup(vm->def->os.loader->nvram->nodenameformat);
> +        } else {
> +            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> +                           _("no writable device for internal snapshot creation/deletion"));
> +            return -1;
> +        }
> +    }
> +

should we select NVRAM at all?

IMHO that would be very problematic.
I think that we should generate error in this case.

Den
Re: [PATCH 3/4] qemu snapshot: use QMP snapshot-save/delete for internal snapshots
Posted by Peter Krempa 2 months ago
On Tue, Jul 16, 2024 at 10:21:12 +0200, Denis V. Lunev via Devel wrote:
> On 7/16/24 00:42, Nikolai Barybin wrote:
> > The usage of HMP commands are highly discouraged by qemu. Moreover,
> > current snapshot creation routine does not provide flexibility in
> > choosing target device for VM state snapshot.
> > 
> > This patch makes use of QMP commands snapshot-save/delete and by
> > default chooses first writable disk (if present) as target for VM
> > state, NVRAM - otherwise.
> > 
> > Signed-off-by: Nikolai Barybin <nikolai.barybin@virtuozzo.com>
> > ---
> >   src/qemu/qemu_snapshot.c | 158 ++++++++++++++++++++++++++++++++++++---
> >   1 file changed, 148 insertions(+), 10 deletions(-)
> > 
> > diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
> > index f5260c4a22..83949a9a27 100644
> > --- a/src/qemu/qemu_snapshot.c
> > +++ b/src/qemu/qemu_snapshot.c
> > @@ -308,6 +308,96 @@ qemuSnapshotCreateInactiveExternal(virQEMUDriver *driver,
> >       return ret;
> >   }
> > +static int
> > +qemuSnapshotActiveInternalGetWrdevListHelper(virDomainObj *vm,
> > +                                             char **wrdevs)
> > +{
> > +    size_t wrdevCount = 0;
> > +    size_t i = 0;
> > +
> > +    for (i = 0; i < vm->def->ndisks; i++) {
> > +        virDomainDiskDef *disk = vm->def->disks[i];
> > +        if (!disk->src->readonly) {
> > +            wrdevs[wrdevCount] = g_strdup(disk->src->nodenameformat);
> > +            wrdevCount++;
> > +        }
> > +    }
> > +
> > +    if (wrdevCount == 0) {
> > +        if (vm->def->os.loader->nvram) {
> > +            wrdevs[0] = g_strdup(vm->def->os.loader->nvram->nodenameformat);
> > +        } else {
> > +            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> > +                           _("no writable device for internal snapshot creation/deletion"));
> > +            return -1;
> > +        }
> > +    }
> > +
> 
> should we select NVRAM at all?
> 
> IMHO that would be very problematic.
> I think that we should generate error in this case.

No, the NVRAM image must *never* be selected as the target for the VM state.
Libvirt currently forbids internal snapshots with UEFI precisely because
the NVRAM image could have been selected historically, and we do not want
that ever happening.
Re: [PATCH 3/4] qemu snapshot: use QMP snapshot-save/delete for internal snapshots
Posted by Daniel P. Berrangé 2 months ago
On Tue, Jul 16, 2024 at 10:32:56AM +0200, Peter Krempa wrote:
> On Tue, Jul 16, 2024 at 10:21:12 +0200, Denis V. Lunev via Devel wrote:
> > On 7/16/24 00:42, Nikolai Barybin wrote:
> > > The usage of HMP commands are highly discouraged by qemu. Moreover,
> > > current snapshot creation routine does not provide flexibility in
> > > choosing target device for VM state snapshot.
> > > 
> > > This patch makes use of QMP commands snapshot-save/delete and by
> > > default chooses first writable disk (if present) as target for VM
> > > state, NVRAM - otherwise.
> > > 
> > > Signed-off-by: Nikolai Barybin <nikolai.barybin@virtuozzo.com>
> > > ---
> > >   src/qemu/qemu_snapshot.c | 158 ++++++++++++++++++++++++++++++++++++---
> > >   1 file changed, 148 insertions(+), 10 deletions(-)
> > > 
> > > diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
> > > index f5260c4a22..83949a9a27 100644
> > > --- a/src/qemu/qemu_snapshot.c
> > > +++ b/src/qemu/qemu_snapshot.c
> > > @@ -308,6 +308,96 @@ qemuSnapshotCreateInactiveExternal(virQEMUDriver *driver,
> > >       return ret;
> > >   }
> > > +static int
> > > +qemuSnapshotActiveInternalGetWrdevListHelper(virDomainObj *vm,
> > > +                                             char **wrdevs)
> > > +{
> > > +    size_t wrdevCount = 0;
> > > +    size_t i = 0;
> > > +
> > > +    for (i = 0; i < vm->def->ndisks; i++) {
> > > +        virDomainDiskDef *disk = vm->def->disks[i];
> > > +        if (!disk->src->readonly) {
> > > +            wrdevs[wrdevCount] = g_strdup(disk->src->nodenameformat);
> > > +            wrdevCount++;
> > > +        }
> > > +    }
> > > +
> > > +    if (wrdevCount == 0) {
> > > +        if (vm->def->os.loader->nvram) {
> > > +            wrdevs[0] = g_strdup(vm->def->os.loader->nvram->nodenameformat);
> > > +        } else {
> > > +            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> > > +                           _("no writable device for internal snapshot creation/deletion"));
> > > +            return -1;
> > > +        }
> > > +    }
> > > +
> > 
> > should we select NVRAM at all?
> > 
> > IMHO that would be very problematic.
> > I think that we should generate error in this case.
> 
> No NVRAM image must *never* be selected as target for the VM state.
> Libvirt curretnly forbids internal snapshots with UEFI precisely because
> NVRAM image could have been selected historically and we do not want
> that ever happening.

If we DO have a writable disk though, and the NVRAM is in qcow2 format,
then we should be including NVRAM in the snapshot operation, but the
state snapshot should be stored in the primary disk, not the NVRAM.
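
i.e. roughly the sketch below (it assumes the NVRAM virStorageSource has
format/nodenameformat filled in like a disk source, and that wrdevCount and a
spare slot in wrdevs are available at this point):

    /* the VM state always goes to the first writable disk ... */
    const char *vmstate = wrdevs[0];

    /* ... but a qcow2 NVRAM image is still snapshotted along with the disks */
    if (vm->def->os.loader && vm->def->os.loader->nvram &&
        vm->def->os.loader->nvram->format == VIR_STORAGE_FILE_QCOW2)
        wrdevs[wrdevCount++] = g_strdup(vm->def->os.loader->nvram->nodenameformat);

    rc = qemuMonitorSnapshotSave(qemuDomainGetMonitor(vm), job->name,
                                 name, vmstate, wrdevs);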

With regards,
Daniel
-- 
|: https://berrange.com      -o-    https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org         -o-            https://fstop138.berrange.com :|
|: https://entangle-photo.org    -o-    https://www.instagram.com/dberrange :|