From nobody Fri Mar 29 10:43:07 2024
Delivered-To: importer@patchew.org
Received-SPF: pass (zoho.com: domain of redhat.com designates 209.132.183.28
as permitted sender) client-ip=209.132.183.28;
envelope-from=libvir-list-bounces@redhat.com; helo=mx1.redhat.com;
Authentication-Results: mx.zohomail.com;
spf=pass (zoho.com: domain of redhat.com designates 209.132.183.28 as
permitted sender) smtp.mailfrom=libvir-list-bounces@redhat.com;
dmarc=pass(p=none dis=none) header.from=redhat.com
ARC-Seal: i=1; a=rsa-sha256; t=1562604980; cv=none;
d=zoho.com; s=zohoarc;
b=ENts/lWjeBfhlz96CL6U1fNHwOrX4dWxBtdh6clRCLQnDNu9oS3RMKHQ4nv4TV/yI5dvYSphbDsCqTcJ9vk3TJecGBA+u5jbkfPaB50ew26Kk+aEU5SdEIG6HMzU3FBibalN16SwaL4p06QgGQdnF8wQ8j5/yS5Sz4XcJlm+WiY=
ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=zoho.com;
s=zohoarc;
t=1562604980;
h=Content-Type:Content-Transfer-Encoding:Cc:Date:From:In-Reply-To:List-Subscribe:List-Post:List-Id:List-Archive:List-Help:List-Unsubscribe:MIME-Version:Message-ID:References:Sender:Subject:To:ARC-Authentication-Results;
bh=XieaKq7rrX9PRHRPbyIOpXCuCkspmxyRmAgPBqY3V2s=;
b=C0NsllLtcaQiVH5lEvJOwjbHZxRBGBfOQoQ3NtewMXsErg0KHMZDfTzJnLOFMmzHI5xZRJxACmZVVJy2rZyfqreEYp0dbXrmhr8wdyoolx3lxbW+GJ9lHzAV2v4bg0CCpKF4IYujruSbb5pqTeGACyEo95a2FKMJ8XPnKsyULvU=
ARC-Authentication-Results: i=1; mx.zoho.com;
spf=pass (zoho.com: domain of redhat.com designates 209.132.183.28 as
permitted sender) smtp.mailfrom=libvir-list-bounces@redhat.com;
dmarc=pass header.from= (p=none dis=none)
header.from=
Return-Path:
Received: from mx1.redhat.com (mx1.redhat.com [209.132.183.28]) by
mx.zohomail.com
with SMTPS id 1562604980378163.24586026561417;
Mon, 8 Jul 2019 09:56:20 -0700 (PDT)
Received: from smtp.corp.redhat.com (int-mx07.intmail.prod.int.phx2.redhat.com
[10.5.11.22])
(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))
(No client certificate requested)
by mx1.redhat.com (Postfix) with ESMTPS id F30982E97C5;
Mon, 8 Jul 2019 16:56:08 +0000 (UTC)
Received: from colo-mx.corp.redhat.com
(colo-mx02.intmail.prod.int.phx2.redhat.com [10.5.11.21])
by smtp.corp.redhat.com (Postfix) with ESMTPS id D06DA1001B19;
Mon, 8 Jul 2019 16:56:05 +0000 (UTC)
Received: from lists01.pubmisc.prod.ext.phx2.redhat.com
(lists01.pubmisc.prod.ext.phx2.redhat.com [10.5.19.33])
by colo-mx.corp.redhat.com (Postfix) with ESMTP id 3631541F53;
Mon, 8 Jul 2019 16:55:58 +0000 (UTC)
Received: from smtp.corp.redhat.com (int-mx07.intmail.prod.int.phx2.redhat.com
[10.5.11.22])
by lists01.pubmisc.prod.ext.phx2.redhat.com (8.13.8/8.13.8) with ESMTP
id x68GtvfD027876 for ;
Mon, 8 Jul 2019 12:55:57 -0400
Received: by smtp.corp.redhat.com (Postfix)
id A61BD1001959; Mon, 8 Jul 2019 16:55:57 +0000 (UTC)
Received: from blue.redhat.com (ovpn-116-78.phx2.redhat.com [10.3.116.78])
by smtp.corp.redhat.com (Postfix) with ESMTP id 384851001B18;
Mon, 8 Jul 2019 16:55:57 +0000 (UTC)
From: Eric Blake
To: libvir-list@redhat.com
Date: Mon, 8 Jul 2019 11:55:44 -0500
Message-Id: <20190708165553.18452-2-eblake@redhat.com>
In-Reply-To: <20190708165553.18452-1-eblake@redhat.com>
References: <20190708165553.18452-1-eblake@redhat.com>
MIME-Version: 1.0
X-Scanned-By: MIMEDefang 2.84 on 10.5.11.22
X-loop: libvir-list@redhat.com
Cc: nsoffer@redhat.com, eshenitz@redhat.com, pkrempa@redhat.com
Subject: [libvirt] [PATCH v9 01/10] backup: qemu: Implement
VIR_DOMAIN_CHECKPOINT_XML_SIZE flag
X-BeenThere: libvir-list@redhat.com
X-Mailman-Version: 2.1.12
Precedence: junk
List-Id: Development discussions about the libvirt library & tools
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Content-Transfer-Encoding: quoted-printable
Sender: libvir-list-bounces@redhat.com
Errors-To: libvir-list-bounces@redhat.com
X-Scanned-By: MIMEDefang 2.84 on 10.5.11.22
X-Greylist: Sender IP whitelisted,
not delayed by milter-greylist-4.5.16 (mx1.redhat.com [10.5.110.29]);
Mon, 08 Jul 2019 16:56:18 +0000 (UTC)
Content-Type: text/plain; charset="utf-8"
Once a checkpoint has been created, it is desirable to estimate the
size of the disk delta that is represented between the checkpoint and
the current operation. To do this, we have to scrape information out
of QMP query-block on a request from the user.
Signed-off-by: Eric Blake
---
src/qemu/qemu_monitor.h | 4 ++
src/qemu/qemu_monitor_json.h | 3 ++
src/qemu/qemu_driver.c | 56 +++++++++++++++++++++++++-
src/qemu/qemu_monitor.c | 11 ++++++
src/qemu/qemu_monitor_json.c | 76 ++++++++++++++++++++++++++++++++++++
5 files changed, 149 insertions(+), 1 deletion(-)
diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h
index c41428b048..9667dac284 100644
--- a/src/qemu/qemu_monitor.h
+++ b/src/qemu/qemu_monitor.h
@@ -24,6 +24,7 @@
#include "internal.h"
#include "domain_conf.h"
+#include "checkpoint_conf.h"
#include "virbitmap.h"
#include "virhash.h"
#include "virjson.h"
@@ -627,6 +628,9 @@ int qemuMonitorBlockStatsUpdateCapacity(qemuMonitorPtr =
mon,
int qemuMonitorBlockStatsUpdateCapacityBlockdev(qemuMonitorPtr mon,
virHashTablePtr stats)
ATTRIBUTE_NONNULL(2);
+int qemuMonitorUpdateCheckpointSize(qemuMonitorPtr mon,
+ virDomainCheckpointDefPtr chk)
+ ATTRIBUTE_NONNULL(2);
int qemuMonitorBlockResize(qemuMonitorPtr mon,
const char *device,
diff --git a/src/qemu/qemu_monitor_json.h b/src/qemu/qemu_monitor_json.h
index d0b519c88e..609e0f744c 100644
--- a/src/qemu/qemu_monitor_json.h
+++ b/src/qemu/qemu_monitor_json.h
@@ -97,6 +97,9 @@ int qemuMonitorJSONBlockResize(qemuMonitorPtr mon,
const char *nodename,
unsigned long long size);
+int qemuMonitorJSONUpdateCheckpointSize(qemuMonitorPtr mon,
+ virDomainCheckpointDefPtr chk);
+
int qemuMonitorJSONSetPassword(qemuMonitorPtr mon,
const char *protocol,
const char *password,
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 4131367245..f9605027be 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -17363,11 +17363,15 @@ qemuDomainCheckpointGetXMLDesc(virDomainCheckpoin=
tPtr checkpoint,
virDomainObjPtr vm =3D NULL;
char *xml =3D NULL;
virDomainMomentObjPtr chk =3D NULL;
+ qemuDomainObjPrivatePtr priv;
+ int rc;
+ size_t i;
virDomainCheckpointDefPtr chkdef;
unsigned int format_flags;
virCheckFlags(VIR_DOMAIN_CHECKPOINT_XML_SECURE |
- VIR_DOMAIN_CHECKPOINT_XML_NO_DOMAIN, NULL);
+ VIR_DOMAIN_CHECKPOINT_XML_NO_DOMAIN |
+ VIR_DOMAIN_CHECKPOINT_XML_SIZE, NULL);
if (!(vm =3D qemuDomObjFromCheckpoint(checkpoint)))
return NULL;
@@ -17379,12 +17383,62 @@ qemuDomainCheckpointGetXMLDesc(virDomainCheckpoin=
tPtr checkpoint,
goto cleanup;
chkdef =3D virDomainCheckpointObjGetDef(chk);
+ if (flags & VIR_DOMAIN_CHECKPOINT_XML_SIZE) {
+ /* TODO: for non-current checkpoint, this requires a QMP sequence =
per
+ disk, since the stat of one bitmap in isolation is too low,
+ and merely adding bitmap sizes may be too high:
+ block-dirty-bitmap-create tmp
+ for each bitmap from checkpoint to current:
+ add bitmap to src_list
+ block-dirty-bitmap-merge dst=3Dtmp src_list
+ query-block and read tmp size
+ block-dirty-bitmap-remove tmp
+ So for now, go with simpler query-blocks only for current.
+ */
+ if (virDomainCheckpointGetCurrent(vm->checkpoints) !=3D chk) {
+ virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
+ _("cannot compute size for non-current checkpoi=
nt '%s'"),
+ checkpoint->name);
+ goto cleanup;
+ }
+
+ if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ goto cleanup;
+
+ if (virDomainObjCheckActive(vm) < 0)
+ goto endjob;
+
+ if (qemuBlockNodeNamesDetect(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ goto endjob;
+
+ /* TODO: Shouldn't need to recompute node names. */
+ for (i =3D 0; i < chkdef->ndisks; i++) {
+ virDomainCheckpointDiskDef *disk =3D &chkdef->disks[i];
+
+ if (disk->type !=3D VIR_DOMAIN_CHECKPOINT_TYPE_BITMAP)
+ continue;
+ VIR_FREE(chk->def->dom->disks[disk->idx]->src->nodeformat);
+ if (VIR_STRDUP(chk->def->dom->disks[disk->idx]->src->nodeforma=
t,
+ qemuBlockNodeLookup(vm, disk->name)) < 0)
+ goto endjob;
+ }
+
+ priv =3D vm->privateData;
+ qemuDomainObjEnterMonitor(driver, vm);
+ rc =3D qemuMonitorUpdateCheckpointSize(priv->mon, chkdef);
+ if (qemuDomainObjExitMonitor(driver, vm) < 0)
+ goto endjob;
+ if (rc < 0)
+ goto endjob;
+ }
+
format_flags =3D virDomainCheckpointFormatConvertXMLFlags(flags);
if (chk =3D=3D virDomainCheckpointGetCurrent(vm->checkpoints))
format_flags |=3D VIR_DOMAIN_CHECKPOINT_FORMAT_CURRENT;
xml =3D virDomainCheckpointDefFormat(chkdef, driver->caps, driver->xml=
opt,
format_flags);
+ endjob:
if (flags & VIR_DOMAIN_CHECKPOINT_XML_SIZE)
qemuDomainObjEndJob(driver, vm);
diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c
index 731be2e5a6..19c769ae41 100644
--- a/src/qemu/qemu_monitor.c
+++ b/src/qemu/qemu_monitor.c
@@ -2308,6 +2308,17 @@ qemuMonitorBlockStatsUpdateCapacityBlockdev(qemuMoni=
torPtr mon,
return qemuMonitorJSONBlockStatsUpdateCapacityBlockdev(mon, stats);
}
+/* Updates "chk" to fill in size of the associated bitmap */
+int qemuMonitorUpdateCheckpointSize(qemuMonitorPtr mon,
+ virDomainCheckpointDefPtr chk)
+{
+ VIR_DEBUG("chk=3D%p", chk);
+
+ QEMU_CHECK_MONITOR(mon);
+
+ return qemuMonitorJSONUpdateCheckpointSize(mon, chk);
+}
+
int
qemuMonitorBlockResize(qemuMonitorPtr mon,
const char *device,
diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index 8723ff49c7..1769fa70cb 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -2876,6 +2876,82 @@ int qemuMonitorJSONBlockResize(qemuMonitorPtr mon,
return ret;
}
+int qemuMonitorJSONUpdateCheckpointSize(qemuMonitorPtr mon,
+ virDomainCheckpointDefPtr chk)
+{
+ int ret =3D -1;
+ size_t i, j;
+ virJSONValuePtr devices;
+
+ if (!(devices =3D qemuMonitorJSONQueryBlock(mon)))
+ return -1;
+
+ for (i =3D 0; i < virJSONValueArraySize(devices); i++) {
+ virJSONValuePtr dev =3D virJSONValueArrayGet(devices, i);
+ virJSONValuePtr inserted;
+ virJSONValuePtr bitmaps =3D NULL;
+ const char *node;
+ virDomainCheckpointDiskDefPtr disk;
+
+ if (!(dev =3D qemuMonitorJSONGetBlockDev(devices, i)))
+ goto cleanup;
+
+ if (!(inserted =3D virJSONValueObjectGetObject(dev, "inserted")))
+ continue;
+ if (!(node =3D virJSONValueObjectGetString(inserted, "node-name"))=
) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("query-block device entry was not in expected=
format"));
+ goto cleanup;
+ }
+
+ for (j =3D 0; j < chk->ndisks; j++) {
+ disk =3D &chk->disks[j];
+ if (disk->type !=3D VIR_DOMAIN_CHECKPOINT_TYPE_BITMAP)
+ continue;
+ if (STREQ(chk->parent.dom->disks[disk->idx]->src->nodeformat, =
node))
+ break;
+ }
+ if (j =3D=3D chk->ndisks) {
+ VIR_DEBUG("query-block did not find node %s", node);
+ continue;
+ }
+ if (!(bitmaps =3D virJSONValueObjectGetArray(dev, "dirty-bitmaps")=
)) {
+ virReportError(VIR_ERR_INTERNAL_ERROR,
+ _("disk %s dirty bitmaps missing"), disk->name);
+ goto cleanup;
+ }
+ for (j =3D 0; j < virJSONValueArraySize(bitmaps); j++) {
+ virJSONValuePtr map =3D virJSONValueArrayGet(bitmaps, j);
+ const char *name;
+
+ if (!(name =3D virJSONValueObjectGetString(map, "name"))) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("dirty bitmaps entry was not in expected form=
at"));
+ goto cleanup;
+ }
+ if (STRNEQ(name, disk->bitmap))
+ continue;
+ if (virJSONValueObjectGetNumberUlong(map, "count", &disk->size=
) < 0) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("invalid bitmap count"));
+ goto cleanup;
+ }
+ break;
+ }
+ if (j =3D=3D virJSONValueArraySize(bitmaps)) {
+ virReportError(VIR_ERR_INTERNAL_ERROR,
+ _("disk %s dirty bitmap info missing"), disk->n=
ame);
+ goto cleanup;
+ }
+ }
+
+ ret =3D 0;
+
+ cleanup:
+ virJSONValueFree(devices);
+ return ret;
+}
+
int qemuMonitorJSONSetPassword(qemuMonitorPtr mon,
const char *protocol,
--=20
2.20.1
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list
From nobody Fri Mar 29 10:43:07 2024
Delivered-To: importer@patchew.org
Received-SPF: pass (zoho.com: domain of redhat.com designates 209.132.183.28
as permitted sender) client-ip=209.132.183.28;
envelope-from=libvir-list-bounces@redhat.com; helo=mx1.redhat.com;
Authentication-Results: mx.zohomail.com;
spf=pass (zoho.com: domain of redhat.com designates 209.132.183.28 as
permitted sender) smtp.mailfrom=libvir-list-bounces@redhat.com;
dmarc=pass(p=none dis=none) header.from=redhat.com
ARC-Seal: i=1; a=rsa-sha256; t=1562604996; cv=none;
d=zoho.com; s=zohoarc;
b=YObRGVjMQDDcx6KGY25U88a2xyU2SmLo9ro/IyWf8+yZ9IswYJXiTlfqozarsBf1vZj9+hTmDaqgWQ1Lc+M5YK8TjQsetv3NdYm6jx7+4LQHWyxicWzO4floauqaX/sjenxqmkyVwk/UPRtdGWPJW5ok9okZ6iNiFpR6HedotHs=
ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=zoho.com;
s=zohoarc;
t=1562604996;
h=Content-Type:Content-Transfer-Encoding:Cc:Date:From:In-Reply-To:List-Subscribe:List-Post:List-Id:List-Archive:List-Help:List-Unsubscribe:MIME-Version:Message-ID:References:Sender:Subject:To:ARC-Authentication-Results;
bh=lCdqmYDCd1ZGEYWeKfjrb2xKfsq8KY4QU9K9SOACveM=;
b=gKGvFiOVKN3ArJkst+R3w5u2zc9HszE20nzn6bnaAj7KDj6Id02gPsW9XPD/0U6Rm/f0hJu7QD/6DlKR/8RnA3NySJ76pAnedGhjGmNUr4aAnaqZlnWulpPTZ/Wp8Ky5kZmCB/pKcHguS7YRb/mDc4eGDMe7t3I5YZ18Pl1GPLQ=
ARC-Authentication-Results: i=1; mx.zoho.com;
spf=pass (zoho.com: domain of redhat.com designates 209.132.183.28 as
permitted sender) smtp.mailfrom=libvir-list-bounces@redhat.com;
dmarc=pass header.from= (p=none dis=none)
header.from=
Return-Path:
Received: from mx1.redhat.com (mx1.redhat.com [209.132.183.28]) by
mx.zohomail.com
with SMTPS id 1562604996187717.0710129897539;
Mon, 8 Jul 2019 09:56:36 -0700 (PDT)
Received: from smtp.corp.redhat.com (int-mx01.intmail.prod.int.phx2.redhat.com
[10.5.11.11])
(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))
(No client certificate requested)
by mx1.redhat.com (Postfix) with ESMTPS id 7C643C0586C4;
Mon, 8 Jul 2019 16:56:12 +0000 (UTC)
Received: from colo-mx.corp.redhat.com
(colo-mx01.intmail.prod.int.phx2.redhat.com [10.5.11.20])
by smtp.corp.redhat.com (Postfix) with ESMTPS id 08F84189E1;
Mon, 8 Jul 2019 16:56:09 +0000 (UTC)
Received: from lists01.pubmisc.prod.ext.phx2.redhat.com
(lists01.pubmisc.prod.ext.phx2.redhat.com [10.5.19.33])
by colo-mx.corp.redhat.com (Postfix) with ESMTP id 750B018184AC;
Mon, 8 Jul 2019 16:56:01 +0000 (UTC)
Received: from smtp.corp.redhat.com (int-mx07.intmail.prod.int.phx2.redhat.com
[10.5.11.22])
by lists01.pubmisc.prod.ext.phx2.redhat.com (8.13.8/8.13.8) with ESMTP
id x68GtwA2027881 for ;
Mon, 8 Jul 2019 12:55:58 -0400
Received: by smtp.corp.redhat.com (Postfix)
id 5384C1001B29; Mon, 8 Jul 2019 16:55:58 +0000 (UTC)
Received: from blue.redhat.com (ovpn-116-78.phx2.redhat.com [10.3.116.78])
by smtp.corp.redhat.com (Postfix) with ESMTP id C93DB1001959;
Mon, 8 Jul 2019 16:55:57 +0000 (UTC)
From: Eric Blake
To: libvir-list@redhat.com
Date: Mon, 8 Jul 2019 11:55:45 -0500
Message-Id: <20190708165553.18452-3-eblake@redhat.com>
In-Reply-To: <20190708165553.18452-1-eblake@redhat.com>
References: <20190708165553.18452-1-eblake@redhat.com>
MIME-Version: 1.0
X-Scanned-By: MIMEDefang 2.84 on 10.5.11.22
X-loop: libvir-list@redhat.com
Cc: nsoffer@redhat.com, eshenitz@redhat.com, pkrempa@redhat.com
Subject: [libvirt] [PATCH v9 02/10] backup: Document new XML for backups
X-BeenThere: libvir-list@redhat.com
X-Mailman-Version: 2.1.12
Precedence: junk
List-Id: Development discussions about the libvirt library & tools
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Content-Transfer-Encoding: quoted-printable
Sender: libvir-list-bounces@redhat.com
Errors-To: libvir-list-bounces@redhat.com
X-Scanned-By: MIMEDefang 2.79 on 10.5.11.11
X-Greylist: Sender IP whitelisted,
not delayed by milter-greylist-4.5.16 (mx1.redhat.com [10.5.110.31]);
Mon, 08 Jul 2019 16:56:24 +0000 (UTC)
Content-Type: text/plain; charset="utf-8"
Prepare for new backup APIs by describing the XML that will represent
a backup. The XML resembles snapshots and checkpoints in being able
to select actions for a set of disks, but has other differences. It
can support both push model (the hypervisor does the backup directly
into the destination file) and pull model (the hypervisor exposes an
access port for a third party to grab what is necessary). Add
testsuite coverage for some minimal uses of the XML.
The element within tries to model the same
elements as a under , but sharing the RNG grammar
proved to be hairy. That is in part because while use
diff --git a/docs/index.html.in b/docs/index.html.in
index 7d0ab650e3..26e8406917 100644
--- a/docs/index.html.in
+++ b/docs/index.html.in
@@ -59,7 +59,8 @@
node devices,
secrets,
snapshots,
- checkpoints
+ checkpoints,
+ backup jobs
Wiki
Read further community contributed content
diff --git a/docs/schemas/domainbackup.rng b/docs/schemas/domainbackup.rng
new file mode 100644
index 0000000000..92327e7077
--- /dev/null
+++ b/docs/schemas/domainbackup.rng
@@ -0,0 +1,219 @@
+
+
+
+
+ [
+ ]
+
+
+
+
+
+
+
+ [
+ ]
+
+
+
+
+
+
+
+
+
+
+
+ push
+
+
+ [
+ ]
+
+
+ pull
+
+
+
+
+
+
+
+ tcp
+
+
+
+
+ [
+ ][
+ ]
+
+
+
+ [
+ ]
+
+
+
+
+
+ unix
+
+
+ [
+ ]
+
+
+
+ [
+ ]
+
+
+
+
+
+
+
+
+
+
+ [
+ ]
+
+
+
+
+
+
+
+
+
+ begin
+ inprogress
+ ready
+
+
+
+
+
+
+
+
+
+
+
+
+ [
+ ][
+ ]
+
+
+
+ on
+
+
+
+
+
+ no
+
+
+
+
+ [
+
+
+ file
+
+
+
+
+
+
+ ][
+ ]
+
+
+ [
+ ]
+
+
+ [
+
+ block
+
+
+
+
+
+ ][
+ ]
+
+
+ [
+ ]
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ [
+ ][
+ ]
+
+
+
+
+
+ file
+
+
+
+
+
+ [
+ ]
+
+
+
+
+
+ block
+
+
+
+
+ [
+ ]
+
+
+
+
+
+
+
+
+
+
+
diff --git a/libvirt.spec.in b/libvirt.spec.in
index ffadaf2d49..358f5fa200 100644
--- a/libvirt.spec.in
+++ b/libvirt.spec.in
@@ -1779,6 +1779,7 @@ exit 0
%{_datadir}/libvirt/schemas/capability.rng
%{_datadir}/libvirt/schemas/cputypes.rng
%{_datadir}/libvirt/schemas/domain.rng
+%{_datadir}/libvirt/schemas/domainbackup.rng
%{_datadir}/libvirt/schemas/domaincaps.rng
%{_datadir}/libvirt/schemas/domaincheckpoint.rng
%{_datadir}/libvirt/schemas/domaincommon.rng
diff --git a/mingw-libvirt.spec.in b/mingw-libvirt.spec.in
index a20c4b7d74..a56fca45fd 100644
--- a/mingw-libvirt.spec.in
+++ b/mingw-libvirt.spec.in
@@ -231,6 +231,7 @@ rm -rf $RPM_BUILD_ROOT%{mingw64_libexecdir}/libvirt-gue=
sts.sh
%{mingw32_datadir}/libvirt/schemas/capability.rng
%{mingw32_datadir}/libvirt/schemas/cputypes.rng
%{mingw32_datadir}/libvirt/schemas/domain.rng
+%{mingw32_datadir}/libvirt/schemas/domainbackup.rng
%{mingw32_datadir}/libvirt/schemas/domaincaps.rng
%{mingw32_datadir}/libvirt/schemas/domaincheckpoint.rng
%{mingw32_datadir}/libvirt/schemas/domaincommon.rng
@@ -322,6 +323,7 @@ rm -rf $RPM_BUILD_ROOT%{mingw64_libexecdir}/libvirt-gue=
sts.sh
%{mingw64_datadir}/libvirt/schemas/capability.rng
%{mingw64_datadir}/libvirt/schemas/cputypes.rng
%{mingw64_datadir}/libvirt/schemas/domain.rng
+%{mingw64_datadir}/libvirt/schemas/domainbackup.rng
%{mingw64_datadir}/libvirt/schemas/domaincaps.rng
%{mingw64_datadir}/libvirt/schemas/domaincheckpoint.rng
%{mingw64_datadir}/libvirt/schemas/domaincommon.rng
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 0ae1e51e03..ed5e6c6303 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -82,6 +82,8 @@ EXTRA_DIST =3D \
capabilityschemadata \
commanddata \
cputestdata \
+ domainbackupxml2xmlin \
+ domainbackupxml2xmlout \
domaincapsschemadata \
domainconfdata \
domainschemadata \
diff --git a/tests/domainbackupxml2xmlin/backup-pull.xml b/tests/domainback=
upxml2xmlin/backup-pull.xml
new file mode 100644
index 0000000000..2ce5cd6711
--- /dev/null
+++ b/tests/domainbackupxml2xmlin/backup-pull.xml
@@ -0,0 +1,9 @@
+
+ 1525889631
+
+
+
+
+
+
+
diff --git a/tests/domainbackupxml2xmlin/backup-push.xml b/tests/domainback=
upxml2xmlin/backup-push.xml
new file mode 100644
index 0000000000..1b7d3061fd
--- /dev/null
+++ b/tests/domainbackupxml2xmlin/backup-push.xml
@@ -0,0 +1,9 @@
+
+ 1525889631
+
+
+
+
+
+
+
diff --git a/tests/domainbackupxml2xmlin/empty.xml b/tests/domainbackupxml2=
xmlin/empty.xml
new file mode 100644
index 0000000000..7ed511f97b
--- /dev/null
+++ b/tests/domainbackupxml2xmlin/empty.xml
@@ -0,0 +1 @@
+
diff --git a/tests/domainbackupxml2xmlout/backup-pull.xml b/tests/domainbac=
kupxml2xmlout/backup-pull.xml
new file mode 100644
index 0000000000..2ce5cd6711
--- /dev/null
+++ b/tests/domainbackupxml2xmlout/backup-pull.xml
@@ -0,0 +1,9 @@
+
+ 1525889631
+
+
+
+
+
+
+
diff --git a/tests/domainbackupxml2xmlout/backup-push.xml b/tests/domainbac=
kupxml2xmlout/backup-push.xml
new file mode 100644
index 0000000000..1b7d3061fd
--- /dev/null
+++ b/tests/domainbackupxml2xmlout/backup-push.xml
@@ -0,0 +1,9 @@
+
+ 1525889631
+
+
+
+
+
+
+
diff --git a/tests/domainbackupxml2xmlout/empty.xml b/tests/domainbackupxml=
2xmlout/empty.xml
new file mode 100644
index 0000000000..13600fbb1c
--- /dev/null
+++ b/tests/domainbackupxml2xmlout/empty.xml
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
diff --git a/tests/virschematest.c b/tests/virschematest.c
index 8f5101df21..29a3b4eebb 100644
--- a/tests/virschematest.c
+++ b/tests/virschematest.c
@@ -221,6 +221,8 @@ mymain(void)
"lxcxml2xmloutdata", "bhyvexml2argvdata", "genericxml2xmli=
ndata",
"genericxml2xmloutdata", "xlconfigdata", "libxlxml2domconf=
igdata",
"qemuhotplugtestdomains");
+ DO_TEST_DIR("domainbackup.rng", "domainbackupxml2xmlin",
+ "domainbackupxml2xmlout");
DO_TEST_DIR("domaincaps.rng", "domaincapsschemadata");
DO_TEST_DIR("domaincheckpoint.rng", "qemudomaincheckpointxml2xmlin",
"qemudomaincheckpointxml2xmlout");
--=20
2.20.1
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list
From nobody Fri Mar 29 10:43:07 2024
Delivered-To: importer@patchew.org
Received-SPF: pass (zoho.com: domain of redhat.com designates 209.132.183.28
as permitted sender) client-ip=209.132.183.28;
envelope-from=libvir-list-bounces@redhat.com; helo=mx1.redhat.com;
Authentication-Results: mx.zohomail.com;
spf=pass (zoho.com: domain of redhat.com designates 209.132.183.28 as
permitted sender) smtp.mailfrom=libvir-list-bounces@redhat.com;
dmarc=pass(p=none dis=none) header.from=redhat.com
ARC-Seal: i=1; a=rsa-sha256; t=1562605035; cv=none;
d=zoho.com; s=zohoarc;
b=HMeBvDQ9UHCwEakrjPdn/rQhMNWAe0G4HxcAYk74d5jkuAYS3hzLUtPByf8jKTrS2v9lF+/WUB+Lz7L+ZI0vLI7bZaV9cdBdX/0LpYf3NJVYBvo5Mz5nEH8fIdxLzXAXImSk6Cyysc+Wxfyo0xJHAG6IQmmyrJtnmq/eVO0/5P8=
ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=zoho.com;
s=zohoarc;
t=1562605035;
h=Content-Type:Content-Transfer-Encoding:Cc:Date:From:In-Reply-To:List-Subscribe:List-Post:List-Id:List-Archive:List-Help:List-Unsubscribe:MIME-Version:Message-ID:References:Sender:Subject:To:ARC-Authentication-Results;
bh=pZIJbPmLcbbOEy+XynkmWol/4pnzoeGtm9ETNAjGp10=;
b=TfEpkCIPLr3PvOIsDf+HyNtB2+MngJo+yQ8+Xr9fLC7MTviaBGyhhKL32niyQOZPTlrdLBZCDeIqZc+c83HeXm0XYq3aHeSB5DZCFnwK4/7nWvHyu2ZFeztKowrFJgOYoib+ho5zCqK69N0WpOByK5MKwufesW7TMMqmbDWshdI=
ARC-Authentication-Results: i=1; mx.zoho.com;
spf=pass (zoho.com: domain of redhat.com designates 209.132.183.28 as
permitted sender) smtp.mailfrom=libvir-list-bounces@redhat.com;
dmarc=pass header.from= (p=none dis=none)
header.from=
Return-Path:
Received: from mx1.redhat.com (mx1.redhat.com [209.132.183.28]) by
mx.zohomail.com
with SMTPS id 1562605035243972.7848921028308;
Mon, 8 Jul 2019 09:57:15 -0700 (PDT)
Received: from smtp.corp.redhat.com (int-mx05.intmail.prod.int.phx2.redhat.com
[10.5.11.15])
(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))
(No client certificate requested)
by mx1.redhat.com (Postfix) with ESMTPS id 35C973092669;
Mon, 8 Jul 2019 16:57:00 +0000 (UTC)
Received: from colo-mx.corp.redhat.com
(colo-mx01.intmail.prod.int.phx2.redhat.com [10.5.11.20])
by smtp.corp.redhat.com (Postfix) with ESMTPS id 0E4922FC5D;
Mon, 8 Jul 2019 16:56:57 +0000 (UTC)
Received: from lists01.pubmisc.prod.ext.phx2.redhat.com
(lists01.pubmisc.prod.ext.phx2.redhat.com [10.5.19.33])
by colo-mx.corp.redhat.com (Postfix) with ESMTP id C5BE118184B9;
Mon, 8 Jul 2019 16:56:56 +0000 (UTC)
Received: from smtp.corp.redhat.com (int-mx07.intmail.prod.int.phx2.redhat.com
[10.5.11.22])
by lists01.pubmisc.prod.ext.phx2.redhat.com (8.13.8/8.13.8) with ESMTP
id x68GuS01027920 for ;
Mon, 8 Jul 2019 12:56:28 -0400
Received: by smtp.corp.redhat.com (Postfix)
id 873301001B33; Mon, 8 Jul 2019 16:56:28 +0000 (UTC)
Received: from blue.redhat.com (ovpn-116-78.phx2.redhat.com [10.3.116.78])
by smtp.corp.redhat.com (Postfix) with ESMTP id F3FB11001959;
Mon, 8 Jul 2019 16:56:24 +0000 (UTC)
From: Eric Blake
To: libvir-list@redhat.com
Date: Mon, 8 Jul 2019 11:55:46 -0500
Message-Id: <20190708165553.18452-4-eblake@redhat.com>
In-Reply-To: <20190708165553.18452-1-eblake@redhat.com>
References: <20190708165553.18452-1-eblake@redhat.com>
MIME-Version: 1.0
X-Scanned-By: MIMEDefang 2.84 on 10.5.11.22
X-loop: libvir-list@redhat.com
Cc: nsoffer@redhat.com, eshenitz@redhat.com, pkrempa@redhat.com
Subject: [libvirt] [PATCH v9 03/10] backup: Introduce virDomainBackup APIs
X-BeenThere: libvir-list@redhat.com
X-Mailman-Version: 2.1.12
Precedence: junk
List-Id: Development discussions about the libvirt library & tools
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: quoted-printable
Sender: libvir-list-bounces@redhat.com
Errors-To: libvir-list-bounces@redhat.com
X-Scanned-By: MIMEDefang 2.79 on 10.5.11.15
X-Greylist: Sender IP whitelisted,
not delayed by milter-greylist-4.5.16 (mx1.redhat.com [10.5.110.43]);
Mon, 08 Jul 2019 16:57:08 +0000 (UTC)
Introduce a few new public APIs related to incremental backups. This
builds on the previous notion of a checkpoint (without an existing
checkpoint, the new API is a full backup, differing from
virDomainBlockCopy in the point of time chosen and in operation on
multiple disks at once); and also allows creation of a new checkpoint
at the same time as starting the backup (after all, an incremental
backup is only useful if it covers the state since the previous
backup).
A backup job also affects filtering a listing of domains, as well as
adding event reporting for signaling when a push model backup
completes (where the hypervisor creates the backup); note that the
pull model does not have an event (starting the backup lets a third
party access the data, and only the third party knows when it is
finished).
Since multiple backup jobs can be run in parallel in the future (well,
qemu doesn't support it yet, but we don't want to preclude the idea),
virDomainBackupBegin() returns a positive job id, and the id is also
visible in the backup XML. But until a future libvirt release adds a
bunch of APIs related to parallel job management where job ids will
actually matter, the documentation is also clear that job id 0 means
the 'currently running backup job' (provided one exists), for use in
virDomainBackupGetXMLDesc() and virDomainBackupEnd().
The full list of new APIs:
virDomainBackupBegin;
virDomainBackupEnd;
virDomainBackupGetXMLDesc;
Signed-off-by: Eric Blake
Reviewed-by: Daniel P. Berrang=C3=A9
---
include/libvirt/libvirt-domain.h | 41 +++++-
src/driver-hypervisor.h | 14 ++
src/qemu/qemu_blockjob.h | 1 +
examples/c/misc/event-test.c | 3 +
src/conf/domain_conf.c | 2 +-
src/libvirt-domain-checkpoint.c | 7 +-
src/libvirt-domain.c | 219 +++++++++++++++++++++++++++++++
src/libvirt_public.syms | 3 +
tools/virsh-domain.c | 8 +-
9 files changed, 291 insertions(+), 7 deletions(-)
diff --git a/include/libvirt/libvirt-domain.h b/include/libvirt/libvirt-dom=
ain.h
index f160ee88b5..8aae7889f7 100644
--- a/include/libvirt/libvirt-domain.h
+++ b/include/libvirt/libvirt-domain.h
@@ -3,7 +3,7 @@
* Summary: APIs for management of domains
* Description: Provides APIs for the management of domains
*
- * Copyright (C) 2006-2015 Red Hat, Inc.
+ * Copyright (C) 2006-2019 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -2446,6 +2446,9 @@ typedef enum {
* exists as long as sync is active */
VIR_DOMAIN_BLOCK_JOB_TYPE_ACTIVE_COMMIT =3D 4,
+ /* Backup (virDomainBackupBegin), job exists until virDomainBackupEnd =
*/
+ VIR_DOMAIN_BLOCK_JOB_TYPE_BACKUP =3D 5,
+
# ifdef VIR_ENUM_SENTINELS
VIR_DOMAIN_BLOCK_JOB_TYPE_LAST
# endif
@@ -3267,6 +3270,7 @@ typedef enum {
VIR_DOMAIN_JOB_OPERATION_SNAPSHOT =3D 6,
VIR_DOMAIN_JOB_OPERATION_SNAPSHOT_REVERT =3D 7,
VIR_DOMAIN_JOB_OPERATION_DUMP =3D 8,
+ VIR_DOMAIN_JOB_OPERATION_BACKUP =3D 9,
# ifdef VIR_ENUM_SENTINELS
VIR_DOMAIN_JOB_OPERATION_LAST
@@ -3282,6 +3286,14 @@ typedef enum {
*/
# define VIR_DOMAIN_JOB_OPERATION "operation"
+/**
+ * VIR_DOMAIN_JOB_ID:
+ *
+ * virDomainGetJobStats field: the id of the job (so far, only for jobs
+ * started by virDomainBackupBegin()), as VIR_TYPED_PARAM_INT.
+ */
+# define VIR_DOMAIN_JOB_ID "id"
+
/**
* VIR_DOMAIN_JOB_TIME_ELAPSED:
*
@@ -4106,7 +4118,8 @@ typedef void (*virConnectDomainEventMigrationIteratio=
nCallback)(virConnectPtr co
* @nparams: size of the params array
* @opaque: application specific data
*
- * This callback occurs when a job (such as migration) running on the doma=
in
+ * This callback occurs when a job (such as migration or push-model
+ * virDomainBackupBegin()) running on the domain
* is completed. The params array will contain statistics of the just comp=
leted
* job as virDomainGetJobStats would return. The callback must not free @p=
arams
* (the array will be freed once the callback finishes).
@@ -4902,4 +4915,28 @@ int virDomainGetLaunchSecurityInfo(virDomainPtr doma=
in,
int *nparams,
unsigned int flags);
+typedef enum {
+ VIR_DOMAIN_BACKUP_BEGIN_NO_METADATA =3D (1 << 0), /* Make checkpoint w=
ithout
+ remembering it */
+ VIR_DOMAIN_BACKUP_BEGIN_QUIESCE =3D (1 << 1), /* use guest agent to
+ quiesce all mounted
+ file systems within
+ the domain */
+} virDomainBackupBeginFlags;
+
+/* Begin an incremental backup job, possibly creating a checkpoint. */
+int virDomainBackupBegin(virDomainPtr domain, const char *diskXml,
+ const char *checkpointXml, unsigned int flags);
+
+/* Learn about an ongoing backup job. */
+char *virDomainBackupGetXMLDesc(virDomainPtr domain, int id,
+ unsigned int flags);
+
+typedef enum {
+ VIR_DOMAIN_BACKUP_END_ABORT =3D (1 << 0), /* Abandon a push model back=
up */
+} virDomainBackupEndFlags;
+
+/* Complete (or abort) an incremental backup job. */
+int virDomainBackupEnd(virDomainPtr domain, int id, unsigned int flags);
+
#endif /* LIBVIRT_DOMAIN_H */
diff --git a/src/driver-hypervisor.h b/src/driver-hypervisor.h
index 395b710a26..9b127071c7 100644
--- a/src/driver-hypervisor.h
+++ b/src/driver-hypervisor.h
@@ -1363,6 +1363,17 @@ typedef int
(*virDrvDomainCheckpointDelete)(virDomainCheckpointPtr checkpoint,
unsigned int flags);
+typedef int
+(*virDrvDomainBackupBegin)(virDomainPtr domain, const char *diskXml,
+ const char *checkpointXml, unsigned int flags);
+
+typedef char *
+(*virDrvDomainBackupGetXMLDesc)(virDomainPtr domain, int id,
+ unsigned int flags);
+
+typedef int
+(*virDrvDomainBackupEnd)(virDomainPtr domain, int id, unsigned int flags);
+
typedef struct _virHypervisorDriver virHypervisorDriver;
typedef virHypervisorDriver *virHypervisorDriverPtr;
@@ -1622,4 +1633,7 @@ struct _virHypervisorDriver {
virDrvDomainCheckpointGetParent domainCheckpointGetParent;
virDrvDomainCheckpointDelete domainCheckpointDelete;
virDrvDomainCheckpointIsCurrent domainCheckpointIsCurrent;
+ virDrvDomainBackupBegin domainBackupBegin;
+ virDrvDomainBackupGetXMLDesc domainBackupGetXMLDesc;
+ virDrvDomainBackupEnd domainBackupEnd;
};
diff --git a/src/qemu/qemu_blockjob.h b/src/qemu/qemu_blockjob.h
index da529090ad..70438085ec 100644
--- a/src/qemu/qemu_blockjob.h
+++ b/src/qemu/qemu_blockjob.h
@@ -54,6 +54,7 @@ typedef enum {
QEMU_BLOCKJOB_TYPE_COPY =3D VIR_DOMAIN_BLOCK_JOB_TYPE_COPY,
QEMU_BLOCKJOB_TYPE_COMMIT =3D VIR_DOMAIN_BLOCK_JOB_TYPE_COMMIT,
QEMU_BLOCKJOB_TYPE_ACTIVE_COMMIT =3D VIR_DOMAIN_BLOCK_JOB_TYPE_ACTIVE_=
COMMIT,
+ QEMU_BLOCKJOB_TYPE_BACKUP =3D VIR_DOMAIN_BLOCK_JOB_TYPE_BACKUP,
/* Additional enum values local to qemu */
QEMU_BLOCKJOB_TYPE_INTERNAL,
QEMU_BLOCKJOB_TYPE_LAST
diff --git a/examples/c/misc/event-test.c b/examples/c/misc/event-test.c
index fcf4492470..98337ad185 100644
--- a/examples/c/misc/event-test.c
+++ b/examples/c/misc/event-test.c
@@ -891,6 +891,9 @@ blockJobTypeToStr(int type)
case VIR_DOMAIN_BLOCK_JOB_TYPE_ACTIVE_COMMIT:
return "active layer block commit";
+
+ case VIR_DOMAIN_BLOCK_JOB_TYPE_BACKUP:
+ return "backup";
}
return "unknown";
diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index 906d799d0a..10695eba41 100644
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -1217,7 +1217,7 @@ VIR_ENUM_IMPL(virDomainOsDefFirmware,
VIR_ENUM_DECL(virDomainBlockJob);
VIR_ENUM_IMPL(virDomainBlockJob,
VIR_DOMAIN_BLOCK_JOB_TYPE_LAST,
- "", "", "copy", "", "active-commit",
+ "", "", "copy", "", "active-commit", "",
);
VIR_ENUM_IMPL(virDomainMemoryModel,
diff --git a/src/libvirt-domain-checkpoint.c b/src/libvirt-domain-checkpoin=
t.c
index e1fd81ede0..113cddf08b 100644
--- a/src/libvirt-domain-checkpoint.c
+++ b/src/libvirt-domain-checkpoint.c
@@ -102,8 +102,11 @@ virDomainCheckpointGetConnect(virDomainCheckpointPtr c=
heckpoint)
* @flags: bitwise-OR of supported virDomainCheckpointCreateFlags
*
* Create a new checkpoint using @xmlDesc, with a top-level
- * element, on a running @domain. Note that @xmlDesc
- * must validate against the XML schema.
+ * element, on a running @domain. Note that
+ * @xmlDesc must validate against the XML schema.
+ * Typically, it is more common to create a new checkpoint as part of
+ * kicking off a backup job with virDomainBackupBegin(); however, it
+ * is also possible to start a checkpoint without a backup.
*
* See Checkpoint XM=
L
* for more details on @xmlDesc. In particular, some hypervisors may requi=
re
diff --git a/src/libvirt-domain.c b/src/libvirt-domain.c
index 2fe9bb8e91..60a5b6c446 100644
--- a/src/libvirt-domain.c
+++ b/src/libvirt-domain.c
@@ -10352,6 +10352,12 @@ virDomainBlockRebase(virDomainPtr dom, const char =
*disk,
* over the destination format, the ability to copy to a destination that
* is not a local file, and the possibility of additional tuning parameter=
s.
*
+ * The copy created by this API is not finalized until the job ends,
+ * and does not lend itself to incremental backups (beyond what
+ * VIR_DOMAIN_BLOCK_COPY_SHALLOW provides) nor to third-party control
+ * over the data being copied. For those features, use
+ * virDomainBackupBegin().
+ *
* Returns 0 if the operation has started, -1 on failure.
*/
int
@@ -12375,3 +12381,216 @@ int virDomainGetLaunchSecurityInfo(virDomainPtr d=
omain,
virDispatchError(domain->conn);
return -1;
}
+
+
+/**
+ * virDomainBackupBegin:
+ * @domain: a domain object
+ * @diskXml: description of storage to utilize and expose during
+ * the backup, or NULL
+ * @checkpointXml: description of a checkpoint to create or NULL
+ * @flags: bitwise-OR of supported virDomainBackupBeginFlags
+ *
+ * Start a point-in-time backup job for the specified disks of a
+ * running domain.
+ *
+ * A backup job is mutually exclusive with domain migration
+ * (particularly when the job sets up an NBD export, since it is not
+ * possible to tell any NBD clients about a server migrating between
+ * hosts). For now, backup jobs are also mutually exclusive with any
+ * other block job on the same device, although this restriction may
+ * be lifted in a future release. Progress of the backup job can be
+ * tracked via virDomainGetJobStats(). The job remains active until a
+ * subsequent call to virDomainBackupEnd(), even if it no longer has
+ * anything to copy.
+ *
+ * This API differs from virDomainBlockCopy() because it can grab the
+ * state of more than one disk in parallel, and because the state is
+ * captured as of the start of the job, rather than the end.
+ *
+ * There are two fundamental backup approaches. The first, called a
+ * push model, instructs the hypervisor to copy the state of the guest
+ * disk to the designated storage destination (which may be on the
+ * local file system or a network device). In this mode, the
+ * hypervisor writes the content of the guest disk to the destination,
+ * then emits VIR_DOMAIN_EVENT_ID_JOB_COMPLETED when the backup is
+ * either complete or failed (the backup image is invalid if the job
+ * fails or virDomainBackupEnd() is used prior to the event being
+ * emitted).
+ *
+ * The second, called a pull model, instructs the hypervisor to expose
+ * the state of the guest disk over an NBD export. A third-party
+ * client can then connect to this export and read whichever portions
+ * of the disk it desires. In this mode, there is no event; libvirt
+ * has to be informed via virDomainBackupEnd() when the third-party
+ * NBD client is done and the backup resources can be released.
+ *
+ * The @diskXml parameter is optional but usually provided and
+ * contains details about the backup in the top-level element
+ * , including which backup mode to use, whether the
+ * backup is incremental from a previous checkpoint, which disks
+ * participate in the backup, the destination for a push model backup,
+ * and the temporary storage and NBD server details for a pull model
+ * backup. If omitted, the backup attempts to default to a push mode
+ * full backup of all disks, where libvirt generates a filename for
+ * each disk by appending a suffix of a timestamp in seconds since the
+ * Epoch. virDomainBackupGetXMLDesc() can be called to learn actual
+ * values selected. For more information, see
+ * formatcheckpoint.html#BackupAttributes.
+ *
+ * The @checkpointXml parameter is optional; if non-NULL, then libvirt
+ * behaves as if virDomainCheckpointCreateXML() were called to create
+ * a checkpoint atomically covering the same point in time as the
+ * backup, using @checkpointXml and forwarding flags
+ * VIR_DOMAIN_BACKUP_BEGIN_QUIESCE and
+ * VIR_DOMAIN_BACKUP_BEGIN_NO_METADATA. The creation of a new
+ * checkpoint allows for future incremental backups. Note that some
+ * hypervisors may require a particular disk format, such as qcow2, in
+ * order to take advantage of checkpoints, while allowing arbitrary
+ * formats if checkpoints are not involved.
+ *
+ * Returns a non-negative job id on success or negative on failure.
+ * This id is then passed to virDomainBackupGetXMLDesc() and
+ * virDomainBackupEnd(); it can also be obtained from
+ * virDomainListJobIds(). This operation returns quickly, such that a
+ * user can choose to start a backup job between virDomainFSFreeze()
+ * and virDomainFSThaw() in order to create the backup while guest I/O
+ * is quiesced.
+ */
+int
+virDomainBackupBegin(virDomainPtr domain,
+ const char *diskXml,
+ const char *checkpointXml,
+ unsigned int flags)
+{
+ virConnectPtr conn;
+
+ VIR_DOMAIN_DEBUG(domain, "diskXml=3D%s, checkpointXml=3D%s, flags=3D0x=
%x",
+ NULLSTR(diskXml), NULLSTR(checkpointXml), flags);
+
+ virResetLastError();
+
+ virCheckDomainReturn(domain, -1);
+ conn =3D domain->conn;
+
+ virCheckReadOnlyGoto(conn->flags, error);
+ if (flags & VIR_DOMAIN_BACKUP_BEGIN_NO_METADATA)
+ virCheckNonNullArgGoto(checkpointXml, error);
+
+ if (conn->driver->domainBackupBegin) {
+ int ret;
+ ret =3D conn->driver->domainBackupBegin(domain, diskXml, checkpoin=
tXml,
+ flags);
+ if (!ret)
+ goto error;
+ return ret;
+ }
+
+ virReportUnsupportedError();
+ error:
+ virDispatchError(conn);
+ return -1;
+}
+
+
+/**
+ * virDomainBackupGetXMLDesc:
+ * @domain: a domain object
+ * @id: the id of an active backup job
+ * @flags: extra flags; not used yet, so callers should always pass 0
+ *
+ * In some cases, a user can start a backup job without supplying all
+ * details and rely on libvirt to fill in the rest (for example,
+ * selecting the port used for an NBD export). This API can then be
+ * used to learn what default values were chosen. At present, none of
+ * the information provided is security sensitive.
+ *
+ * @id can either be the return value of a previous
+ * virDomainBackupBegin() or the value 0 to select the current backup
+ * job (the latter usage is an error if the hypervisor supports
+ * parallel jobs and has more than one running).
+ *
+ * Returns a NUL-terminated UTF-8 encoded XML instance or NULL in
+ * case of error. The caller must free() the returned value.
+ */
+char *
+virDomainBackupGetXMLDesc(virDomainPtr domain, int id, unsigned int flags)
+{
+ virConnectPtr conn;
+
+ VIR_DOMAIN_DEBUG(domain, "id=3D%d, flags=3D0x%x", id, flags);
+
+ virResetLastError();
+
+ virCheckDomainReturn(domain, NULL);
+ conn =3D domain->conn;
+
+ virCheckNonNegativeArgGoto(id, error);
+
+ if (conn->driver->domainBackupGetXMLDesc) {
+ char *ret;
+ ret =3D conn->driver->domainBackupGetXMLDesc(domain, id, flags);
+ if (!ret)
+ goto error;
+ return ret;
+ }
+
+ virReportUnsupportedError();
+ error:
+ virDispatchError(conn);
+ return NULL;
+}
+
+
+/**
+ * virDomainBackupEnd:
+ * @domain: a domain object
+ * @id: the id of an active backup job
+ * @flags: bitwise-OR of supported virDomainBackupEndFlags
+ *
+ * Conclude a point-in-time backup job of the given domain.
+ *
+ * @id can either be the return value of a previous
+ * virDomainBackupBegin() or the value 0 to select the current backup
+ * job (the latter usage is an error if the hypervisor supports
+ * parallel jobs and has more than one running).
+ *
+ * If the backup job uses the push model, but the event marking that
+ * all data has been copied has not yet been emitted, then the command
+ * fails unless @flags includes VIR_DOMAIN_BACKUP_END_ABORT. If the
+ * event has been issued, or if the backup uses the pull model, the
+ * flag has no effect.
+ *
+ * Returns 1 if the backup job completed successfully (the backup
+ * destination file in a push model is consistent), 0 if the job was
+ * aborted successfully (only when VIR_DOMAIN_BACKUP_END_ABORT is
+ * passed; the destination file is unusable), and -1 on failure.
+ */
+int
+virDomainBackupEnd(virDomainPtr domain, int id, unsigned int flags)
+{
+ virConnectPtr conn;
+
+ VIR_DOMAIN_DEBUG(domain, "id=3D%d, flags=3D0x%x", id, flags);
+
+ virResetLastError();
+
+ virCheckDomainReturn(domain, -1);
+ conn =3D domain->conn;
+
+ virCheckReadOnlyGoto(conn->flags, error);
+ virCheckNonNegativeArgGoto(id, error);
+
+ if (conn->driver->domainBackupEnd) {
+ int ret;
+ ret =3D conn->driver->domainBackupEnd(domain, id, flags);
+ if (!ret)
+ goto error;
+ return ret;
+ }
+
+ virReportUnsupportedError();
+ error:
+ virDispatchError(conn);
+ return -1;
+}
diff --git a/src/libvirt_public.syms b/src/libvirt_public.syms
index 6401916a81..a1394d3ef5 100644
--- a/src/libvirt_public.syms
+++ b/src/libvirt_public.syms
@@ -838,6 +838,9 @@ LIBVIRT_5.5.0 {
LIBVIRT_5.6.0 {
global:
+ virDomainBackupBegin;
+ virDomainBackupEnd;
+ virDomainBackupGetXMLDesc;
virDomainCheckpointCreateXML;
virDomainCheckpointDelete;
virDomainCheckpointFree;
diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c
index 0b5aca4758..abc9001508 100644
--- a/tools/virsh-domain.c
+++ b/tools/virsh-domain.c
@@ -2562,7 +2562,9 @@ VIR_ENUM_IMPL(virshDomainBlockJob,
N_("Block Pull"),
N_("Block Copy"),
N_("Block Commit"),
- N_("Active Block Commit"));
+ N_("Active Block Commit"),
+ N_("Backup"),
+);
static const char *
virshDomainBlockJobToString(int type)
@@ -6080,7 +6082,9 @@ VIR_ENUM_IMPL(virshDomainJobOperation,
N_("Outgoing migration"),
N_("Snapshot"),
N_("Snapshot revert"),
- N_("Dump"));
+ N_("Dump"),
+ N_("Backup"),
+);
static const char *
virshDomainJobOperationToString(int op)
--=20
2.20.1
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list
From nobody Fri Mar 29 10:43:07 2024
Delivered-To: importer@patchew.org
Received-SPF: pass (zoho.com: domain of redhat.com designates 209.132.183.28
as permitted sender) client-ip=209.132.183.28;
envelope-from=libvir-list-bounces@redhat.com; helo=mx1.redhat.com;
Authentication-Results: mx.zohomail.com;
spf=pass (zoho.com: domain of redhat.com designates 209.132.183.28 as
permitted sender) smtp.mailfrom=libvir-list-bounces@redhat.com;
dmarc=pass(p=none dis=none) header.from=redhat.com
ARC-Seal: i=1; a=rsa-sha256; t=1562605027; cv=none;
d=zoho.com; s=zohoarc;
b=ax6IbVIFDYnHWFhjd/CCc8bDSrq/OwQh3qeDdlDYkZnPAYKMXJ52WUsPhT/lA7HYmt4MZObA+CHG0JecVKCJsGbUhYjgH3DEWt1v7usNtRrz3QLB2ZS3vwMzH018tOaxZJg695/tnCRu4SQri7qesSgPSFkakWe7Sn5FtQ/pChE=
ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=zoho.com;
s=zohoarc;
t=1562605027;
h=Content-Type:Content-Transfer-Encoding:Cc:Date:From:In-Reply-To:List-Subscribe:List-Post:List-Id:List-Archive:List-Help:List-Unsubscribe:MIME-Version:Message-ID:References:Sender:Subject:To:ARC-Authentication-Results;
bh=0tN18OFG38BDY93BliaY/Ou4zCOygxOcJCO7EKGTyis=;
b=FbNMuVHVWRDCpb8ERbc+xAG8PU+K/17zY0vZoTC+K1yFEnmPq0VFPH6JsiP7FYECcgqN6SbS8UZ+lsMLZNgCel3zkyPxMFGj/MWP2tZLPWDDl74be4mA1VwJ64TbqlIlSuoJo7fvv80DFTAuVK5LeqxY4tMOF6GgXvz1pT5xGmw=
ARC-Authentication-Results: i=1; mx.zoho.com;
spf=pass (zoho.com: domain of redhat.com designates 209.132.183.28 as
permitted sender) smtp.mailfrom=libvir-list-bounces@redhat.com;
dmarc=pass header.from= (p=none dis=none)
header.from=
Return-Path:
Received: from mx1.redhat.com (mx1.redhat.com [209.132.183.28]) by
mx.zohomail.com
with SMTPS id 1562605027985657.105165613581;
Mon, 8 Jul 2019 09:57:07 -0700 (PDT)
Received: from smtp.corp.redhat.com (int-mx01.intmail.prod.int.phx2.redhat.com
[10.5.11.11])
(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))
(No client certificate requested)
by mx1.redhat.com (Postfix) with ESMTPS id C7E6730C584E;
Mon, 8 Jul 2019 16:57:04 +0000 (UTC)
Received: from colo-mx.corp.redhat.com
(colo-mx02.intmail.prod.int.phx2.redhat.com [10.5.11.21])
by smtp.corp.redhat.com (Postfix) with ESMTPS id 6235F197FE;
Mon, 8 Jul 2019 16:57:03 +0000 (UTC)
Received: from lists01.pubmisc.prod.ext.phx2.redhat.com
(lists01.pubmisc.prod.ext.phx2.redhat.com [10.5.19.33])
by colo-mx.corp.redhat.com (Postfix) with ESMTP id 192F924B25;
Mon, 8 Jul 2019 16:57:01 +0000 (UTC)
Received: from smtp.corp.redhat.com (int-mx07.intmail.prod.int.phx2.redhat.com
[10.5.11.22])
by lists01.pubmisc.prod.ext.phx2.redhat.com (8.13.8/8.13.8) with ESMTP
id x68GuThs027926 for ;
Mon, 8 Jul 2019 12:56:29 -0400
Received: by smtp.corp.redhat.com (Postfix)
id 3694C1001B33; Mon, 8 Jul 2019 16:56:29 +0000 (UTC)
Received: from blue.redhat.com (ovpn-116-78.phx2.redhat.com [10.3.116.78])
by smtp.corp.redhat.com (Postfix) with ESMTP id AB1DB1001B18;
Mon, 8 Jul 2019 16:56:28 +0000 (UTC)
From: Eric Blake
To: libvir-list@redhat.com
Date: Mon, 8 Jul 2019 11:55:47 -0500
Message-Id: <20190708165553.18452-5-eblake@redhat.com>
In-Reply-To: <20190708165553.18452-1-eblake@redhat.com>
References: <20190708165553.18452-1-eblake@redhat.com>
MIME-Version: 1.0
X-Scanned-By: MIMEDefang 2.84 on 10.5.11.22
X-loop: libvir-list@redhat.com
Cc: nsoffer@redhat.com, eshenitz@redhat.com, pkrempa@redhat.com
Subject: [libvirt] [PATCH v9 04/10] backup: Implement backup APIs for remote
driver
X-BeenThere: libvir-list@redhat.com
X-Mailman-Version: 2.1.12
Precedence: junk
List-Id: Development discussions about the libvirt library & tools
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: quoted-printable
Sender: libvir-list-bounces@redhat.com
Errors-To: libvir-list-bounces@redhat.com
X-Scanned-By: MIMEDefang 2.79 on 10.5.11.11
X-Greylist: Sender IP whitelisted,
not delayed by milter-greylist-4.5.16 (mx1.redhat.com [10.5.110.46]);
Mon, 08 Jul 2019 16:57:06 +0000 (UTC)
This one is fairly straightforward - the generator already does what
we need.
Signed-off-by: Eric Blake
Reviewed-by: Daniel P. Berrang=C3=A9
---
src/remote/remote_driver.c | 3 ++
src/remote/remote_protocol.x | 53 +++++++++++++++++++++++++++++++++++-
src/remote_protocol-structs | 28 +++++++++++++++++++
3 files changed, 83 insertions(+), 1 deletion(-)
diff --git a/src/remote/remote_driver.c b/src/remote/remote_driver.c
index 9145aa91ff..0c54caf31b 100644
--- a/src/remote/remote_driver.c
+++ b/src/remote/remote_driver.c
@@ -8597,6 +8597,9 @@ static virHypervisorDriver hypervisor_driver =3D {
.domainCheckpointGetParent =3D remoteDomainCheckpointGetParent, /* 5.6=
.0 */
.domainCheckpointDelete =3D remoteDomainCheckpointDelete, /* 5.6.0 */
.domainCheckpointIsCurrent =3D remoteDomainCheckpointIsCurrent, /* 5.6=
.0 */
+ .domainBackupBegin =3D remoteDomainBackupBegin, /* 5.6.0 */
+ .domainBackupGetXMLDesc =3D remoteDomainBackupGetXMLDesc, /* 5.6.0 */
+ .domainBackupEnd =3D remoteDomainBackupEnd, /* 5.6.0 */
};
static virNetworkDriver network_driver =3D {
diff --git a/src/remote/remote_protocol.x b/src/remote/remote_protocol.x
index a2bd30360f..06f2b41a63 100644
--- a/src/remote/remote_protocol.x
+++ b/src/remote/remote_protocol.x
@@ -3732,6 +3732,37 @@ struct remote_domain_checkpoint_delete_args {
unsigned int flags;
};
+struct remote_domain_backup_begin_args {
+ remote_nonnull_domain dom;
+ remote_string disk_xml;
+ remote_string checkpoint_xml;
+ unsigned int flags;
+};
+
+struct remote_domain_backup_begin_ret {
+ int id;
+};
+
+struct remote_domain_backup_get_xml_desc_args {
+ remote_nonnull_domain dom;
+ int id;
+ unsigned int flags;
+};
+
+struct remote_domain_backup_get_xml_desc_ret {
+ remote_nonnull_string xml;
+};
+
+struct remote_domain_backup_end_args {
+ remote_nonnull_domain dom;
+ int id;
+ unsigned int flags;
+};
+
+struct remote_domain_backup_end_ret {
+ int retcode;
+};
+
/*----- Protocol. -----*/
/* Define the program number, protocol version and procedure numbers here.=
*/
@@ -6595,10 +6626,30 @@ enum remote_procedure {
*/
REMOTE_PROC_DOMAIN_CHECKPOINT_DELETE =3D 417,
+ /**
+ * @generate: both
+ * @acl: domain:checkpoint
+ */
+ REMOTE_PROC_DOMAIN_CHECKPOINT_IS_CURRENT =3D 418,
+
+ /**
+ * @generate: both
+ * @acl: domain:checkpoint
+ * @acl: domain:block_write
+ * @acl: domain:fs_freeze:VIR_DOMAIN_BACKUP_BEGIN_QUIESCE
+ */
+ REMOTE_PROC_DOMAIN_BACKUP_BEGIN =3D 419,
+
/**
* @generate: both
* @priority: high
* @acl: domain:read
*/
- REMOTE_PROC_DOMAIN_CHECKPOINT_IS_CURRENT =3D 418
+ REMOTE_PROC_DOMAIN_BACKUP_GET_XML_DESC =3D 420,
+
+ /**
+ * @generate: both
+ * @acl: domain:checkpoint
+ */
+ REMOTE_PROC_DOMAIN_BACKUP_END =3D 421
};
diff --git a/src/remote_protocol-structs b/src/remote_protocol-structs
index ba4337b1bd..535ed2c484 100644
--- a/src/remote_protocol-structs
+++ b/src/remote_protocol-structs
@@ -3112,6 +3112,31 @@ struct remote_domain_checkpoint_delete_args {
remote_nonnull_domain_checkpoint checkpoint;
u_int flags;
};
+struct remote_domain_backup_begin_args {
+ remote_nonnull_domain dom;
+ remote_string disk_xml;
+ remote_string checkpoint_xml;
+ u_int flags;
+};
+struct remote_domain_backup_begin_ret {
+ int id;
+};
+struct remote_domain_backup_get_xml_desc_args {
+ remote_nonnull_domain dom;
+ int id;
+ u_int flags;
+};
+struct remote_domain_backup_get_xml_desc_ret {
+ remote_nonnull_string xml;
+};
+struct remote_domain_backup_end_args {
+ remote_nonnull_domain dom;
+ int id;
+ u_int flags;
+};
+struct remote_domain_backup_end_ret {
+ int retcode;
+};
enum remote_procedure {
REMOTE_PROC_CONNECT_OPEN =3D 1,
REMOTE_PROC_CONNECT_CLOSE =3D 2,
@@ -3531,4 +3556,7 @@ enum remote_procedure {
REMOTE_PROC_DOMAIN_CHECKPOINT_GET_PARENT =3D 416,
REMOTE_PROC_DOMAIN_CHECKPOINT_DELETE =3D 417,
REMOTE_PROC_DOMAIN_CHECKPOINT_IS_CURRENT =3D 418,
+ REMOTE_PROC_DOMAIN_BACKUP_BEGIN =3D 419,
+ REMOTE_PROC_DOMAIN_BACKUP_GET_XML_DESC =3D 420,
+ REMOTE_PROC_DOMAIN_BACKUP_END =3D 421,
};
--=20
2.20.1
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list
From nobody Fri Mar 29 10:43:07 2024
Delivered-To: importer@patchew.org
Received-SPF: pass (zoho.com: domain of redhat.com designates 209.132.183.28
as permitted sender) client-ip=209.132.183.28;
envelope-from=libvir-list-bounces@redhat.com; helo=mx1.redhat.com;
Authentication-Results: mx.zohomail.com;
spf=pass (zoho.com: domain of redhat.com designates 209.132.183.28 as
permitted sender) smtp.mailfrom=libvir-list-bounces@redhat.com;
dmarc=pass(p=none dis=none) header.from=redhat.com
ARC-Seal: i=1; a=rsa-sha256; t=1562605050; cv=none;
d=zoho.com; s=zohoarc;
b=LXpDMUMn4jn5e/J2o3A0j56vkIxHWnmMAc5z2w54ADIHbG605R3zWeiXCMROLM+YMrI9psj1kUqCQ3RMIcJabIL5JWAoCDWHvjOjMwAEIx3++k69WSgJMTVGQc2Ev9t7t4+rKyieNG/0d37ZYOfJksDksTBKPKL9jTjHK3EdSRE=
ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=zoho.com;
s=zohoarc;
t=1562605050;
h=Content-Type:Content-Transfer-Encoding:Cc:Date:From:In-Reply-To:List-Subscribe:List-Post:List-Id:List-Archive:List-Help:List-Unsubscribe:MIME-Version:Message-ID:References:Sender:Subject:To:ARC-Authentication-Results;
bh=YSA0eEBwgh365FvLmd+F+n9xmkUAJkN7+LkdXGWSHyk=;
b=MJMHuaEuIapT9717ISXC9vtG98+w7SvmaWNfqtKO1TIUfkHg88l5q0RYAOUxZJl2pPOcETCHj5IBxRPJWZynd1TojVIJrG4y1IXb+Fz4Px1n/1OZahAnnd51ze8zKs/K8mkqc7Uhnom4gYB/0JqZ3lJRxGCh377wrWncjDADNos=
ARC-Authentication-Results: i=1; mx.zoho.com;
spf=pass (zoho.com: domain of redhat.com designates 209.132.183.28 as
permitted sender) smtp.mailfrom=libvir-list-bounces@redhat.com;
dmarc=pass header.from= (p=none dis=none)
header.from=
Return-Path:
Received: from mx1.redhat.com (mx1.redhat.com [209.132.183.28]) by
mx.zohomail.com
with SMTPS id 1562605050124188.43486173591054;
Mon, 8 Jul 2019 09:57:30 -0700 (PDT)
Received: from smtp.corp.redhat.com (int-mx04.intmail.prod.int.phx2.redhat.com
[10.5.11.14])
(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))
(No client certificate requested)
by mx1.redhat.com (Postfix) with ESMTPS id EEFBDB2DDE;
Mon, 8 Jul 2019 16:57:20 +0000 (UTC)
Received: from colo-mx.corp.redhat.com
(colo-mx01.intmail.prod.int.phx2.redhat.com [10.5.11.20])
by smtp.corp.redhat.com (Postfix) with ESMTPS id BBE9D5D9C8;
Mon, 8 Jul 2019 16:57:18 +0000 (UTC)
Received: from lists01.pubmisc.prod.ext.phx2.redhat.com
(lists01.pubmisc.prod.ext.phx2.redhat.com [10.5.19.33])
by colo-mx.corp.redhat.com (Postfix) with ESMTP id 8690118184AD;
Mon, 8 Jul 2019 16:57:15 +0000 (UTC)
Received: from smtp.corp.redhat.com (int-mx07.intmail.prod.int.phx2.redhat.com
[10.5.11.22])
by lists01.pubmisc.prod.ext.phx2.redhat.com (8.13.8/8.13.8) with ESMTP
id x68GuTAK027935 for ;
Mon, 8 Jul 2019 12:56:29 -0400
Received: by smtp.corp.redhat.com (Postfix)
id E54AE1001B19; Mon, 8 Jul 2019 16:56:29 +0000 (UTC)
Received: from blue.redhat.com (ovpn-116-78.phx2.redhat.com [10.3.116.78])
by smtp.corp.redhat.com (Postfix) with ESMTP id 5B8B41001B29;
Mon, 8 Jul 2019 16:56:29 +0000 (UTC)
From: Eric Blake
To: libvir-list@redhat.com
Date: Mon, 8 Jul 2019 11:55:48 -0500
Message-Id: <20190708165553.18452-6-eblake@redhat.com>
In-Reply-To: <20190708165553.18452-1-eblake@redhat.com>
References: <20190708165553.18452-1-eblake@redhat.com>
MIME-Version: 1.0
X-Scanned-By: MIMEDefang 2.84 on 10.5.11.22
X-loop: libvir-list@redhat.com
Cc: nsoffer@redhat.com, eshenitz@redhat.com, pkrempa@redhat.com
Subject: [libvirt] [PATCH v9 05/10] backup: Parse and output backup XML
X-BeenThere: libvir-list@redhat.com
X-Mailman-Version: 2.1.12
Precedence: junk
List-Id: Development discussions about the libvirt library & tools
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Content-Transfer-Encoding: quoted-printable
Sender: libvir-list-bounces@redhat.com
Errors-To: libvir-list-bounces@redhat.com
X-Scanned-By: MIMEDefang 2.79 on 10.5.11.14
X-Greylist: Sender IP whitelisted,
not delayed by milter-greylist-4.5.16 (mx1.redhat.com [10.5.110.26]);
Mon, 08 Jul 2019 16:57:28 +0000 (UTC)
Content-Type: text/plain; charset="utf-8"
Accept XML describing a generic block job, and output it again as
needed. This may still need a few tweaks to match the documented XML
and RNG schema.
Signed-off-by: Eric Blake
---
src/conf/backup_conf.h | 94 +++++++
src/conf/virconftypes.h | 3 +
src/conf/Makefile.inc.am | 2 +
src/conf/backup_conf.c | 546 +++++++++++++++++++++++++++++++++++++++
src/libvirt_private.syms | 8 +-
5 files changed, 652 insertions(+), 1 deletion(-)
create mode 100644 src/conf/backup_conf.h
create mode 100644 src/conf/backup_conf.c
diff --git a/src/conf/backup_conf.h b/src/conf/backup_conf.h
new file mode 100644
index 0000000000..1714315a1f
--- /dev/null
+++ b/src/conf/backup_conf.h
@@ -0,0 +1,94 @@
+/*
+ * backup_conf.h: domain backup XML processing
+ * (based on domain_conf.h)
+ *
+ * Copyright (C) 2006-2019 Red Hat, Inc.
+ * Copyright (C) 2006-2008 Daniel P. Berrange
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library. If not, see
+ * .
+ */
+
+#pragma once
+
+#include "internal.h"
+#include "domain_conf.h"
+#include "moment_conf.h"
+#include "virenum.h"
+
+/* Items related to incremental backup state */
+
+typedef enum {
+ VIR_DOMAIN_BACKUP_TYPE_DEFAULT =3D 0,
+ VIR_DOMAIN_BACKUP_TYPE_PUSH,
+ VIR_DOMAIN_BACKUP_TYPE_PULL,
+
+ VIR_DOMAIN_BACKUP_TYPE_LAST
+} virDomainBackupType;
+
+typedef enum {
+ VIR_DOMAIN_BACKUP_DISK_STATE_DEFAULT =3D 0, /* Initial */
+ VIR_DOMAIN_BACKUP_DISK_STATE_CREATED, /* File created */
+ VIR_DOMAIN_BACKUP_DISK_STATE_LABEL, /* Security labels applied */
+ VIR_DOMAIN_BACKUP_DISK_STATE_READY, /* Handed to guest */
+ VIR_DOMAIN_BACKUP_DISK_STATE_BITMAP, /* Associated temp bitmap created=
*/
+ VIR_DOMAIN_BACKUP_DISK_STATE_EXPORT, /* NBD export created */
+ VIR_DOMAIN_BACKUP_DISK_STATE_COMPLETE, /* Push job finished */
+} virDomainBackupDiskState;
+
+/* Stores disk-backup information */
+typedef struct _virDomainBackupDiskDef virDomainBackupDiskDef;
+typedef virDomainBackupDiskDef *virDomainBackupDiskDefPtr;
+struct _virDomainBackupDiskDef {
+ char *name; /* name matching the dom->disks that matches na=
me */
+
+ /* details of target for push-mode, or of the scratch file for pull-mo=
de */
+ virStorageSourcePtr store;
+ int state; /* virDomainBackupDiskState, not stored in XML */
+};
+
+/* Stores the complete backup metadata */
+typedef struct _virDomainBackupDef virDomainBackupDef;
+typedef virDomainBackupDef *virDomainBackupDefPtr;
+struct _virDomainBackupDef {
+ /* Public XML. */
+ int type; /* virDomainBackupType */
+ int id;
+ char *incremental;
+ virStorageNetHostDefPtr server; /* only when type =3D=3D PULL */
+
+ size_t ndisks; /* should not exceed dom->ndisks */
+ virDomainBackupDiskDef *disks;
+};
+
+VIR_ENUM_DECL(virDomainBackup);
+
+typedef enum {
+ VIR_DOMAIN_BACKUP_PARSE_INTERNAL =3D 1 << 0,
+} virDomainBackupParseFlags;
+
+virDomainBackupDefPtr virDomainBackupDefParseString(const char *xmlStr,
+ virDomainXMLOptionPtr =
xmlopt,
+ unsigned int flags);
+virDomainBackupDefPtr virDomainBackupDefParseNode(xmlDocPtr xml,
+ xmlNodePtr root,
+ virDomainXMLOptionPtr xm=
lopt,
+ unsigned int flags);
+void virDomainBackupDefFree(virDomainBackupDefPtr def);
+int virDomainBackupDefFormat(virBufferPtr buf,
+ virDomainBackupDefPtr def,
+ bool internal);
+int virDomainBackupAlignDisks(virDomainBackupDefPtr backup,
+ virDomainDefPtr dom, const char *suffix);
diff --git a/src/conf/virconftypes.h b/src/conf/virconftypes.h
index e8e9b6c314..d6e5fe5c49 100644
--- a/src/conf/virconftypes.h
+++ b/src/conf/virconftypes.h
@@ -93,6 +93,9 @@ typedef virDomainABIStability *virDomainABIStabilityPtr;
typedef struct _virDomainActualNetDef virDomainActualNetDef;
typedef virDomainActualNetDef *virDomainActualNetDefPtr;
+typedef struct _virDomainBackupDef virDomainBackupDef;
+typedef virDomainBackupDef *virDomainBackupDefPtr;
+
typedef struct _virDomainBIOSDef virDomainBIOSDef;
typedef virDomainBIOSDef *virDomainBIOSDefPtr;
diff --git a/src/conf/Makefile.inc.am b/src/conf/Makefile.inc.am
index b72ebbbda5..6913a5c5d8 100644
--- a/src/conf/Makefile.inc.am
+++ b/src/conf/Makefile.inc.am
@@ -12,6 +12,8 @@ NETDEV_CONF_SOURCES =3D \
$(NULL)
DOMAIN_CONF_SOURCES =3D \
+ conf/backup_conf.c \
+ conf/backup_conf.h \
conf/capabilities.c \
conf/capabilities.h \
conf/checkpoint_conf.c \
diff --git a/src/conf/backup_conf.c b/src/conf/backup_conf.c
new file mode 100644
index 0000000000..2bd94c1d73
--- /dev/null
+++ b/src/conf/backup_conf.c
@@ -0,0 +1,546 @@
+/*
+ * backup_conf.c: domain backup XML processing
+ *
+ * Copyright (C) 2006-2019 Red Hat, Inc.
+ * Copyright (C) 2006-2008 Daniel P. Berrange
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library. If not, see
+ * .
+ */
+
+#include
+
+#include "internal.h"
+#include "virbitmap.h"
+#include "virbuffer.h"
+#include "datatypes.h"
+#include "domain_conf.h"
+#include "virlog.h"
+#include "viralloc.h"
+#include "backup_conf.h"
+#include "virstoragefile.h"
+#include "viruuid.h"
+#include "virfile.h"
+#include "virerror.h"
+#include "virxml.h"
+#include "virstring.h"
+
+#define VIR_FROM_THIS VIR_FROM_DOMAIN
+
+VIR_LOG_INIT("conf.backup_conf");
+
+VIR_ENUM_IMPL(virDomainBackup,
+ VIR_DOMAIN_BACKUP_TYPE_LAST,
+ "default", "push", "pull");
+
+static void
+virDomainBackupDiskDefClear(virDomainBackupDiskDefPtr disk)
+{
+ VIR_FREE(disk->name);
+ virStorageSourceClear(disk->store);
+ disk->store =3D NULL;
+}
+
+void
+virDomainBackupDefFree(virDomainBackupDefPtr def)
+{
+ size_t i;
+
+ if (!def)
+ return;
+
+ VIR_FREE(def->incremental);
+ virStorageNetHostDefFree(1, def->server);
+ for (i =3D 0; i < def->ndisks; i++)
+ virDomainBackupDiskDefClear(&def->disks[i]);
+ VIR_FREE(def->disks);
+ VIR_FREE(def);
+}
+
+static int
+virDomainBackupDiskDefParseXML(xmlNodePtr node,
+ xmlXPathContextPtr ctxt,
+ virDomainBackupDiskDefPtr def,
+ bool push, bool internal,
+ virDomainXMLOptionPtr xmlopt)
+{
+ int ret =3D -1;
+ // char *backup =3D NULL; /* backup=3D"yes|no"? */
+ char *type =3D NULL;
+ char *driver =3D NULL;
+ xmlNodePtr cur;
+ xmlNodePtr saved =3D ctxt->node;
+
+ ctxt->node =3D node;
+
+ if (VIR_ALLOC(def->store) < 0)
+ goto cleanup;
+
+ def->name =3D virXMLPropString(node, "name");
+ if (!def->name) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("missing name from disk backup element"));
+ goto cleanup;
+ }
+
+ /* Needed? A way for users to list a disk and explicitly mark it
+ * as not participating, and then output shows all disks rather
+ * than just active disks */
+#if 0
+ backup =3D virXMLPropString(node, "backup");
+ if (backup) {
+ def->type =3D virDomainCheckpointTypeFromString(checkpoint);
+ if (def->type <=3D 0) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
+ _("unknown disk checkpoint setting '%s'"),
+ checkpoint);
+ goto cleanup;
+ }
+ }
+#endif
+
+ if ((type =3D virXMLPropString(node, "type"))) {
+ if ((def->store->type =3D virStorageTypeFromString(type)) <=3D 0 ||
+ def->store->type =3D=3D VIR_STORAGE_TYPE_VOLUME ||
+ def->store->type =3D=3D VIR_STORAGE_TYPE_DIR) {
+ virReportError(VIR_ERR_XML_ERROR,
+ _("unknown disk backup type '%s'"), type);
+ goto cleanup;
+ }
+ } else {
+ def->store->type =3D VIR_STORAGE_TYPE_FILE;
+ }
+
+ if ((cur =3D virXPathNode(push ? "./target" : "./scratch", ctxt)) &&
+ virDomainStorageSourceParse(cur, ctxt, def->store, 0, xmlopt) < 0)
+ goto cleanup;
+
+ if (internal) {
+ int detected;
+ if (virXPathInt("string(./node/@detected)", ctxt, &detected) < 0)
+ goto cleanup;
+ def->store->detected =3D detected;
+ def->store->nodeformat =3D virXPathString("string(./node)", ctxt);
+ }
+
+ if ((driver =3D virXPathString("string(./driver/@type)", ctxt))) {
+ def->store->format =3D virStorageFileFormatTypeFromString(driver);
+ if (def->store->format <=3D 0) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
+ _("unknown disk backup driver '%s'"), driver);
+ goto cleanup;
+ } else if (!push && def->store->format !=3D VIR_STORAGE_FILE_QCOW2=
) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
+ _("pull mode requires qcow2 driver, not '%s'"),
+ driver);
+ goto cleanup;
+ }
+ }
+
+ /* validate that the passed path is absolute */
+ if (virStorageSourceIsRelative(def->store)) {
+ virReportError(VIR_ERR_XML_ERROR,
+ _("disk backup image path '%s' must be absolute"),
+ def->store->path);
+ goto cleanup;
+ }
+
+ ret =3D 0;
+ cleanup:
+ ctxt->node =3D saved;
+
+ VIR_FREE(driver);
+// VIR_FREE(backup);
+ VIR_FREE(type);
+ if (ret < 0)
+ virDomainBackupDiskDefClear(def);
+ return ret;
+}
+
+static virDomainBackupDefPtr
+virDomainBackupDefParse(xmlXPathContextPtr ctxt,
+ virDomainXMLOptionPtr xmlopt,
+ unsigned int flags)
+{
+ virDomainBackupDefPtr def =3D NULL;
+ virDomainBackupDefPtr ret =3D NULL;
+ xmlNodePtr *nodes =3D NULL;
+ xmlNodePtr node =3D NULL;
+ char *mode =3D NULL;
+ bool push;
+ size_t i;
+ int n;
+
+ if (VIR_ALLOC(def) < 0)
+ goto cleanup;
+
+ mode =3D virXMLPropString(ctxt->node, "mode");
+ if (mode) {
+ def->type =3D virDomainBackupTypeFromString(mode);
+ if (def->type <=3D 0) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
+ _("unknown backup mode '%s'"), mode);
+ goto cleanup;
+ }
+ } else {
+ def->type =3D VIR_DOMAIN_BACKUP_TYPE_PUSH;
+ }
+ push =3D def->type =3D=3D VIR_DOMAIN_BACKUP_TYPE_PUSH;
+
+ if (flags & VIR_DOMAIN_BACKUP_PARSE_INTERNAL) {
+ char *tmp =3D virXMLPropString(ctxt->node, "id");
+ if (tmp && virStrToLong_i(tmp, NULL, 10, &def->id) < 0) {
+ virReportError(VIR_ERR_XML_ERROR,
+ _("invalid 'id' value '%s'"), tmp);
+ VIR_FREE(tmp);
+ goto cleanup;
+ }
+ VIR_FREE(tmp);
+ }
+
+ def->incremental =3D virXPathString("string(./incremental)", ctxt);
+
+ node =3D virXPathNode("./server", ctxt);
+ if (node) {
+ if (def->type !=3D VIR_DOMAIN_BACKUP_TYPE_PULL) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+ _("use of requires pull mode backup"));
+ goto cleanup;
+ }
+ if (VIR_ALLOC(def->server) < 0)
+ goto cleanup;
+ if (virDomainStorageNetworkParseHost(node, def->server) < 0)
+ goto cleanup;
+ if (def->server->transport =3D=3D VIR_STORAGE_NET_HOST_TRANS_RDMA)=
{
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+ _("transport rdma is not supported for =
"));
+ goto cleanup;
+ }
+ if (def->server->transport =3D=3D VIR_STORAGE_NET_HOST_TRANS_UNIX =
&&
+ def->server->socket[0] !=3D '/') {
+ virReportError(VIR_ERR_XML_ERROR,
+ _("backup socket path '%s' must be absolute"),
+ def->server->socket);
+ goto cleanup;
+ }
+ }
+
+ if ((n =3D virXPathNodeSet("./disks/*", ctxt, &nodes)) < 0)
+ goto cleanup;
+ if (n && VIR_ALLOC_N(def->disks, n) < 0)
+ goto cleanup;
+ def->ndisks =3D n;
+ for (i =3D 0; i < def->ndisks; i++) {
+ if (virDomainBackupDiskDefParseXML(nodes[i], ctxt,
+ &def->disks[i], push,
+ flags & VIR_DOMAIN_BACKUP_PARSE=
_INTERNAL,
+ xmlopt) < 0)
+ goto cleanup;
+ }
+ VIR_FREE(nodes);
+
+ VIR_STEAL_PTR(ret, def);
+
+ cleanup:
+ VIR_FREE(mode);
+ VIR_FREE(nodes);
+ virDomainBackupDefFree(def);
+
+ return ret;
+}
+
+virDomainBackupDefPtr
+virDomainBackupDefParseString(const char *xmlStr,
+ virDomainXMLOptionPtr xmlopt,
+ unsigned int flags)
+{
+ virDomainBackupDefPtr ret =3D NULL;
+ xmlDocPtr xml;
+ int keepBlanksDefault =3D xmlKeepBlanksDefault(0);
+
+ if ((xml =3D virXMLParse(NULL, xmlStr, _("(domain_backup)")))) {
+ xmlKeepBlanksDefault(keepBlanksDefault);
+ ret =3D virDomainBackupDefParseNode(xml, xmlDocGetRootElement(xml),
+ xmlopt, flags);
+ xmlFreeDoc(xml);
+ }
+ xmlKeepBlanksDefault(keepBlanksDefault);
+
+ return ret;
+}
+
+virDomainBackupDefPtr
+virDomainBackupDefParseNode(xmlDocPtr xml,
+ xmlNodePtr root,
+ virDomainXMLOptionPtr xmlopt,
+ unsigned int flags)
+{
+ xmlXPathContextPtr ctxt =3D NULL;
+ virDomainBackupDefPtr def =3D NULL;
+
+ if (!virXMLNodeNameEqual(root, "domainbackup")) {
+ virReportError(VIR_ERR_XML_ERROR, "%s", _("domainbackup"));
+ goto cleanup;
+ }
+
+ ctxt =3D xmlXPathNewContext(xml);
+ if (ctxt =3D=3D NULL) {
+ virReportOOMError();
+ goto cleanup;
+ }
+
+ ctxt->node =3D root;
+ def =3D virDomainBackupDefParse(ctxt, xmlopt, flags);
+ cleanup:
+ xmlXPathFreeContext(ctxt);
+ return def;
+}
+
+static int
+virDomainBackupDiskDefFormat(virBufferPtr buf,
+ virDomainBackupDiskDefPtr disk,
+ bool push, bool internal)
+{
+ int type =3D disk->store->type;
+ virBuffer attrBuf =3D VIR_BUFFER_INITIALIZER;
+ virBuffer childBuf =3D VIR_BUFFER_INITIALIZER;
+ int ret =3D -1;
+
+ if (!disk->name)
+ return 0;
+
+ virBufferEscapeString(buf, "name);
+ /* TODO: per-disk backup=3Doff? */
+
+ virBufferAsprintf(buf, " type=3D'%s'>\n", virStorageTypeToString(type)=
);
+ virBufferAdjustIndent(buf, 2);
+
+ if (disk->store->format > 0)
+ virBufferEscapeString(buf, "\n",
+ virStorageFileFormatTypeToString(disk->store=
->format));
+ /* TODO: should node names be part of storage file xml, rather
+ * than a one-off hack for qemu? */
+ if (internal) {
+ virBufferEscapeString(buf, "store->detected ? "1" : "0");
+ virBufferEscapeString(buf, ">%s\n", disk->store->nodeformat=
);
+ }
+
+ if (virDomainDiskSourceFormat(buf, disk->store, push ? "target" : "scr=
atch",
+ 0, false, 0, NULL) < 0)
+ goto cleanup;
+ virBufferAdjustIndent(buf, -2);
+ virBufferAddLit(buf, "\n");
+
+ ret =3D 0;
+
+ cleanup:
+ virBufferFreeAndReset(&attrBuf);
+ virBufferFreeAndReset(&childBuf);
+ return ret;
+}
+
+int
+virDomainBackupDefFormat(virBufferPtr buf, virDomainBackupDefPtr def,
+ bool internal)
+{
+ size_t i;
+
+ virBufferAsprintf(buf, "type));
+ if (def->id)
+ virBufferAsprintf(buf, " id=3D'%d'", def->id);
+ virBufferAddLit(buf, ">\n");
+ virBufferAdjustIndent(buf, 2);
+
+ virBufferEscapeString(buf, "%s\n",
+ def->incremental);
+ if (def->server) {
+ virBufferAsprintf(buf, "serv=
er->transport));
+ virBufferEscapeString(buf, " name=3D'%s'", def->server->name);
+ if (def->server->port)
+ virBufferAsprintf(buf, " port=3D'%u'", def->server->port);
+ virBufferEscapeString(buf, " socket=3D'%s'", def->server->socket);
+ virBufferAddLit(buf, "/>\n");
+ }
+
+ if (def->ndisks) {
+ virBufferAddLit(buf, "\n");
+ virBufferAdjustIndent(buf, 2);
+ for (i =3D 0; i < def->ndisks; i++) {
+ if (!def->disks[i].store)
+ continue;
+ if (virDomainBackupDiskDefFormat(buf, &def->disks[i],
+ def->type =3D=3D VIR_DOMAIN_B=
ACKUP_TYPE_PUSH,
+ internal) < 0)
+ return -1;
+ }
+ virBufferAdjustIndent(buf, -2);
+ virBufferAddLit(buf, "\n");
+ }
+
+ virBufferAdjustIndent(buf, -2);
+ virBufferAddLit(buf, "\n");
+
+ return virBufferCheckError(buf);
+}
+
+
+static int
+virDomainBackupCompareDiskIndex(const void *a, const void *b)
+{
+ const virDomainBackupDiskDef *diska =3D a;
+ const virDomainBackupDiskDef *diskb =3D b;
+
+ /* Integer overflow shouldn't be a problem here. */
+ return diska->idx - diskb->idx;
+}
+
+static int
+virDomainBackupDefAssignStore(virDomainBackupDiskDefPtr disk,
+ virStorageSourcePtr src,
+ const char *suffix)
+{
+ int ret =3D -1;
+
+ if (virStorageSourceIsEmpty(src)) {
+ if (disk->store) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
+ _("disk '%s' has no media"), disk->name);
+ goto cleanup;
+ }
+ } else if (src->readonly && disk->store) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
+ _("backup of readonly disk '%s' makes no sense"),
+ disk->name);
+ goto cleanup;
+ } else if (!disk->store) {
+ if (virStorageSourceGetActualType(src) =3D=3D VIR_STORAGE_TYPE_FIL=
E) {
+ if (VIR_ALLOC(disk->store) < 0)
+ goto cleanup;
+ disk->store->type =3D VIR_STORAGE_TYPE_FILE;
+ if (virAsprintf(&disk->store->path, "%s.%s", src->path,
+ suffix) < 0)
+ goto cleanup;
+ disk->store->detected =3D true;
+ } else {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
+ _("refusing to generate file name for disk '%s'=
"),
+ disk->name);
+ goto cleanup;
+ }
+ }
+ ret =3D 0;
+ cleanup:
+ return ret;
+}
+
+/* Align def->disks to domain. Sort the list of def->disks,
+ * generating storage names using suffix as needed. Convert paths to
+ * disk targets for uniformity. Issue an error and return -1 if any
+ * def->disks[n]->name appears more than once or does not map to
+ * dom->disks. */
+int
+virDomainBackupAlignDisks(virDomainBackupDefPtr def, virDomainDefPtr dom,
+ const char *suffix)
+{
+ int ret =3D -1;
+ virBitmapPtr map =3D NULL;
+ size_t i;
+ int ndisks;
+ bool alloc_all =3D false;
+
+ if (def->ndisks > dom->ndisks) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+ _("too many disk backup requests for domain"));
+ goto cleanup;
+ }
+
+ /* Unlikely to have a guest without disks but technically possible. */
+ if (!dom->ndisks) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+ _("domain must have at least one disk to perform "
+ "backups"));
+ goto cleanup;
+ }
+
+ if (!(map =3D virBitmapNew(dom->ndisks)))
+ goto cleanup;
+
+ /* Double check requested disks. */
+ for (i =3D 0; i < def->ndisks; i++) {
+ virDomainBackupDiskDefPtr disk =3D &def->disks[i];
+ int idx =3D virDomainDiskIndexByName(dom, disk->name, false);
+
+ if (idx < 0) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
+ _("no disk named '%s'"), disk->name);
+ goto cleanup;
+ }
+
+ if (virBitmapIsBitSet(map, idx)) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
+ _("disk '%s' specified twice"),
+ disk->name);
+ goto cleanup;
+ }
+ ignore_value(virBitmapSetBit(map, idx));
+ disk->idx =3D idx;
+
+ if (STRNEQ(disk->name, dom->disks[idx]->dst)) {
+ VIR_FREE(disk->name);
+ if (VIR_STRDUP(disk->name, dom->disks[idx]->dst) < 0)
+ goto cleanup;
+ }
+ if (disk->store && !disk->store->path) {
+ virStorageSourceClear(disk->store);
+ disk->store =3D NULL;
+ }
+ if (virDomainBackupDefAssignStore(disk, dom->disks[idx]->src,
+ suffix) < 0)
+ goto cleanup;
+ }
+
+ /* Provide fillers for all remaining disks, for easier iteration. */
+ if (!def->ndisks)
+ alloc_all =3D true;
+ ndisks =3D def->ndisks;
+ if (VIR_EXPAND_N(def->disks, def->ndisks,
+ dom->ndisks - def->ndisks) < 0)
+ goto cleanup;
+
+ for (i =3D 0; i < dom->ndisks; i++) {
+ virDomainBackupDiskDefPtr disk;
+
+ if (virBitmapIsBitSet(map, i))
+ continue;
+ disk =3D &def->disks[ndisks++];
+ if (VIR_STRDUP(disk->name, dom->disks[i]->dst) < 0)
+ goto cleanup;
+ disk->idx =3D i;
+ if (alloc_all &&
+ virDomainBackupDefAssignStore(disk, dom->disks[i]->src, suffix=
) < 0)
+ goto cleanup;
+ }
+
+ qsort(&def->disks[0], def->ndisks, sizeof(def->disks[0]),
+ virDomainBackupCompareDiskIndex);
+
+ ret =3D 0;
+
+ cleanup:
+ virBitmapFree(map);
+ return ret;
+}
diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms
index 28f654bbf8..de25aa05ed 100644
--- a/src/libvirt_private.syms
+++ b/src/libvirt_private.syms
@@ -70,6 +70,13 @@ virCapabilitiesSetNetPrefix;
# conf/checkpoint_conf.h
+virDomainBackupAlignDisks;
+virDomainBackupDefFormat;
+virDomainBackupDefFree;
+virDomainBackupDefParseNode;
+virDomainBackupDefParseString;
+virDomainBackupTypeFromString;
+virDomainBackupTypeToString;
virDomainCheckpointAlignDisks;
virDomainCheckpointDefFormat;
virDomainCheckpointDefNew;
@@ -79,7 +86,6 @@ virDomainCheckpointRedefinePrep;
virDomainCheckpointTypeFromString;
virDomainCheckpointTypeToString;
-
# conf/cpu_conf.h
virCPUCacheModeTypeFromString;
virCPUCacheModeTypeToString;
--=20
2.20.1
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list
From nobody Fri Mar 29 10:43:07 2024
Delivered-To: importer@patchew.org
Received-SPF: pass (zoho.com: domain of redhat.com designates 209.132.183.28
as permitted sender) client-ip=209.132.183.28;
envelope-from=libvir-list-bounces@redhat.com; helo=mx1.redhat.com;
Authentication-Results: mx.zohomail.com;
spf=pass (zoho.com: domain of redhat.com designates 209.132.183.28 as
permitted sender) smtp.mailfrom=libvir-list-bounces@redhat.com;
dmarc=pass(p=none dis=none) header.from=redhat.com
ARC-Seal: i=1; a=rsa-sha256; t=1562605057; cv=none;
d=zoho.com; s=zohoarc;
b=Lm6W9dH700wheqiP3HyTV+7+t1WlMk0rx19/XYmiHF/nG74cXrsSATeMHuOicRwiCXfAkHxswTK69xnqKc2MM5B0X16A4vDbQ1w0wVh1XLSa2kIH7WikAW/lwe8VXOUfmYKv5+VgdUUL7z/25h4q8pWZ54fxhEuCk715oVxfUKY=
ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=zoho.com;
s=zohoarc;
t=1562605057;
h=Content-Type:Content-Transfer-Encoding:Cc:Date:From:In-Reply-To:List-Subscribe:List-Post:List-Id:List-Archive:List-Help:List-Unsubscribe:MIME-Version:Message-ID:References:Sender:Subject:To:ARC-Authentication-Results;
bh=QseIpDQ4L01BNIDKDQtN5a8JC7BLChFX0DGE2cr6g9o=;
b=njgmz9wJDu3s9aygST2yY9ukJlb98B7L82To6SDNE03NUUniCgZw4lDvfkSUYWdtb060ziJHxr3VRjjqD54qCRYI/0DjF4JsiFJQ1Ohpe47QxL6iC33Kf1QUGERjOqed+RcJ1KNeJyNU0hgW29b4XmhMy8mXj5QTbySZE1EUKmo=
ARC-Authentication-Results: i=1; mx.zoho.com;
spf=pass (zoho.com: domain of redhat.com designates 209.132.183.28 as
permitted sender) smtp.mailfrom=libvir-list-bounces@redhat.com;
dmarc=pass header.from= (p=none dis=none)
header.from=
Return-Path:
Received: from mx1.redhat.com (mx1.redhat.com [209.132.183.28]) by
mx.zohomail.com
with SMTPS id 156260505773291.32114644047044;
Mon, 8 Jul 2019 09:57:37 -0700 (PDT)
Received: from smtp.corp.redhat.com (int-mx03.intmail.prod.int.phx2.redhat.com
[10.5.11.13])
(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))
(No client certificate requested)
by mx1.redhat.com (Postfix) with ESMTPS id 80CEA30B96FC;
Mon, 8 Jul 2019 16:57:30 +0000 (UTC)
Received: from colo-mx.corp.redhat.com
(colo-mx01.intmail.prod.int.phx2.redhat.com [10.5.11.20])
by smtp.corp.redhat.com (Postfix) with ESMTPS id 51FEE53CE7;
Mon, 8 Jul 2019 16:57:27 +0000 (UTC)
Received: from lists01.pubmisc.prod.ext.phx2.redhat.com
(lists01.pubmisc.prod.ext.phx2.redhat.com [10.5.19.33])
by colo-mx.corp.redhat.com (Postfix) with ESMTP id 1722E18184B7;
Mon, 8 Jul 2019 16:57:26 +0000 (UTC)
Received: from smtp.corp.redhat.com (int-mx07.intmail.prod.int.phx2.redhat.com
[10.5.11.22])
by lists01.pubmisc.prod.ext.phx2.redhat.com (8.13.8/8.13.8) with ESMTP
id x68GuU8C027940 for ;
Mon, 8 Jul 2019 12:56:30 -0400
Received: by smtp.corp.redhat.com (Postfix)
id 8184F1001B32; Mon, 8 Jul 2019 16:56:30 +0000 (UTC)
Received: from blue.redhat.com (ovpn-116-78.phx2.redhat.com [10.3.116.78])
by smtp.corp.redhat.com (Postfix) with ESMTP id 14E031001B19;
Mon, 8 Jul 2019 16:56:29 +0000 (UTC)
From: Eric Blake
To: libvir-list@redhat.com
Date: Mon, 8 Jul 2019 11:55:49 -0500
Message-Id: <20190708165553.18452-7-eblake@redhat.com>
In-Reply-To: <20190708165553.18452-1-eblake@redhat.com>
References: <20190708165553.18452-1-eblake@redhat.com>
MIME-Version: 1.0
X-Scanned-By: MIMEDefang 2.84 on 10.5.11.22
X-loop: libvir-list@redhat.com
Cc: nsoffer@redhat.com, eshenitz@redhat.com, pkrempa@redhat.com
Subject: [libvirt] [PATCH v9 06/10] backup: Implement virsh support for
backup
X-BeenThere: libvir-list@redhat.com
X-Mailman-Version: 2.1.12
Precedence: junk
List-Id: Development discussions about the libvirt library & tools
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Content-Transfer-Encoding: quoted-printable
Sender: libvir-list-bounces@redhat.com
Errors-To: libvir-list-bounces@redhat.com
X-Scanned-By: MIMEDefang 2.79 on 10.5.11.13
X-Greylist: Sender IP whitelisted,
not delayed by milter-greylist-4.5.16 (mx1.redhat.com [10.5.110.49]);
Mon, 08 Jul 2019 16:57:36 +0000 (UTC)
Content-Type: text/plain; charset="utf-8"
Introduce a few more new virsh commands for performing backup jobs.
At this time, I did not opt for a convenience command
'backup-begin-as' that cobbles together appropriate XML from the
user's command line arguments, but that may be a viable future
extension. Similarly, since backup is a potentially long-running
operation, it might be nice to add some sugar that automatically
handles waiting for the job to end, rather than making the user have
to poll or figure out virsh event to do the same. Eventually, we
will also need a way to create a checkpoint atomically with an
external snapshot.
Signed-off-by: Eric Blake
---
tools/virsh-domain.c | 245 +++++++++++++++++++++++++++++++++++++++++++
tools/virsh.pod | 49 +++++++++
2 files changed, 294 insertions(+)
diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c
index abc9001508..83c89400eb 100644
--- a/tools/virsh-domain.c
+++ b/tools/virsh-domain.c
@@ -14041,6 +14041,233 @@ cmdDomFSInfo(vshControl *ctl, const vshCmd *cmd)
return ret;
}
+
+/*
+ * "backup-begin" command
+ */
+static const vshCmdInfo info_backup_begin[] =3D {
+ {.name =3D "help",
+ .data =3D N_("Start a disk backup of a live domain")
+ },
+ {.name =3D "desc",
+ .data =3D N_("Use XML to start a full or incremental disk backup of a=
live "
+ "domain, optionally creating a checkpoint")
+ },
+ {.name =3D NULL}
+};
+
+static const vshCmdOptDef opts_backup_begin[] =3D {
+ VIRSH_COMMON_OPT_DOMAIN_FULL(0),
+ {.name =3D "xmlfile",
+ .type =3D VSH_OT_STRING,
+ .help =3D N_("domain backup XML"),
+ },
+ {.name =3D "checkpointxml",
+ .type =3D VSH_OT_STRING,
+ .help =3D N_("domain checkpoint XML"),
+ },
+ {.name =3D "no-metadata",
+ .type =3D VSH_OT_BOOL,
+ .help =3D N_("create checkpoint but don't track metadata"),
+ },
+ {.name =3D "quiesce",
+ .type =3D VSH_OT_BOOL,
+ .help =3D N_("quiesce guest's file systems"),
+ },
+ /* TODO: --wait/--verbose/--timeout flags for push model backups? */
+ {.name =3D NULL}
+};
+
+static bool
+cmdBackupBegin(vshControl *ctl,
+ const vshCmd *cmd)
+{
+ virDomainPtr dom =3D NULL;
+ bool ret =3D false;
+ const char *backup_from =3D NULL;
+ char *backup_buffer =3D NULL;
+ const char *check_from =3D NULL;
+ char *check_buffer =3D NULL;
+ unsigned int flags =3D 0;
+ int id;
+
+ if (vshCommandOptBool(cmd, "no-metadata"))
+ flags |=3D VIR_DOMAIN_BACKUP_BEGIN_NO_METADATA;
+ if (vshCommandOptBool(cmd, "quiesce"))
+ flags |=3D VIR_DOMAIN_BACKUP_BEGIN_QUIESCE;
+
+ if (!(dom =3D virshCommandOptDomain(ctl, cmd, NULL)))
+ goto cleanup;
+
+ if (vshCommandOptStringReq(ctl, cmd, "xmlfile", &backup_from) < 0)
+ goto cleanup;
+ if (!backup_from) {
+ backup_buffer =3D vshStrdup(ctl, "");
+ } else {
+ if (virFileReadAll(backup_from, VSH_MAX_XML_FILE, &backup_buffer) =
< 0) {
+ vshSaveLibvirtError();
+ goto cleanup;
+ }
+ }
+
+ if (vshCommandOptStringReq(ctl, cmd, "checkpointxml", &check_from) < 0)
+ goto cleanup;
+ if (check_from) {
+ if (virFileReadAll(check_from, VSH_MAX_XML_FILE, &check_buffer) < =
0) {
+ vshSaveLibvirtError();
+ goto cleanup;
+ }
+ }
+
+ id =3D virDomainBackupBegin(dom, backup_buffer, check_buffer, flags);
+
+ if (id < 0)
+ goto cleanup;
+
+ vshPrint(ctl, _("Backup id %d started\n"), id);
+ if (backup_from)
+ vshPrintExtra(ctl, _("backup used description from '%s'\n"),
+ backup_from);
+ if (check_buffer)
+ vshPrintExtra(ctl, _("checkpoint created from '%s'\n"), check_from=
);
+
+ ret =3D true;
+
+ cleanup:
+ VIR_FREE(backup_buffer);
+ VIR_FREE(check_buffer);
+ virshDomainFree(dom);
+
+ return ret;
+}
+
+/* TODO: backup-begin-as? */
+
+/*
+ * "backup-dumpxml" command
+ */
+static const vshCmdInfo info_backup_dumpxml[] =3D {
+ {.name =3D "help",
+ .data =3D N_("Dump XML for an ongoing domain block backup job")
+ },
+ {.name =3D "desc",
+ .data =3D N_("Backup Dump XML")
+ },
+ {.name =3D NULL}
+};
+
+static const vshCmdOptDef opts_backup_dumpxml[] =3D {
+ VIRSH_COMMON_OPT_DOMAIN_FULL(0),
+ {.name =3D "id",
+ .type =3D VSH_OT_INT,
+ .help =3D N_("backup job id"),
+ /* TODO: Add API for listing active jobs, then adding a completer? */
+ },
+ /* TODO - worth adding this flag?
+ {.name =3D "checkpoint",
+ .type =3D VSH_OT_BOOL,
+ .help =3D N_("if the backup created a checkpoint, also dump that XML")
+ },
+ */
+ {.name =3D NULL}
+};
+
+static bool
+cmdBackupDumpXML(vshControl *ctl,
+ const vshCmd *cmd)
+{
+ virDomainPtr dom =3D NULL;
+ bool ret =3D false;
+ char *xml =3D NULL;
+ unsigned int flags =3D 0;
+ int id =3D 0;
+
+ if (vshCommandOptBool(cmd, "security-info"))
+ flags |=3D VIR_DOMAIN_XML_SECURE;
+
+ if (vshCommandOptInt(ctl, cmd, "id", &id) < 0)
+ return false;
+
+ if (!(dom =3D virshCommandOptDomain(ctl, cmd, NULL)))
+ return false;
+
+ if (!(xml =3D virDomainBackupGetXMLDesc(dom, id, flags)))
+ goto cleanup;
+
+ vshPrint(ctl, "%s", xml);
+ ret =3D true;
+
+ cleanup:
+ VIR_FREE(xml);
+ virshDomainFree(dom);
+
+ return ret;
+}
+
+
+/*
+ * "backup-end" command
+ */
+static const vshCmdInfo info_backup_end[] =3D {
+ {.name =3D "help",
+ .data =3D N_("Conclude a disk backup of a live domain")
+ },
+ {.name =3D "desc",
+ .data =3D N_("End a domain block backup job")
+ },
+ {.name =3D NULL}
+};
+
+static const vshCmdOptDef opts_backup_end[] =3D {
+ VIRSH_COMMON_OPT_DOMAIN_FULL(0),
+ {.name =3D "id",
+ .type =3D VSH_OT_INT,
+ .help =3D N_("backup job id"),
+ /* TODO: Add API for listing active jobs, then adding a completer? */
+ },
+ {.name =3D "abort",
+ .type =3D VSH_OT_BOOL,
+ .help =3D N_("abandon a push model backup that has not yet completed")
+ },
+ {.name =3D NULL}
+};
+
+static bool
+cmdBackupEnd(vshControl *ctl, const vshCmd *cmd)
+{
+ virDomainPtr dom =3D NULL;
+ bool ret =3D false;
+ unsigned int flags =3D 0;
+ int id =3D 0;
+ int rc;
+
+ if (vshCommandOptBool(cmd, "abort"))
+ flags |=3D VIR_DOMAIN_BACKUP_END_ABORT;
+
+ if (vshCommandOptInt(ctl, cmd, "id", &id) < 0)
+ return false;
+
+ if (!(dom =3D virshCommandOptDomain(ctl, cmd, NULL)))
+ goto cleanup;
+
+ rc =3D virDomainBackupEnd(dom, id, flags);
+
+ if (rc < 0)
+ goto cleanup;
+ if (rc =3D=3D 0)
+ vshPrint(ctl, _("Backup id %d aborted"), id);
+ else
+ vshPrint(ctl, _("Backup id %d completed"), id);
+
+ ret =3D true;
+
+ cleanup:
+ virshDomainFree(dom);
+
+ return ret;
+}
+
+
const vshCmdDef domManagementCmds[] =3D {
{.name =3D "attach-device",
.handler =3D cmdAttachDevice,
@@ -14066,6 +14293,24 @@ const vshCmdDef domManagementCmds[] =3D {
.info =3D info_autostart,
.flags =3D 0
},
+ {.name =3D "backup-begin",
+ .handler =3D cmdBackupBegin,
+ .opts =3D opts_backup_begin,
+ .info =3D info_backup_begin,
+ .flags =3D 0
+ },
+ {.name =3D "backup-dumpxml",
+ .handler =3D cmdBackupDumpXML,
+ .opts =3D opts_backup_dumpxml,
+ .info =3D info_backup_dumpxml,
+ .flags =3D 0
+ },
+ {.name =3D "backup-end",
+ .handler =3D cmdBackupEnd,
+ .opts =3D opts_backup_end,
+ .info =3D info_backup_end,
+ .flags =3D 0
+ },
{.name =3D "blkdeviotune",
.handler =3D cmdBlkdeviotune,
.opts =3D opts_blkdeviotune,
diff --git a/tools/virsh.pod b/tools/virsh.pod
index 8d69d349e9..7ca8869412 100644
--- a/tools/virsh.pod
+++ b/tools/virsh.pod
@@ -1352,6 +1352,55 @@ I specifies copying bandwidth limit in Mi=
B/s. For further information
on the I argument see the corresponding section for the B
command.
=3Ditem B<backup-begin> I<domain> [I<xmlfile>]
[I<checkpointxml> [I<--no-metadata>]] [I<--quiesce>]
+
+Begin a new backup job, and output the resulting job id on success. If
+I<xmlfile> is omitted, this defaults to a full backup using a push
+model to filenames generated by libvirt; supplying XML allows
+fine-tuning such as requesting an incremental backup relative to an
+earlier checkpoint, controlling which disks participate or which
+filenames are involved, or requesting the use of a pull model backup.
+The B command shows any resulting values assigned by
+libvirt. For more information on backup XML, see:
+L.
+
+If I is specified, a second file with a top-level
+element of is used to create a simultaneous
+checkpoint, for doing a later incremental backup relative to the time
+the snapshot was created. If I<--no-metadata> is specified, then the
+checkpoint is created, but any metadata is immediately discarded. See
+B for more details on checkpoints.
+
+If I<--quiesce> is specified, libvirt will try to use guest agent
+to freeze and unfreeze domain's mounted file systems. However,
+if domain has no guest agent, the backup job will fail.
+
+This command returns as soon as possible, and the backup job runs in
+the background; the progress of a push model backup can be checked
+with B or by waiting for an event with B (the
+progress of a pull model backup is under the control of whatever third
+party connects to the NBD export). The job is ended with B.
+
=3Ditem B<backup-dumpxml> I<domain> [I<id>]
+
+Output XML describing the backup job I. The default for I is
+0, which works as long as there are no parallel jobs; it is also
+possible to use the positive id printed by B on success.
+
=3Ditem B<backup-end> I<domain> [I<id>] [I<--abort>]
+
+End the current backup job I<id>. The default for I<id> is 0, which
+works as long as there are no parallel jobs; it is also possible to
+use the positive id printed by B on success.
+
+If the backup job is a push job, but the hypervisor is not yet
+complete, this command will fail unless I<--abort> is given; if
+aborted, the backup file is incomplete. If the backup job is a pull
+job, I<--abort> has no effect, because libvirt assumes the third-party
+client is done performing the backup.
+
+
=3Ditem B I I
[[I<--config>] [I<--live>] | [I<--current>]]
[[I] | [I] [I]]
--=20
2.20.1
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list
From nobody Fri Mar 29 10:43:07 2024
Delivered-To: importer@patchew.org
Received-SPF: pass (zoho.com: domain of redhat.com designates 209.132.183.28
as permitted sender) client-ip=209.132.183.28;
envelope-from=libvir-list-bounces@redhat.com; helo=mx1.redhat.com;
Authentication-Results: mx.zohomail.com;
spf=pass (zoho.com: domain of redhat.com designates 209.132.183.28 as
permitted sender) smtp.mailfrom=libvir-list-bounces@redhat.com;
dmarc=pass(p=none dis=none) header.from=redhat.com
ARC-Seal: i=1; a=rsa-sha256; t=1562605072; cv=none;
d=zoho.com; s=zohoarc;
b=ldjyS+6KyFfa18+O2GnPKzb0TR4UWSFuOyogoJ80JFNyX4irHtW75lDcU9USYWorLul459m/j0pf3b870midhnIAT1/G6iXQT2g5KPtQWWv2x79+DnF9ssOoiltXeR9WkDLn2LHFJxvz8mb5JIsSbNiE+N70hX8UVQPoDEmJIT0=
ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=zoho.com;
s=zohoarc;
t=1562605072;
h=Content-Type:Content-Transfer-Encoding:Cc:Date:From:In-Reply-To:List-Subscribe:List-Post:List-Id:List-Archive:List-Help:List-Unsubscribe:MIME-Version:Message-ID:References:Sender:Subject:To:ARC-Authentication-Results;
bh=1qfrxVsktJGkCxfbjR2CTSgYF5NdT9G2A8cVe7cIpMg=;
b=drObaSdJC8oebhXdhNmBwzk01i4yr9icW1V6MhpI9oLsY6AOWcak5MoxqcvQqHcL3PiBRyazdmvpCNGuzuuRfG22YwzkKu6K/o8Su87hFWa4F5zXZX/qN0iPsI4+D8CLJnVxAJZdSIUJH4oDNjgLXyTkLZ/seQ4hhb6CiK2ux8o=
ARC-Authentication-Results: i=1; mx.zoho.com;
spf=pass (zoho.com: domain of redhat.com designates 209.132.183.28 as
permitted sender) smtp.mailfrom=libvir-list-bounces@redhat.com;
dmarc=pass header.from= (p=none dis=none)
header.from=
Return-Path:
Received: from mx1.redhat.com (mx1.redhat.com [209.132.183.28]) by
mx.zohomail.com
with SMTPS id 1562605072776632.3282242993125;
Mon, 8 Jul 2019 09:57:52 -0700 (PDT)
Received: from smtp.corp.redhat.com (int-mx08.intmail.prod.int.phx2.redhat.com
[10.5.11.23])
(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))
(No client certificate requested)
by mx1.redhat.com (Postfix) with ESMTPS id 105D0C05B1CD;
Mon, 8 Jul 2019 16:57:39 +0000 (UTC)
Received: from colo-mx.corp.redhat.com
(colo-mx01.intmail.prod.int.phx2.redhat.com [10.5.11.20])
by smtp.corp.redhat.com (Postfix) with ESMTPS id 9C399196F0;
Mon, 8 Jul 2019 16:57:38 +0000 (UTC)
Received: from lists01.pubmisc.prod.ext.phx2.redhat.com
(lists01.pubmisc.prod.ext.phx2.redhat.com [10.5.19.33])
by colo-mx.corp.redhat.com (Postfix) with ESMTP id 633EC1833004;
Mon, 8 Jul 2019 16:57:36 +0000 (UTC)
Received: from smtp.corp.redhat.com (int-mx07.intmail.prod.int.phx2.redhat.com
[10.5.11.22])
by lists01.pubmisc.prod.ext.phx2.redhat.com (8.13.8/8.13.8) with ESMTP
id x68GuV1X027951 for ;
Mon, 8 Jul 2019 12:56:31 -0400
Received: by smtp.corp.redhat.com (Postfix)
id 1F9B81001B29; Mon, 8 Jul 2019 16:56:31 +0000 (UTC)
Received: from blue.redhat.com (ovpn-116-78.phx2.redhat.com [10.3.116.78])
by smtp.corp.redhat.com (Postfix) with ESMTP id A59B01001B19;
Mon, 8 Jul 2019 16:56:30 +0000 (UTC)
From: Eric Blake
To: libvir-list@redhat.com
Date: Mon, 8 Jul 2019 11:55:50 -0500
Message-Id: <20190708165553.18452-8-eblake@redhat.com>
In-Reply-To: <20190708165553.18452-1-eblake@redhat.com>
References: <20190708165553.18452-1-eblake@redhat.com>
MIME-Version: 1.0
X-Scanned-By: MIMEDefang 2.84 on 10.5.11.22
X-loop: libvir-list@redhat.com
Cc: nsoffer@redhat.com, eshenitz@redhat.com, pkrempa@redhat.com
Subject: [libvirt] [PATCH v9 07/10] backup: qemu: Implement framework for
backup job APIs
X-BeenThere: libvir-list@redhat.com
X-Mailman-Version: 2.1.12
Precedence: junk
List-Id: Development discussions about the libvirt library & tools
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Content-Transfer-Encoding: quoted-printable
Sender: libvir-list-bounces@redhat.com
Errors-To: libvir-list-bounces@redhat.com
X-Scanned-By: MIMEDefang 2.84 on 10.5.11.23
X-Greylist: Sender IP whitelisted,
not delayed by milter-greylist-4.5.16 (mx1.redhat.com [10.5.110.32]);
Mon, 08 Jul 2019 16:57:51 +0000 (UTC)
Content-Type: text/plain; charset="utf-8"
Still needs to actually kick off the right QMP commands, but at
least allows validation of backup XML, including the fact that
a backup job can survive a libvirtd restart. Atomically creating
a checkpoint alongside the backup still needs implementing.
Signed-off-by: Eric Blake
---
src/qemu/qemu_domain.h | 4 +
src/qemu/qemu_domain.c | 29 ++++++-
src/qemu/qemu_driver.c | 185 +++++++++++++++++++++++++++++++++++++++++
3 files changed, 214 insertions(+), 4 deletions(-)
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index b1ec4af82f..8f8a08f6cb 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -386,6 +386,10 @@ struct _qemuDomainObjPrivate {
/* true if global -mem-prealloc appears on cmd line */
bool memPrealloc;
+
+ /* Any currently running backup job.
+ * FIXME: allow jobs in parallel. For now, at most one job, always id =
1. */
+ virDomainBackupDefPtr backup;
};
#define QEMU_DOMAIN_PRIVATE(vm) \
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 2fca7bd0b8..64c15d8ae6 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -56,6 +56,7 @@
#include "locking/domain_lock.h"
#include "virdomainsnapshotobjlist.h"
#include "virdomaincheckpointobjlist.h"
+#include "backup_conf.h"
#ifdef MAJOR_IN_MKDEV
# include
@@ -2310,13 +2311,25 @@ static int
qemuDomainObjPrivateXMLFormatBlockjobs(virBufferPtr buf,
virDomainObjPtr vm)
{
+ qemuDomainObjPrivatePtr priv =3D vm->privateData;
virBuffer attrBuf =3D VIR_BUFFER_INITIALIZER;
bool bj =3D qemuDomainHasBlockjob(vm, false);
+ int ret =3D -1;
virBufferAsprintf(&attrBuf, " active=3D'%s'",
virTristateBoolTypeToString(virTristateBoolFromBool(=
bj)));
- return virXMLFormatElement(buf, "blockjobs", &attrBuf, NULL);
+ if (virXMLFormatElement(buf, "blockjobs", &attrBuf, NULL) < 0)
+ goto cleanup;
+
+ /* TODO: merge other blockjobs and backups into uniform space? */
+ if (priv->backup && virDomainBackupDefFormat(buf, priv->backup, true) =
< 0)
+ goto cleanup;
+
+ ret =3D 0;
+ cleanup:
+ virBufferFreeAndReset(&attrBuf);
+ return ret;
}
@@ -2657,16 +2670,24 @@ qemuDomainObjPrivateXMLParseAutomaticPlacement(xmlX=
PathContextPtr ctxt,
static int
-qemuDomainObjPrivateXMLParseBlockjobs(qemuDomainObjPrivatePtr priv,
+qemuDomainObjPrivateXMLParseBlockjobs(virQEMUDriverPtr driver,
+ qemuDomainObjPrivatePtr priv,
xmlXPathContextPtr ctxt)
{
- VIR_AUTOFREE(char *) active =3D NULL;
+ xmlNodePtr node;
int tmp;
+ VIR_AUTOFREE(char *) active =3D NULL;
if ((active =3D virXPathString("string(./blockjobs/@active)", ctxt)) &&
(tmp =3D virTristateBoolTypeFromString(active)) > 0)
priv->reconnectBlockjobs =3D tmp;
+ if ((node =3D virXPathNode("./domainbackup", ctxt)) &&
+ !(priv->backup =3D virDomainBackupDefParseNode(ctxt->doc, node,
+ driver->xmlopt,
+ VIR_DOMAIN_BACKUP_PAR=
SE_INTERNAL)))
+ return -1;
+
return 0;
}
@@ -3024,7 +3045,7 @@ qemuDomainObjPrivateXMLParse(xmlXPathContextPtr ctxt,
qemuDomainObjPrivateXMLParsePR(ctxt, &priv->prDaemonRunning);
- if (qemuDomainObjPrivateXMLParseBlockjobs(priv, ctxt) < 0)
+ if (qemuDomainObjPrivateXMLParseBlockjobs(driver, priv, ctxt) < 0)
goto error;
qemuDomainStorageIdReset(priv);
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index f9605027be..ec5f37fe94 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -104,6 +104,7 @@
#include "virdomainsnapshotobjlist.h"
#include "virenum.h"
#include "virdomaincheckpointobjlist.h"
+#include "backup_conf.h"
#define VIR_FROM_THIS VIR_FROM_QEMU
@@ -17554,6 +17555,187 @@ qemuDomainCheckpointDelete(virDomainCheckpointPtr=
checkpoint,
return ret;
}
+static int qemuDomainBackupBegin(virDomainPtr domain, const char *diskXml,
+ const char *checkpointXml, unsigned int f=
lags)
+{
+ virQEMUDriverPtr driver =3D domain->conn->privateData;
+ virDomainObjPtr vm =3D NULL;
+ virDomainBackupDefPtr def =3D NULL;
+ virQEMUDriverConfigPtr cfg =3D NULL;
+ virCapsPtr caps =3D NULL;
+ qemuDomainObjPrivatePtr priv;
+ int ret =3D -1;
+ struct timeval tv;
+ char *suffix =3D NULL;
+
+ virCheckFlags(VIR_DOMAIN_BACKUP_BEGIN_NO_METADATA, -1);
+ /* TODO: VIR_DOMAIN_BACKUP_BEGIN_QUIESCE */
+
+ // FIXME: Support non-null checkpointXML for incremental - what
+ // code can be shared with CheckpointCreateXML, then add to transaction
+ // to create new checkpoint at same time as starting blockdev-backup
+ if (checkpointXml) {
+ virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
+ _("cannot create incremental backups yet"));
+ return -1;
+ }
+ // if (chk) VIR_STRDUP(suffix, chk->name);
+ gettimeofday(&tv, NULL);
+ if (virAsprintf(&suffix, "%lld", (long long)tv.tv_sec) < 0)
+ goto cleanup;
+
+ if (!(vm =3D qemuDomObjFromDomain(domain)))
+ goto cleanup;
+
+ cfg =3D virQEMUDriverGetConfig(driver);
+
+ if (virDomainBackupBeginEnsureACL(domain->conn, vm->def, flags) < 0)
+ goto cleanup;
+
+ if (!(caps =3D virQEMUDriverGetCapabilities(driver, false)))
+ goto cleanup;
+
+ if (qemuProcessAutoDestroyActive(driver, vm)) {
+ virReportError(VIR_ERR_OPERATION_INVALID,
+ "%s", _("domain is marked for auto destroy"));
+ goto cleanup;
+ }
+
+ if (!virDomainObjIsActive(vm)) {
+ virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
+ _("cannot perform disk backup for inactive domain")=
);
+ goto cleanup;
+ }
+ if (!(def =3D virDomainBackupDefParseString(diskXml, driver->xmlopt, 0=
)))
+ goto cleanup;
+
+ /* We are going to modify the domain below. */
+ if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ goto cleanup;
+
+ priv =3D vm->privateData;
+ if (priv->backup) {
+ virReportError(VIR_ERR_OPERATION_INVALID,
+ "%s", _("another backup job is already running"));
+ goto endjob;
+ }
+
+ if (virDomainBackupAlignDisks(def, vm->def, suffix) < 0)
+ goto endjob;
+
+ /* actually start the checkpoint. 2x2 array of push/pull, full/incr,
+ plus additional tweak if checkpoint requested */
+ /* TODO: issue QMP commands:
+ - pull: nbd-server-start with from user (or autogenerate s=
erver)
+ - push/pull: blockdev-add per
+ - incr: bitmap-add of tmp, bitmap-merge per
+ - transaction, containing:
+ - push+full: blockdev-backup sync:full
+ - push+incr: blockdev-backup sync:incremental bitmap:tmp
+ - pull+full: blockdev-backup sync:none
+ - pull+incr: blockdev-backup sync:none, bitmap-disable of tmp
+ - if checkpoint: bitmap-disable of old, bitmap-add of new
+ - pull: nbd-server-add per , including bitmap for incr
+ */
+
+ VIR_STEAL_PTR(priv->backup, def);
+ ret =3D priv->backup->id =3D 1; /* Hard-coded job id for now */
+ if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm,
+ driver->caps) < 0)
+ VIR_WARN("Unable to save status on vm %s after backup job",
+ vm->def->name);
+
+ endjob:
+ qemuDomainObjEndJob(driver, vm);
+
+ cleanup:
+ VIR_FREE(suffix);
+ virDomainObjEndAPI(&vm);
+ virDomainBackupDefFree(def);
+ virObjectUnref(caps);
+ virObjectUnref(cfg);
+ return ret;
+}
+
+static char *qemuDomainBackupGetXMLDesc(virDomainPtr domain, int id,
+ unsigned int flags)
+{
+ virDomainObjPtr vm =3D NULL;
+ char *xml =3D NULL;
+ qemuDomainObjPrivatePtr priv;
+ virBuffer buf =3D VIR_BUFFER_INITIALIZER;
+
+ virCheckFlags(0, NULL);
+
+ if (!(vm =3D qemuDomObjFromDomain(domain)))
+ return NULL;
+
+ if (virDomainBackupGetXMLDescEnsureACL(domain->conn, vm->def) < 0)
+ goto cleanup;
+
+ /* TODO: Allow more than one hard-coded job id */
+ priv =3D vm->privateData;
+ if ((id !=3D 0 && id !=3D 1) || !priv->backup) {
+ virReportError(VIR_ERR_NO_DOMAIN_CHECKPOINT,
+ _("no domain backup job with id '%d'"), id);
+ goto cleanup;
+ }
+
+ if (virDomainBackupDefFormat(&buf, priv->backup, false) < 0)
+ goto cleanup;
+ xml =3D virBufferContentAndReset(&buf);
+
+ cleanup:
+ virDomainObjEndAPI(&vm);
+ return xml;
+}
+
+static int qemuDomainBackupEnd(virDomainPtr domain, int id, unsigned int f=
lags)
+{
+ virQEMUDriverPtr driver =3D domain->conn->privateData;
+ virQEMUDriverConfigPtr cfg =3D NULL;
+ virDomainObjPtr vm =3D NULL;
+ int ret =3D -1;
+ virDomainBackupDefPtr backup =3D NULL;
+ qemuDomainObjPrivatePtr priv;
+ bool want_abort =3D flags & VIR_DOMAIN_BACKUP_END_ABORT;
+
+ virCheckFlags(VIR_DOMAIN_BACKUP_END_ABORT, -1);
+
+ if (!(vm =3D qemuDomObjFromDomain(domain)))
+ return -1;
+
+ cfg =3D virQEMUDriverGetConfig(driver);
+ if (virDomainBackupEndEnsureACL(domain->conn, vm->def) < 0)
+ goto cleanup;
+
+ /* TODO: Allow more than one hard-coded job id */
+ priv =3D vm->privateData;
+ if ((id !=3D 0 && id !=3D 1) || !priv->backup) {
+ virReportError(VIR_ERR_NO_DOMAIN_CHECKPOINT,
+ _("no domain backup job with id '%d'"), id);
+ goto cleanup;
+ }
+
+ if (priv->backup->type !=3D VIR_DOMAIN_BACKUP_TYPE_PUSH)
+ want_abort =3D false;
+
+ /* TODO: QMP commands to actually cancel the pending job, and on
+ * pull, also tear down the NBD server */
+ VIR_STEAL_PTR(backup, priv->backup);
+ if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm,
+ driver->caps) < 0)
+ VIR_WARN("Unable to save status on vm %s after backup job",
+ vm->def->name);
+
+ ret =3D want_abort ? 0 : 1;
+
+ cleanup:
+ virDomainBackupDefFree(backup);
+ virDomainObjEndAPI(&vm);
+ return ret;
+}
+
static int qemuDomainQemuMonitorCommand(virDomainPtr domain, const char *c=
md,
char **result, unsigned int flags)
{
@@ -23256,6 +23438,9 @@ static virHypervisorDriver qemuHypervisorDriver =3D=
{
.domainCheckpointGetParent =3D qemuDomainCheckpointGetParent, /* 5.6.0=
*/
.domainCheckpointDelete =3D qemuDomainCheckpointDelete, /* 5.6.0 */
.domainCheckpointIsCurrent =3D qemuDomainCheckpointIsCurrent, /* 5.6.0=
*/
+ .domainBackupBegin =3D qemuDomainBackupBegin, /* 5.6.0 */
+ .domainBackupGetXMLDesc =3D qemuDomainBackupGetXMLDesc, /* 5.6.0 */
+ .domainBackupEnd =3D qemuDomainBackupEnd, /* 5.6.0 */
};
--=20
2.20.1
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list
From nobody Fri Mar 29 10:43:07 2024
Delivered-To: importer@patchew.org
Received-SPF: pass (zoho.com: domain of redhat.com designates 209.132.183.28
as permitted sender) client-ip=209.132.183.28;
envelope-from=libvir-list-bounces@redhat.com; helo=mx1.redhat.com;
Authentication-Results: mx.zohomail.com;
spf=pass (zoho.com: domain of redhat.com designates 209.132.183.28 as
permitted sender) smtp.mailfrom=libvir-list-bounces@redhat.com;
dmarc=pass(p=none dis=none) header.from=redhat.com
ARC-Seal: i=1; a=rsa-sha256; t=1562605034; cv=none;
d=zoho.com; s=zohoarc;
b=b2HkPgD67bDoTStq5D3FsS9LyfWDKNvOCGps18LMSrhERGLKkqqKnVaeZZcWV7JSArILzYrMY4XHnYsq9IJd8+g0nIYRGdEMndorNJUAhtdJR8ezqmUuApYY02N+uXxk8WsRp9ULphQoPybmP7J1lnPkC+5CLk5urr6stoxumGo=
ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=zoho.com;
s=zohoarc;
t=1562605034;
h=Content-Type:Content-Transfer-Encoding:Cc:Date:From:In-Reply-To:List-Subscribe:List-Post:List-Id:List-Archive:List-Help:List-Unsubscribe:MIME-Version:Message-ID:References:Sender:Subject:To:ARC-Authentication-Results;
bh=LIG1Cnh4cjxCKSCeLpdW2ixlt5ErH24xeh4TSGTeaoo=;
b=HSzgZVL+nx0NA8gioSjrJEmsxaIBs8P3T1lz5MlH/cIGz1e1/q8fxOmBXgti6plyW3dWX/cH4IBMprg9KyRFjOd9FmK8rJ1ozhW0rM8+t382zYbpuSo7BYQvuRMVjp9W4srG3HlQ0KX8GtCNWhHMnq830WTLDvgCEpL75HfOOHQ=
ARC-Authentication-Results: i=1; mx.zoho.com;
spf=pass (zoho.com: domain of redhat.com designates 209.132.183.28 as
permitted sender) smtp.mailfrom=libvir-list-bounces@redhat.com;
dmarc=pass header.from= (p=none dis=none)
header.from=
Return-Path:
Received: from mx1.redhat.com (mx1.redhat.com [209.132.183.28]) by
mx.zohomail.com
with SMTPS id 1562605034689320.7404666704815;
Mon, 8 Jul 2019 09:57:14 -0700 (PDT)
Received: from smtp.corp.redhat.com (int-mx03.intmail.prod.int.phx2.redhat.com
[10.5.11.13])
(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))
(No client certificate requested)
by mx1.redhat.com (Postfix) with ESMTPS id CD3E13002738;
Mon, 8 Jul 2019 16:57:01 +0000 (UTC)
Received: from colo-mx.corp.redhat.com
(colo-mx02.intmail.prod.int.phx2.redhat.com [10.5.11.21])
by smtp.corp.redhat.com (Postfix) with ESMTPS id 9BFE153CE7;
Mon, 8 Jul 2019 16:56:59 +0000 (UTC)
Received: from lists01.pubmisc.prod.ext.phx2.redhat.com
(lists01.pubmisc.prod.ext.phx2.redhat.com [10.5.19.33])
by colo-mx.corp.redhat.com (Postfix) with ESMTP id 61BDB3FAEC;
Mon, 8 Jul 2019 16:56:56 +0000 (UTC)
Received: from smtp.corp.redhat.com (int-mx07.intmail.prod.int.phx2.redhat.com
[10.5.11.22])
by lists01.pubmisc.prod.ext.phx2.redhat.com (8.13.8/8.13.8) with ESMTP
id x68GuVom027956 for ;
Mon, 8 Jul 2019 12:56:31 -0400
Received: by smtp.corp.redhat.com (Postfix)
id B61D9100194A; Mon, 8 Jul 2019 16:56:31 +0000 (UTC)
Received: from blue.redhat.com (ovpn-116-78.phx2.redhat.com [10.3.116.78])
by smtp.corp.redhat.com (Postfix) with ESMTP id 438401001B18;
Mon, 8 Jul 2019 16:56:31 +0000 (UTC)
From: Eric Blake
To: libvir-list@redhat.com
Date: Mon, 8 Jul 2019 11:55:51 -0500
Message-Id: <20190708165553.18452-9-eblake@redhat.com>
In-Reply-To: <20190708165553.18452-1-eblake@redhat.com>
References: <20190708165553.18452-1-eblake@redhat.com>
MIME-Version: 1.0
X-Scanned-By: MIMEDefang 2.84 on 10.5.11.22
X-loop: libvir-list@redhat.com
Cc: nsoffer@redhat.com, eshenitz@redhat.com, pkrempa@redhat.com
Subject: [libvirt] [PATCH v9 08/10] backup: Wire up qemu full pull backup
commands over QMP
X-BeenThere: libvir-list@redhat.com
X-Mailman-Version: 2.1.12
Precedence: junk
List-Id: Development discussions about the libvirt library & tools
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Content-Transfer-Encoding: quoted-printable
Sender: libvir-list-bounces@redhat.com
Errors-To: libvir-list-bounces@redhat.com
X-Scanned-By: MIMEDefang 2.79 on 10.5.11.13
X-Greylist: Sender IP whitelisted,
not delayed by milter-greylist-4.5.16 (mx1.redhat.com [10.5.110.46]);
Mon, 08 Jul 2019 16:57:08 +0000 (UTC)
Content-Type: text/plain; charset="utf-8"
Time to actually issue the QMP transactions that start and
stop backup commands (for now, just pull mode, not push).
Starting a job has to kick off several pre-req steps, then
a transaction, and additionally spawn an NBD server for pull
mode; ending a job as well as failing partway through
beginning a job has to unwind the earlier steps. Implementing
push mode, as well as incremental pull and checkpoint creation,
is deferred to later patches.
Signed-off-by: Eric Blake
---
src/qemu/qemu_domain.c | 18 +-
src/qemu/qemu_driver.c | 310 ++++++++++++++++++++++++++++++++++-
src/qemu/qemu_monitor_json.c | 4 +
src/qemu/qemu_process.c | 9 +
4 files changed, 327 insertions(+), 14 deletions(-)
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 64c15d8ae6..70fe250880 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -2676,17 +2676,25 @@ qemuDomainObjPrivateXMLParseBlockjobs(virQEMUDriver=
Ptr driver,
{
xmlNodePtr node;
int tmp;
+ size_t i;
VIR_AUTOFREE(char *) active =3D NULL;
if ((active =3D virXPathString("string(./blockjobs/@active)", ctxt)) &&
(tmp =3D virTristateBoolTypeFromString(active)) > 0)
priv->reconnectBlockjobs =3D tmp;
- if ((node =3D virXPathNode("./domainbackup", ctxt)) &&
- !(priv->backup =3D virDomainBackupDefParseNode(ctxt->doc, node,
- driver->xmlopt,
- VIR_DOMAIN_BACKUP_PAR=
SE_INTERNAL)))
- return -1;
+ if ((node =3D virXPathNode("./domainbackup", ctxt))) {
+ if (!(priv->backup =3D virDomainBackupDefParseNode(ctxt->doc, node,
+ driver->xmlopt,
+ VIR_DOMAIN_BACKUP=
_PARSE_INTERNAL)))
+ return -1;
+ /* The backup job is only stored in XML if backupBegin
+ * succeeded at exporting the disk, so no need to store disk
+ * state when we can just force-reset it to a known-good
+ * value. */
+ for (i =3D 0; i < priv->backup->ndisks; i++)
+ priv->backup->disks[i].state =3D VIR_DOMAIN_BACKUP_DISK_STATE_=
EXPORT;
+ }
return 0;
}
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index ec5f37fe94..d9abcfa4c8 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -17555,8 +17555,80 @@ qemuDomainCheckpointDelete(virDomainCheckpointPtr =
checkpoint,
return ret;
}
-static int qemuDomainBackupBegin(virDomainPtr domain, const char *diskXml,
- const char *checkpointXml, unsigned int f=
lags)
+static int
+qemuDomainBackupPrepare(virQEMUDriverPtr driver, virDomainObjPtr vm,
+ virDomainBackupDefPtr def)
+{
+ int ret =3D -1;
+ size_t i;
+
+ if (qemuBlockNodeNamesDetect(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ goto cleanup;
+ for (i =3D 0; i < def->ndisks; i++) {
+ virDomainBackupDiskDef *disk =3D &def->disks[i];
+ virStorageSourcePtr src =3D vm->def->disks[disk->idx]->src;
+
+ if (!disk->store)
+ continue;
+ if (virAsprintf(&disk->store->nodeformat, "tmp-%s", disk->name) < =
0)
+ goto cleanup;
+ if (!disk->store->format)
+ disk->store->format =3D VIR_STORAGE_FILE_QCOW2;
+ if (def->incremental) {
+ if (src->format !=3D VIR_STORAGE_FILE_QCOW2) {
+ virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
+ _("incremental backup of %s requires qcow2"=
),
+ disk->name);
+ goto cleanup;
+ }
+ }
+ }
+ ret =3D 0;
+ cleanup:
+ return ret;
+}
+
+/* Called while monitor lock is held. Best-effort cleanup. */
+static int
+qemuDomainBackupDiskCleanup(virQEMUDriverPtr driver, virDomainObjPtr vm,
+ virDomainBackupDiskDef *disk, bool incremental)
+{
+ qemuDomainObjPrivatePtr priv =3D vm->privateData;
+ const char *node =3D vm->def->disks[disk->idx]->src->nodeformat;
+ int ret =3D 0;
+
+ if (!disk->store)
+ return 0;
+ if (disk->state >=3D VIR_DOMAIN_BACKUP_DISK_STATE_EXPORT) {
+ /* No real need to use nbd-server-remove, since we will
+ * shortly be calling nbd-server-stop. */
+ }
+ if (incremental && disk->state >=3D VIR_DOMAIN_BACKUP_DISK_STATE_BITMA=
P &&
+ qemuMonitorDeleteBitmap(priv->mon, node, disk->store->nodeformat) =
< 0) {
+ VIR_WARN("Unable to remove temp bitmap for disk %s after backup",
+ disk->name);
+ ret =3D -1;
+ }
+ if (disk->state >=3D VIR_DOMAIN_BACKUP_DISK_STATE_READY &&
+ qemuMonitorBlockdevDel(priv->mon, disk->store->nodeformat) < 0) {
+ VIR_WARN("Unable to remove temp disk %s after backup",
+ disk->name);
+ ret =3D -1;
+ }
+ if (disk->state >=3D VIR_DOMAIN_BACKUP_DISK_STATE_LABEL)
+ qemuDomainStorageSourceAccessRevoke(driver, vm, disk->store);
+ if (disk->state >=3D VIR_DOMAIN_BACKUP_DISK_STATE_CREATED &&
+ disk->store->detected && unlink(disk->store->path) < 0) {
+ VIR_WARN("Unable to unlink temp disk %s after backup",
+ disk->store->path);
+ ret =3D -1;
+ }
+ return ret;
+}
+
+static int
+qemuDomainBackupBegin(virDomainPtr domain, const char *diskXml,
+ const char *checkpointXml, unsigned int flags)
{
virQEMUDriverPtr driver =3D domain->conn->privateData;
virDomainObjPtr vm =3D NULL;
@@ -17565,8 +17637,14 @@ static int qemuDomainBackupBegin(virDomainPtr doma=
in, const char *diskXml,
virCapsPtr caps =3D NULL;
qemuDomainObjPrivatePtr priv;
int ret =3D -1;
+ virJSONValuePtr json =3D NULL;
+ bool job_started =3D false;
+ bool nbd_running =3D false;
+ size_t i;
struct timeval tv;
char *suffix =3D NULL;
+ virCommandPtr cmd =3D NULL;
+ const char *qemuImgPath;
virCheckFlags(VIR_DOMAIN_BACKUP_BEGIN_NO_METADATA, -1);
/* TODO: VIR_DOMAIN_BACKUP_BEGIN_QUIESCE */
@@ -17587,6 +17665,7 @@ static int qemuDomainBackupBegin(virDomainPtr domai=
n, const char *diskXml,
if (!(vm =3D qemuDomObjFromDomain(domain)))
goto cleanup;
+ priv =3D vm->privateData;
cfg =3D virQEMUDriverGetConfig(driver);
if (virDomainBackupBeginEnsureACL(domain->conn, vm->def, flags) < 0)
@@ -17609,25 +17688,145 @@ static int qemuDomainBackupBegin(virDomainPtr do=
main, const char *diskXml,
if (!(def =3D virDomainBackupDefParseString(diskXml, driver->xmlopt, 0=
)))
goto cleanup;
+ if (def->type =3D=3D VIR_DOMAIN_BACKUP_TYPE_PULL) {
+ if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_NBD_BITMAP)) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+ _("qemu binary lacks pull-mode backup support")=
);
+ goto cleanup;
+ }
+ if (!def->server) {
+ if (VIR_ALLOC(def->server) < 0)
+ goto cleanup;
+ def->server->transport =3D VIR_STORAGE_NET_HOST_TRANS_TCP;
+ if (VIR_STRDUP(def->server->name, "localhost") < 0)
+ goto cleanup;
+ }
+ switch ((virStorageNetHostTransport)def->server->transport) {
+ case VIR_STORAGE_NET_HOST_TRANS_TCP:
+ /* TODO: Update qemu.conf to provide a port range,
+ * probably starting at 10809, for obtaining automatic
+ * port via virPortAllocatorAcquire, as well as store
+ * somewhere if we need to call virPortAllocatorRelease
+ * during BackupEnd. Until then, user must provide port */
+ if (!def->server->port) {
+ virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
+ _(" must specify TCP port for=
now"));
+ goto cleanup;
+ }
+ break;
+ case VIR_STORAGE_NET_HOST_TRANS_UNIX:
+ /* TODO: Do we need to mess with selinux? */
+ break;
+ case VIR_STORAGE_NET_HOST_TRANS_RDMA:
+ case VIR_STORAGE_NET_HOST_TRANS_LAST:
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("unexpected transport in "));
+ goto cleanup;
+ }
+ } else {
+ virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
+ _("push mode backups not supported yet"));
+ goto cleanup;
+ }
+ if (def->incremental) {
+ if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BITMAP_MERGE)) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+ _("qemu binary lacks persistent bitmaps support=
"));
+ goto cleanup;
+ }
+ virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
+ _("cannot create incremental backups yet"));
+ goto cleanup;
+ }
+
+ if (!(qemuImgPath =3D qemuFindQemuImgBinary(driver)))
+ goto cleanup;
+
/* We are going to modify the domain below. */
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
- priv =3D vm->privateData;
if (priv->backup) {
virReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("another backup job is already running"));
goto endjob;
}
- if (virDomainBackupAlignDisks(def, vm->def, suffix) < 0)
+ if (virDomainBackupAlignDisks(def, vm->def, suffix) < 0 ||
+ qemuDomainBackupPrepare(driver, vm, def) < 0)
goto endjob;
/* actually start the checkpoint. 2x2 array of push/pull, full/incr,
plus additional tweak if checkpoint requested */
- /* TODO: issue QMP commands:
- - pull: nbd-server-start with from user (or autogenerate s=
erver)
- - push/pull: blockdev-add per
+ qemuDomainObjEnterMonitor(driver, vm);
+ /* - push/pull: blockdev-add per */
+ for (i =3D 0; i < def->ndisks; i++) {
+ virDomainBackupDiskDef *disk =3D &def->disks[i];
+ virJSONValuePtr file;
+ virStorageSourcePtr src =3D vm->def->disks[disk->idx]->src;
+ const char *node =3D src->nodeformat;
+
+ if (!disk->store)
+ continue;
+ if (qemuDomainStorageFileInit(driver, vm, disk->store, src) < 0)
+ goto endmon;
+ if (disk->store->detected) {
+ if (virStorageFileCreate(disk->store) < 0) {
+ virReportSystemError(errno,
+ _("failed to create image file '%s'"),
+ NULLSTR(disk->store->path));
+ goto endmon;
+ }
+ disk->state =3D VIR_DOMAIN_BACKUP_DISK_STATE_CREATED;
+ }
+ if (qemuDomainStorageSourceAccessAllow(driver, vm, disk->store, fa=
lse,
+ true) < 0)
+ goto endmon;
+ disk->state =3D VIR_DOMAIN_BACKUP_DISK_STATE_LABEL;
+ if (disk->store->detected) {
+ virBuffer buf =3D VIR_BUFFER_INITIALIZER;
+
+ /* Force initialization of scratch/target file to new qcow2 */
+ if (!(cmd =3D virCommandNewArgList(qemuImgPath,
+ "create",
+ "-f",
+ virStorageFileFormatTypeToStr=
ing(disk->store->format),
+ "-o",
+ NULL)))
+ goto endmon;
+ virBufferAsprintf(&buf, "backing_fmt=3D%s,backing_file=3D",
+ virStorageFileFormatTypeToString(src->format=
));
+ virQEMUBuildBufferEscapeComma(&buf, src->path);
+ virCommandAddArgBuffer(cmd, &buf);
+
+ virQEMUBuildBufferEscapeComma(&buf, disk->store->path);
+ virCommandAddArgBuffer(cmd, &buf);
+ if (virCommandRun(cmd, NULL) < 0)
+ goto endmon;
+ virCommandFree(cmd);
+ cmd =3D NULL;
+ }
+
+ if (virJSONValueObjectCreate(&file,
+ "s:driver", "file",
+ "s:filename", disk->store->path,
+ NULL) < 0)
+ goto endmon;
+ if (virJSONValueObjectCreate(&json,
+ "s:driver", virStorageFileFormatTypeT=
oString(disk->store->format),
+ "s:node-name", disk->store->nodeforma=
t,
+ "a:file", &file,
+ "s:backing", node, NULL) < 0) {
+ virJSONValueFree(file);
+ goto endmon;
+ }
+ if (qemuMonitorBlockdevAdd(priv->mon, json) < 0)
+ goto endmon;
+ json =3D NULL;
+ disk->state =3D VIR_DOMAIN_BACKUP_DISK_STATE_READY;
+ }
+
+ /* TODO:
- incr: bitmap-add of tmp, bitmap-merge per
- transaction, containing:
- push+full: blockdev-backup sync:full
@@ -17635,8 +17834,76 @@ static int qemuDomainBackupBegin(virDomainPtr doma=
in, const char *diskXml,
- pull+full: blockdev-backup sync:none
- pull+incr: blockdev-backup sync:none, bitmap-disable of tmp
- if checkpoint: bitmap-disable of old, bitmap-add of new
+ */
+ if (!(json =3D virJSONValueNewArray()))
+ goto endmon;
+ for (i =3D 0; i < def->ndisks; i++) {
+ virDomainBackupDiskDef *disk =3D &def->disks[i];
+ virStorageSourcePtr src =3D vm->def->disks[disk->idx]->src;
+
+ if (!disk->store)
+ continue;
+ if (qemuMonitorJSONTransactionAdd(json,
+ "blockdev-backup",
+ "s:device", src->nodeformat,
+ "s:target", disk->store->nodefor=
mat,
+ "s:sync", "none",
+ "s:job-id", disk->name,
+ NULL) < 0)
+ goto endmon;
+ }
+ if (qemuMonitorTransaction(priv->mon, &json) < 0)
+ goto endmon;
+ job_started =3D true;
+
+ /*
+ - pull: nbd-server-start with from user (or autogenerate s=
erver)
- pull: nbd-server-add per , including bitmap for incr
*/
+ if (def->type =3D=3D VIR_DOMAIN_BACKUP_TYPE_PULL) {
+ if (qemuMonitorNBDServerStart(priv->mon, def->server, NULL) < 0)
+ goto endmon;
+ nbd_running =3D true;
+ for (i =3D 0; i < def->ndisks; i++) {
+ virDomainBackupDiskDef *disk =3D &def->disks[i];
+
+ if (!disk->store)
+ continue;
+ if (qemuMonitorNBDServerAdd(priv->mon, disk->store->nodeformat,
+ disk->name, false,
+ def->incremental ? disk->name :
+ NULL) < 0)
+ goto endmon;
+ disk->state =3D VIR_DOMAIN_BACKUP_DISK_STATE_EXPORT;
+ }
+ }
+
+ ret =3D 0;
+ endmon:
+ /* Best effort cleanup if we fail partway through */
+ if (ret < 0) {
+ virErrorPtr save_err =3D virSaveLastError();
+
+ if (nbd_running &&
+ qemuMonitorNBDServerStop(priv->mon) < 0)
+ VIR_WARN("Unable to stop NBD server on vm %s after backup job",
+ vm->def->name);
+ for (i =3D 0; i < def->ndisks; i++) {
+ virDomainBackupDiskDef *disk =3D &def->disks[i];
+
+ if (job_started &&
+ qemuMonitorBlockJobCancel(priv->mon, disk->name) < 0)
+ VIR_WARN("Unable to stop backup job %s on vm %s after fail=
ure",
+ disk->store->nodeformat, vm->def->name);
+ qemuDomainBackupDiskCleanup(driver, vm, disk, !!def->increment=
al);
+ }
+ virSetError(save_err);
+ virFreeError(save_err);
+ }
+ if (qemuDomainObjExitMonitor(driver, vm) < 0)
+ ret =3D -1;
+ if (ret < 0)
+ goto endjob;
VIR_STEAL_PTR(priv->backup, def);
ret =3D priv->backup->id =3D 1; /* Hard-coded job id for now */
@@ -17649,7 +17916,9 @@ static int qemuDomainBackupBegin(virDomainPtr domai=
n, const char *diskXml,
qemuDomainObjEndJob(driver, vm);
cleanup:
+ virCommandFree(cmd);
VIR_FREE(suffix);
+ virJSONValueFree(json);
virDomainObjEndAPI(&vm);
virDomainBackupDefFree(def);
virObjectUnref(caps);
@@ -17699,6 +17968,8 @@ static int qemuDomainBackupEnd(virDomainPtr domain,=
int id, unsigned int flags)
virDomainBackupDefPtr backup =3D NULL;
qemuDomainObjPrivatePtr priv;
bool want_abort =3D flags & VIR_DOMAIN_BACKUP_END_ABORT;
+ virDomainBackupDefPtr def;
+ size_t i;
virCheckFlags(VIR_DOMAIN_BACKUP_END_ABORT, -1);
@@ -17719,9 +17990,27 @@ static int qemuDomainBackupEnd(virDomainPtr domain=
, int id, unsigned int flags)
if (priv->backup->type !=3D VIR_DOMAIN_BACKUP_TYPE_PUSH)
want_abort =3D false;
+ def =3D priv->backup;
+
+ /* We are going to modify the domain below. */
+ if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ goto cleanup;
+
+ qemuDomainObjEnterMonitor(driver, vm);
+ if (def->type =3D=3D VIR_DOMAIN_BACKUP_TYPE_PULL)
+ ret =3D qemuMonitorNBDServerStop(priv->mon);
+ for (i =3D 0; i < def->ndisks; i++) {
+ if (qemuMonitorBlockJobCancel(priv->mon,
+ def->disks[i].name) < 0 ||
+ qemuDomainBackupDiskCleanup(driver, vm, &def->disks[i],
+ !!def->incremental) < 0)
+ ret =3D -1;
+ }
+ if (qemuDomainObjExitMonitor(driver, vm) < 0 || ret < 0) {
+ ret =3D -1;
+ goto endjob;
+ }
- /* TODO: QMP commands to actually cancel the pending job, and on
- * pull, also tear down the NBD server */
VIR_STEAL_PTR(backup, priv->backup);
if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm,
driver->caps) < 0)
@@ -17730,6 +18019,9 @@ static int qemuDomainBackupEnd(virDomainPtr domain,=
int id, unsigned int flags)
ret =3D want_abort ? 0 : 1;
+ endjob:
+ qemuDomainObjEndJob(driver, vm);
+
cleanup:
virDomainBackupDefFree(backup);
virDomainObjEndAPI(&vm);
diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index 1769fa70cb..233a11fc31 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -1127,6 +1127,8 @@ qemuMonitorJSONHandleBlockJobImpl(qemuMonitorPtr mon,
type =3D VIR_DOMAIN_BLOCK_JOB_TYPE_COMMIT;
else if (STREQ(type_str, "mirror"))
type =3D VIR_DOMAIN_BLOCK_JOB_TYPE_COPY;
+ else if (STREQ(type_str, "backup"))
+ type =3D VIR_DOMAIN_BLOCK_JOB_TYPE_BACKUP;
switch ((virConnectDomainEventBlockJobStatus) event) {
case VIR_DOMAIN_BLOCK_JOB_COMPLETED:
@@ -4884,6 +4886,8 @@ qemuMonitorJSONParseBlockJobInfo(virHashTablePtr bloc=
kJobs,
info->type =3D VIR_DOMAIN_BLOCK_JOB_TYPE_COMMIT;
else if (STREQ(type, "mirror"))
info->type =3D VIR_DOMAIN_BLOCK_JOB_TYPE_COPY;
+ else if (STREQ(type, "backup"))
+ info->type =3D VIR_DOMAIN_BLOCK_JOB_TYPE_BACKUP;
else
info->type =3D VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN;
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index aa09ef175a..fdab26fb17 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -89,6 +89,7 @@
#include "virresctrl.h"
#include "virvsock.h"
#include "viridentity.h"
+#include "backup_conf.h"
#define VIR_FROM_THIS VIR_FROM_QEMU
@@ -933,6 +934,7 @@ qemuProcessHandleBlockJob(qemuMonitorPtr mon ATTRIBUTE_=
UNUSED,
void *opaque)
{
virQEMUDriverPtr driver =3D opaque;
+ qemuDomainObjPrivatePtr priv;
struct qemuProcessEvent *processEvent =3D NULL;
virDomainDiskDefPtr disk;
qemuBlockJobDataPtr job =3D NULL;
@@ -943,6 +945,13 @@ qemuProcessHandleBlockJob(qemuMonitorPtr mon ATTRIBUTE=
_UNUSED,
VIR_DEBUG("Block job for device %s (domain: %p,%s) type %d status %d",
diskAlias, vm, vm->def->name, type, status);
+ priv =3D vm->privateData;
+ if (type =3D=3D VIR_DOMAIN_BLOCK_JOB_TYPE_BACKUP &&
+ (!priv->backup || priv->backup->type =3D=3D VIR_DOMAIN_BACKUP_TYPE=
_PULL)) {
+ /* Event for canceling a pull-mode backup is side-effect that
+ * should not be forwarded on to user */
+ goto cleanup;
+ }
if (!(disk =3D qemuProcessFindDomainDiskByAliasOrQOM(vm, diskAlias, NU=
LL)))
goto cleanup;
--=20
2.20.1
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list
From nobody Fri Mar 29 10:43:07 2024
Delivered-To: importer@patchew.org
Received-SPF: pass (zoho.com: domain of redhat.com designates 209.132.183.28
as permitted sender) client-ip=209.132.183.28;
envelope-from=libvir-list-bounces@redhat.com; helo=mx1.redhat.com;
Authentication-Results: mx.zohomail.com;
spf=pass (zoho.com: domain of redhat.com designates 209.132.183.28 as
permitted sender) smtp.mailfrom=libvir-list-bounces@redhat.com;
dmarc=pass(p=none dis=none) header.from=redhat.com
ARC-Seal: i=1; a=rsa-sha256; t=1562605044; cv=none;
d=zoho.com; s=zohoarc;
b=XoBY4+iQE1SfuyOfRiMsl9/diN6pimC6iCbGwDsKHWuwZcTfrh5GBsIVLLbMSHvJyYDmIClQ0uKgOXFGvaS7lkTZbF1yGGju+sxhU2VSz7JvQm5K2/5bN69W8QC9NMmJR1Oq/awHgKaImtPadYyO03C51wLEj5uKCqcAs7q//Ic=
ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=zoho.com;
s=zohoarc;
t=1562605044;
h=Content-Type:Content-Transfer-Encoding:Cc:Date:From:In-Reply-To:List-Subscribe:List-Post:List-Id:List-Archive:List-Help:List-Unsubscribe:MIME-Version:Message-ID:References:Sender:Subject:To:ARC-Authentication-Results;
bh=fcAiYY7gwrA4khOSOfIUhZtxaOnpSXIbdihwlDNHhTE=;
b=GGOZ2KCkOZFu2Ru18b++sKzczC1arSTEEqRwuxigmkgjmFHnvOvjfk6GP9muqRUR1uC6bjECwHitGLRF+C5nzpvOeOOqoUF8Xd1w/7lhuAE/T9nXS+8cLUQsoAzbEN/bDXPe8JEVIqMnlg12zEn3JGnbf/czA7wB1CY+ZNR6DX0=
ARC-Authentication-Results: i=1; mx.zoho.com;
spf=pass (zoho.com: domain of redhat.com designates 209.132.183.28 as
permitted sender) smtp.mailfrom=libvir-list-bounces@redhat.com;
dmarc=pass header.from= (p=none dis=none)
header.from=
Return-Path:
Received: from mx1.redhat.com (mx1.redhat.com [209.132.183.28]) by
mx.zohomail.com
with SMTPS id 156260504445556.200459427669784;
Mon, 8 Jul 2019 09:57:24 -0700 (PDT)
Received: from smtp.corp.redhat.com (int-mx01.intmail.prod.int.phx2.redhat.com
[10.5.11.11])
(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))
(No client certificate requested)
by mx1.redhat.com (Postfix) with ESMTPS id E013E7FDCC;
Mon, 8 Jul 2019 16:57:22 +0000 (UTC)
Received: from colo-mx.corp.redhat.com
(colo-mx02.intmail.prod.int.phx2.redhat.com [10.5.11.21])
by smtp.corp.redhat.com (Postfix) with ESMTPS id B97225B081;
Mon, 8 Jul 2019 16:57:20 +0000 (UTC)
Received: from lists01.pubmisc.prod.ext.phx2.redhat.com
(lists01.pubmisc.prod.ext.phx2.redhat.com [10.5.19.33])
by colo-mx.corp.redhat.com (Postfix) with ESMTP id 7ADEB206D3;
Mon, 8 Jul 2019 16:57:17 +0000 (UTC)
Received: from smtp.corp.redhat.com (int-mx07.intmail.prod.int.phx2.redhat.com
[10.5.11.22])
by lists01.pubmisc.prod.ext.phx2.redhat.com (8.13.8/8.13.8) with ESMTP
id x68GuW62027967 for ;
Mon, 8 Jul 2019 12:56:32 -0400
Received: by smtp.corp.redhat.com (Postfix)
id 53DFB1001B32; Mon, 8 Jul 2019 16:56:32 +0000 (UTC)
Received: from blue.redhat.com (ovpn-116-78.phx2.redhat.com [10.3.116.78])
by smtp.corp.redhat.com (Postfix) with ESMTP id DA4731001B18;
Mon, 8 Jul 2019 16:56:31 +0000 (UTC)
From: Eric Blake
To: libvir-list@redhat.com
Date: Mon, 8 Jul 2019 11:55:52 -0500
Message-Id: <20190708165553.18452-10-eblake@redhat.com>
In-Reply-To: <20190708165553.18452-1-eblake@redhat.com>
References: <20190708165553.18452-1-eblake@redhat.com>
MIME-Version: 1.0
X-Scanned-By: MIMEDefang 2.84 on 10.5.11.22
X-loop: libvir-list@redhat.com
Cc: nsoffer@redhat.com, eshenitz@redhat.com, pkrempa@redhat.com
Subject: [libvirt] [PATCH v9 09/10] backup: qemu: Wire up qemu full push
backup commands over QMP
X-BeenThere: libvir-list@redhat.com
X-Mailman-Version: 2.1.12
Precedence: junk
List-Id: Development discussions about the libvirt library & tools
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Content-Transfer-Encoding: quoted-printable
Sender: libvir-list-bounces@redhat.com
Errors-To: libvir-list-bounces@redhat.com
X-Scanned-By: MIMEDefang 2.79 on 10.5.11.11
X-Greylist: Sender IP whitelisted,
not delayed by milter-greylist-4.5.16 (mx1.redhat.com [10.5.110.27]);
Mon, 08 Jul 2019 16:57:23 +0000 (UTC)
Content-Type: text/plain; charset="utf-8"
Update the code to support push backups; for now, the destination file
still has to be local, although the XML could be extended into
supporting remote destinations (where we will have to use the full
power of blockdev-add). This also touches up the event handling to
inform the user when the job is complete. (However, there are probably
bugs lurking in the event code; pull mode is more tested than push
mode at the time I write this).
Signed-off-by: Eric Blake
---
src/qemu/qemu_driver.c | 81 +++++++++++++++++++++++++++++-------------
1 file changed, 56 insertions(+), 25 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index d9abcfa4c8..a7a9526d1e 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -17591,14 +17591,13 @@ qemuDomainBackupPrepare(virQEMUDriverPtr driver, =
virDomainObjPtr vm,
/* Called while monitor lock is held. Best-effort cleanup. */
static int
qemuDomainBackupDiskCleanup(virQEMUDriverPtr driver, virDomainObjPtr vm,
- virDomainBackupDiskDef *disk, bool incremental)
+ virDomainBackupDiskDef *disk, bool push,
+ bool incremental, bool completed)
{
qemuDomainObjPrivatePtr priv =3D vm->privateData;
const char *node =3D vm->def->disks[disk->idx]->src->nodeformat;
int ret =3D 0;
- if (!disk->store)
- return 0;
if (disk->state >=3D VIR_DOMAIN_BACKUP_DISK_STATE_EXPORT) {
/* No real need to use nbd-server-remove, since we will
* shortly be calling nbd-server-stop. */
@@ -17611,16 +17610,17 @@ qemuDomainBackupDiskCleanup(virQEMUDriverPtr driv=
er, virDomainObjPtr vm,
}
if (disk->state >=3D VIR_DOMAIN_BACKUP_DISK_STATE_READY &&
qemuMonitorBlockdevDel(priv->mon, disk->store->nodeformat) < 0) {
- VIR_WARN("Unable to remove temp disk %s after backup",
- disk->name);
+ VIR_WARN("Unable to remove %s disk %s after backup",
+ push ? "target" : "scratch", disk->name);
ret =3D -1;
}
if (disk->state >=3D VIR_DOMAIN_BACKUP_DISK_STATE_LABEL)
qemuDomainStorageSourceAccessRevoke(driver, vm, disk->store);
- if (disk->state >=3D VIR_DOMAIN_BACKUP_DISK_STATE_CREATED &&
+ if ((!push || !completed) &&
+ disk->state >=3D VIR_DOMAIN_BACKUP_DISK_STATE_CREATED &&
disk->store->detected && unlink(disk->store->path) < 0) {
- VIR_WARN("Unable to unlink temp disk %s after backup",
- disk->store->path);
+ VIR_WARN("Unable to unlink %s disk %s after backup",
+ push ? "failed target" : "scratch", disk->store->path);
ret =3D -1;
}
return ret;
@@ -17640,6 +17640,7 @@ qemuDomainBackupBegin(virDomainPtr domain, const ch=
ar *diskXml,
virJSONValuePtr json =3D NULL;
bool job_started =3D false;
bool nbd_running =3D false;
+ bool push;
size_t i;
struct timeval tv;
char *suffix =3D NULL;
@@ -17688,7 +17689,8 @@ qemuDomainBackupBegin(virDomainPtr domain, const ch=
ar *diskXml,
if (!(def =3D virDomainBackupDefParseString(diskXml, driver->xmlopt, 0=
)))
goto cleanup;
- if (def->type =3D=3D VIR_DOMAIN_BACKUP_TYPE_PULL) {
+ push =3D def->type =3D=3D VIR_DOMAIN_BACKUP_TYPE_PUSH;
+ if (!push) {
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_NBD_BITMAP)) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("qemu binary lacks pull-mode backup support")=
);
@@ -17723,10 +17725,6 @@ qemuDomainBackupBegin(virDomainPtr domain, const c=
har *diskXml,
_("unexpected transport in "));
goto cleanup;
}
- } else {
- virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
- _("push mode backups not supported yet"));
- goto cleanup;
}
if (def->incremental) {
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BITMAP_MERGE)) {
@@ -17807,6 +17805,7 @@ qemuDomainBackupBegin(virDomainPtr domain, const ch=
ar *diskXml,
cmd =3D NULL;
}
+ /* FIXME: allow non-local files for push destinations */
if (virJSONValueObjectCreate(&file,
"s:driver", "file",
"s:filename", disk->store->path,
@@ -17847,7 +17846,7 @@ qemuDomainBackupBegin(virDomainPtr domain, const ch=
ar *diskXml,
"blockdev-backup",
"s:device", src->nodeformat,
"s:target", disk->store->nodefor=
mat,
- "s:sync", "none",
+ "s:sync", push ? "full" : "none",
"s:job-id", disk->name,
NULL) < 0)
goto endmon;
@@ -17860,7 +17859,7 @@ qemuDomainBackupBegin(virDomainPtr domain, const ch=
ar *diskXml,
- pull: nbd-server-start with from user (or autogenerate s=
erver)
- pull: nbd-server-add per , including bitmap for incr
*/
- if (def->type =3D=3D VIR_DOMAIN_BACKUP_TYPE_PULL) {
+ if (!push) {
if (qemuMonitorNBDServerStart(priv->mon, def->server, NULL) < 0)
goto endmon;
nbd_running =3D true;
@@ -17891,11 +17890,14 @@ qemuDomainBackupBegin(virDomainPtr domain, const =
char *diskXml,
for (i =3D 0; i < def->ndisks; i++) {
virDomainBackupDiskDef *disk =3D &def->disks[i];
+ if (!disk->store)
+ continue;
if (job_started &&
qemuMonitorBlockJobCancel(priv->mon, disk->name) < 0)
VIR_WARN("Unable to stop backup job %s on vm %s after fail=
ure",
disk->store->nodeformat, vm->def->name);
- qemuDomainBackupDiskCleanup(driver, vm, disk, !!def->increment=
al);
+ qemuDomainBackupDiskCleanup(driver, vm, disk, push,
+ !!def->incremental, false);
}
virSetError(save_err);
virFreeError(save_err);
@@ -17970,6 +17972,8 @@ static int qemuDomainBackupEnd(virDomainPtr domain,=
int id, unsigned int flags)
bool want_abort =3D flags & VIR_DOMAIN_BACKUP_END_ABORT;
virDomainBackupDefPtr def;
size_t i;
+ bool push =3D true;
+ bool completed =3D true;
virCheckFlags(VIR_DOMAIN_BACKUP_END_ABORT, -1);
@@ -17988,23 +17992,50 @@ static int qemuDomainBackupEnd(virDomainPtr domai=
n, int id, unsigned int flags)
goto cleanup;
}
- if (priv->backup->type !=3D VIR_DOMAIN_BACKUP_TYPE_PUSH)
- want_abort =3D false;
def =3D priv->backup;
+ if (def->type !=3D VIR_DOMAIN_BACKUP_TYPE_PUSH) {
+ want_abort =3D false;
+ push =3D false;
+ }
/* We are going to modify the domain below. */
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
qemuDomainObjEnterMonitor(driver, vm);
- if (def->type =3D=3D VIR_DOMAIN_BACKUP_TYPE_PULL)
+ if (push) {
+ for (i =3D 0; i < def->ndisks; i++) {
+ virDomainBackupDiskDef *disk =3D &def->disks[i];
+
+ if (!disk->store)
+ continue;
+ if (disk->state !=3D VIR_DOMAIN_BACKUP_DISK_STATE_COMPLETE)
+ completed =3D false;
+ }
+ } else {
ret =3D qemuMonitorNBDServerStop(priv->mon);
- for (i =3D 0; i < def->ndisks; i++) {
- if (qemuMonitorBlockJobCancel(priv->mon,
- def->disks[i].name) < 0 ||
- qemuDomainBackupDiskCleanup(driver, vm, &def->disks[i],
- !!def->incremental) < 0)
- ret =3D -1;
+ }
+ if (!completed && !want_abort) {
+ virReportError(VIR_ERR_OPERATION_INVALID,
+ _("backup job id '%d' not complete yet"), id);
+ } else {
+ for (i =3D 0; i < def->ndisks; i++) {
+ virDomainBackupDiskDef *disk =3D &def->disks[i];
+
+ if (!disk->store)
+ continue;
+ if (!push || disk->state < VIR_DOMAIN_BACKUP_DISK_STATE_COMPLE=
TE) {
+ if (qemuMonitorBlockJobCancel(priv->mon,
+ disk->name) < 0 &&
+ !want_abort) {
+ ret =3D -1;
+ continue;
+ }
+ }
+ if (qemuDomainBackupDiskCleanup(driver, vm, disk, push,
+ !!def->incremental, completed)=
< 0)
+ ret =3D -1;
+ }
}
if (qemuDomainObjExitMonitor(driver, vm) < 0 || ret < 0) {
ret =3D -1;
--=20
2.20.1
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list
From nobody Fri Mar 29 10:43:07 2024
Delivered-To: importer@patchew.org
Received-SPF: pass (zoho.com: domain of redhat.com designates 209.132.183.28
as permitted sender) client-ip=209.132.183.28;
envelope-from=libvir-list-bounces@redhat.com; helo=mx1.redhat.com;
Authentication-Results: mx.zohomail.com;
spf=pass (zoho.com: domain of redhat.com designates 209.132.183.28 as
permitted sender) smtp.mailfrom=libvir-list-bounces@redhat.com;
dmarc=pass(p=none dis=none) header.from=redhat.com
ARC-Seal: i=1; a=rsa-sha256; t=1562605086; cv=none;
d=zoho.com; s=zohoarc;
b=mBTO7tUrvfuTEVNxmxQyLuJChq0Y7YRyKFYXIHQGLLLgc8ffigywcxpyECEqocNe/gkHUCUjdOW4PS6BQTda9yxYyfIAPFKWJoYnRj052X5Ymw0+F042RLkpSm3KGdzveocpjSRIfJ9PAmkQNz1R3QZbs7NROtyna1F0CUuzIzc=
ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=zoho.com;
s=zohoarc;
t=1562605086;
h=Content-Type:Content-Transfer-Encoding:Cc:Date:From:In-Reply-To:List-Subscribe:List-Post:List-Id:List-Archive:List-Help:List-Unsubscribe:MIME-Version:Message-ID:References:Sender:Subject:To:ARC-Authentication-Results;
bh=hzubi+Vn7ap+/XroJ+5+PsiI06nDVZl2rk1zx5u3kcY=;
b=JV8H0jX0NnDYIJRcixYgPynJfYPXOPOF5aiP6FtQGvqose9RXkGaZHLvmqJyrofMa3arZ6olU+edsrTXagu0mYML/1d4lYmKwvMf5PRSc63uqjUlSD/zhvN0FiQYW3vsAdRBMcgUZ/FyY+FbNI7nUvXihjEuiN7ZpPSSTS7le+M=
ARC-Authentication-Results: i=1; mx.zoho.com;
spf=pass (zoho.com: domain of redhat.com designates 209.132.183.28 as
permitted sender) smtp.mailfrom=libvir-list-bounces@redhat.com;
dmarc=pass header.from= (p=none dis=none)
header.from=
Return-Path:
Received: from mx1.redhat.com (mx1.redhat.com [209.132.183.28]) by
mx.zohomail.com
with SMTPS id 1562605086572159.27644338295624;
Mon, 8 Jul 2019 09:58:06 -0700 (PDT)
Received: from smtp.corp.redhat.com (int-mx08.intmail.prod.int.phx2.redhat.com
[10.5.11.23])
(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))
(No client certificate requested)
by mx1.redhat.com (Postfix) with ESMTPS id 92FD730860AF;
Mon, 8 Jul 2019 16:57:50 +0000 (UTC)
Received: from colo-mx.corp.redhat.com
(colo-mx01.intmail.prod.int.phx2.redhat.com [10.5.11.20])
by smtp.corp.redhat.com (Postfix) with ESMTPS id 702C81A924;
Mon, 8 Jul 2019 16:57:49 +0000 (UTC)
Received: from lists01.pubmisc.prod.ext.phx2.redhat.com
(lists01.pubmisc.prod.ext.phx2.redhat.com [10.5.19.33])
by colo-mx.corp.redhat.com (Postfix) with ESMTP id 34B381833006;
Mon, 8 Jul 2019 16:57:49 +0000 (UTC)
Received: from smtp.corp.redhat.com (int-mx07.intmail.prod.int.phx2.redhat.com
[10.5.11.22])
by lists01.pubmisc.prod.ext.phx2.redhat.com (8.13.8/8.13.8) with ESMTP
id x68GuWYk027980 for ;
Mon, 8 Jul 2019 12:56:32 -0400
Received: by smtp.corp.redhat.com (Postfix)
id E60951001B18; Mon, 8 Jul 2019 16:56:32 +0000 (UTC)
Received: from blue.redhat.com (ovpn-116-78.phx2.redhat.com [10.3.116.78])
by smtp.corp.redhat.com (Postfix) with ESMTP id 7856D1001B29;
Mon, 8 Jul 2019 16:56:32 +0000 (UTC)
From: Eric Blake
To: libvir-list@redhat.com
Date: Mon, 8 Jul 2019 11:55:53 -0500
Message-Id: <20190708165553.18452-11-eblake@redhat.com>
In-Reply-To: <20190708165553.18452-1-eblake@redhat.com>
References: <20190708165553.18452-1-eblake@redhat.com>
MIME-Version: 1.0
X-Scanned-By: MIMEDefang 2.84 on 10.5.11.22
X-loop: libvir-list@redhat.com
Cc: nsoffer@redhat.com, eshenitz@redhat.com, pkrempa@redhat.com
Subject: [libvirt] [PATCH v9 10/10] backup: Implement qemu incremental pull
backup
X-BeenThere: libvir-list@redhat.com
X-Mailman-Version: 2.1.12
Precedence: junk
List-Id: Development discussions about the libvirt library & tools
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Content-Transfer-Encoding: quoted-printable
Sender: libvir-list-bounces@redhat.com
Errors-To: libvir-list-bounces@redhat.com
X-Scanned-By: MIMEDefang 2.84 on 10.5.11.23
X-Greylist: Sender IP whitelisted,
not delayed by milter-greylist-4.5.16 (mx1.redhat.com [10.5.110.44]);
Mon, 08 Jul 2019 16:58:05 +0000 (UTC)
Content-Type: text/plain; charset="utf-8"
Complete wiring up incremental backup, by adding in support for
creating a checkpoint at the same time as a backup (make the
transaction have a few more steps) as well as exposing the dirty
bitmap for a prior backup over NBD (requires creating a temporary
bitmap, merging all appropriate bitmaps in, then exposing that
bitmap over NBD).
Signed-off-by: Eric Blake
---
src/qemu/qemu_driver.c | 198 +++++++++++++++++++++++++++++++++++------
1 file changed, 170 insertions(+), 28 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index a7a9526d1e..69aa6b99e7 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -16977,6 +16977,24 @@ qemuDomainCheckpointPrepare(virQEMUDriverPtr drive=
r, virCapsPtr caps,
if (disk->type !=3D VIR_DOMAIN_CHECKPOINT_TYPE_BITMAP)
continue;
+ /* We want to name temporary bitmap after disk name during
+ * incremental backup, which is not possible if that is a
+ * persistent bitmap name. We can also make life easier by
+ * enforcing bitmap names match checkpoint name, although this
+ * is not technically necessary. */
+ if (STREQ(disk->name, disk->bitmap)) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
+ _("checkpoint for disk %s must have distinct bi=
tmap name"),
+ disk->name);
+ goto cleanup;
+ }
+ if (STRNEQ(disk->bitmap, def->parent.name)) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
+ _("disk %s bitmap should match checkpoint name =
%s"),
+ disk->name, def->parent.name);
+ goto cleanup;
+ }
+
if (vm->def->disks[i]->src->format > 0 &&
vm->def->disks[i]->src->format !=3D VIR_STORAGE_FILE_QCOW2) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
@@ -17557,19 +17575,44 @@ qemuDomainCheckpointDelete(virDomainCheckpointPtr=
checkpoint,
static int
qemuDomainBackupPrepare(virQEMUDriverPtr driver, virDomainObjPtr vm,
- virDomainBackupDefPtr def)
+ virDomainBackupDefPtr def,
+ virDomainMomentObjPtr chk)
{
int ret =3D -1;
size_t i;
+ virDomainCheckpointDefPtr chkdef;
+ chkdef =3D chk ? virDomainCheckpointObjGetDef(chk) : NULL;
+ if (chk && def->ndisks !=3D chkdef->ndisks) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("inconsistency between backup and checkpoint disk=
s"));
+ goto cleanup;
+ }
if (qemuBlockNodeNamesDetect(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
goto cleanup;
for (i =3D 0; i < def->ndisks; i++) {
virDomainBackupDiskDef *disk =3D &def->disks[i];
virStorageSourcePtr src =3D vm->def->disks[disk->idx]->src;
- if (!disk->store)
+ /* For now, insist that atomic checkpoint affect same disks as
+ * those being backed up. */
+ if (!disk->store) {
+ if (chk &&
+ chkdef->disks[i].type !=3D VIR_DOMAIN_CHECKPOINT_TYPE_NONE=
) {
+ virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
+ _("disk %s requested checkpoint without bac=
kup"),
+ disk->name);
+ goto cleanup;
+ }
continue;
+ }
+ if (chk &&
+ chkdef->disks[i].type !=3D VIR_DOMAIN_CHECKPOINT_TYPE_BITMAP) {
+ virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
+ _("disk %s requested backup without checkpoint"=
),
+ disk->name);
+ goto cleanup;
+ }
if (virAsprintf(&disk->store->nodeformat, "tmp-%s", disk->name) < =
0)
goto cleanup;
if (!disk->store->format)
@@ -17603,7 +17646,7 @@ qemuDomainBackupDiskCleanup(virQEMUDriverPtr driver=
, virDomainObjPtr vm,
* shortly be calling nbd-server-stop. */
}
if (incremental && disk->state >=3D VIR_DOMAIN_BACKUP_DISK_STATE_BITMA=
P &&
- qemuMonitorDeleteBitmap(priv->mon, node, disk->store->nodeformat) =
< 0) {
+ qemuMonitorDeleteBitmap(priv->mon, node, disk->name) < 0) {
VIR_WARN("Unable to remove temp bitmap for disk %s after backup",
disk->name);
ret =3D -1;
@@ -17641,28 +17684,22 @@ qemuDomainBackupBegin(virDomainPtr domain, const =
char *diskXml,
bool job_started =3D false;
bool nbd_running =3D false;
bool push;
+ const char *mode;
size_t i;
struct timeval tv;
char *suffix =3D NULL;
virCommandPtr cmd =3D NULL;
const char *qemuImgPath;
+ virDomainMomentObjPtr chk =3D NULL;
+ virDomainMomentObjPtr other =3D NULL;
+ virDomainMomentObjPtr parent =3D NULL;
+ virDomainMomentObjPtr current;
+ virJSONValuePtr arr =3D NULL;
+ VIR_AUTOUNREF(virDomainCheckpointDefPtr) chkdef =3D NULL;
virCheckFlags(VIR_DOMAIN_BACKUP_BEGIN_NO_METADATA, -1);
/* TODO: VIR_DOMAIN_BACKUP_BEGIN_QUIESCE */
- // FIXME: Support non-null checkpointXML for incremental - what
- // code can be shared with CheckpointCreateXML, then add to transaction
- // to create new checkpoint at same time as starting blockdev-backup
- if (checkpointXml) {
- virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
- _("cannot create incremental backups yet"));
- return -1;
- }
- // if (chk) VIR_STRDUP(suffix, chk->name);
- gettimeofday(&tv, NULL);
- if (virAsprintf(&suffix, "%lld", (long long)tv.tv_sec) < 0)
- goto cleanup;
-
if (!(vm =3D qemuDomObjFromDomain(domain)))
goto cleanup;
@@ -17689,6 +17726,18 @@ qemuDomainBackupBegin(virDomainPtr domain, const c=
har *diskXml,
if (!(def =3D virDomainBackupDefParseString(diskXml, driver->xmlopt, 0=
)))
goto cleanup;
+ if (checkpointXml) {
+ if (!(chkdef =3D qemuDomainCheckpointDefParseString(driver, caps,
+ checkpointXml,
+ NULL, 0)) ||
+ VIR_STRDUP(suffix, chkdef->parent.name) < 0)
+ goto cleanup;
+ } else {
+ gettimeofday(&tv, NULL);
+ if (virAsprintf(&suffix, "%lld", (long long)tv.tv_sec) < 0)
+ goto cleanup;
+ }
+
push =3D def->type =3D=3D VIR_DOMAIN_BACKUP_TYPE_PUSH;
if (!push) {
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_NBD_BITMAP)) {
@@ -17726,15 +17775,25 @@ qemuDomainBackupBegin(virDomainPtr domain, const =
char *diskXml,
goto cleanup;
}
}
+ current =3D virDomainCheckpointGetCurrent(vm->checkpoints);
if (def->incremental) {
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BITMAP_MERGE)) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("qemu binary lacks persistent bitmaps support=
"));
goto cleanup;
}
- virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
- _("cannot create incremental backups yet"));
- goto cleanup;
+ for (other =3D current; other;
+ other =3D other->def->parent_name ?
+ virDomainCheckpointFindByName(vm->checkpoints,
+ other->def->parent_name) : =
NULL)
+ if (STREQ(other->def->name, def->incremental))
+ break;
+ if (!other) {
+ virReportError(VIR_ERR_OPERATION_INVALID,
+ _("could not locate checkpoint '%s' for increme=
ntal backup"),
+ def->incremental);
+ goto cleanup;
+ }
}
if (!(qemuImgPath =3D qemuFindQemuImgBinary(driver)))
@@ -17750,14 +17809,38 @@ qemuDomainBackupBegin(virDomainPtr domain, const =
char *diskXml,
goto endjob;
}
+ if (chkdef) {
+ if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BITMAP_MERGE)) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+ _("qemu binary lacks persistent bitmaps support=
"));
+ goto endjob;
+ }
+
+ if (qemuDomainCheckpointPrepare(driver, caps, vm, chkdef) < 0)
+ goto endjob;
+ if (!(chk =3D virDomainCheckpointAssignDef(vm->checkpoints, chkdef=
)))
+ goto endjob;
+ chkdef =3D NULL;
+ if (current) {
+ parent =3D current;
+ if (VIR_STRDUP(chk->def->parent_name, parent->def->name) < 0)
+ goto endjob;
+ if (qemuDomainCheckpointWriteMetadata(vm, parent, driver->caps,
+ driver->xmlopt,
+ cfg->checkpointDir) < 0)
+ goto endjob;
+ }
+ }
+
if (virDomainBackupAlignDisks(def, vm->def, suffix) < 0 ||
- qemuDomainBackupPrepare(driver, vm, def) < 0)
+ qemuDomainBackupPrepare(driver, vm, def, chk) < 0)
goto endjob;
/* actually start the checkpoint. 2x2 array of push/pull, full/incr,
plus additional tweak if checkpoint requested */
qemuDomainObjEnterMonitor(driver, vm);
- /* - push/pull: blockdev-add per */
+ /* - push/pull: blockdev-add per
+ - incr: bitmap-add of tmp, bitmap-merge per */
for (i =3D 0; i < def->ndisks; i++) {
virDomainBackupDiskDef *disk =3D &def->disks[i];
virJSONValuePtr file;
@@ -17823,11 +17906,32 @@ qemuDomainBackupBegin(virDomainPtr domain, const =
char *diskXml,
goto endmon;
json =3D NULL;
disk->state =3D VIR_DOMAIN_BACKUP_DISK_STATE_READY;
+
+ if (def->incremental) {
+ if (!(arr =3D virJSONValueNewArray()))
+ goto endmon;
+ if (qemuMonitorAddBitmap(priv->mon, node, disk->name, false) <=
0) {
+ virJSONValueFree(arr);
+ goto endmon;
+ }
+ disk->state =3D VIR_DOMAIN_BACKUP_DISK_STATE_BITMAP;
+ for (other =3D parent ? parent : current; other;
+ other =3D other->def->parent_name ?
+ virDomainCheckpointFindByName(vm->checkpoints,
+ other->def->parent_name=
) : NULL) {
+ if (virJSONValueArrayAppendString(arr, other->def->name) <=
0) {
+ virJSONValueFree(arr);
+ goto endmon;
+ }
+ if (STREQ(other->def->name, def->incremental))
+ break;
+ }
+ if (qemuMonitorMergeBitmaps(priv->mon, node, disk->name, &arr)=
< 0)
+ goto endmon;
+ }
}
- /* TODO:
- - incr: bitmap-add of tmp, bitmap-merge per
- - transaction, containing:
+ /* - transaction, containing:
- push+full: blockdev-backup sync:full
- push+incr: blockdev-backup sync:incremental bitmap:tmp
- pull+full: blockdev-backup sync:none
@@ -17836,24 +17940,61 @@ qemuDomainBackupBegin(virDomainPtr domain, const =
char *diskXml,
*/
if (!(json =3D virJSONValueNewArray()))
goto endmon;
+ if (push)
+ mode =3D def->incremental ? "incremental" : "full";
+ else
+ mode =3D "none";
for (i =3D 0; i < def->ndisks; i++) {
virDomainBackupDiskDef *disk =3D &def->disks[i];
- virStorageSourcePtr src =3D vm->def->disks[disk->idx]->src;
+ const char *node;
+ const char *push_bitmap =3D NULL;
if (!disk->store)
continue;
+ node =3D qemuBlockNodeLookup(vm, disk->name);
+ if (push && def->incremental)
+ push_bitmap =3D disk->name;
if (qemuMonitorJSONTransactionAdd(json,
"blockdev-backup",
- "s:device", src->nodeformat,
+ "s:device", node,
"s:target", disk->store->nodefor=
mat,
- "s:sync", push ? "full" : "none",
+ "s:sync", mode,
+ "S:bitmap", push_bitmap,
"s:job-id", disk->name,
NULL) < 0)
goto endmon;
+ if (def->incremental && !push &&
+ qemuMonitorJSONTransactionAdd(json,
+ "block-dirty-bitmap-disable",
+ "s:node", node,
+ "s:name", disk->name,
+ NULL) < 0)
+ goto endmon;
}
+ if (chk && qemuDomainCheckpointAddActions(vm, json, parent,
+ virDomainCheckpointObjGetDef=
(chk)) < 0)
+ goto endmon;
if (qemuMonitorTransaction(priv->mon, &json) < 0)
goto endmon;
job_started =3D true;
+ if (chk) {
+ if (qemuDomainCheckpointWriteMetadata(vm, chk, driver->caps,
+ driver->xmlopt,
+ cfg->checkpointDir) < 0) {
+ virReportError(VIR_ERR_INTERNAL_ERROR,
+ _("unable to save metadata for checkpoint %s"),
+ chk->def->name);
+ virDomainCheckpointObjListRemove(vm->checkpoints, chk);
+ goto endmon;
+ }
+ virDomainCheckpointSetCurrent(vm->checkpoints, chk);
+ other =3D virDomainCheckpointFindByName(vm->checkpoints,
+ chk->def->parent_name);
+ chk->parent =3D other;
+ other->nchildren++;
+ chk->sibling =3D other->first_child;
+ other->first_child =3D chk;
+ }
/*
- pull: nbd-server-start with from user (or autogenerate s=
erver)
@@ -17895,7 +18036,7 @@ qemuDomainBackupBegin(virDomainPtr domain, const ch=
ar *diskXml,
if (job_started &&
qemuMonitorBlockJobCancel(priv->mon, disk->name) < 0)
VIR_WARN("Unable to stop backup job %s on vm %s after fail=
ure",
- disk->store->nodeformat, vm->def->name);
+ disk->name, vm->def->name);
qemuDomainBackupDiskCleanup(driver, vm, disk, push,
!!def->incremental, false);
}
@@ -17918,6 +18059,7 @@ qemuDomainBackupBegin(virDomainPtr domain, const ch=
ar *diskXml,
qemuDomainObjEndJob(driver, vm);
cleanup:
+ virJSONValueFree(arr);
virCommandFree(cmd);
VIR_FREE(suffix);
virJSONValueFree(json);
--=20
2.20.1
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list