From: Peter Krempa
To: libvir-list@redhat.com
Date: Wed, 24 Jul 2019 23:07:35 +0200
Message-Id: <6e200c76e51b8589aceecb3eeb0e3dbebf650b28.1564002117.git.pkrempa@redhat.com>
Subject: [libvirt] [PATCH 8/9] qemu: Add -blockdev support for block pull job
Introduce the handler for finalizing a block pull job, which will allow
using it with -blockdev. This patch also contains some additional machinery
required to store all the relevant job data in the status XML, which will
also be reused by other block job types.

Signed-off-by: Peter Krempa
Reviewed-by: Ján Tomko
---
 src/qemu/qemu_blockjob.c                      | 191 +++++++++++++++++-
 src/qemu/qemu_blockjob.h                      |  18 ++
 src/qemu/qemu_domain.c                        |  77 +++++++
 src/qemu/qemu_driver.c                        |  33 ++-
 .../blockjob-blockdev-in.xml                  |   4 +
 5 files changed, 313 insertions(+), 10 deletions(-)

diff --git a/src/qemu/qemu_blockjob.c b/src/qemu/qemu_blockjob.c
index 0c0ae89f10..a29af7ec48 100644
--- a/src/qemu/qemu_blockjob.c
+++ b/src/qemu/qemu_blockjob.c
@@ -26,6 +26,7 @@
 #include "qemu_blockjob.h"
 #include "qemu_block.h"
 #include "qemu_domain.h"
+#include "qemu_alias.h"

 #include "conf/domain_conf.h"
 #include "conf/domain_event.h"
@@ -207,6 +208,35 @@ qemuBlockJobDiskNew(virDomainObjPtr vm,
 }


+qemuBlockJobDataPtr
+qemuBlockJobDiskNewPull(virDomainObjPtr vm,
+                        virDomainDiskDefPtr disk,
+                        virStorageSourcePtr base)
+{
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+    VIR_AUTOUNREF(qemuBlockJobDataPtr) job = NULL;
+    VIR_AUTOFREE(char *) jobname = NULL;
+
+    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV)) {
+        if (virAsprintf(&jobname, "pull-%s-%s", disk->dst, disk->src->nodeformat) < 0)
+            return NULL;
+    } else {
+        if (!(jobname = qemuAliasDiskDriveFromDisk(disk)))
+            return NULL;
+    }
+
+    if (!(job = qemuBlockJobDataNew(QEMU_BLOCKJOB_TYPE_PULL, jobname)))
+        return NULL;
+
+    job->data.pull.base = base;
+
+    if (qemuBlockJobRegister(job, vm, disk, true) < 0)
+        return NULL;
+
+    VIR_RETURN_PTR(job);
+}
+
+
 /**
  * qemuBlockJobDiskRegisterMirror:
  * @job: block job to register 'mirror' chain on
@@ -630,16 +660,175 @@ qemuBlockJobEventProcessConcludedRemoveChain(virQEMUDriverPtr driver,
 }


+/**
+ * qemuBlockJobGetConfigDisk:
+ * @vm: domain object
+ * @disk: disk from the running definition
+ * @diskChainBottom: the last element of backing chain of @disk which is relevant
+ *
+ * Finds and returns the disk corresponding to @disk in the inactive definition.
+ * The inactive disk must have the backing chain starting from the source until
+ * @diskChainBottom identical. If @diskChainBottom is NULL the whole backing
+ * chains of both @disk and the persistent config definition equivalent must
+ * be identical.
+ */
+static virDomainDiskDefPtr
+qemuBlockJobGetConfigDisk(virDomainObjPtr vm,
+                          virDomainDiskDefPtr disk,
+                          virStorageSourcePtr diskChainBottom)
+{
+    virStorageSourcePtr disksrc = NULL;
+    virStorageSourcePtr cfgsrc = NULL;
+    virDomainDiskDefPtr ret = NULL;
+
+    if (!vm->newDef || !disk)
+        return NULL;
+
+    disksrc = disk->src;
+
+    if (!(ret = virDomainDiskByName(vm->newDef, disk->dst, false)))
+        return NULL;
+
+    cfgsrc = ret->src;
+
+    while (disksrc && cfgsrc) {
+        if (!virStorageSourceIsSameLocation(disksrc, cfgsrc))
+            return NULL;
+
+        if (diskChainBottom && diskChainBottom == disksrc)
+            return ret;
+
+        disksrc = disksrc->backingStore;
+        cfgsrc = cfgsrc->backingStore;
+    }
+
+    if (disksrc || cfgsrc)
+        return NULL;
+
+    return ret;
+}
+
+
+/**
+ * qemuBlockJobClearConfigChain:
+ * @vm: domain object
+ * @disk: disk object from running definition of @vm
+ *
+ * In cases when the backing chain definitions of the live disk differ from
+ * the definition for the next start config and the backing chain would touch
+ * it, we'd not be able to restore the chain in the next start config properly.
+ *
+ * This function checks that the source of the running disk definition and the
+ * config disk definition are the same, and if so it clears the backing chain
+ * data.
+ */
+static void
+qemuBlockJobClearConfigChain(virDomainObjPtr vm,
+                             virDomainDiskDefPtr disk)
+{
+    virDomainDiskDefPtr cfgdisk = NULL;
+
+    if (!vm->newDef || !disk)
+        return;
+
+    if (!(cfgdisk = virDomainDiskByName(vm->newDef, disk->dst, false)))
+        return;
+
+    if (!virStorageSourceIsSameLocation(disk->src, cfgdisk->src))
+        return;
+
+    virObjectUnref(cfgdisk->src->backingStore);
+    cfgdisk->src->backingStore = NULL;
+}
+
+
+/**
+ * qemuBlockJobProcessEventCompletedPull:
+ * @driver: qemu driver object
+ * @vm: domain object
+ * @job: job data
+ * @asyncJob: qemu asynchronous job type (for monitor interaction)
+ *
+ * This function executes the finalizing steps after a successful block pull job
+ * (block-stream in qemu terminology). The pull job copies all the data from the
+ * images in the backing chain up to the 'base' image. The 'base' image becomes
+ * the backing store of the active top level image. If 'base' was not used,
+ * everything is pulled into the top level image and the top level image will
+ * cease to have a backing store. All intermediate images between the active image
+ * and the base image are no longer required and can be unplugged.
+ */
+static void
+qemuBlockJobProcessEventCompletedPull(virQEMUDriverPtr driver,
+                                      virDomainObjPtr vm,
+                                      qemuBlockJobDataPtr job,
+                                      qemuDomainAsyncJob asyncJob)
+{
+    virStorageSourcePtr baseparent = NULL;
+    virDomainDiskDefPtr cfgdisk = NULL;
+    virStorageSourcePtr cfgbase = NULL;
+    virStorageSourcePtr cfgbaseparent = NULL;
+    virStorageSourcePtr n;
+    virStorageSourcePtr tmp;
+
+    VIR_DEBUG("pull job '%s' on VM '%s' completed", job->name, vm->def->name);
+
+    /* if the job isn't associated with a disk there's nothing to do */
+    if (!job->disk)
+        return;
+
+    if ((cfgdisk = qemuBlockJobGetConfigDisk(vm, job->disk, job->data.pull.base)))
+        cfgbase = cfgdisk->src->backingStore;
+
+    if (!cfgdisk)
+        qemuBlockJobClearConfigChain(vm, job->disk);
+
+    /* when pulling if 'base' is right below the top image we don't have to modify it */
+    if (job->disk->src->backingStore == job->data.pull.base)
+        return;
+
+    if (job->data.pull.base) {
+        for (n = job->disk->src->backingStore; n && n != job->data.pull.base; n = n->backingStore) {
+            /* find the image on top of 'base' */
+
+            if (cfgbase) {
+                cfgbaseparent = cfgbase;
+                cfgbase = cfgbase->backingStore;
+            }
+
+            baseparent = n;
+        }
+    }
+
+    tmp = job->disk->src->backingStore;
+    job->disk->src->backingStore = job->data.pull.base;
+    if (baseparent)
+        baseparent->backingStore = NULL;
+    qemuBlockJobEventProcessConcludedRemoveChain(driver, vm, asyncJob, tmp);
+    virObjectUnref(tmp);
+
+    if (cfgdisk) {
+        tmp = cfgdisk->src->backingStore;
+        cfgdisk->src->backingStore = cfgbase;
+        if (cfgbaseparent)
+            cfgbaseparent->backingStore = NULL;
+        virObjectUnref(tmp);
+    }
+}
+
+
 static void
 qemuBlockJobEventProcessConcludedTransition(qemuBlockJobDataPtr job,
                                             virQEMUDriverPtr driver,
                                             virDomainObjPtr vm,
-                                            qemuDomainAsyncJob asyncJob ATTRIBUTE_UNUSED)
+                                            qemuDomainAsyncJob asyncJob)
 {
     switch ((qemuBlockjobState) job->newstate) {
     case QEMU_BLOCKJOB_STATE_COMPLETED:
         switch ((qemuBlockJobType) job->type) {
         case QEMU_BLOCKJOB_TYPE_PULL:
+            qemuBlockJobProcessEventCompletedPull(driver, vm, job, asyncJob);
+            break;
+
         case QEMU_BLOCKJOB_TYPE_COMMIT:
         case QEMU_BLOCKJOB_TYPE_ACTIVE_COMMIT:
         case QEMU_BLOCKJOB_TYPE_COPY:
diff --git a/src/qemu/qemu_blockjob.h b/src/qemu/qemu_blockjob.h
index 3299207610..d5848fb72c 100644
--- a/src/qemu/qemu_blockjob.h
+++ b/src/qemu/qemu_blockjob.h
@@ -68,6 +68,15 @@ verify((int)QEMU_BLOCKJOB_TYPE_INTERNAL == VIR_DOMAIN_BLOCK_JOB_TYPE_LAST);

 VIR_ENUM_DECL(qemuBlockjob);

+
+typedef struct _qemuBlockJobPullData qemuBlockJobPullData;
+typedef qemuBlockJobPullData *qemuBlockJobDataPullPtr;
+
+struct _qemuBlockJobPullData {
+    virStorageSourcePtr base;
+};
+
+
 typedef struct _qemuBlockJobData qemuBlockJobData;
 typedef qemuBlockJobData *qemuBlockJobDataPtr;

@@ -80,6 +89,10 @@ struct _qemuBlockJobData {
     virStorageSourcePtr chain; /* Reference to the chain the job operates on. */
     virStorageSourcePtr mirrorChain; /* reference to 'mirror' part of the job */

+    union {
+        qemuBlockJobPullData pull;
+    } data;
+
     int type; /* qemuBlockJobType */
     int state; /* qemuBlockjobState */
     char *errmsg;
@@ -114,6 +127,11 @@ void qemuBlockJobDiskRegisterMirror(qemuBlockJobDataPtr job)
     ATTRIBUTE_NONNULL(1);

+qemuBlockJobDataPtr
+qemuBlockJobDiskNewPull(virDomainObjPtr vm,
+                        virDomainDiskDefPtr disk,
+                        virStorageSourcePtr base);
+
 qemuBlockJobDataPtr
 qemuBlockJobDiskGetJob(virDomainDiskDefPtr disk)
     ATTRIBUTE_NONNULL(1);
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index c508f55287..ec1dda4870 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -2390,6 +2390,21 @@ qemuDomainObjPrivateXMLFormatBlockjobIterator(void *payload,
         return -1;
     }

+    switch ((qemuBlockJobType) job->type) {
+    case QEMU_BLOCKJOB_TYPE_PULL:
+        if (job->data.pull.base)
+            virBufferAsprintf(&childBuf, "<base node='%s'/>\n", job->data.pull.base->nodeformat);
+        break;
+
+    case QEMU_BLOCKJOB_TYPE_COMMIT:
+    case QEMU_BLOCKJOB_TYPE_ACTIVE_COMMIT:
+    case QEMU_BLOCKJOB_TYPE_COPY:
+    case QEMU_BLOCKJOB_TYPE_NONE:
+    case QEMU_BLOCKJOB_TYPE_INTERNAL:
+    case QEMU_BLOCKJOB_TYPE_LAST:
+        break;
+    }
+
     return virXMLFormatElement(data->buf, "blockjob", &attrBuf, &childBuf);
 }

@@ -2793,6 +2808,64 @@ qemuDomainObjPrivateXMLParseBlockjobChain(xmlNodePtr node,
 }


+static void
+qemuDomainObjPrivateXMLParseBlockjobNodename(qemuBlockJobDataPtr job,
+                                             const char *xpath,
+                                             virStorageSourcePtr *src,
+                                             xmlXPathContextPtr ctxt)
+{
+    VIR_AUTOFREE(char *) nodename = NULL;
+
+    *src = NULL;
+
+    if (!(nodename = virXPathString(xpath, ctxt)))
+        return;
+
+    if (job->disk &&
+        (*src = virStorageSourceFindByNodeName(job->disk->src, nodename, NULL)))
+        return;
+
+    if (job->chain &&
+        (*src = virStorageSourceFindByNodeName(job->chain, nodename, NULL)))
+        return;
+
+    if (job->mirrorChain &&
+        (*src = virStorageSourceFindByNodeName(job->mirrorChain, nodename, NULL)))
+        return;
+
+    /* the node was in the XML but was not found in the job definitions */
+    VIR_DEBUG("marking block job '%s' as invalid: node name '%s' missing",
+              job->name, nodename);
+    job->invalidData = true;
+}
+
+
+static void
+qemuDomainObjPrivateXMLParseBlockjobDataSpecific(qemuBlockJobDataPtr job,
+                                                 xmlXPathContextPtr ctxt)
+{
+    switch ((qemuBlockJobType) job->type) {
+    case QEMU_BLOCKJOB_TYPE_PULL:
+        qemuDomainObjPrivateXMLParseBlockjobNodename(job,
+                                                     "string(./base/@node)",
+                                                     &job->data.pull.base,
+                                                     ctxt);
+        /* base is not present if pulling everything */
+        break;
+
+    case QEMU_BLOCKJOB_TYPE_COMMIT:
+    case QEMU_BLOCKJOB_TYPE_ACTIVE_COMMIT:
+    case QEMU_BLOCKJOB_TYPE_COPY:
+    case QEMU_BLOCKJOB_TYPE_NONE:
+    case QEMU_BLOCKJOB_TYPE_INTERNAL:
+    case QEMU_BLOCKJOB_TYPE_LAST:
+        break;
+    }
+
+    return;
+}
+
+
 static int
 qemuDomainObjPrivateXMLParseBlockjobData(virDomainObjPtr vm,
                                          xmlNodePtr node,
@@ -2863,10 +2936,14 @@ qemuDomainObjPrivateXMLParseBlockjobData(virDomainObjPtr vm,
     job->errmsg = virXPathString("string(./errmsg)", ctxt);
     job->invalidData = invalidData;
     job->disk = disk;
+    if (invalidData)
+        VIR_DEBUG("marking block job '%s' as invalid: basic data broken", job->name);

     if (mirror)
         qemuBlockJobDiskRegisterMirror(job);

+    qemuDomainObjPrivateXMLParseBlockjobDataSpecific(job, ctxt);
+
     if (qemuBlockJobRegister(job, vm, disk, false) < 0)
         return -1;

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index b6d705b679..705c1a06c0 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -17040,7 +17040,8 @@ qemuDomainBlockPullCommon(virQEMUDriverPtr driver,
                           unsigned int flags)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
-    VIR_AUTOFREE(char *) device = NULL;
+    const char *device = NULL;
+    const char *jobname = NULL;
     virDomainDiskDefPtr disk;
     virStorageSourcePtr baseSource = NULL;
     unsigned int baseIndex = 0;
@@ -17048,6 +17049,8 @@ qemuDomainBlockPullCommon(virQEMUDriverPtr driver,
     VIR_AUTOFREE(char *) backingPath = NULL;
     unsigned long long speed = bandwidth;
     qemuBlockJobDataPtr job = NULL;
+    bool persistjob = false;
+    const char *nodebase = NULL;
     int ret = -1;

     if (flags & VIR_DOMAIN_BLOCK_REBASE_RELATIVE && !base) {
@@ -17066,9 +17069,6 @@ qemuDomainBlockPullCommon(virQEMUDriverPtr driver,
     if (!(disk = qemuDomainDiskByName(vm->def, path)))
         goto endjob;

-    if (!(device = qemuAliasDiskDriveFromDisk(disk)))
-        goto endjob;
-
     if (qemuDomainDiskBlockJobIsActive(disk))
         goto endjob;

@@ -17111,16 +17111,31 @@ qemuDomainBlockPullCommon(virQEMUDriverPtr driver,
         speed <<= 20;
     }

-    if (!(job = qemuBlockJobDiskNew(vm, disk, QEMU_BLOCKJOB_TYPE_PULL, device)))
+    if (!(job = qemuBlockJobDiskNewPull(vm, disk, baseSource)))
         goto endjob;

+    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV)) {
+        jobname = job->name;
+        persistjob = true;
+        if (baseSource) {
+            nodebase = baseSource->nodeformat;
+            if (!backingPath &&
+                !(backingPath = qemuBlockGetBackingStoreString(baseSource)))
+                goto endjob;
+        }
+        device = disk->src->nodeformat;
+    } else {
+        device = job->name;
+    }
+
     qemuDomainObjEnterMonitor(driver, vm);
-    if (baseSource)
+    if (baseSource && !jobname)
         basePath = qemuMonitorDiskNameLookup(priv->mon, device, disk->src,
                                              baseSource);
-    if (!baseSource || basePath)
-        ret = qemuMonitorBlockStream(priv->mon, device, NULL, false, basePath,
-                                     NULL, backingPath, speed);
+
+    if (!baseSource || basePath || jobname)
+        ret = qemuMonitorBlockStream(priv->mon, device, jobname, persistjob, basePath,
+                                     nodebase, backingPath, speed);
     if (qemuDomainObjExitMonitor(driver, vm) < 0)
         ret = -1;
diff --git a/tests/qemustatusxml2xmldata/blockjob-blockdev-in.xml b/tests/qemustatusxml2xmldata/blockjob-blockdev-in.xml
index 7b9282d059..e962b837ac 100644
--- a/tests/qemustatusxml2xmldata/blockjob-blockdev-in.xml
+++ b/tests/qemustatusxml2xmldata/blockjob-blockdev-in.xml
@@ -234,6 +234,10 @@
+
+
+
+

-- 
2.21.0
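
For illustration only, here is a rough sketch of what a pull-job entry in the
domain status XML could look like once the new <base> sub-element is formatted.
Only the <blockjob> element name and the <base node='.../>' child follow from
the formatter and parser added above; the <blockjobs> wrapper, the
name/type/state attributes, the <disk> child and the concrete node names are
assumptions shown purely as an example:

  <blockjobs active='yes'>
    <blockjob name='pull-vda-libvirt-1-format' type='pull' state='running'>
      <disk dst='vda'/>
      <base node='libvirt-3-format'/>
    </blockjob>
  </blockjobs>

If no base was requested (the whole chain is pulled into the top image), the
<base> element is simply omitted, which matches the "base is not present if
pulling everything" comment in the parser.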