From: Jiri Denemark
To: libvir-list@redhat.com
Date: Wed, 4 Apr 2018 16:41:51 +0200
Message-Id: <313f4e2e7016559845fe72964e7180038aa8ef35.1522852107.git.jdenemar@redhat.com>
Subject: [libvirt] [PATCH 62/68] qemu: Store API flags for async jobs in qemuDomainJobObj

When an async job is running, we sometimes need to know how it was
started so that we can distinguish between several types of the job,
e.g., post-copy vs. normal migration. So far we have been adding a
specific bool item to qemuDomainJobObj for each such case, which
doesn't scale very well, and storing such bools in the status XML
would be painful, so we didn't do it. A better approach is to store
the flags passed to the API which started the async job; these can
easily be saved in the status XML.
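To make the pattern concrete, here is a minimal standalone C sketch
(illustration only, not libvirt code; exampleJobObj,
EXAMPLE_MIGRATE_POSTCOPY and the helper functions are made-up names):
the job object remembers the API flags it was started with, and
per-feature questions are answered from that single value instead of
from one bool per feature.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for an API flag such as VIR_MIGRATE_POSTCOPY. */
    #define EXAMPLE_MIGRATE_POSTCOPY (1UL << 0)

    /* Stand-in for qemuDomainJobObj: keep the flags the job was started
     * with instead of adding one bool per interesting flag. */
    struct exampleJobObj {
        int asyncJob;            /* which async job is running */
        unsigned long apiFlags;  /* flags passed to the API that started it */
    };

    static void
    exampleJobStart(struct exampleJobObj *job, int asyncJob,
                    unsigned long apiFlags)
    {
        job->asyncJob = asyncJob;
        job->apiFlags = apiFlags;  /* one value, trivial to serialize later */
    }

    static bool
    exampleJobIsPostcopy(const struct exampleJobObj *job)
    {
        /* replaces a dedicated 'bool postcopy' member */
        return !!(job->apiFlags & EXAMPLE_MIGRATE_POSTCOPY);
    }

    int main(void)
    {
        struct exampleJobObj job = { 0, 0 };

        exampleJobStart(&job, 1, EXAMPLE_MIGRATE_POSTCOPY);
        printf("post-copy requested: %d\n", exampleJobIsPostcopy(&job));
        return 0;
    }

A single flags word also keeps the status XML format stable: new
distinctions added later reuse the same stored value instead of
requiring new XML elements.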
Signed-off-by: Jiri Denemark
---
 src/qemu/qemu_domain.c    | 10 ++++++++--
 src/qemu/qemu_domain.h    |  4 +++-
 src/qemu/qemu_driver.c    | 31 +++++++++++++++++++------------
 src/qemu/qemu_migration.c | 20 +++++++++++++-------
 src/qemu/qemu_process.c   |  5 +++--
 src/qemu/qemu_process.h   |  3 ++-
 6 files changed, 48 insertions(+), 25 deletions(-)

diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index cef08343ea..3f3a49f064 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -339,6 +339,7 @@ qemuDomainObjResetAsyncJob(qemuDomainObjPrivatePtr priv)
     VIR_FREE(job->current);
     qemuMigrationParamsFree(job->migParams);
     job->migParams = NULL;
+    job->apiFlags = 0;
 }
 
 void
@@ -354,6 +355,7 @@ qemuDomainObjRestoreJob(virDomainObjPtr obj,
     job->asyncOwner = priv->job.asyncOwner;
     job->phase = priv->job.phase;
     VIR_STEAL_PTR(job->migParams, priv->job.migParams);
+    job->apiFlags = priv->job.apiFlags;
 
     qemuDomainObjResetJob(priv);
     qemuDomainObjResetAsyncJob(priv);
@@ -5788,7 +5790,8 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver,
         asyncDuration = now - priv->job.asyncStarted;
 
     VIR_WARN("Cannot start job (%s, %s) for domain %s; "
-             "current job is (%s, %s) owned by (%llu %s, %llu %s) "
+             "current job is (%s, %s) "
+             "owned by (%llu %s, %llu %s (flags=0x%lx)) "
              "for (%llus, %llus)",
              qemuDomainJobTypeToString(job),
              qemuDomainAsyncJobTypeToString(asyncJob),
@@ -5797,6 +5800,7 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver,
              qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
              priv->job.owner, NULLSTR(priv->job.ownerAPI),
              priv->job.asyncOwner, NULLSTR(priv->job.asyncOwnerAPI),
+             priv->job.apiFlags,
              duration / 1000, asyncDuration / 1000);
 
     if (nested || qemuDomainNestedJobAllowed(priv, job))
@@ -5860,7 +5864,8 @@ int qemuDomainObjBeginJob(virQEMUDriverPtr driver,
 int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
                                virDomainObjPtr obj,
                                qemuDomainAsyncJob asyncJob,
-                               virDomainJobOperation operation)
+                               virDomainJobOperation operation,
+                               unsigned long apiFlags)
 {
     qemuDomainObjPrivatePtr priv;
 
@@ -5870,6 +5875,7 @@ int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
 
     priv = obj->privateData;
     priv->job.current->operation = operation;
+    priv->job.apiFlags = apiFlags;
     return 0;
 }
 
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 2bb3e0a788..2146ff00a9 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -182,6 +182,7 @@ struct _qemuDomainJobObj {
     bool dumpCompleted;                 /* dump completed */
 
     qemuMigrationParamsPtr migParams;
+    unsigned long apiFlags; /* flags passed to the API which started the async job */
 };
 
 typedef void (*qemuDomainCleanupCallback)(virQEMUDriverPtr driver,
@@ -493,7 +494,8 @@ int qemuDomainObjBeginJob(virQEMUDriverPtr driver,
 int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
                                virDomainObjPtr obj,
                                qemuDomainAsyncJob asyncJob,
-                               virDomainJobOperation operation)
+                               virDomainJobOperation operation,
+                               unsigned long apiFlags)
     ATTRIBUTE_RETURN_CHECK;
 int qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver,
                                 virDomainObjPtr obj,
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 93dec7c2ce..e15eb49a5c 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -267,7 +267,7 @@ qemuAutostartDomain(virDomainObjPtr vm,
     if (vm->autostart &&
         !virDomainObjIsActive(vm)) {
         if (qemuProcessBeginJob(driver, vm,
-                                VIR_DOMAIN_JOB_OPERATION_START) < 0) {
+                                VIR_DOMAIN_JOB_OPERATION_START, flags) < 0) {
             virReportError(VIR_ERR_INTERNAL_ERROR,
                            _("Failed to start job on VM '%s': %s"),
                            vm->def->name, virGetLastErrorMessage());
@@ -1779,7 +1779,8 @@ static virDomainPtr qemuDomainCreateXML(virConnectPtr conn,
     virObjectRef(vm);
     def = NULL;
 
-    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_START) < 0) {
+    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_START,
+                            flags) < 0) {
         qemuDomainRemoveInactiveJob(driver, vm);
         goto cleanup;
     }
@@ -3345,7 +3346,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver,
         goto cleanup;
 
     if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SAVE,
-                                   VIR_DOMAIN_JOB_OPERATION_SAVE) < 0)
+                                   VIR_DOMAIN_JOB_OPERATION_SAVE, flags) < 0)
         goto cleanup;
 
     if (!virDomainObjIsActive(vm)) {
@@ -3953,7 +3954,8 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
 
     if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_DUMP,
-                                   VIR_DOMAIN_JOB_OPERATION_DUMP) < 0)
+                                   VIR_DOMAIN_JOB_OPERATION_DUMP,
+                                   flags) < 0)
         goto cleanup;
 
     if (!virDomainObjIsActive(vm)) {
@@ -4178,7 +4180,8 @@ processWatchdogEvent(virQEMUDriverPtr driver,
     case VIR_DOMAIN_WATCHDOG_ACTION_DUMP:
         if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_DUMP,
-                                       VIR_DOMAIN_JOB_OPERATION_DUMP) < 0) {
+                                       VIR_DOMAIN_JOB_OPERATION_DUMP,
+                                       flags) < 0) {
             goto cleanup;
         }
 
@@ -4264,9 +4267,10 @@ processGuestPanicEvent(virQEMUDriverPtr driver,
     virObjectEventPtr event = NULL;
     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
     bool removeInactive = false;
+    unsigned long flags = VIR_DUMP_MEMORY_ONLY;
 
     if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_DUMP,
-                                   VIR_DOMAIN_JOB_OPERATION_DUMP) < 0)
+                                   VIR_DOMAIN_JOB_OPERATION_DUMP, flags) < 0)
         goto cleanup;
 
     if (!virDomainObjIsActive(vm)) {
@@ -4297,7 +4301,7 @@ processGuestPanicEvent(virQEMUDriverPtr driver,
 
     switch (action) {
     case VIR_DOMAIN_LIFECYCLE_ACTION_COREDUMP_DESTROY:
-        if (doCoreDumpToAutoDumpPath(driver, vm, VIR_DUMP_MEMORY_ONLY) < 0)
+        if (doCoreDumpToAutoDumpPath(driver, vm, flags) < 0)
             goto endjob;
         ATTRIBUTE_FALLTHROUGH;
 
@@ -4314,7 +4318,7 @@ processGuestPanicEvent(virQEMUDriverPtr driver,
         break;
 
     case VIR_DOMAIN_LIFECYCLE_ACTION_COREDUMP_RESTART:
-        if (doCoreDumpToAutoDumpPath(driver, vm, VIR_DUMP_MEMORY_ONLY) < 0)
+        if (doCoreDumpToAutoDumpPath(driver, vm, flags) < 0)
             goto endjob;
         ATTRIBUTE_FALLTHROUGH;
 
@@ -6769,7 +6773,8 @@ qemuDomainRestoreFlags(virConnectPtr conn,
         priv->hookRun = true;
     }
 
-    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_RESTORE) < 0)
+    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_RESTORE,
+                            flags) < 0)
         goto cleanup;
 
     ret = qemuDomainSaveImageStartVM(conn, driver, vm, &fd, data, path,
@@ -7357,7 +7362,8 @@ qemuDomainCreateWithFlags(virDomainPtr dom, unsigned int flags)
     if (virDomainCreateWithFlagsEnsureACL(dom->conn, vm->def) < 0)
         goto cleanup;
 
-    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_START) < 0)
+    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_START,
+                            flags) < 0)
         goto cleanup;
 
     if (virDomainObjIsActive(vm)) {
@@ -15208,7 +15214,7 @@ qemuDomainSnapshotCreateXML(virDomainPtr domain,
      * 'savevm' blocks the monitor. External snapshot will then modify the
      * job mask appropriately. */
     if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SNAPSHOT,
-                                   VIR_DOMAIN_JOB_OPERATION_SNAPSHOT) < 0)
+                                   VIR_DOMAIN_JOB_OPERATION_SNAPSHOT, flags) < 0)
         goto cleanup;
 
     qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
@@ -15807,7 +15813,8 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
     }
 
     if (qemuProcessBeginJob(driver, vm,
-                            VIR_DOMAIN_JOB_OPERATION_SNAPSHOT_REVERT) < 0)
+                            VIR_DOMAIN_JOB_OPERATION_SNAPSHOT_REVERT,
+                            flags) < 0)
         goto cleanup;
 
     if (!(snap = qemuSnapObjFromSnapshot(vm, snapshot)))
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 2a0431ea6f..3ce180dd39 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -80,7 +80,8 @@ VIR_ENUM_IMPL(qemuMigrationJobPhase, QEMU_MIGRATION_PHASE_LAST,
 static int
 qemuMigrationJobStart(virQEMUDriverPtr driver,
                       virDomainObjPtr vm,
-                      qemuDomainAsyncJob job)
+                      qemuDomainAsyncJob job,
+                      unsigned long apiFlags)
     ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_RETURN_CHECK;
 
 static void
@@ -1984,7 +1985,8 @@ qemuMigrationSrcBegin(virConnectPtr conn,
     qemuDomainAsyncJob asyncJob;
 
     if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
-        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+                                  flags) < 0)
             goto cleanup;
         asyncJob = QEMU_ASYNC_JOB_MIGRATION_OUT;
     } else {
@@ -2317,7 +2319,8 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver,
                                          !!(flags & VIR_MIGRATE_NON_SHARED_INC)) < 0)
         goto cleanup;
 
-    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+                              flags) < 0)
         goto cleanup;
     qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PREPARE);
 
@@ -4433,7 +4436,8 @@ qemuMigrationSrcPerformJob(virQEMUDriverPtr driver,
     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
     qemuDomainObjPrivatePtr priv = vm->privateData;
 
-    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+                              flags) < 0)
         goto cleanup;
 
     if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
@@ -4545,7 +4549,8 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver,
 
     /* If we didn't start the job in the begin phase, start it now. */
     if (!(flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
-        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+                                  flags) < 0)
             goto cleanup;
     } else if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) {
         goto cleanup;
@@ -5282,7 +5287,8 @@ qemuMigrationSrcCancel(virQEMUDriverPtr driver,
 static int
 qemuMigrationJobStart(virQEMUDriverPtr driver,
                       virDomainObjPtr vm,
-                      qemuDomainAsyncJob job)
+                      qemuDomainAsyncJob job,
+                      unsigned long apiFlags)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
     virDomainJobOperation op;
@@ -5298,7 +5304,7 @@ qemuMigrationJobStart(virQEMUDriverPtr driver,
                JOB_MASK(QEMU_JOB_MIGRATION_OP);
     }
 
-    if (qemuDomainObjBeginAsyncJob(driver, vm, job, op) < 0)
+    if (qemuDomainObjBeginAsyncJob(driver, vm, job, op, apiFlags) < 0)
         return -1;
 
     priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION;
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index c4d3f67d19..89669c9765 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -4185,10 +4185,11 @@ qemuProcessIncomingDefNew(virQEMUCapsPtr qemuCaps,
 int
 qemuProcessBeginJob(virQEMUDriverPtr driver,
                     virDomainObjPtr vm,
-                    virDomainJobOperation operation)
+                    virDomainJobOperation operation,
+                    unsigned long apiFlags)
 {
     if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_START,
-                                   operation) < 0)
+                                   operation, apiFlags) < 0)
         return -1;
 
     qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h
index 2741115673..9dd5c97642 100644
--- a/src/qemu/qemu_process.h
+++ b/src/qemu/qemu_process.h
@@ -68,7 +68,8 @@ void qemuProcessIncomingDefFree(qemuProcessIncomingDefPtr inc);
 
 int qemuProcessBeginJob(virQEMUDriverPtr driver,
                         virDomainObjPtr vm,
-                        virDomainJobOperation operation);
+                        virDomainJobOperation operation,
+                        unsigned long apiFlags);
 void qemuProcessEndJob(virQEMUDriverPtr driver,
                        virDomainObjPtr vm);
 
-- 
2.17.0

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list