From: Jiri Denemark <jdenemar@redhat.com>
To: libvir-list@redhat.com
Date: Tue, 25 Sep 2018 15:19:05 +0200
Message-Id: <6cde01d3c1ea2460216adf3b2f766c78fada980a.1537881453.git.jdenemar@redhat.com>
Subject: [libvirt] [PATCH v2 5/5] qemu: Avoid duplicate resume events and state changes

The only place where VIR_DOMAIN_EVENT_RESUMED should be generated is the
RESUME event handler, to make sure we don't generate duplicate events or
state changes. In the worst case, a duplicate can revert or mask changes
done by other event handlers. For example, after QEMU sent RESUME,
BLOCK_IO_ERROR, and STOP events, we could happily mark the domain as
running and report VIR_DOMAIN_EVENT_RESUMED to registered clients.

https://bugzilla.redhat.com/show_bug.cgi?id=1612943
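To make the race concrete, here is a minimal self-contained sketch of the
buggy pattern (a toy model, not libvirt code; emitResumed() and handleStop()
are hypothetical stand-ins for the real event paths):

    #include <stdio.h>

    typedef enum { VM_PAUSED, VM_RUNNING } vmState;

    static vmState state = VM_PAUSED;

    /* The buggy pattern: every caller flips the state and fires the event. */
    static void emitResumed(const char *who)
    {
        state = VM_RUNNING;
        printf("RESUMED emitted by %s (state: running)\n", who);
    }

    static void handleStop(void)
    {
        state = VM_PAUSED;
        printf("STOP handled (state: paused)\n");
    }

    int main(void)
    {
        /* Sequence from above: QEMU sends RESUME, then STOP ... */
        emitResumed("RESUME handler");
        handleStop();

        /* ... and then a second code path emits RESUMED again, reverting
         * the pause that the STOP handler just recorded: */
        emitResumed("another code path");

        printf("final state: %s\n",
               state == VM_RUNNING ? "running" : "paused");
        return 0;
    }

With this patch, only the RESUME event handler performs the state change
and event emission, using the reason stored beforehand in
priv->runningReason, so other code paths can no longer race with it.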
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---

Notes:
    Version 2:
    - keep VIR_DOMAIN_EVENT_RESUMED_MIGRATED event at the end of post-copy
      migration

 src/qemu/qemu_driver.c    | 13 -----------
 src/qemu/qemu_migration.c | 49 ++++++++++++++++-----------------------
 src/qemu/qemu_process.c   | 10 ++++----
 3 files changed, 24 insertions(+), 48 deletions(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 0c1afbfd4e..126c783a0f 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -1861,7 +1861,6 @@ static int qemuDomainResume(virDomainPtr dom)
     virQEMUDriverPtr driver = dom->conn->privateData;
     virDomainObjPtr vm;
     int ret = -1;
-    virObjectEventPtr event = NULL;
     int state;
     int reason;
     virQEMUDriverConfigPtr cfg = NULL;
@@ -1900,9 +1899,6 @@ static int qemuDomainResume(virDomainPtr dom)
                            "%s", _("resume operation failed"));
             goto endjob;
         }
-        event = virDomainEventLifecycleNewFromObj(vm,
-                                                  VIR_DOMAIN_EVENT_RESUMED,
-                                                  VIR_DOMAIN_EVENT_RESUMED_UNPAUSED);
     }
     if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0)
         goto endjob;
@@ -1913,7 +1909,6 @@ static int qemuDomainResume(virDomainPtr dom)

 cleanup:
     virDomainObjEndAPI(&vm);
-    virObjectEventStateQueue(driver->domainEventState, event);
     virObjectUnref(cfg);
     return ret;
 }
@@ -15983,7 +15978,6 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
     virDomainDefPtr config = NULL;
     virQEMUDriverConfigPtr cfg = NULL;
     virCapsPtr caps = NULL;
-    bool was_running = false;
     bool was_stopped = false;
     qemuDomainSaveCookiePtr cookie;
     virCPUDefPtr origCPU = NULL;
@@ -16174,7 +16168,6 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
     priv = vm->privateData;
     if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
         /* Transitions 5, 6 */
-        was_running = true;
         if (qemuProcessStopCPUs(driver, vm,
                                 VIR_DOMAIN_PAUSED_FROM_SNAPSHOT,
                                 QEMU_ASYNC_JOB_START) < 0)
@@ -16271,12 +16264,6 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
                 event = virDomainEventLifecycleNewFromObj(vm,
                                                           VIR_DOMAIN_EVENT_STARTED,
                                                           detail);
-            } else if (!was_running) {
-                /* Transition 8 */
-                detail = VIR_DOMAIN_EVENT_RESUMED_FROM_SNAPSHOT;
-                event = virDomainEventLifecycleNewFromObj(vm,
-                                                          VIR_DOMAIN_EVENT_RESUMED,
-                                                          detail);
             }
         }
         break;
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 825a9d399b..67940330aa 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -2982,14 +2982,10 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver,
     virFreeError(orig_err);

     if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
-        reason == VIR_DOMAIN_PAUSED_POSTCOPY) {
+        reason == VIR_DOMAIN_PAUSED_POSTCOPY)
         qemuMigrationAnyPostcopyFailed(driver, vm);
-    } else if (qemuMigrationSrcRestoreDomainState(driver, vm)) {
-        event = virDomainEventLifecycleNewFromObj(vm,
-                                                  VIR_DOMAIN_EVENT_RESUMED,
-                                                  VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
-        virObjectEventStateQueue(driver->domainEventState, event);
-    }
+    else
+        qemuMigrationSrcRestoreDomainState(driver, vm);

     qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                              priv->job.migParams, priv->job.apiFlags);
@@ -4624,11 +4620,7 @@ qemuMigrationSrcPerformJob(virQEMUDriverPtr driver,
     qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                              priv->job.migParams, priv->job.apiFlags);

-    if (qemuMigrationSrcRestoreDomainState(driver, vm)) {
-        event = virDomainEventLifecycleNewFromObj(vm,
-                                                  VIR_DOMAIN_EVENT_RESUMED,
-                                                  VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
-    }
+    qemuMigrationSrcRestoreDomainState(driver, vm);

     qemuMigrationJobFinish(driver, vm);
     if (!virDomainObjIsActive(vm) && ret == 0) {
@@ -4672,7 +4664,6 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver,
                              unsigned long resource)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
-    virObjectEventPtr event = NULL;
     int ret = -1;

     /* If we didn't start the job in the begin phase, start it now. */
@@ -4694,11 +4685,7 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver,
                                           nmigrate_disks, migrate_disks, migParams);

     if (ret < 0) {
-        if (qemuMigrationSrcRestoreDomainState(driver, vm)) {
-            event = virDomainEventLifecycleNewFromObj(vm,
-                                                      VIR_DOMAIN_EVENT_RESUMED,
-                                                      VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
-        }
+        qemuMigrationSrcRestoreDomainState(driver, vm);
         goto endjob;
     }

@@ -4722,7 +4709,6 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver,

 cleanup:
     virDomainObjEndAPI(&vm);
-    virObjectEventStateQueue(driver->domainEventState, event);
     return ret;
 }

@@ -5074,13 +5060,8 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver,
             goto endjob;
         }

-        if (inPostCopy) {
+        if (inPostCopy)
             doKill = false;
-            event = virDomainEventLifecycleNewFromObj(vm,
-                                                      VIR_DOMAIN_EVENT_RESUMED,
-                                                      VIR_DOMAIN_EVENT_RESUMED_POSTCOPY);
-            virObjectEventStateQueue(driver->domainEventState, event);
-        }
     }

     if (mig->jobInfo) {
@@ -5111,10 +5092,20 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver,

     dom = virGetDomain(dconn, vm->def->name, vm->def->uuid, vm->def->id);

-    event = virDomainEventLifecycleNewFromObj(vm,
-                                              VIR_DOMAIN_EVENT_RESUMED,
-                                              VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
-    virObjectEventStateQueue(driver->domainEventState, event);
+    if (inPostCopy) {
+        /* The only RESUME event during post-copy migration is triggered by
+         * QEMU when the running domain moves from the source to the
+         * destination host, but then the migration keeps running until all
+         * modified memory is transferred from the source host. This will
+         * result in VIR_DOMAIN_EVENT_RESUMED with RESUMED_POSTCOPY detail.
+         * However, our API documentation says we need to fire another RESUMED
+         * event at the very end of migration with RESUMED_MIGRATED detail.
+         */
+        event = virDomainEventLifecycleNewFromObj(vm,
+                                                  VIR_DOMAIN_EVENT_RESUMED,
+                                                  VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
+        virObjectEventStateQueue(driver->domainEventState, event);
+    }

     if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
         virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_USER);
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index a7d71962da..29b0ba1590 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -456,7 +456,6 @@ qemuProcessFakeReboot(void *opaque)
     virDomainObjPtr vm = opaque;
     qemuDomainObjPrivatePtr priv = vm->privateData;
     virQEMUDriverPtr driver = priv->driver;
-    virObjectEventPtr event = NULL;
     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
     virDomainRunningReason reason = VIR_DOMAIN_RUNNING_BOOTED;
     int ret = -1, rc;
@@ -493,9 +492,6 @@ qemuProcessFakeReboot(void *opaque)
         goto endjob;
     }
     priv->gotShutdown = false;
-    event = virDomainEventLifecycleNewFromObj(vm,
-                                              VIR_DOMAIN_EVENT_RESUMED,
-                                              VIR_DOMAIN_EVENT_RESUMED_UNPAUSED);

     if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0) {
         VIR_WARN("Unable to save status on vm %s after state change",
@@ -511,7 +507,6 @@ qemuProcessFakeReboot(void *opaque)
     if (ret == -1)
         ignore_value(qemuProcessKill(vm, VIR_QEMU_PROCESS_KILL_FORCE));
     virDomainObjEndAPI(&vm);
-    virObjectEventStateQueue(driver->domainEventState, event);
     virObjectUnref(cfg);
 }

@@ -3109,7 +3104,10 @@ qemuProcessStartCPUs(virQEMUDriverPtr driver, virDomainObjPtr vm,
     if (ret < 0)
         goto release;

-    virDomainObjSetState(vm, VIR_DOMAIN_RUNNING, reason);
+    /* The RESUME event handler will change the domain state with the reason
+     * saved in priv->runningReason and it will also emit corresponding domain
+     * lifecycle event.
+     */

 cleanup:
     virObjectUnref(cfg);
-- 
2.19.0
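
A quick way to observe the effect from a client is to register a lifecycle
event callback and watch the RESUMED events. A minimal sketch against the
public libvirt API follows; the qemu:///system URI and the build command
are assumptions, adjust them for your setup:

    /* Build (assuming libvirt headers are installed):
     *   gcc -o lifecycle-watch lifecycle-watch.c \
     *       $(pkg-config --cflags --libs libvirt)
     */
    #include <stdio.h>
    #include <libvirt/libvirt.h>

    static int
    lifecycleCb(virConnectPtr conn, virDomainPtr dom,
                int event, int detail, void *opaque)
    {
        (void)conn; (void)opaque;
        /* With this patch, one resume yields exactly one RESUMED event. */
        printf("domain %s: lifecycle event %d, detail %d\n",
               virDomainGetName(dom), event, detail);
        return 0;
    }

    int main(void)
    {
        virConnectPtr conn;

        if (virEventRegisterDefaultImpl() < 0)
            return 1;
        if (!(conn = virConnectOpenReadOnly("qemu:///system")))
            return 1;
        if (virConnectDomainEventRegisterAny(conn, NULL,
                                             VIR_DOMAIN_EVENT_ID_LIFECYCLE,
                                             VIR_DOMAIN_EVENT_CALLBACK(lifecycleCb),
                                             NULL, NULL) < 0)
            return 1;

        /* Dispatch events until an error occurs. */
        while (virEventRunDefaultImpl() == 0)
            ;

        virConnectClose(conn);
        return 0;
    }

Pausing and resuming a domain (e.g. with virsh suspend/resume) while this
runs should print exactly one RESUMED line per actual state change.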