From: Pavel Hrdina
To: libvir-list@redhat.com
Subject: [libvirt PATCH v3 03/10] qemu_saveimage: move qemuSaveImageStartProcess to qemu_process
Date: Mon, 18 Sep 2023 15:29:20 +0200

The function will no longer be used only when restoring a VM: it will
also be used when reverting a snapshot. Move it to qemu_process and
rename it accordingly.

Signed-off-by: Pavel Hrdina
---
 src/qemu/qemu_process.c   | 73 +++++++++++++++++++++++++++++++++++++
 src/qemu/qemu_process.h   | 11 ++++++
 src/qemu/qemu_saveimage.c | 75 ++-------------------------------------
 src/qemu/qemu_saveimage.h | 11 ------
 4 files changed, 86 insertions(+), 84 deletions(-)

diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 42837c4a8a..0de8ceb244 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -8157,6 +8157,79 @@ qemuProcessStart(virConnectPtr conn,
 }
 
 
+/**
+ * qemuProcessStartWithMemoryState:
+ * @conn: connection object
+ * @driver: qemu driver object
+ * @vm: domain object
+ * @fd: FD pointer of memory state file
+ * @path: path to memory state file
+ * @data: data from memory state file
+ * @asyncJob: type of asynchronous job
+ * @start_flags: flags to start QEMU process with
+ * @started: boolean to store if QEMU process was started
+ *
+ * Start VM with existing memory state. Make sure that the stored memory state
+ * is correctly decompressed so it can be loaded by QEMU process.
+ *
+ * Returns 0 on success, -1 on error.
+ */
+int
+qemuProcessStartWithMemoryState(virConnectPtr conn,
+                                virQEMUDriver *driver,
+                                virDomainObj *vm,
+                                int *fd,
+                                const char *path,
+                                virQEMUSaveData *data,
+                                virDomainAsyncJob asyncJob,
+                                unsigned int start_flags,
+                                bool *started)
+{
+    qemuDomainObjPrivate *priv = vm->privateData;
+    g_autoptr(qemuDomainSaveCookie) cookie = NULL;
+    VIR_AUTOCLOSE intermediatefd = -1;
+    g_autoptr(virCommand) cmd = NULL;
+    g_autofree char *errbuf = NULL;
+    int rc = 0;
+
+    if (virSaveCookieParseString(data->cookie, (virObject **)&cookie,
+                                 virDomainXMLOptionGetSaveCookie(driver->xmlopt)) < 0)
+        return -1;
+
+    if (qemuSaveImageDecompressionStart(data, fd, &intermediatefd, &errbuf, &cmd) < 0)
+        return -1;
+
+    /* No cookie means libvirt which saved the domain was too old to mess up
+     * the CPU definitions.
+     */
+    if (cookie &&
+        qemuDomainFixupCPUs(vm, &cookie->cpu) < 0)
+        return -1;
+
+    if (cookie && !cookie->slirpHelper)
+        priv->disableSlirp = true;
+
+    if (qemuProcessStart(conn, driver, vm, cookie ? cookie->cpu : NULL,
+                         asyncJob, "stdio", *fd, path, NULL,
+                         VIR_NETDEV_VPORT_PROFILE_OP_RESTORE,
+                         start_flags) == 0)
+        *started = true;
+
+    rc = qemuSaveImageDecompressionStop(cmd, fd, &intermediatefd, errbuf, *started, path);
+
+    virDomainAuditStart(vm, "restored", *started);
+    if (!*started || rc < 0)
+        return -1;
+
+    /* qemuProcessStart doesn't unset the qemu error reporting infrastructure
+     * in case of migration (which is used in this case) so we need to reset it
+     * so that the handle to virtlogd is not held open unnecessarily */
+    qemuMonitorSetDomainLog(qemuDomainGetMonitor(vm), NULL, NULL, NULL);
+
+    return 0;
+}
+
+
 int
 qemuProcessCreatePretendCmdPrepare(virQEMUDriver *driver,
                                    virDomainObj *vm,
diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h
index cae1b49756..d758b4f51a 100644
--- a/src/qemu/qemu_process.h
+++ b/src/qemu/qemu_process.h
@@ -23,6 +23,7 @@
 
 #include "qemu_conf.h"
 #include "qemu_domain.h"
+#include "qemu_saveimage.h"
 #include "vireventthread.h"
 
 int qemuProcessPrepareMonitorChr(virDomainChrSourceDef *monConfig,
@@ -90,6 +91,16 @@ int qemuProcessStart(virConnectPtr conn,
                      virNetDevVPortProfileOp vmop,
                      unsigned int flags);
 
+int qemuProcessStartWithMemoryState(virConnectPtr conn,
+                                    virQEMUDriver *driver,
+                                    virDomainObj *vm,
+                                    int *fd,
+                                    const char *path,
+                                    virQEMUSaveData *data,
+                                    virDomainAsyncJob asyncJob,
+                                    unsigned int start_flags,
+                                    bool *started);
+
 int qemuProcessCreatePretendCmdPrepare(virQEMUDriver *driver,
                                        virDomainObj *vm,
                                        const char *migrateURI,
diff --git a/src/qemu/qemu_saveimage.c b/src/qemu/qemu_saveimage.c
index 95fabee907..1fbc7891b1 100644
--- a/src/qemu/qemu_saveimage.c
+++ b/src/qemu/qemu_saveimage.c
@@ -675,77 +675,6 @@ qemuSaveImageOpen(virQEMUDriver *driver,
     return ret;
 }
 
-/**
- * qemuSaveImageStartProcess:
- * @conn: connection object
- * @driver: qemu driver object
- * @vm: domain object
- * @fd: FD pointer of memory state file
- * @path: path to memory state file
- * @data: data from memory state file
- * @asyncJob: type of asynchronous job
- * @start_flags: flags to start QEMU process with
- * @started: boolean to store if QEMU process was started
- *
- * Start VM with existing memory state. Make sure that the stored memory state
- * is correctly decompressed so it can be loaded by QEMU process.
- *
- * Returns 0 on success, -1 on error.
- */
-int
-qemuSaveImageStartProcess(virConnectPtr conn,
-                          virQEMUDriver *driver,
-                          virDomainObj *vm,
-                          int *fd,
-                          const char *path,
-                          virQEMUSaveData *data,
-                          virDomainAsyncJob asyncJob,
-                          unsigned int start_flags,
-                          bool *started)
-{
-    qemuDomainObjPrivate *priv = vm->privateData;
-    g_autoptr(qemuDomainSaveCookie) cookie = NULL;
-    VIR_AUTOCLOSE intermediatefd = -1;
-    g_autoptr(virCommand) cmd = NULL;
-    g_autofree char *errbuf = NULL;
-    int rc = 0;
-
-    if (virSaveCookieParseString(data->cookie, (virObject **)&cookie,
-                                 virDomainXMLOptionGetSaveCookie(driver->xmlopt)) < 0)
-        return -1;
-
-    if (qemuSaveImageDecompressionStart(data, fd, &intermediatefd, &errbuf, &cmd) < 0)
-        return -1;
-
-    /* No cookie means libvirt which saved the domain was too old to mess up
-     * the CPU definitions.
- */ - if (cookie && - qemuDomainFixupCPUs(vm, &cookie->cpu) < 0) - return -1; - - if (cookie && !cookie->slirpHelper) - priv->disableSlirp =3D true; - - if (qemuProcessStart(conn, driver, vm, cookie ? cookie->cpu : NULL, - asyncJob, "stdio", *fd, path, NULL, - VIR_NETDEV_VPORT_PROFILE_OP_RESTORE, - start_flags) =3D=3D 0) - *started =3D true; - - rc =3D qemuSaveImageDecompressionStop(cmd, fd, &intermediatefd, errbuf= , *started, path); - - virDomainAuditStart(vm, "restored", *started); - if (!*started || rc < 0) - return -1; - - /* qemuProcessStart doesn't unset the qemu error reporting infrastruct= ure - * in case of migration (which is used in this case) so we need to res= et it - * so that the handle to virtlogd is not held open unnecessarily */ - qemuMonitorSetDomainLog(qemuDomainGetMonitor(vm), NULL, NULL, NULL); - - return 0; -} =20 int qemuSaveImageStartVM(virConnectPtr conn, @@ -769,8 +698,8 @@ qemuSaveImageStartVM(virConnectPtr conn, if (reset_nvram) start_flags |=3D VIR_QEMU_PROCESS_START_RESET_NVRAM; =20 - if (qemuSaveImageStartProcess(conn, driver, vm, fd, path, data, - asyncJob, start_flags, &started) < 0) { + if (qemuProcessStartWithMemoryState(conn, driver, vm, fd, path, data, + asyncJob, start_flags, &started) <= 0) { goto cleanup; } =20 diff --git a/src/qemu/qemu_saveimage.h b/src/qemu/qemu_saveimage.h index dcee482066..e541792153 100644 --- a/src/qemu/qemu_saveimage.h +++ b/src/qemu/qemu_saveimage.h @@ -57,17 +57,6 @@ qemuSaveImageUpdateDef(virQEMUDriver *driver, virDomainDef *def, const char *newxml); =20 -int -qemuSaveImageStartProcess(virConnectPtr conn, - virQEMUDriver *driver, - virDomainObj *vm, - int *fd, - const char *path, - virQEMUSaveData *data, - virDomainAsyncJob asyncJob, - unsigned int start_flags, - bool *started); - int qemuSaveImageStartVM(virConnectPtr conn, virQEMUDriver *driver, --=20 2.41.0