From nobody Mon Feb 9 15:09:54 2026 Delivered-To: importer@patchew.org Received-SPF: pass (zoho.com: domain of redhat.com designates 209.132.183.28 as permitted sender) client-ip=209.132.183.28; envelope-from=libvir-list-bounces@redhat.com; helo=mx1.redhat.com; Authentication-Results: mx.zohomail.com; spf=pass (zoho.com: domain of redhat.com designates 209.132.183.28 as permitted sender) smtp.mailfrom=libvir-list-bounces@redhat.com; dmarc=pass(p=none dis=none) header.from=redhat.com Return-Path: Received: from mx1.redhat.com (mx1.redhat.com [209.132.183.28]) by mx.zohomail.com with SMTPS id 1553060481575117.7068811329234; Tue, 19 Mar 2019 22:41:21 -0700 (PDT) Received: from smtp.corp.redhat.com (int-mx05.intmail.prod.int.phx2.redhat.com [10.5.11.15]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by mx1.redhat.com (Postfix) with ESMTPS id 83EFC3082E25; Wed, 20 Mar 2019 05:41:19 +0000 (UTC) Received: from colo-mx.corp.redhat.com (colo-mx02.intmail.prod.int.phx2.redhat.com [10.5.11.21]) by smtp.corp.redhat.com (Postfix) with ESMTPS id 509EC5D70A; Wed, 20 Mar 2019 05:41:19 +0000 (UTC) Received: from lists01.pubmisc.prod.ext.phx2.redhat.com (lists01.pubmisc.prod.ext.phx2.redhat.com [10.5.19.33]) by colo-mx.corp.redhat.com (Postfix) with ESMTP id 07B0741F3F; Wed, 20 Mar 2019 05:41:19 +0000 (UTC) Received: from smtp.corp.redhat.com (int-mx08.intmail.prod.int.phx2.redhat.com [10.5.11.23]) by lists01.pubmisc.prod.ext.phx2.redhat.com (8.13.8/8.13.8) with ESMTP id x2K5fDJD017201 for ; Wed, 20 Mar 2019 01:41:13 -0400 Received: by smtp.corp.redhat.com (Postfix) id 67F2E1974C; Wed, 20 Mar 2019 05:41:13 +0000 (UTC) Received: from blue.redhat.com (ovpn-116-65.phx2.redhat.com [10.3.116.65]) by smtp.corp.redhat.com (Postfix) with ESMTP id 0A0611A913; Wed, 20 Mar 2019 05:41:12 +0000 (UTC) From: Eric Blake To: libvir-list@redhat.com Date: Wed, 20 Mar 2019 00:40:53 -0500 Message-Id: <20190320054105.17689-5-eblake@redhat.com> In-Reply-To: 
<20190320054105.17689-1-eblake@redhat.com> References: <20190320054105.17689-1-eblake@redhat.com> MIME-Version: 1.0 X-Scanned-By: MIMEDefang 2.84 on 10.5.11.23 X-loop: libvir-list@redhat.com Cc: jtomko@redhat.com Subject: [libvirt] [PATCH 04/16] vbox: Clean up some snapshot usage X-BeenThere: libvir-list@redhat.com X-Mailman-Version: 2.1.12 Precedence: junk List-Id: Development discussions about the libvirt library & tools List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Content-Transfer-Encoding: quoted-printable Sender: libvir-list-bounces@redhat.com Errors-To: libvir-list-bounces@redhat.com X-Scanned-By: MIMEDefang 2.79 on 10.5.11.15 X-Greylist: Sender IP whitelisted, not delayed by milter-greylist-4.5.16 (mx1.redhat.com [10.5.110.46]); Wed, 20 Mar 2019 05:41:20 +0000 (UTC) Content-Type: text/plain; charset="utf-8" An upcoming patch will be reworking virDomainSnapshotDef to have a base class; minimize the churn by using a local variable to reduce the number of dereferences required when accessing the domain definition associated with the snapshot. 
Signed-off-by: Eric Blake Reviewed-by: John Ferlan --- src/vbox/vbox_common.c | 65 ++++++++++++++++++++++-------------------- 1 file changed, 34 insertions(+), 31 deletions(-) diff --git a/src/vbox/vbox_common.c b/src/vbox/vbox_common.c index 49c7e8a27d..ac7e02eed6 100644 --- a/src/vbox/vbox_common.c +++ b/src/vbox/vbox_common.c @@ -5984,6 +5984,7 @@ vboxSnapshotGetReadOnlyDisks(virDomainSnapshotDefPtr = def, vboxArray mediumAttachments =3D VBOX_ARRAY_INITIALIZER; size_t i =3D 0, diskCount =3D 0, sdCount =3D 0; int ret =3D -1; + virDomainDefPtr defdom =3D def->dom; if (!data->vboxObj) return ret; @@ -6027,26 +6028,26 @@ vboxSnapshotGetReadOnlyDisks(virDomainSnapshotDefPt= r def, goto cleanup; } if (medium) { - def->dom->ndisks++; + defdom->ndisks++; VBOX_RELEASE(medium); } } } /* Allocate mem, if fails return error */ - if (VIR_ALLOC_N(def->dom->disks, def->dom->ndisks) >=3D 0) { - for (i =3D 0; i < def->dom->ndisks; i++) { + if (VIR_ALLOC_N(defdom->disks, defdom->ndisks) >=3D 0) { + for (i =3D 0; i < defdom->ndisks; i++) { virDomainDiskDefPtr diskDef =3D virDomainDiskDefNew(NULL); if (!diskDef) goto cleanup; - def->dom->disks[i] =3D diskDef; + defdom->disks[i] =3D diskDef; } } else { goto cleanup; } /* get the attachment details here */ - for (i =3D 0; i < mediumAttachments.count && diskCount < def->dom->ndi= sks; i++) { + for (i =3D 0; i < mediumAttachments.count && diskCount < defdom->ndisk= s; i++) { PRUnichar *storageControllerName =3D NULL; PRUint32 deviceType =3D DeviceType_Null; PRUint32 storageBus =3D StorageBus_Null; @@ -6125,7 +6126,7 @@ vboxSnapshotGetReadOnlyDisks(virDomainSnapshotDefPtr = def, } VBOX_UTF16_TO_UTF8(mediumLocUtf16, &mediumLocUtf8); VBOX_UTF16_FREE(mediumLocUtf16); - if (VIR_STRDUP(def->dom->disks[diskCount]->src->path, mediumLocUtf= 8) < 0) + if (VIR_STRDUP(defdom->disks[diskCount]->src->path, mediumLocUtf8)= < 0) goto cleanup; VBOX_UTF8_FREE(mediumLocUtf8); @@ -6136,11 +6137,11 @@ vboxSnapshotGetReadOnlyDisks(virDomainSnapshotDefPt= r 
def, goto cleanup; } - def->dom->disks[diskCount]->dst =3D vboxGenerateMediumName(storage= Bus, - devicePor= t, - deviceSlo= t, - sdCount); - if (!def->dom->disks[diskCount]->dst) { + defdom->disks[diskCount]->dst =3D vboxGenerateMediumName(storageBu= s, + devicePort, + deviceSlot, + sdCount); + if (!defdom->disks[diskCount]->dst) { virReportError(VIR_ERR_INTERNAL_ERROR, _("Could not generate medium name for the disk " "at: port:%d, slot:%d"), devicePort, deviceSl= ot); @@ -6149,16 +6150,16 @@ vboxSnapshotGetReadOnlyDisks(virDomainSnapshotDefPt= r def, } if (storageBus =3D=3D StorageBus_IDE) { - def->dom->disks[diskCount]->bus =3D VIR_DOMAIN_DISK_BUS_IDE; + defdom->disks[diskCount]->bus =3D VIR_DOMAIN_DISK_BUS_IDE; } else if (storageBus =3D=3D StorageBus_SATA) { sdCount++; - def->dom->disks[diskCount]->bus =3D VIR_DOMAIN_DISK_BUS_SATA; + defdom->disks[diskCount]->bus =3D VIR_DOMAIN_DISK_BUS_SATA; } else if (storageBus =3D=3D StorageBus_SCSI || storageBus =3D=3D StorageBus_SAS) { sdCount++; - def->dom->disks[diskCount]->bus =3D VIR_DOMAIN_DISK_BUS_SCSI; + defdom->disks[diskCount]->bus =3D VIR_DOMAIN_DISK_BUS_SCSI; } else if (storageBus =3D=3D StorageBus_Floppy) { - def->dom->disks[diskCount]->bus =3D VIR_DOMAIN_DISK_BUS_FDC; + defdom->disks[diskCount]->bus =3D VIR_DOMAIN_DISK_BUS_FDC; } rc =3D gVBoxAPI.UIMediumAttachment.GetType(imediumattach, &deviceT= ype); @@ -6168,15 +6169,15 @@ vboxSnapshotGetReadOnlyDisks(virDomainSnapshotDefPt= r def, goto cleanup; } if (deviceType =3D=3D DeviceType_HardDisk) - def->dom->disks[diskCount]->device =3D VIR_DOMAIN_DISK_DEVICE_= DISK; + defdom->disks[diskCount]->device =3D VIR_DOMAIN_DISK_DEVICE_DI= SK; else if (deviceType =3D=3D DeviceType_Floppy) - def->dom->disks[diskCount]->device =3D VIR_DOMAIN_DISK_DEVICE_= FLOPPY; + defdom->disks[diskCount]->device =3D VIR_DOMAIN_DISK_DEVICE_FL= OPPY; else if (deviceType =3D=3D DeviceType_DVD) - def->dom->disks[diskCount]->device =3D VIR_DOMAIN_DISK_DEVICE_= CDROM; + 
defdom->disks[diskCount]->device =3D VIR_DOMAIN_DISK_DEVICE_CD= ROM; if (readOnly =3D=3D PR_TRUE) - def->dom->disks[diskCount]->src->readonly =3D true; - def->dom->disks[diskCount]->src->type =3D VIR_STORAGE_TYPE_FILE; + defdom->disks[diskCount]->src->readonly =3D true; + defdom->disks[diskCount]->src->type =3D VIR_STORAGE_TYPE_FILE; diskCount++; } @@ -6209,6 +6210,7 @@ static char *vboxDomainSnapshotGetXMLDesc(virDomainSn= apshotPtr snapshot, PRBool online =3D PR_FALSE; char uuidstr[VIR_UUID_STRING_BUFLEN]; char *ret =3D NULL; + virDomainDefPtr defdom; if (!data->vboxObj) return ret; @@ -6223,6 +6225,7 @@ static char *vboxDomainSnapshotGetXMLDesc(virDomainSn= apshotPtr snapshot, if (VIR_ALLOC(def) < 0 || !(def->dom =3D virDomainDefNew())) goto cleanup; + defdom =3D def->dom; if (VIR_STRDUP(def->name, virSnapName(snapshot)) < 0) goto cleanup; @@ -6233,25 +6236,25 @@ static char *vboxDomainSnapshotGetXMLDesc(virDomain= SnapshotPtr snapshot, PRUint32 memorySize =3D 0; PRUint32 CPUCount =3D 0; - def->dom->virtType =3D VIR_DOMAIN_VIRT_VBOX; - def->dom->id =3D dom->id; - memcpy(def->dom->uuid, dom->uuid, VIR_UUID_BUFLEN); - if (VIR_STRDUP(def->dom->name, dom->name) < 0) + defdom->virtType =3D VIR_DOMAIN_VIRT_VBOX; + defdom->id =3D dom->id; + memcpy(defdom->uuid, dom->uuid, VIR_UUID_BUFLEN); + if (VIR_STRDUP(defdom->name, dom->name) < 0) goto cleanup; gVBoxAPI.UIMachine.GetMemorySize(machine, &memorySize); - def->dom->mem.cur_balloon =3D memorySize * 1024; + defdom->mem.cur_balloon =3D memorySize * 1024; /* Currently setting memory and maxMemory as same, cause * the notation here seems to be inconsistent while * reading and while dumping xml */ - virDomainDefSetMemoryTotal(def->dom, memorySize * 1024); - def->dom->os.type =3D VIR_DOMAIN_OSTYPE_HVM; - def->dom->os.arch =3D virArchFromHost(); + virDomainDefSetMemoryTotal(defdom, memorySize * 1024); + defdom->os.type =3D VIR_DOMAIN_OSTYPE_HVM; + defdom->os.arch =3D virArchFromHost(); gVBoxAPI.UIMachine.GetCPUCount(machine, 
&CPUCount); - if (virDomainDefSetVcpusMax(def->dom, CPUCount, data->xmlopt) < 0) + if (virDomainDefSetVcpusMax(defdom, CPUCount, data->xmlopt) < 0) goto cleanup; - if (virDomainDefSetVcpus(def->dom, CPUCount) < 0) + if (virDomainDefSetVcpus(defdom, CPUCount) < 0) goto cleanup; if (vboxSnapshotGetReadWriteDisks(def, snapshot) < 0) @@ -6325,7 +6328,7 @@ static char *vboxDomainSnapshotGetXMLDesc(virDomainSn= apshotPtr snapshot, def->state =3D VIR_DOMAIN_SNAPSHOT_SHUTOFF; virUUIDFormat(dom->uuid, uuidstr); - memcpy(def->dom->uuid, dom->uuid, VIR_UUID_BUFLEN); + memcpy(defdom->uuid, dom->uuid, VIR_UUID_BUFLEN); ret =3D virDomainSnapshotDefFormat(uuidstr, def, data->caps, data->xml= opt, 0); cleanup: --=20 2.20.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list