From: Paul Durrant
Date: Mon, 30 Apr 2018 13:01:38 +0100
Subject: [Qemu-devel] [PATCH 3/4] block/xen_disk: use a single entry iovec
Message-ID: <1525089699-13411-4-git-send-email-paul.durrant@citrix.com>
In-Reply-To: <1525089699-13411-1-git-send-email-paul.durrant@citrix.com>
References: <1525089699-13411-1-git-send-email-paul.durrant@citrix.com>
Cc: Anthony Perard, Kevin Wolf, Paul Durrant, Stefano Stabellini, Max Reitz

Since xen_disk now always copies data to and from a guest, there is no
need to maintain a vector entry corresponding to every page of a request.
This means there is less per-request state to maintain, so the ioreq
structure can shrink significantly.
Signed-off-by: Paul Durrant
---
Cc: Stefano Stabellini
Cc: Anthony Perard
Cc: Kevin Wolf
Cc: Max Reitz
---
 hw/block/xen_disk.c | 103 ++++++++++++++++-------------------------------------
 1 file changed, 31 insertions(+), 72 deletions(-)

diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index 8f4e229..6d737fd 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -46,13 +46,10 @@ struct ioreq {
     /* parsed request */
     off_t start;
     QEMUIOVector v;
+    void *buf;
+    size_t size;
     int presync;
 
-    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    void *pages;
-
     /* aio status */
     int aio_inflight;
     int aio_errors;
@@ -110,13 +107,10 @@ static void ioreq_reset(struct ioreq *ioreq)
     memset(&ioreq->req, 0, sizeof(ioreq->req));
     ioreq->status = 0;
     ioreq->start = 0;
+    ioreq->buf = NULL;
+    ioreq->size = 0;
     ioreq->presync = 0;
 
-    memset(ioreq->domids, 0, sizeof(ioreq->domids));
-    memset(ioreq->refs, 0, sizeof(ioreq->refs));
-    memset(ioreq->page, 0, sizeof(ioreq->page));
-    ioreq->pages = NULL;
-
     ioreq->aio_inflight = 0;
     ioreq->aio_errors = 0;
 
@@ -139,7 +133,7 @@ static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
         ioreq = g_malloc0(sizeof(*ioreq));
         ioreq->blkdev = blkdev;
         blkdev->requests_total++;
-        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+        qemu_iovec_init(&ioreq->v, 1);
     } else {
         /* get one from freelist */
         ioreq = QLIST_FIRST(&blkdev->freelist);
@@ -184,7 +178,6 @@ static void ioreq_release(struct ioreq *ioreq, bool finish)
 static int ioreq_parse(struct ioreq *ioreq)
 {
     struct XenBlkDev *blkdev = ioreq->blkdev;
-    uintptr_t mem;
     size_t len;
     int i;
 
@@ -231,14 +224,10 @@ static int ioreq_parse(struct ioreq *ioreq)
             goto err;
         }
 
-        ioreq->domids[i] = blkdev->xendev.dom;
-        ioreq->refs[i] = ioreq->req.seg[i].gref;
-
-        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
         len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
-        qemu_iovec_add(&ioreq->v, (void*)mem, len);
+        ioreq->size += len;
     }
-    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
+    if (ioreq->start + ioreq->size > blkdev->file_size) {
         xen_pv_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
         goto err;
     }
@@ -249,85 +238,55 @@ err:
     return -1;
 }
 
-static void ioreq_free_copy_buffers(struct ioreq *ioreq)
-{
-    int i;
-
-    for (i = 0; i < ioreq->v.niov; i++) {
-        ioreq->page[i] = NULL;
-    }
-
-    qemu_vfree(ioreq->pages);
-}
-
-static int ioreq_init_copy_buffers(struct ioreq *ioreq)
-{
-    int i;
-
-    if (ioreq->v.niov == 0) {
-        return 0;
-    }
-
-    ioreq->pages = qemu_memalign(XC_PAGE_SIZE, ioreq->v.niov * XC_PAGE_SIZE);
-
-    for (i = 0; i < ioreq->v.niov; i++) {
-        ioreq->page[i] = ioreq->pages + i * XC_PAGE_SIZE;
-        ioreq->v.iov[i].iov_base = ioreq->page[i];
-    }
-
-    return 0;
-}
-
 static int ioreq_grant_copy(struct ioreq *ioreq)
 {
-    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
+    struct XenBlkDev *blkdev = ioreq->blkdev;
+    xengnttab_handle *gnt = blkdev->xendev.gnttabdev;
+    void *virt = ioreq->buf;
     xengnttab_grant_copy_segment_t segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    int i, count, rc;
-    int64_t file_blk = ioreq->blkdev->file_blk;
-
-    if (ioreq->v.niov == 0) {
-        return 0;
-    }
+    int i, rc;
+    int64_t file_blk = blkdev->file_blk;
 
-    count = ioreq->v.niov;
-
-    for (i = 0; i < count; i++) {
+    for (i = 0; i < ioreq->req.nr_segments; i++) {
         if (ioreq->req.operation == BLKIF_OP_READ) {
             segs[i].flags = GNTCOPY_dest_gref;
-            segs[i].dest.foreign.ref = ioreq->refs[i];
-            segs[i].dest.foreign.domid = ioreq->domids[i];
+            segs[i].dest.foreign.ref = ioreq->req.seg[i].gref;
+            segs[i].dest.foreign.domid = blkdev->xendev.dom;
             segs[i].dest.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
-            segs[i].source.virt = ioreq->v.iov[i].iov_base;
+            segs[i].source.virt = virt;
         } else {
             segs[i].flags = GNTCOPY_source_gref;
-            segs[i].source.foreign.ref = ioreq->refs[i];
-            segs[i].source.foreign.domid = ioreq->domids[i];
+            segs[i].source.foreign.ref = ioreq->req.seg[i].gref;
+            segs[i].source.foreign.domid = blkdev->xendev.dom;
            segs[i].source.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
-            segs[i].dest.virt = ioreq->v.iov[i].iov_base;
+            segs[i].dest.virt = virt;
         }
         segs[i].len = (ioreq->req.seg[i].last_sect -
                        ioreq->req.seg[i].first_sect + 1) * file_blk;
+        virt += segs[i].len;
     }
 
-    rc = xengnttab_grant_copy(gnt, count, segs);
+    rc = xengnttab_grant_copy(gnt, ioreq->req.nr_segments, segs);
 
     if (rc) {
-        xen_pv_printf(&ioreq->blkdev->xendev, 0,
+        xen_pv_printf(&blkdev->xendev, 0,
                       "failed to copy data %d\n", rc);
         ioreq->aio_errors++;
         return -1;
     }
 
-    for (i = 0; i < count; i++) {
+    for (i = 0; i < ioreq->req.nr_segments; i++) {
         if (segs[i].status != GNTST_okay) {
-            xen_pv_printf(&ioreq->blkdev->xendev, 3,
+            xen_pv_printf(&blkdev->xendev, 3,
                           "failed to copy data %d for gref %d, domid %d\n",
-                          segs[i].status, ioreq->refs[i], ioreq->domids[i]);
+                          segs[i].status, ioreq->req.seg[i].gref,
+                          blkdev->xendev.dom);
             ioreq->aio_errors++;
             rc = -1;
         }
     }
 
+    qemu_iovec_add(&ioreq->v, ioreq->buf, ioreq->size);
     return rc;
 }
 
@@ -362,14 +321,14 @@ static void qemu_aio_complete(void *opaque, int ret)
         if (ret == 0) {
             ioreq_grant_copy(ioreq);
         }
-        ioreq_free_copy_buffers(ioreq);
+        qemu_vfree(ioreq->buf);
         break;
     case BLKIF_OP_WRITE:
     case BLKIF_OP_FLUSH_DISKCACHE:
         if (!ioreq->req.nr_segments) {
             break;
         }
-        ioreq_free_copy_buffers(ioreq);
+        qemu_vfree(ioreq->buf);
         break;
     default:
         break;
@@ -437,12 +396,12 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
 {
     struct XenBlkDev *blkdev = ioreq->blkdev;
 
-    ioreq_init_copy_buffers(ioreq);
+    ioreq->buf = qemu_memalign(XC_PAGE_SIZE, ioreq->size);
     if (ioreq->req.nr_segments &&
         (ioreq->req.operation == BLKIF_OP_WRITE ||
          ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
         ioreq_grant_copy(ioreq)) {
-        ioreq_free_copy_buffers(ioreq);
+        qemu_vfree(ioreq->buf);
         goto err;
     }
 
-- 
2.1.4
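
For readers new to this code, the standalone sketch below illustrates the shape of the change in isolation: rather than one iovec entry (plus a grant ref, domid and page pointer) per segment, a request carries a single aligned bounce buffer and a total size, each segment is copied to or from the guest at a running offset inside that buffer, and the block layer is handed a one-entry iovec describing the whole buffer. It is an approximation, not the xen_disk/QEMU API: demo_request, demo_copy_in(), DEMO_PAGE_SIZE and DEMO_MAX_SEGMENTS are made-up stand-ins for ioreq, ioreq_grant_copy(), XC_PAGE_SIZE and BLKIF_MAX_SEGMENTS_PER_REQUEST, and a plain memcpy() stands in for the grant-copy hypercall.

#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

#define DEMO_PAGE_SIZE    4096
#define DEMO_MAX_SEGMENTS 11       /* stand-in for BLKIF_MAX_SEGMENTS_PER_REQUEST */

struct demo_segment {              /* stand-in for one blkif request segment */
    size_t len;                    /* bytes covered by this segment */
    const void *guest_data;        /* pretend guest-side source */
};

struct demo_request {              /* stand-in for the slimmed-down ioreq */
    int nr_segments;
    struct demo_segment seg[DEMO_MAX_SEGMENTS];

    void *buf;                     /* single bounce buffer for the request */
    size_t size;                   /* total bytes, summed at parse time */
    struct iovec v;                /* one entry instead of one per segment */
};

/*
 * Grant-copy stand-in for a write request: copy every segment into the
 * bounce buffer at a running offset, the way the patched ioreq_grant_copy()
 * walks ioreq->buf with its 'virt' cursor, then describe the whole request
 * with a single iovec entry.
 */
static void demo_copy_in(struct demo_request *req)
{
    char *virt = req->buf;
    int i;

    for (i = 0; i < req->nr_segments; i++) {
        memcpy(virt, req->seg[i].guest_data, req->seg[i].len);
        virt += req->seg[i].len;
    }

    req->v.iov_base = req->buf;
    req->v.iov_len = req->size;
}

int main(void)
{
    static const char a[] = "hello, ", b[] = "xen_disk";
    struct demo_request req = {
        .nr_segments = 2,
        .seg = { { sizeof(a) - 1, a }, { sizeof(b), b } },
    };
    int i;

    for (i = 0; i < req.nr_segments; i++) {
        req.size += req.seg[i].len;          /* as ioreq_parse() now does */
    }
    if (posix_memalign(&req.buf, DEMO_PAGE_SIZE, req.size)) {
        return 1;                            /* qemu_memalign() analogue */
    }

    demo_copy_in(&req);
    printf("%d segments -> 1 iovec entry, %zu bytes: \"%s\"\n",
           req.nr_segments, req.v.iov_len, (char *)req.v.iov_base);

    free(req.buf);
    return 0;
}

The point of the single-entry layout is that per-request state no longer scales with the number of segments; only the copy cursor ('virt' in the patch) walks the buffer while the grant-copy segments are built.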