From: Paul Durrant
Date: Mon, 30 Apr 2018 13:01:36 +0100
Message-ID: <1525089699-13411-2-git-send-email-paul.durrant@citrix.com>
In-Reply-To: <1525089699-13411-1-git-send-email-paul.durrant@citrix.com>
Subject: [Qemu-devel] [PATCH 1/4] block/xen_disk: remove persistent grant code

The grant copy operation was added to libxengnttab in Xen 4.8.0. If grant
copy is available then persistent grants will not be used. The xen_disk
source can be significantly simplified by removing this now-redundant code.
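As background, QEMU decides this once at backend start-up rather than per
request. A minimal sketch of such an availability probe, assuming an
already-open xengnttab handle (the helper name probe_grant_copy is
illustrative, not QEMU's):

#include <stdbool.h>
#include <stddef.h>
#include <xengnttab.h>

/* A zero-length grant copy is a no-op where the operation is implemented
 * (it returns 0) and fails where it is not, so it makes a cheap probe. */
static bool probe_grant_copy(xengnttab_handle *gnt)
{
    return xengnttab_grant_copy(gnt, 0, NULL) == 0;
}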
Signed-off-by: Paul Durrant
---
Cc: Stefano Stabellini
Cc: Anthony Perard
Cc: Kevin Wolf
Cc: Max Reitz
---
 hw/block/xen_disk.c | 237 +++-----------------------------------------
 1 file changed, 21 insertions(+), 216 deletions(-)

diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index f74fcd4..b33611a 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -43,20 +43,6 @@ static int batch_maps = 0;
 #define BLOCK_SIZE  512
 #define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
 
-struct PersistentGrant {
-    void *page;
-    struct XenBlkDev *blkdev;
-};
-
-typedef struct PersistentGrant PersistentGrant;
-
-struct PersistentRegion {
-    void *addr;
-    int num;
-};
-
-typedef struct PersistentRegion PersistentRegion;
-
 struct ioreq {
     blkif_request_t req;
     int16_t status;
@@ -73,7 +59,6 @@ struct ioreq {
     int prot;
     void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
     void *pages;
-    int num_unmap;
 
     /* aio status */
     int aio_inflight;
@@ -115,13 +100,7 @@ struct XenBlkDev {
     int requests_finished;
     unsigned int max_requests;
 
-    /* Persistent grants extension */
     gboolean feature_discard;
-    gboolean feature_persistent;
-    GTree *persistent_gnts;
-    GSList *persistent_regions;
-    unsigned int persistent_gnt_count;
-    unsigned int max_grants;
 
     /* qemu block driver */
     DriveInfo *dinfo;
@@ -158,46 +137,6 @@ static void ioreq_reset(struct ioreq *ioreq)
     qemu_iovec_reset(&ioreq->v);
 }
 
-static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
-{
-    uint ua = GPOINTER_TO_UINT(a);
-    uint ub = GPOINTER_TO_UINT(b);
-    return (ua > ub) - (ua < ub);
-}
-
-static void destroy_grant(gpointer pgnt)
-{
-    PersistentGrant *grant = pgnt;
-    xengnttab_handle *gnt = grant->blkdev->xendev.gnttabdev;
-
-    if (xengnttab_unmap(gnt, grant->page, 1) != 0) {
-        xen_pv_printf(&grant->blkdev->xendev, 0,
-                      "xengnttab_unmap failed: %s\n",
-                      strerror(errno));
-    }
-    grant->blkdev->persistent_gnt_count--;
-    xen_pv_printf(&grant->blkdev->xendev, 3,
-                  "unmapped grant %p\n", grant->page);
-    g_free(grant);
-}
-
-static void remove_persistent_region(gpointer data, gpointer dev)
-{
-    PersistentRegion *region = data;
-    struct XenBlkDev *blkdev = dev;
-    xengnttab_handle *gnt = blkdev->xendev.gnttabdev;
-
-    if (xengnttab_unmap(gnt, region->addr, region->num) != 0) {
-        xen_pv_printf(&blkdev->xendev, 0,
-                      "xengnttab_unmap region %p failed: %s\n",
-                      region->addr, strerror(errno));
-    }
-    xen_pv_printf(&blkdev->xendev, 3,
-                  "unmapped grant region %p with %d pages\n",
-                  region->addr, region->num);
-    g_free(region);
-}
-
 static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
 {
     struct ioreq *ioreq = NULL;
@@ -327,22 +266,22 @@ static void ioreq_unmap(struct ioreq *ioreq)
     xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
     int i;
 
-    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
+    if (ioreq->v.niov == 0 || ioreq->mapped == 0) {
         return;
     }
     if (batch_maps) {
         if (!ioreq->pages) {
             return;
         }
-        if (xengnttab_unmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
+        if (xengnttab_unmap(gnt, ioreq->pages, ioreq->v.niov) != 0) {
             xen_pv_printf(&ioreq->blkdev->xendev, 0,
                           "xengnttab_unmap failed: %s\n",
                           strerror(errno));
         }
-        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
+        ioreq->blkdev->cnt_map -= ioreq->v.niov;
         ioreq->pages = NULL;
     } else {
-        for (i = 0; i < ioreq->num_unmap; i++) {
+        for (i = 0; i < ioreq->v.niov; i++) {
             if (!ioreq->page[i]) {
                 continue;
             }
@@ -361,138 +300,44 @@ static void ioreq_unmap(struct ioreq *ioreq)
 static int ioreq_map(struct ioreq *ioreq)
 {
     xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
-    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    int i, j, new_maps = 0;
-    PersistentGrant *grant;
-    PersistentRegion *region;
-    /* domids and refs variables will contain the information necessary
-     * to map the grants that are needed to fulfill this request.
-     *
-     * After mapping the needed grants, the page array will contain the
-     * memory address of each granted page in the order specified in ioreq
-     * (disregarding if it's a persistent grant or not).
-     */
+    int i;
 
     if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
         return 0;
     }
-    if (ioreq->blkdev->feature_persistent) {
-        for (i = 0; i < ioreq->v.niov; i++) {
-            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
-                                  GUINT_TO_POINTER(ioreq->refs[i]));
-
-            if (grant != NULL) {
-                page[i] = grant->page;
-                xen_pv_printf(&ioreq->blkdev->xendev, 3,
-                              "using persistent-grant %" PRIu32 "\n",
-                              ioreq->refs[i]);
-            } else {
-                /* Add the grant to the list of grants that
-                 * should be mapped
-                 */
-                domids[new_maps] = ioreq->domids[i];
-                refs[new_maps] = ioreq->refs[i];
-                page[i] = NULL;
-                new_maps++;
-            }
-        }
-        /* Set the protection to RW, since grants may be reused later
-         * with a different protection than the one needed for this request
-         */
-        ioreq->prot = PROT_WRITE | PROT_READ;
-    } else {
-        /* All grants in the request should be mapped */
-        memcpy(refs, ioreq->refs, sizeof(refs));
-        memcpy(domids, ioreq->domids, sizeof(domids));
-        memset(page, 0, sizeof(page));
-        new_maps = ioreq->v.niov;
-    }
-
-    if (batch_maps && new_maps) {
+    if (batch_maps) {
         ioreq->pages = xengnttab_map_grant_refs
-            (gnt, new_maps, domids, refs, ioreq->prot);
+            (gnt, ioreq->v.niov, ioreq->domids, ioreq->refs, ioreq->prot);
         if (ioreq->pages == NULL) {
             xen_pv_printf(&ioreq->blkdev->xendev, 0,
                           "can't map %d grant refs (%s, %d maps)\n",
-                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
+                          ioreq->v.niov, strerror(errno),
+                          ioreq->blkdev->cnt_map);
             return -1;
         }
-        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
-            if (page[i] == NULL) {
-                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
-            }
+        for (i = 0; i < ioreq->v.niov; i++) {
+            ioreq->v.iov[i].iov_base = ioreq->pages + i * XC_PAGE_SIZE +
+                (uintptr_t)ioreq->v.iov[i].iov_base;
         }
-        ioreq->blkdev->cnt_map += new_maps;
-    } else if (new_maps) {
-        for (i = 0; i < new_maps; i++) {
+        ioreq->blkdev->cnt_map += ioreq->v.niov;
+    } else {
+        for (i = 0; i < ioreq->v.niov; i++) {
             ioreq->page[i] = xengnttab_map_grant_ref
-                (gnt, domids[i], refs[i], ioreq->prot);
+                (gnt, ioreq->domids[i], ioreq->refs[i], ioreq->prot);
             if (ioreq->page[i] == NULL) {
                 xen_pv_printf(&ioreq->blkdev->xendev, 0,
                               "can't map grant ref %d (%s, %d maps)\n",
-                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
+                              ioreq->refs[i], strerror(errno),
+                              ioreq->blkdev->cnt_map);
                 ioreq->mapped = 1;
                 ioreq_unmap(ioreq);
                 return -1;
             }
-            ioreq->blkdev->cnt_map++;
-        }
-        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
-            if (page[i] == NULL) {
-                page[i] = ioreq->page[j++];
-            }
+            ioreq->v.iov[i].iov_base = ioreq->page[i] +
+                (uintptr_t)ioreq->v.iov[i].iov_base;
         }
     }
-    if (ioreq->blkdev->feature_persistent && new_maps != 0 &&
-        (!batch_maps || (ioreq->blkdev->persistent_gnt_count + new_maps <=
-        ioreq->blkdev->max_grants))) {
-        /*
-         * If we are using persistent grants and batch mappings only
-         * add the new maps to the list of persistent grants if the whole
-         * area can be persistently mapped.
-         */
-        if (batch_maps) {
-            region = g_malloc0(sizeof(*region));
-            region->addr = ioreq->pages;
-            region->num = new_maps;
-            ioreq->blkdev->persistent_regions = g_slist_append(
-                                               ioreq->blkdev->persistent_regions,
-                                               region);
-        }
-        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
-              && new_maps) {
-            /* Go through the list of newly mapped grants and add as many
-             * as possible to the list of persistently mapped grants.
-             *
-             * Since we start at the end of ioreq->page(s), we only need
-             * to decrease new_maps to prevent this granted pages from
-             * being unmapped in ioreq_unmap.
-             */
-            grant = g_malloc0(sizeof(*grant));
-            new_maps--;
-            if (batch_maps) {
-                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
-            } else {
-                grant->page = ioreq->page[new_maps];
-            }
-            grant->blkdev = ioreq->blkdev;
-            xen_pv_printf(&ioreq->blkdev->xendev, 3,
-                          "adding grant %" PRIu32 " page: %p\n",
-                          refs[new_maps], grant->page);
-            g_tree_insert(ioreq->blkdev->persistent_gnts,
-                          GUINT_TO_POINTER(refs[new_maps]),
-                          grant);
-            ioreq->blkdev->persistent_gnt_count++;
-        }
-        assert(!batch_maps || new_maps == 0);
-    }
-    for (i = 0; i < ioreq->v.niov; i++) {
-        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
-    }
     ioreq->mapped = 1;
-    ioreq->num_unmap = new_maps;
     return 0;
 }
 
@@ -1039,8 +884,6 @@ static int blk_init(struct XenDevice *xendev)
      * blk_connect supplies sector-size and sectors
      */
     xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
-    xenstore_write_be_int(&blkdev->xendev, "feature-persistent",
-                          !xen_feature_grant_copy);
     xenstore_write_be_int(&blkdev->xendev, "info", info);
 
     xenstore_write_be_int(&blkdev->xendev, "max-ring-page-order",
@@ -1079,7 +922,7 @@ out_error:
 static int blk_connect(struct XenDevice *xendev)
 {
     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
-    int pers, index, qflags;
+    int index, qflags;
     bool readonly = true;
     bool writethrough = true;
     int order, ring_ref;
@@ -1202,11 +1045,6 @@ static int blk_connect(struct XenDevice *xendev)
                              &blkdev->xendev.remote_port) == -1) {
         return -1;
     }
-    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
-        blkdev->feature_persistent = FALSE;
-    } else {
-        blkdev->feature_persistent = !!pers;
-    }
 
     if (!blkdev->xendev.protocol) {
         blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
@@ -1301,19 +1139,6 @@ static int blk_connect(struct XenDevice *xendev)
         }
     }
 
-    if (blkdev->feature_persistent) {
-        /* Init persistent grants */
-        blkdev->max_grants = blkdev->max_requests *
-            BLKIF_MAX_SEGMENTS_PER_REQUEST;
-        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
-                                                  NULL, NULL,
-                                                  batch_maps ?
-                                                  (GDestroyNotify)g_free :
-                                                  (GDestroyNotify)destroy_grant);
-        blkdev->persistent_regions = NULL;
-        blkdev->persistent_gnt_count = 0;
-    }
-
     blk_set_aio_context(blkdev->blk, blkdev->ctx);
 
     xen_be_bind_evtchn(&blkdev->xendev);
@@ -1350,26 +1175,6 @@ static void blk_disconnect(struct XenDevice *xendev)
         blkdev->sring = NULL;
     }
 
-    /*
-     * Unmap persistent grants before switching to the closed state
-     * so the frontend can free them.
-     *
-     * In the !batch_maps case g_tree_destroy will take care of unmapping
-     * the grant, but in the batch_maps case we need to iterate over every
-     * region in persistent_regions and unmap it.
-     */
-    if (blkdev->feature_persistent) {
-        g_tree_destroy(blkdev->persistent_gnts);
-        assert(batch_maps || blkdev->persistent_gnt_count == 0);
-        if (batch_maps) {
-            blkdev->persistent_gnt_count = 0;
-            g_slist_foreach(blkdev->persistent_regions,
-                            (GFunc)remove_persistent_region, blkdev);
-            g_slist_free(blkdev->persistent_regions);
-        }
-        blkdev->feature_persistent = false;
-    }
-
     if (blkdev->xendev.gnttabdev) {
         xengnttab_close(blkdev->xendev.gnttabdev);
         blkdev->xendev.gnttabdev = NULL;
-- 
2.1.4

From: Paul Durrant
Date: Mon, 30 Apr 2018 13:01:37 +0100
Message-ID: <1525089699-13411-3-git-send-email-paul.durrant@citrix.com>
In-Reply-To: <1525089699-13411-1-git-send-email-paul.durrant@citrix.com>
Subject: [Qemu-devel] [PATCH 2/4] block/xen_disk: remove use of grant map/unmap

The grant copy operation was added to libxengnttab in Xen 4.8.0. If grant
copy is available then data from the guest will be copied rather than
mapped. The xen_disk source can be significantly simplified by removing
this now-redundant code.
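For reference, libxengnttab describes a copy as an array of
xengnttab_grant_copy_segment_t, each pairing a guest grant reference with
a local buffer, which is exactly what the hunks below build. A
free-standing sketch of copying one granted page out of a guest (gnt,
domid, gref and len are caller-supplied placeholders):

#include <string.h>
#include <xengnttab.h>

/* Copy 'len' bytes granted by guest 'domid' under 'gref' into 'buf'.
 * Both the overall call result and the per-segment status must be
 * checked, as the driver code below also does. */
static int copy_from_guest(xengnttab_handle *gnt, uint32_t domid,
                           uint32_t gref, void *buf, uint16_t len)
{
    xengnttab_grant_copy_segment_t seg;

    memset(&seg, 0, sizeof(seg));
    seg.flags = GNTCOPY_source_gref;   /* guest page is the source */
    seg.source.foreign.ref = gref;
    seg.source.foreign.domid = domid;
    seg.source.foreign.offset = 0;
    seg.dest.virt = buf;               /* local buffer is the destination */
    seg.len = len;

    if (xengnttab_grant_copy(gnt, 1, &seg) != 0) {
        return -1;                     /* the copy operation itself failed */
    }
    return seg.status == GNTST_okay ? 0 : -1;
}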
Signed-off-by: Paul Durrant
---
Cc: Stefano Stabellini
Cc: Anthony Perard
Cc: Kevin Wolf
Cc: Max Reitz
---
 hw/block/xen_disk.c | 194 ++++++++---------------------------------------
 1 file changed, 27 insertions(+), 167 deletions(-)

diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index b33611a..8f4e229 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -36,10 +36,6 @@
 
 /* ------------------------------------------------------------- */
 
-static int batch_maps = 0;
-
-/* ------------------------------------------------------------- */
-
 #define BLOCK_SIZE  512
 #define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
 
@@ -51,12 +47,9 @@ struct ioreq {
     off_t start;
     QEMUIOVector v;
     int presync;
-    uint8_t mapped;
 
-    /* grant mapping */
     uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
     uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    int prot;
     void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
     void *pages;
 
@@ -89,7 +82,6 @@ struct XenBlkDev {
     int protocol;
     blkif_back_rings_t rings;
     int more_work;
-    int cnt_map;
 
     /* request lists */
     QLIST_HEAD(inflight_head, ioreq) inflight;
@@ -119,11 +111,9 @@ static void ioreq_reset(struct ioreq *ioreq)
     ioreq->status = 0;
     ioreq->start = 0;
     ioreq->presync = 0;
-    ioreq->mapped = 0;
 
     memset(ioreq->domids, 0, sizeof(ioreq->domids));
     memset(ioreq->refs, 0, sizeof(ioreq->refs));
-    ioreq->prot = 0;
     memset(ioreq->page, 0, sizeof(ioreq->page));
     ioreq->pages = NULL;
 
@@ -204,7 +194,6 @@ static int ioreq_parse(struct ioreq *ioreq)
                   ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
     switch (ioreq->req.operation) {
     case BLKIF_OP_READ:
-        ioreq->prot = PROT_WRITE; /* to memory */
         break;
     case BLKIF_OP_FLUSH_DISKCACHE:
         ioreq->presync = 1;
@@ -213,7 +202,6 @@
         }
         /* fall through */
     case BLKIF_OP_WRITE:
-        ioreq->prot = PROT_READ; /* from memory */
         break;
     case BLKIF_OP_DISCARD:
         return 0;
@@ -261,88 +249,6 @@ err:
     return -1;
 }
 
-static void ioreq_unmap(struct ioreq *ioreq)
-{
-    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
-    int i;
-
-    if (ioreq->v.niov == 0 || ioreq->mapped == 0) {
-        return;
-    }
-    if (batch_maps) {
-        if (!ioreq->pages) {
-            return;
-        }
-        if (xengnttab_unmap(gnt, ioreq->pages, ioreq->v.niov) != 0) {
-            xen_pv_printf(&ioreq->blkdev->xendev, 0,
-                          "xengnttab_unmap failed: %s\n",
-                          strerror(errno));
-        }
-        ioreq->blkdev->cnt_map -= ioreq->v.niov;
-        ioreq->pages = NULL;
-    } else {
-        for (i = 0; i < ioreq->v.niov; i++) {
-            if (!ioreq->page[i]) {
-                continue;
-            }
-            if (xengnttab_unmap(gnt, ioreq->page[i], 1) != 0) {
-                xen_pv_printf(&ioreq->blkdev->xendev, 0,
-                              "xengnttab_unmap failed: %s\n",
-                              strerror(errno));
-            }
-            ioreq->blkdev->cnt_map--;
-            ioreq->page[i] = NULL;
-        }
-    }
-    ioreq->mapped = 0;
-}
-
-static int ioreq_map(struct ioreq *ioreq)
-{
-    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
-    int i;
-
-    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
-        return 0;
-    }
-    if (batch_maps) {
-        ioreq->pages = xengnttab_map_grant_refs
-            (gnt, ioreq->v.niov, ioreq->domids, ioreq->refs, ioreq->prot);
-        if (ioreq->pages == NULL) {
-            xen_pv_printf(&ioreq->blkdev->xendev, 0,
-                          "can't map %d grant refs (%s, %d maps)\n",
-                          ioreq->v.niov, strerror(errno),
-                          ioreq->blkdev->cnt_map);
-            return -1;
-        }
-        for (i = 0; i < ioreq->v.niov; i++) {
-            ioreq->v.iov[i].iov_base = ioreq->pages + i * XC_PAGE_SIZE +
-                (uintptr_t)ioreq->v.iov[i].iov_base;
-        }
-        ioreq->blkdev->cnt_map += ioreq->v.niov;
-    } else {
-        for (i = 0; i < ioreq->v.niov; i++) {
-            ioreq->page[i] = xengnttab_map_grant_ref
-                (gnt, ioreq->domids[i], ioreq->refs[i], ioreq->prot);
-            if (ioreq->page[i] == NULL) {
-                xen_pv_printf(&ioreq->blkdev->xendev, 0,
-                              "can't map grant ref %d (%s, %d maps)\n",
-                              ioreq->refs[i], strerror(errno),
-                              ioreq->blkdev->cnt_map);
-                ioreq->mapped = 1;
-                ioreq_unmap(ioreq);
-                return -1;
-            }
-            ioreq->v.iov[i].iov_base = ioreq->page[i] +
-                (uintptr_t)ioreq->v.iov[i].iov_base;
-        }
-    }
-    ioreq->mapped = 1;
-    return 0;
-}
-
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40800
-
 static void ioreq_free_copy_buffers(struct ioreq *ioreq)
 {
     int i;
@@ -424,22 +330,6 @@ static int ioreq_grant_copy(struct ioreq *ioreq)
 
     return rc;
 }
-#else
-static void ioreq_free_copy_buffers(struct ioreq *ioreq)
-{
-    abort();
-}
-
-static int ioreq_init_copy_buffers(struct ioreq *ioreq)
-{
-    abort();
-}
-
-static int ioreq_grant_copy(struct ioreq *ioreq)
-{
-    abort();
-}
-#endif
 
 static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
 
@@ -466,32 +356,28 @@ static void qemu_aio_complete(void *opaque, int ret)
         goto done;
     }
 
-    if (xen_feature_grant_copy) {
-        switch (ioreq->req.operation) {
-        case BLKIF_OP_READ:
-            /* in case of failure ioreq->aio_errors is increased */
-            if (ret == 0) {
-                ioreq_grant_copy(ioreq);
-            }
-            ioreq_free_copy_buffers(ioreq);
-            break;
-        case BLKIF_OP_WRITE:
-        case BLKIF_OP_FLUSH_DISKCACHE:
-            if (!ioreq->req.nr_segments) {
-                break;
-            }
-            ioreq_free_copy_buffers(ioreq);
-            break;
-        default:
+    switch (ioreq->req.operation) {
+    case BLKIF_OP_READ:
+        /* in case of failure ioreq->aio_errors is increased */
+        if (ret == 0) {
+            ioreq_grant_copy(ioreq);
+        }
+        ioreq_free_copy_buffers(ioreq);
+        break;
+    case BLKIF_OP_WRITE:
+    case BLKIF_OP_FLUSH_DISKCACHE:
+        if (!ioreq->req.nr_segments) {
             break;
         }
+        ioreq_free_copy_buffers(ioreq);
+        break;
+    default:
+        break;
     }
 
     ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
-    if (!xen_feature_grant_copy) {
-        ioreq_unmap(ioreq);
-    }
     ioreq_finish(ioreq);
+
     switch (ioreq->req.operation) {
     case BLKIF_OP_WRITE:
     case BLKIF_OP_FLUSH_DISKCACHE:
@@ -551,18 +437,13 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
 {
     struct XenBlkDev *blkdev = ioreq->blkdev;
 
-    if (xen_feature_grant_copy) {
-        ioreq_init_copy_buffers(ioreq);
-        if (ioreq->req.nr_segments && (ioreq->req.operation == BLKIF_OP_WRITE ||
-            ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
-            ioreq_grant_copy(ioreq)) {
-            ioreq_free_copy_buffers(ioreq);
-            goto err;
-        }
-    } else {
-        if (ioreq->req.nr_segments && ioreq_map(ioreq)) {
-            goto err;
-        }
+    ioreq_init_copy_buffers(ioreq);
+    if (ioreq->req.nr_segments &&
+        (ioreq->req.operation == BLKIF_OP_WRITE ||
+         ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
+        ioreq_grant_copy(ioreq)) {
+        ioreq_free_copy_buffers(ioreq);
+        goto err;
     }
 
     ioreq->aio_inflight++;
@@ -603,9 +484,6 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
     }
     default:
         /* unknown operation (shouldn't happen -- parse catches this) */
-        if (!xen_feature_grant_copy) {
-            ioreq_unmap(ioreq);
-        }
         goto err;
     }
 
@@ -791,10 +669,6 @@ static void blk_alloc(struct XenDevice *xendev)
 
     blkdev->ctx = iothread_get_aio_context(blkdev->iothread);
     blkdev->bh = aio_bh_new(blkdev->ctx, blk_bh, blkdev);
-
-    if (xen_mode != XEN_EMULATE) {
-        batch_maps = 1;
-    }
 }
 
 static void blk_parse_discard(struct XenBlkDev *blkdev)
@@ -877,8 +751,9 @@ static int blk_init(struct XenDevice *xendev)
 
     blkdev->file_blk = BLOCK_SIZE;
 
-    xen_pv_printf(&blkdev->xendev, 3, "grant copy operation %s\n",
-                  xen_feature_grant_copy ? "enabled" : "disabled");
+    if (!xen_feature_grant_copy) {
+        goto out_error;
+    }
 
     /* fill info
      * blk_connect supplies sector-size and sectors
@@ -910,15 +785,6 @@ out_error:
     return -1;
 }
 
-/*
- * We need to account for the grant allocations requiring contiguous
- * chunks; the worst case number would be
- *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
- * but in order to keep things simple just use
- *     2 * max_req * max_seg.
- */
-#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
-
 static int blk_connect(struct XenDevice *xendev)
 {
     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
@@ -1079,11 +945,8 @@ static int blk_connect(struct XenDevice *xendev)
         return -1;
     }
 
-    /* Calculate the maximum number of grants needed by ioreqs */
-    max_grants = MAX_GRANTS(blkdev->max_requests,
-                            BLKIF_MAX_SEGMENTS_PER_REQUEST);
     /* Add on the number needed for the ring pages */
-    max_grants += blkdev->nr_ring_ref;
+    max_grants = blkdev->nr_ring_ref;
 
     blkdev->xendev.gnttabdev = xengnttab_open(NULL, 0);
     if (blkdev->xendev.gnttabdev == NULL) {
@@ -1114,8 +977,6 @@ static int blk_connect(struct XenDevice *xendev)
         return -1;
     }
 
-    blkdev->cnt_map++;
-
     switch (blkdev->protocol) {
     case BLKIF_PROTOCOL_NATIVE:
     {
@@ -1171,7 +1032,6 @@ static void blk_disconnect(struct XenDevice *xendev)
     if (blkdev->sring) {
         xengnttab_unmap(blkdev->xendev.gnttabdev, blkdev->sring,
                         blkdev->nr_ring_ref);
-        blkdev->cnt_map--;
         blkdev->sring = NULL;
     }
 
-- 
2.1.4

From: Paul Durrant
Date: Mon, 30 Apr 2018 13:01:38 +0100
Message-ID: <1525089699-13411-4-git-send-email-paul.durrant@citrix.com>
In-Reply-To: <1525089699-13411-1-git-send-email-paul.durrant@citrix.com>
Subject: [Qemu-devel] [PATCH 3/4] block/xen_disk: use a single entry iovec

Since xen_disk now always copies data to and from a guest there is no need
to maintain a vector entry corresponding to every page of a request. This
means there is less per-request state to maintain, so the ioreq structure
can shrink significantly.

Signed-off-by: Paul Durrant
---
Cc: Stefano Stabellini
Cc: Anthony Perard
Cc: Kevin Wolf
Cc: Max Reitz
---
 hw/block/xen_disk.c | 103 ++++++++++++++++-------------------------------
 1 file changed, 31 insertions(+), 72 deletions(-)

diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index 8f4e229..6d737fd 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -46,13 +46,10 @@ struct ioreq {
     /* parsed request */
     off_t start;
     QEMUIOVector v;
+    void *buf;
+    size_t size;
     int presync;
 
-    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    void *pages;
-
     /* aio status */
     int aio_inflight;
     int aio_errors;
@@ -110,13 +107,10 @@ static void ioreq_reset(struct ioreq *ioreq)
     memset(&ioreq->req, 0, sizeof(ioreq->req));
     ioreq->status = 0;
     ioreq->start = 0;
+    ioreq->buf = NULL;
+    ioreq->size = 0;
     ioreq->presync = 0;
 
-    memset(ioreq->domids, 0, sizeof(ioreq->domids));
-    memset(ioreq->refs, 0, sizeof(ioreq->refs));
-    memset(ioreq->page, 0, sizeof(ioreq->page));
-    ioreq->pages = NULL;
-
     ioreq->aio_inflight = 0;
     ioreq->aio_errors = 0;
 
@@ -139,7 +133,7 @@ static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
         ioreq = g_malloc0(sizeof(*ioreq));
         ioreq->blkdev = blkdev;
         blkdev->requests_total++;
-        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+        qemu_iovec_init(&ioreq->v, 1);
     } else {
         /* get one from freelist */
         ioreq = QLIST_FIRST(&blkdev->freelist);
@@ -184,7 +178,6 @@ static void ioreq_release(struct ioreq *ioreq, bool finish)
 static int ioreq_parse(struct ioreq *ioreq)
 {
     struct XenBlkDev *blkdev = ioreq->blkdev;
-    uintptr_t mem;
     size_t len;
     int i;
 
@@ -231,14 +224,10 @@ static int ioreq_parse(struct ioreq *ioreq)
             goto err;
         }
 
-        ioreq->domids[i] = blkdev->xendev.dom;
-        ioreq->refs[i] = ioreq->req.seg[i].gref;
-
-        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
         len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
-        qemu_iovec_add(&ioreq->v, (void*)mem, len);
+        ioreq->size += len;
     }
-    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
+    if (ioreq->start + ioreq->size > blkdev->file_size) {
         xen_pv_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
         goto err;
     }
@@ -249,85 +238,55 @@ err:
     return -1;
 }
 
-static void ioreq_free_copy_buffers(struct ioreq *ioreq)
-{
-    int i;
-
-    for (i = 0; i < ioreq->v.niov; i++) {
-        ioreq->page[i] = NULL;
-    }
-
-    qemu_vfree(ioreq->pages);
-}
-
-static int ioreq_init_copy_buffers(struct ioreq *ioreq)
-{
-    int i;
-
-    if (ioreq->v.niov == 0) {
-        return 0;
-    }
-
-    ioreq->pages = qemu_memalign(XC_PAGE_SIZE, ioreq->v.niov * XC_PAGE_SIZE);
-
-    for (i = 0; i < ioreq->v.niov; i++) {
-        ioreq->page[i] = ioreq->pages + i * XC_PAGE_SIZE;
-        ioreq->v.iov[i].iov_base = ioreq->page[i];
-    }
-
-    return 0;
-}
-
 static int ioreq_grant_copy(struct ioreq *ioreq)
 {
-    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
+    struct XenBlkDev *blkdev = ioreq->blkdev;
+    xengnttab_handle *gnt = blkdev->xendev.gnttabdev;
+    void *virt = ioreq->buf;
     xengnttab_grant_copy_segment_t segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-    int i, count, rc;
-    int64_t file_blk = ioreq->blkdev->file_blk;
-
-    if (ioreq->v.niov == 0) {
-        return 0;
-    }
+    int i, rc;
+    int64_t file_blk = blkdev->file_blk;
 
-    count = ioreq->v.niov;
-
-    for (i = 0; i < count; i++) {
+    for (i = 0; i < ioreq->req.nr_segments; i++) {
         if (ioreq->req.operation == BLKIF_OP_READ) {
             segs[i].flags = GNTCOPY_dest_gref;
-            segs[i].dest.foreign.ref = ioreq->refs[i];
-            segs[i].dest.foreign.domid = ioreq->domids[i];
+            segs[i].dest.foreign.ref = ioreq->req.seg[i].gref;
+            segs[i].dest.foreign.domid = blkdev->xendev.dom;
             segs[i].dest.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
-            segs[i].source.virt = ioreq->v.iov[i].iov_base;
+            segs[i].source.virt = virt;
         } else {
             segs[i].flags = GNTCOPY_source_gref;
-            segs[i].source.foreign.ref = ioreq->refs[i];
-            segs[i].source.foreign.domid = ioreq->domids[i];
+            segs[i].source.foreign.ref = ioreq->req.seg[i].gref;
+            segs[i].source.foreign.domid = blkdev->xendev.dom;
             segs[i].source.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
-            segs[i].dest.virt = ioreq->v.iov[i].iov_base;
+            segs[i].dest.virt = virt;
         }
         segs[i].len = (ioreq->req.seg[i].last_sect -
                        ioreq->req.seg[i].first_sect + 1) * file_blk;
+        virt += segs[i].len;
     }
 
-    rc = xengnttab_grant_copy(gnt, count, segs);
+    rc = xengnttab_grant_copy(gnt, ioreq->req.nr_segments, segs);
 
     if (rc) {
-        xen_pv_printf(&ioreq->blkdev->xendev, 0,
+        xen_pv_printf(&blkdev->xendev, 0,
                       "failed to copy data %d\n", rc);
         ioreq->aio_errors++;
         return -1;
     }
 
-    for (i = 0; i < count; i++) {
+    for (i = 0; i < ioreq->req.nr_segments; i++) {
         if (segs[i].status != GNTST_okay) {
-            xen_pv_printf(&ioreq->blkdev->xendev, 3,
+            xen_pv_printf(&blkdev->xendev, 3,
                           "failed to copy data %d for gref %d, domid %d\n",
-                          segs[i].status, ioreq->refs[i], ioreq->domids[i]);
+                          segs[i].status, ioreq->req.seg[i].gref,
+                          blkdev->xendev.dom);
             ioreq->aio_errors++;
             rc = -1;
         }
     }
 
+    qemu_iovec_add(&ioreq->v, ioreq->buf, ioreq->size);
     return rc;
 }
 
@@ -362,14 +321,14 @@ static void qemu_aio_complete(void *opaque, int ret)
         if (ret == 0) {
             ioreq_grant_copy(ioreq);
         }
-        ioreq_free_copy_buffers(ioreq);
+        qemu_vfree(ioreq->buf);
         break;
     case BLKIF_OP_WRITE:
     case BLKIF_OP_FLUSH_DISKCACHE:
         if (!ioreq->req.nr_segments) {
             break;
         }
-        ioreq_free_copy_buffers(ioreq);
+        qemu_vfree(ioreq->buf);
         break;
     default:
         break;
@@ -437,12 +396,12 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
 {
     struct XenBlkDev *blkdev = ioreq->blkdev;
 
-    ioreq_init_copy_buffers(ioreq);
+    ioreq->buf = qemu_memalign(XC_PAGE_SIZE, ioreq->size);
     if (ioreq->req.nr_segments &&
         (ioreq->req.operation == BLKIF_OP_WRITE ||
          ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
         ioreq_grant_copy(ioreq)) {
-        ioreq_free_copy_buffers(ioreq);
+        qemu_vfree(ioreq->buf);
         goto err;
     }
 
-- 
2.1.4
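Reduced to its essentials, the single-entry iovec pattern adopted in the
patch above looks like this (a sketch reusing the QEMU helpers the diff
already touches -- qemu_memalign(), qemu_iovec_init(), qemu_iovec_add() --
and assuming QEMU-internal headers are available; 'size' stands for the
parsed request size):

/* One page-aligned bounce buffer covers the whole request and is
 * published to the block layer as a single iovec element. */
static void single_entry_iovec_sketch(QEMUIOVector *v, size_t size)
{
    void *buf = qemu_memalign(XC_PAGE_SIZE, size);

    qemu_iovec_init(v, 1);           /* capacity for exactly one entry */
    qemu_iovec_add(v, buf, size);    /* grant copy targets 'buf' directly */

    /* ... submit block I/O against 'v', grant-copy to/from 'buf' ... */

    qemu_iovec_reset(v);
    qemu_vfree(buf);
}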
From: Paul Durrant
Date: Mon, 30 Apr 2018 13:01:39 +0100
Message-ID: <1525089699-13411-5-git-send-email-paul.durrant@citrix.com>
In-Reply-To: <1525089699-13411-1-git-send-email-paul.durrant@citrix.com>
Subject: [Qemu-devel] [PATCH 4/4] block/xen_disk: be consistent with use of xendev and blkdev->xendev

Certain functions in xen_disk are called with a pointer to xendev (a
struct XenDevice *). They then use container_of() to access the
surrounding blkdev (a struct XenBlkDev), but in various places still use
&blkdev->xendev where the original xendev pointer is shorter to express
and clearly equivalent. This purely cosmetic patch makes sure there is a
xendev pointer on the stack in any function that needs the pointer on
multiple occasions, and modifies those functions to use it consistently.
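For readers unfamiliar with the idiom: container_of() recovers a pointer
to the enclosing structure from a pointer to one of its embedded members,
which is why every callback that receives the xendev can cheaply derive
the blkdev. A self-contained illustration (generic C with simplified
types, not QEMU's actual definitions):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct XenDevice { int dom; };

struct XenBlkDev {
    struct XenDevice xendev;   /* embedded base object */
    long file_size;
};

/* Callbacks get the embedded xendev; container_of() recovers blkdev.
 * Keeping the xendev argument around for device-level calls avoids
 * re-deriving &blkdev->xendev everywhere -- the consistency this
 * patch enforces. */
static void show(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    printf("dom %d, size %ld\n", xendev->dom, blkdev->file_size);
}

int main(void)
{
    struct XenBlkDev blkdev = { .xendev = { .dom = 1 }, .file_size = 4096 };

    show(&blkdev.xendev);
    return 0;
}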
Signed-off-by: Paul Durrant
---
Cc: Stefano Stabellini
Cc: Anthony Perard
Cc: Kevin Wolf
Cc: Max Reitz
---
 hw/block/xen_disk.c | 116 +++++++++++++++++++++++++--------------------
 1 file changed, 60 insertions(+), 56 deletions(-)

diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index 6d737fd..b538d21 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -178,10 +178,11 @@ static void ioreq_release(struct ioreq *ioreq, bool finish)
 static int ioreq_parse(struct ioreq *ioreq)
 {
     struct XenBlkDev *blkdev = ioreq->blkdev;
+    struct XenDevice *xendev = &blkdev->xendev;
     size_t len;
     int i;
 
-    xen_pv_printf(&blkdev->xendev, 3,
+    xen_pv_printf(xendev, 3,
                   "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                   ioreq->req.operation, ioreq->req.nr_segments,
                   ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
@@ -199,28 +200,28 @@ static int ioreq_parse(struct ioreq *ioreq)
     case BLKIF_OP_DISCARD:
         return 0;
     default:
-        xen_pv_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
+        xen_pv_printf(xendev, 0, "error: unknown operation (%d)\n",
                       ioreq->req.operation);
         goto err;
     };
 
     if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
-        xen_pv_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
+        xen_pv_printf(xendev, 0, "error: write req for ro device\n");
         goto err;
     }
 
     ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
     for (i = 0; i < ioreq->req.nr_segments; i++) {
         if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
-            xen_pv_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
+            xen_pv_printf(xendev, 0, "error: nr_segments too big\n");
             goto err;
         }
         if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
-            xen_pv_printf(&blkdev->xendev, 0, "error: first > last sector\n");
+            xen_pv_printf(xendev, 0, "error: first > last sector\n");
             goto err;
         }
         if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
-            xen_pv_printf(&blkdev->xendev, 0, "error: page crossing\n");
+            xen_pv_printf(xendev, 0, "error: page crossing\n");
             goto err;
         }
 
@@ -228,7 +229,7 @@ static int ioreq_parse(struct ioreq *ioreq)
         ioreq->size += len;
     }
     if (ioreq->start + ioreq->size > blkdev->file_size) {
-        xen_pv_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
+        xen_pv_printf(xendev, 0, "error: access beyond end of file\n");
         goto err;
     }
     return 0;
@@ -241,7 +242,8 @@ err:
 static int ioreq_grant_copy(struct ioreq *ioreq)
 {
     struct XenBlkDev *blkdev = ioreq->blkdev;
-    xengnttab_handle *gnt = blkdev->xendev.gnttabdev;
+    struct XenDevice *xendev = &blkdev->xendev;
+    xengnttab_handle *gnt = xendev->gnttabdev;
     void *virt = ioreq->buf;
     xengnttab_grant_copy_segment_t segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
     int i, rc;
@@ -251,13 +253,13 @@ static int ioreq_grant_copy(struct ioreq *ioreq)
         if (ioreq->req.operation == BLKIF_OP_READ) {
             segs[i].flags = GNTCOPY_dest_gref;
             segs[i].dest.foreign.ref = ioreq->req.seg[i].gref;
-            segs[i].dest.foreign.domid = blkdev->xendev.dom;
+            segs[i].dest.foreign.domid = xendev->dom;
             segs[i].dest.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
             segs[i].source.virt = virt;
         } else {
             segs[i].flags = GNTCOPY_source_gref;
             segs[i].source.foreign.ref = ioreq->req.seg[i].gref;
-            segs[i].source.foreign.domid = blkdev->xendev.dom;
+            segs[i].source.foreign.domid = xendev->dom;
             segs[i].source.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
             segs[i].dest.virt = virt;
         }
@@ -269,7 +271,7 @@ static int ioreq_grant_copy(struct ioreq *ioreq)
     rc = xengnttab_grant_copy(gnt, ioreq->req.nr_segments, segs);
 
     if (rc) {
-        xen_pv_printf(&blkdev->xendev, 0,
+        xen_pv_printf(xendev, 0,
                       "failed to copy data %d\n", rc);
         ioreq->aio_errors++;
         return -1;
@@ -277,10 +279,10 @@ static int ioreq_grant_copy(struct ioreq *ioreq)
 
     for (i = 0; i < ioreq->req.nr_segments; i++) {
         if (segs[i].status != GNTST_okay) {
-            xen_pv_printf(&blkdev->xendev, 3,
+            xen_pv_printf(xendev, 3,
                           "failed to copy data %d for gref %d, domid %d\n",
                           segs[i].status, ioreq->req.seg[i].gref,
-                          blkdev->xendev.dom);
+                          xendev->dom);
             ioreq->aio_errors++;
             rc = -1;
         }
@@ -296,11 +298,12 @@ static void qemu_aio_complete(void *opaque, int ret)
 {
     struct ioreq *ioreq = opaque;
     struct XenBlkDev *blkdev = ioreq->blkdev;
+    struct XenDevice *xendev = &blkdev->xendev;
 
     aio_context_acquire(blkdev->ctx);
 
     if (ret != 0) {
-        xen_pv_printf(&blkdev->xendev, 0, "%s I/O error\n",
+        xen_pv_printf(xendev, 0, "%s I/O error\n",
                       ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
         ioreq->aio_errors++;
     }
@@ -632,16 +635,17 @@ static void blk_alloc(struct XenDevice *xendev)
 
 static void blk_parse_discard(struct XenBlkDev *blkdev)
 {
+    struct XenDevice *xendev = &blkdev->xendev;
     int enable;
 
     blkdev->feature_discard = true;
 
-    if (xenstore_read_be_int(&blkdev->xendev, "discard-enable", &enable) == 0) {
+    if (xenstore_read_be_int(xendev, "discard-enable", &enable) == 0) {
         blkdev->feature_discard = !!enable;
     }
 
     if (blkdev->feature_discard) {
-        xenstore_write_be_int(&blkdev->xendev, "feature-discard", 1);
+        xenstore_write_be_int(xendev, "feature-discard", 1);
     }
 }
 
@@ -656,7 +660,7 @@ static int blk_init(struct XenDevice *xendev)
     /* read xenstore entries */
     if (blkdev->params == NULL) {
         char *h = NULL;
-        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
+        blkdev->params = xenstore_read_be_str(xendev, "params");
         if (blkdev->params != NULL) {
             h = strchr(blkdev->params, ':');
         }
@@ -676,18 +680,18 @@ static int blk_init(struct XenDevice *xendev)
         blkdev->fileproto = "vpc";
     }
     if (blkdev->mode == NULL) {
-        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
+        blkdev->mode = xenstore_read_be_str(xendev, "mode");
     }
     if (blkdev->type == NULL) {
-        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
+        blkdev->type = xenstore_read_be_str(xendev, "type");
     }
     if (blkdev->dev == NULL) {
-        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
+        blkdev->dev = xenstore_read_be_str(xendev, "dev");
     }
     if (blkdev->devtype == NULL) {
-        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
+        blkdev->devtype = xenstore_read_be_str(xendev, "device-type");
     }
-    directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
+    directiosafe = xenstore_read_be_str(xendev, "direct-io-safe");
     blkdev->directiosafe = (directiosafe && atoi(directiosafe));
 
     /* do we have all we need? */
@@ -717,10 +721,10 @@ static int blk_init(struct XenDevice *xendev)
     /* fill info
      * blk_connect supplies sector-size and sectors
      */
-    xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
-    xenstore_write_be_int(&blkdev->xendev, "info", info);
+    xenstore_write_be_int(xendev, "feature-flush-cache", 1);
+    xenstore_write_be_int(xendev, "info", info);
 
-    xenstore_write_be_int(&blkdev->xendev, "max-ring-page-order",
+    xenstore_write_be_int(xendev, "max-ring-page-order",
                           MAX_RING_PAGE_ORDER);
 
     blk_parse_discard(blkdev);
@@ -773,7 +777,7 @@ static int blk_connect(struct XenDevice *xendev)
     }
 
     /* init qemu block driver */
-    index = (blkdev->xendev.dev - 202 * 256) / 16;
+    index = (xendev->dev - 202 * 256) / 16;
     blkdev->dinfo = drive_get(IF_XEN, 0, index);
     if (!blkdev->dinfo) {
         Error *local_err = NULL;
@@ -785,11 +789,11 @@ static int blk_connect(struct XenDevice *xendev)
         }
 
         /* setup via xenbus -> create new block driver instance */
-        xen_pv_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
+        xen_pv_printf(xendev, 2, "create new bdrv (xenbus setup)\n");
         blkdev->blk = blk_new_open(blkdev->filename, NULL, options,
                                    qflags, &local_err);
         if (!blkdev->blk) {
-            xen_pv_printf(&blkdev->xendev, 0, "error: %s\n",
+            xen_pv_printf(xendev, 0, "error: %s\n",
                           error_get_pretty(local_err));
             error_free(local_err);
             return -1;
@@ -797,11 +801,11 @@ static int blk_connect(struct XenDevice *xendev)
         blk_set_enable_write_cache(blkdev->blk, !writethrough);
     } else {
         /* setup via qemu cmdline -> already setup for us */
-        xen_pv_printf(&blkdev->xendev, 2,
+        xen_pv_printf(xendev, 2,
                       "get configured bdrv (cmdline setup)\n");
         blkdev->blk = blk_by_legacy_dinfo(blkdev->dinfo);
         if (blk_is_read_only(blkdev->blk) && !readonly) {
-            xen_pv_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
+            xen_pv_printf(xendev, 0, "Unexpected read-only drive");
             blkdev->blk = NULL;
             return -1;
         }
@@ -814,7 +818,7 @@ static int blk_connect(struct XenDevice *xendev)
     if (blkdev->file_size < 0) {
         BlockDriverState *bs = blk_bs(blkdev->blk);
         const char *drv_name = bs ? bdrv_get_format_name(bs) : NULL;
-        xen_pv_printf(&blkdev->xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
+        xen_pv_printf(xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
                       (int)blkdev->file_size, strerror(-blkdev->file_size),
                       drv_name ?: "-");
         blkdev->file_size = 0;
@@ -826,15 +830,15 @@ static int blk_connect(struct XenDevice *xendev)
                   blkdev->file_size, blkdev->file_size >> 20);
 
     /* Fill in number of sector size and number of sectors */
-    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
-    xenstore_write_be_int64(&blkdev->xendev, "sectors",
+    xenstore_write_be_int(xendev, "sector-size", blkdev->file_blk);
+    xenstore_write_be_int64(xendev, "sectors",
                             blkdev->file_size / blkdev->file_blk);
 
-    if (xenstore_read_fe_int(&blkdev->xendev, "ring-page-order",
+    if (xenstore_read_fe_int(xendev, "ring-page-order",
                              &order) == -1) {
         blkdev->nr_ring_ref = 1;
 
-        if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref",
+        if (xenstore_read_fe_int(xendev, "ring-ref",
                                  &ring_ref) == -1) {
             return -1;
         }
@@ -851,7 +855,7 @@ static int blk_connect(struct XenDevice *xendev)
                 return -1;
             }
 
-            if (xenstore_read_fe_int(&blkdev->xendev, key,
+            if (xenstore_read_fe_int(xendev, key,
                                      &ring_ref) == -1) {
                 g_free(key);
                 return -1;
@@ -866,18 +870,18 @@ static int blk_connect(struct XenDevice *xendev)
         return -1;
     }
 
-    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
-                             &blkdev->xendev.remote_port) == -1) {
+    if (xenstore_read_fe_int(xendev, "event-channel",
+                             &xendev->remote_port) == -1) {
         return -1;
     }
 
-    if (!blkdev->xendev.protocol) {
+    if (!xendev->protocol) {
         blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
-    } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_NATIVE) == 0) {
+    } else if (strcmp(xendev->protocol, XEN_IO_PROTO_ABI_NATIVE) == 0) {
         blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
-    } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
+    } else if (strcmp(xendev->protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
         blkdev->protocol = BLKIF_PROTOCOL_X86_32;
-    } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
+    } else if (strcmp(xendev->protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
         blkdev->protocol = BLKIF_PROTOCOL_X86_64;
     } else {
         blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
@@ -907,13 +911,13 @@ static int blk_connect(struct XenDevice *xendev)
     /* Add on the number needed for the ring pages */
     max_grants = blkdev->nr_ring_ref;
 
-    blkdev->xendev.gnttabdev = xengnttab_open(NULL, 0);
-    if (blkdev->xendev.gnttabdev == NULL) {
+    xendev->gnttabdev = xengnttab_open(NULL, 0);
+    if (xendev->gnttabdev == NULL) {
         xen_pv_printf(xendev, 0, "xengnttab_open failed: %s\n",
                       strerror(errno));
         return -1;
     }
-    if (xengnttab_set_max_grants(blkdev->xendev.gnttabdev, max_grants)) {
+    if (xengnttab_set_max_grants(xendev->gnttabdev, max_grants)) {
         xen_pv_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
                       strerror(errno));
         return -1;
@@ -921,10 +925,10 @@ static int blk_connect(struct XenDevice *xendev)
 
     domids = g_new0(uint32_t, blkdev->nr_ring_ref);
     for (i = 0; i < blkdev->nr_ring_ref; i++) {
-        domids[i] = blkdev->xendev.dom;
+        domids[i] = xendev->dom;
     }
 
-    blkdev->sring = xengnttab_map_grant_refs(blkdev->xendev.gnttabdev,
+    blkdev->sring = xengnttab_map_grant_refs(xendev->gnttabdev,
                                              blkdev->nr_ring_ref,
                                              domids,
                                              blkdev->ring_ref,
@@ -961,12 +965,12 @@ static int blk_connect(struct XenDevice *xendev)
 
     blk_set_aio_context(blkdev->blk, blkdev->ctx);
 
-    xen_be_bind_evtchn(&blkdev->xendev);
+    xen_be_bind_evtchn(xendev);
 
-    xen_pv_printf(&blkdev->xendev, 1, "ok: proto %s, nr-ring-ref %u, "
+    xen_pv_printf(xendev, 1, "ok: proto %s, nr-ring-ref %u, "
                   "remote port %d, local port %d\n",
-                  blkdev->xendev.protocol, blkdev->nr_ring_ref,
-                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
+                  xendev->protocol, blkdev->nr_ring_ref,
+                  xendev->remote_port, xendev->local_port);
     return 0;
 }
 
@@ -984,19 +988,19 @@ static void blk_disconnect(struct XenDevice *xendev)
         blk_unref(blkdev->blk);
         blkdev->blk = NULL;
     }
-    xen_pv_unbind_evtchn(&blkdev->xendev);
+    xen_pv_unbind_evtchn(xendev);
 
     aio_context_release(blkdev->ctx);
 
     if (blkdev->sring) {
-        xengnttab_unmap(blkdev->xendev.gnttabdev, blkdev->sring,
+        xengnttab_unmap(xendev->gnttabdev, blkdev->sring,
                         blkdev->nr_ring_ref);
         blkdev->sring = NULL;
     }
 
-    if (blkdev->xendev.gnttabdev) {
-        xengnttab_close(blkdev->xendev.gnttabdev);
-        blkdev->xendev.gnttabdev = NULL;
+    if (xendev->gnttabdev) {
+        xengnttab_close(xendev->gnttabdev);
+        xendev->gnttabdev = NULL;
     }
 }
 
-- 
2.1.4