From: Leon Romanovsky
To: Jens Axboe, Keith Busch, Christoph Hellwig, Sagi Grimberg
Cc: linux-block@vger.kernel.org, linux-kernel@vger.kernel.org, linux-nvme@lists.infradead.org
Subject: [PATCH v4 1/2] nvme-pci: migrate to dma_map_phys instead of map_page
Date: Wed, 12 Nov 2025 21:48:04 +0200
Message-ID: <20251112-block-with-mmio-v4-1-54aeb609d28d@nvidia.com>
In-Reply-To: <20251112-block-with-mmio-v4-0-54aeb609d28d@nvidia.com>
References: <20251112-block-with-mmio-v4-0-54aeb609d28d@nvidia.com>

From: Leon Romanovsky

After the introduction of dma_map_phys(), there is no need to convert a
physical address to a struct page just to map it. So let's use
dma_map_phys() directly.
Reviewed-by: Keith Busch
Reviewed-by: Christoph Hellwig
Signed-off-by: Leon Romanovsky
Reviewed-by: Chaitanya Kulkarni
---
 block/blk-mq-dma.c      |  4 ++--
 drivers/nvme/host/pci.c | 25 +++++++++++++------------
 2 files changed, 15 insertions(+), 14 deletions(-)

diff --git a/block/blk-mq-dma.c b/block/blk-mq-dma.c
index 449950029872..4ba7b0323da4 100644
--- a/block/blk-mq-dma.c
+++ b/block/blk-mq-dma.c
@@ -93,8 +93,8 @@ static bool blk_dma_map_bus(struct blk_dma_iter *iter, struct phys_vec *vec)
 static bool blk_dma_map_direct(struct request *req, struct device *dma_dev,
 		struct blk_dma_iter *iter, struct phys_vec *vec)
 {
-	iter->addr = dma_map_page(dma_dev, phys_to_page(vec->paddr),
-			offset_in_page(vec->paddr), vec->len, rq_dma_dir(req));
+	iter->addr = dma_map_phys(dma_dev, vec->paddr, vec->len,
+			rq_dma_dir(req), 0);
 	if (dma_mapping_error(dma_dev, iter->addr)) {
 		iter->status = BLK_STS_RESOURCE;
 		return false;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c916176bd9f0..002412431940 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -685,20 +685,20 @@ static void nvme_free_descriptors(struct request *req)
 	}
 }
 
-static void nvme_free_prps(struct request *req)
+static void nvme_free_prps(struct request *req, unsigned int attrs)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
 	unsigned int i;
 
 	for (i = 0; i < iod->nr_dma_vecs; i++)
-		dma_unmap_page(nvmeq->dev->dev, iod->dma_vecs[i].addr,
-				iod->dma_vecs[i].len, rq_dma_dir(req));
+		dma_unmap_phys(nvmeq->dev->dev, iod->dma_vecs[i].addr,
+				iod->dma_vecs[i].len, rq_dma_dir(req), attrs);
 	mempool_free(iod->dma_vecs, nvmeq->dev->dmavec_mempool);
 }
 
 static void nvme_free_sgls(struct request *req, struct nvme_sgl_desc *sge,
-		struct nvme_sgl_desc *sg_list)
+		struct nvme_sgl_desc *sg_list, unsigned int attrs)
 {
 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
 	enum dma_data_direction dir = rq_dma_dir(req);
@@ -707,13 +707,14 @@ static void nvme_free_sgls(struct request *req, struct nvme_sgl_desc *sge,
 	unsigned int i;
 
 	if (sge->type == (NVME_SGL_FMT_DATA_DESC << 4)) {
-		dma_unmap_page(dma_dev, le64_to_cpu(sge->addr), len, dir);
+		dma_unmap_phys(dma_dev, le64_to_cpu(sge->addr), len, dir,
+				attrs);
 		return;
 	}
 
 	for (i = 0; i < len / sizeof(*sg_list); i++)
-		dma_unmap_page(dma_dev, le64_to_cpu(sg_list[i].addr),
-			le32_to_cpu(sg_list[i].length), dir);
+		dma_unmap_phys(dma_dev, le64_to_cpu(sg_list[i].addr),
+			le32_to_cpu(sg_list[i].length), dir, attrs);
 }
 
 static void nvme_unmap_metadata(struct request *req)
@@ -734,10 +735,10 @@ static void nvme_unmap_metadata(struct request *req)
 	if (!blk_rq_integrity_dma_unmap(req, dma_dev, &iod->meta_dma_state,
 					iod->meta_total_len)) {
 		if (nvme_pci_cmd_use_meta_sgl(&iod->cmd))
-			nvme_free_sgls(req, sge, &sge[1]);
+			nvme_free_sgls(req, sge, &sge[1], 0);
 		else
-			dma_unmap_page(dma_dev, iod->meta_dma,
-					iod->meta_total_len, dir);
+			dma_unmap_phys(dma_dev, iod->meta_dma,
+					iod->meta_total_len, dir, 0);
 	}
 
 	if (iod->meta_descriptor)
@@ -762,9 +763,9 @@ static void nvme_unmap_data(struct request *req)
 	if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len)) {
 		if (nvme_pci_cmd_use_sgl(&iod->cmd))
 			nvme_free_sgls(req, iod->descriptors[0],
-					&iod->cmd.common.dptr.sgl);
+					&iod->cmd.common.dptr.sgl, 0);
 		else
-			nvme_free_prps(req);
+			nvme_free_prps(req, 0);
 	}
 
 	if (iod->nr_descriptors)
-- 
2.51.1
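
For readers new to the phys-based API, here is a minimal sketch of the
conversion pattern this patch applies, outside of any particular call site.
It assumes only the dma_map_phys(dev, phys, len, dir, attrs) signature
referenced above; the helper names map_vec_old()/map_vec_new() are made up
for illustration and are not part of the patch:

/*
 * Illustrative sketch only, not part of the patch. Assumes the
 * dma_map_phys() signature from the physical-address mapping series:
 * dma_map_phys(dev, phys, len, dir, attrs).
 */
#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Old flow: convert phys -> struct page just to call dma_map_page(). */
static dma_addr_t map_vec_old(struct device *dev, phys_addr_t paddr,
			      size_t len, enum dma_data_direction dir)
{
	return dma_map_page(dev, phys_to_page(paddr),
			    offset_in_page(paddr), len, dir);
}

/* New flow: map the physical address directly, no struct page needed. */
static dma_addr_t map_vec_new(struct device *dev, phys_addr_t paddr,
			      size_t len, enum dma_data_direction dir)
{
	return dma_map_phys(dev, paddr, len, dir, 0);
}

The phys-based call avoids the phys_to_page()/offset_in_page() round trip
and exposes an attrs argument, which the second patch uses to pass
DMA_ATTR_MMIO.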

From: Leon Romanovsky
To: Jens Axboe, Keith Busch, Christoph Hellwig, Sagi Grimberg
Cc: linux-block@vger.kernel.org, linux-kernel@vger.kernel.org, linux-nvme@lists.infradead.org
Subject: [PATCH v4 2/2] block-dma: properly take MMIO path
Date: Wed, 12 Nov 2025 21:48:05 +0200
Message-ID: <20251112-block-with-mmio-v4-2-54aeb609d28d@nvidia.com>
In-Reply-To: <20251112-block-with-mmio-v4-0-54aeb609d28d@nvidia.com>
References: <20251112-block-with-mmio-v4-0-54aeb609d28d@nvidia.com>

From: Leon Romanovsky

Commit eadaa8b255f3 ("dma-mapping: introduce new DMA attribute to
indicate MMIO memory") added the DMA_ATTR_MMIO attribute to describe
MMIO addresses, which must not receive any CPU cache flushing, as an
outcome of the discussion pointed to by the Link tag below.

For PCI_P2PDMA_MAP_THRU_HOST_BRIDGE transfers, the blk-mq-dma logic
treated this memory as regular pages and relied on the "struct page"
DMA flow. That flow performs CPU cache flushing, which shouldn't be
done here, and it doesn't set the IOMMU_MMIO flag in the DMA-IOMMU
case.

As a solution, let's encode the peer-to-peer transaction type in the
NVMe IOD flags variable and provide it to the blk-mq-dma API.
Link: https://lore.kernel.org/all/f912c446-1ae9-4390-9c11-00dce7bf0fd3@arm.com/
Reviewed-by: Christoph Hellwig
Signed-off-by: Leon Romanovsky
Reviewed-by: Chaitanya Kulkarni
Reviewed-by: Keith Busch
---
 block/blk-mq-dma.c            | 17 ++++----
 drivers/nvme/host/pci.c       | 73 ++++++++++++++++++++++++++++++++++++++-----
 include/linux/bio-integrity.h |  1 -
 include/linux/blk-integrity.h | 14 --------
 include/linux/blk-mq-dma.h    | 28 ++++++++---------
 include/linux/blk_types.h     |  2 --
 6 files changed, 89 insertions(+), 46 deletions(-)

diff --git a/block/blk-mq-dma.c b/block/blk-mq-dma.c
index 4ba7b0323da4..98554929507a 100644
--- a/block/blk-mq-dma.c
+++ b/block/blk-mq-dma.c
@@ -93,8 +93,13 @@ static bool blk_dma_map_bus(struct blk_dma_iter *iter, struct phys_vec *vec)
 static bool blk_dma_map_direct(struct request *req, struct device *dma_dev,
 		struct blk_dma_iter *iter, struct phys_vec *vec)
 {
+	unsigned int attrs = 0;
+
+	if (iter->p2pdma.map == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
+		attrs |= DMA_ATTR_MMIO;
+
 	iter->addr = dma_map_phys(dma_dev, vec->paddr, vec->len,
-			rq_dma_dir(req), 0);
+			rq_dma_dir(req), attrs);
 	if (dma_mapping_error(dma_dev, iter->addr)) {
 		iter->status = BLK_STS_RESOURCE;
 		return false;
@@ -109,14 +114,18 @@ static bool blk_rq_dma_map_iova(struct request *req, struct device *dma_dev,
 {
 	enum dma_data_direction dir = rq_dma_dir(req);
 	unsigned int mapped = 0;
+	unsigned int attrs = 0;
 	int error;
 
 	iter->addr = state->addr;
 	iter->len = dma_iova_size(state);
 
+	if (iter->p2pdma.map == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
+		attrs |= DMA_ATTR_MMIO;
+
 	do {
 		error = dma_iova_link(dma_dev, state, vec->paddr, mapped,
-				vec->len, dir, 0);
+				vec->len, dir, attrs);
 		if (error)
 			break;
 		mapped += vec->len;
@@ -174,10 +183,6 @@ static bool blk_dma_map_iter_start(struct request *req, struct device *dma_dev,
 	switch (pci_p2pdma_state(&iter->p2pdma, dma_dev,
 				 phys_to_page(vec.paddr))) {
 	case PCI_P2PDMA_MAP_BUS_ADDR:
-		if (iter->iter.is_integrity)
-			bio_integrity(req->bio)->bip_flags |= BIP_P2P_DMA;
-		else
-			req->cmd_flags |= REQ_P2PDMA;
 		return blk_dma_map_bus(iter, &vec);
 	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
 		/*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 002412431940..62f23aae0943 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -260,8 +260,20 @@ enum nvme_iod_flags {
 	/* single segment dma mapping */
 	IOD_SINGLE_SEGMENT = 1U << 2,
 
+	/* Data payload contains p2p memory */
+	IOD_DATA_P2P = 1U << 3,
+
+	/* Metadata contains p2p memory */
+	IOD_META_P2P = 1U << 4,
+
+	/* Data payload contains MMIO memory */
+	IOD_DATA_MMIO = 1U << 5,
+
+	/* Metadata contains MMIO memory */
+	IOD_META_MMIO = 1U << 6,
+
 	/* Metadata using non-coalesced MPTR */
-	IOD_SINGLE_META_SEGMENT = 1U << 5,
+	IOD_SINGLE_META_SEGMENT = 1U << 7,
 };
 
 struct nvme_dma_vec {
@@ -720,10 +732,12 @@ static void nvme_free_sgls(struct request *req, struct nvme_sgl_desc *sge,
 static void nvme_unmap_metadata(struct request *req)
 {
 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+	enum pci_p2pdma_map_type map = PCI_P2PDMA_MAP_NONE;
 	enum dma_data_direction dir = rq_dma_dir(req);
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct device *dma_dev = nvmeq->dev->dev;
 	struct nvme_sgl_desc *sge = iod->meta_descriptor;
+	unsigned int attrs = 0;
 
 	if (iod->flags & IOD_SINGLE_META_SEGMENT) {
 		dma_unmap_page(dma_dev, iod->meta_dma,
@@ -732,13 +746,20 @@ static void nvme_unmap_metadata(struct request *req)
 		return;
 	}
 
-	if (!blk_rq_integrity_dma_unmap(req, dma_dev, &iod->meta_dma_state,
-					iod->meta_total_len)) {
+	if (iod->flags & IOD_META_P2P)
+		map = PCI_P2PDMA_MAP_BUS_ADDR;
+	else if (iod->flags & IOD_META_MMIO) {
+		map = PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
+		attrs |= DMA_ATTR_MMIO;
+	}
+
+	if (!blk_rq_dma_unmap(req, dma_dev, &iod->meta_dma_state,
+			      iod->meta_total_len, map)) {
 		if (nvme_pci_cmd_use_meta_sgl(&iod->cmd))
-			nvme_free_sgls(req, sge, &sge[1], 0);
+			nvme_free_sgls(req, sge, &sge[1], attrs);
 		else
 			dma_unmap_phys(dma_dev, iod->meta_dma,
-				iod->meta_total_len, dir, 0);
+				iod->meta_total_len, dir, attrs);
 	}
 
 	if (iod->meta_descriptor)
@@ -748,9 +769,11 @@ static void nvme_unmap_metadata(struct request *req)
 
 static void nvme_unmap_data(struct request *req)
 {
+	enum pci_p2pdma_map_type map = PCI_P2PDMA_MAP_NONE;
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
 	struct device *dma_dev = nvmeq->dev->dev;
+	unsigned int attrs = 0;
 
 	if (iod->flags & IOD_SINGLE_SEGMENT) {
 		static_assert(offsetof(union nvme_data_ptr, prp1) ==
@@ -760,12 +783,20 @@ static void nvme_unmap_data(struct request *req)
 		return;
 	}
 
-	if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len)) {
+	if (iod->flags & IOD_DATA_P2P)
+		map = PCI_P2PDMA_MAP_BUS_ADDR;
+	else if (iod->flags & IOD_DATA_MMIO) {
+		map = PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
+		attrs |= DMA_ATTR_MMIO;
+	}
+
+	if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len,
+			      map)) {
 		if (nvme_pci_cmd_use_sgl(&iod->cmd))
 			nvme_free_sgls(req, iod->descriptors[0],
-					&iod->cmd.common.dptr.sgl, 0);
+					&iod->cmd.common.dptr.sgl, attrs);
 		else
-			nvme_free_prps(req, 0);
+			nvme_free_prps(req, attrs);
 	}
 
 	if (iod->nr_descriptors)
@@ -1036,6 +1067,19 @@ static blk_status_t nvme_map_data(struct request *req)
 	if (!blk_rq_dma_map_iter_start(req, dev->dev, &iod->dma_state, &iter))
 		return iter.status;
 
+	switch (iter.p2pdma.map) {
+	case PCI_P2PDMA_MAP_BUS_ADDR:
+		iod->flags |= IOD_DATA_P2P;
+		break;
+	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+		iod->flags |= IOD_DATA_MMIO;
+		break;
+	case PCI_P2PDMA_MAP_NONE:
+		break;
+	default:
+		return BLK_STS_RESOURCE;
+	}
+
 	if (use_sgl == SGL_FORCED ||
 	    (use_sgl == SGL_SUPPORTED &&
 	     (sgl_threshold && nvme_pci_avg_seg_size(req) >= sgl_threshold)))
@@ -1058,6 +1102,19 @@ static blk_status_t nvme_pci_setup_meta_sgls(struct request *req)
 			&iod->meta_dma_state, &iter))
 		return iter.status;
 
+	switch (iter.p2pdma.map) {
+	case PCI_P2PDMA_MAP_BUS_ADDR:
+		iod->flags |= IOD_META_P2P;
+		break;
+	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+		iod->flags |= IOD_META_MMIO;
+		break;
+	case PCI_P2PDMA_MAP_NONE:
+		break;
+	default:
+		return BLK_STS_RESOURCE;
+	}
+
 	if (blk_rq_dma_map_coalesce(&iod->meta_dma_state))
 		entries = 1;
 
diff --git a/include/linux/bio-integrity.h b/include/linux/bio-integrity.h
index 851254f36eb3..0a25716820fe 100644
--- a/include/linux/bio-integrity.h
+++ b/include/linux/bio-integrity.h
@@ -13,7 +13,6 @@ enum bip_flags {
 	BIP_CHECK_GUARD = 1 << 5, /* guard check */
 	BIP_CHECK_REFTAG = 1 << 6, /* reftag check */
 	BIP_CHECK_APPTAG = 1 << 7, /* apptag check */
-	BIP_P2P_DMA = 1 << 8, /* using P2P address */
 };
 
 struct bio_integrity_payload {
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
index b659373788f6..b9e6376b5e36 100644
--- a/include/linux/blk-integrity.h
+++ b/include/linux/blk-integrity.h
@@ -28,14 +28,6 @@ static inline bool queue_limits_stack_integrity_bdev(struct queue_limits *t,
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
 
-static inline bool blk_rq_integrity_dma_unmap(struct request *req,
-		struct device *dma_dev, struct dma_iova_state *state,
-		size_t mapped_len)
-{
-	return blk_dma_unmap(req, dma_dev, state, mapped_len,
-			bio_integrity(req->bio)->bip_flags & BIP_P2P_DMA);
-}
-
 int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
 int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
 			      ssize_t bytes);
@@ -124,12 +116,6 @@ static inline int blk_rq_map_integrity_sg(struct request *q,
 {
 	return 0;
 }
-static inline bool blk_rq_integrity_dma_unmap(struct request *req,
-		struct device *dma_dev, struct dma_iova_state *state,
-		size_t mapped_len)
-{
-	return false;
-}
 static inline int blk_rq_integrity_map_user(struct request *rq,
 					    void __user *ubuf,
 					    ssize_t bytes)
diff --git a/include/linux/blk-mq-dma.h b/include/linux/blk-mq-dma.h
index 51829958d872..cb88fc791fbd 100644
--- a/include/linux/blk-mq-dma.h
+++ b/include/linux/blk-mq-dma.h
@@ -16,13 +16,13 @@ struct blk_dma_iter {
 	/* Output address range for this iteration */
 	dma_addr_t addr;
 	u32 len;
+	struct pci_p2pdma_map_state p2pdma;
 
 	/* Status code. Only valid when blk_rq_dma_map_iter_* returned false */
 	blk_status_t status;
 
 	/* Internal to blk_rq_dma_map_iter_* */
 	struct blk_map_iter iter;
-	struct pci_p2pdma_map_state p2pdma;
 };
 
 bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
@@ -43,36 +43,34 @@ static inline bool blk_rq_dma_map_coalesce(struct dma_iova_state *state)
 }
 
 /**
- * blk_dma_unmap - try to DMA unmap a request
+ * blk_rq_dma_unmap - try to DMA unmap a request
  * @req: request to unmap
  * @dma_dev: device to unmap from
  * @state: DMA IOVA state
  * @mapped_len: number of bytes to unmap
- * @is_p2p: true if mapped with PCI_P2PDMA_MAP_BUS_ADDR
+ * @map: peer-to-peer mapping type
  *
 * Returns %false if the callers need to manually unmap every DMA segment
 * mapped using @iter or %true if no work is left to be done.
 */
-static inline bool blk_dma_unmap(struct request *req, struct device *dma_dev,
-		struct dma_iova_state *state, size_t mapped_len, bool is_p2p)
+static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
+		struct dma_iova_state *state, size_t mapped_len,
+		enum pci_p2pdma_map_type map)
 {
-	if (is_p2p)
+	if (map == PCI_P2PDMA_MAP_BUS_ADDR)
 		return true;
 
 	if (dma_use_iova(state)) {
+		unsigned int attrs = 0;
+
+		if (map == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
+			attrs |= DMA_ATTR_MMIO;
+
 		dma_iova_destroy(dma_dev, state, mapped_len, rq_dma_dir(req),
-				0);
+				attrs);
 		return true;
 	}
 
 	return !dma_need_unmap(dma_dev);
 }
-
-static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
-		struct dma_iova_state *state, size_t mapped_len)
-{
-	return blk_dma_unmap(req, dma_dev, state, mapped_len,
-			req->cmd_flags & REQ_P2PDMA);
-}
-
 #endif /* BLK_MQ_DMA_H */
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 8e8d1cc8b06c..1f8e429c4c77 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -381,7 +381,6 @@ enum req_flag_bits {
 	__REQ_DRV, /* for driver use */
 	__REQ_FS_PRIVATE, /* for file system (submitter) use */
 	__REQ_ATOMIC, /* for atomic write operations */
-	__REQ_P2PDMA, /* contains P2P DMA pages */
 	/*
 	 * Command specific flags, keep last:
 	 */
@@ -414,7 +413,6 @@ enum req_flag_bits {
 #define REQ_DRV (__force blk_opf_t)(1ULL << __REQ_DRV)
 #define REQ_FS_PRIVATE (__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)
 #define REQ_ATOMIC (__force blk_opf_t)(1ULL << __REQ_ATOMIC)
-#define REQ_P2PDMA (__force blk_opf_t)(1ULL << __REQ_P2PDMA)
 
 #define REQ_NOUNMAP (__force blk_opf_t)(1ULL << __REQ_NOUNMAP)
 
-- 
2.51.1
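
To summarize how the two halves of this patch fit together: at map time the
iterator's p2pdma state is latched into the IOD flags, and at unmap time
those flags are translated back into a mapping type plus DMA attributes.
The sketch below restates that translation, which the patch open-codes in
nvme_unmap_data() and nvme_unmap_metadata(); nvme_iod_map_type() is a
hypothetical helper used only for illustration, relying on the IOD_* flags
added in the pci.c hunks above:

/*
 * Sketch of the unmap-time translation open-coded above; the helper name
 * is hypothetical. Relies on the IOD_* flags from drivers/nvme/host/pci.c
 * plus <linux/pci-p2pdma.h> and <linux/dma-mapping.h>.
 */
static enum pci_p2pdma_map_type nvme_iod_map_type(unsigned int iod_flags,
						  bool meta,
						  unsigned int *attrs)
{
	unsigned int p2p = meta ? IOD_META_P2P : IOD_DATA_P2P;
	unsigned int mmio = meta ? IOD_META_MMIO : IOD_DATA_MMIO;

	*attrs = 0;
	if (iod_flags & p2p)
		/* bus-address mapping: nothing to unmap via the DMA API */
		return PCI_P2PDMA_MAP_BUS_ADDR;
	if (iod_flags & mmio) {
		/* host-bridge P2P: unmap as MMIO, skip CPU cache flushing */
		*attrs |= DMA_ATTR_MMIO;
		return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
	}
	/* regular host memory */
	return PCI_P2PDMA_MAP_NONE;
}

With map and attrs derived this way, blk_rq_dma_unmap() can return early
for bus-address mappings and pass DMA_ATTR_MMIO down through
dma_iova_destroy() or the per-segment dma_unmap_phys() calls, matching what
the diff does inline.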