From: Leon Romanovsky <leonro@nvidia.com>
After introduction of dma_map_phys(), there is no need to convert
from a physical address to a struct page in order to map a page. So
let's use it directly.
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
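As a quick illustration of the conversion (mirroring the
blk_dma_map_direct() hunk below; dma_map_phys() takes the physical
address directly, plus a DMA attributes argument, in place of the
page/offset pair):

	/* before: derive a struct page just to map a physical address */
	iter->addr = dma_map_page(dma_dev, phys_to_page(vec->paddr),
			offset_in_page(vec->paddr), vec->len, rq_dma_dir(req));

	/* after: map the physical address directly, with no extra attrs */
	iter->addr = dma_map_phys(dma_dev, vec->paddr, vec->len,
			rq_dma_dir(req), 0);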
block/blk-mq-dma.c | 4 ++--
drivers/nvme/host/pci.c | 27 +++++++++++++++------------
include/linux/blk-mq-dma.h | 1 +
3 files changed, 18 insertions(+), 14 deletions(-)
diff --git a/block/blk-mq-dma.c b/block/blk-mq-dma.c
index 449950029872..4ba7b0323da4 100644
--- a/block/blk-mq-dma.c
+++ b/block/blk-mq-dma.c
@@ -93,8 +93,8 @@ static bool blk_dma_map_bus(struct blk_dma_iter *iter, struct phys_vec *vec)
static bool blk_dma_map_direct(struct request *req, struct device *dma_dev,
struct blk_dma_iter *iter, struct phys_vec *vec)
{
- iter->addr = dma_map_page(dma_dev, phys_to_page(vec->paddr),
- offset_in_page(vec->paddr), vec->len, rq_dma_dir(req));
+ iter->addr = dma_map_phys(dma_dev, vec->paddr, vec->len,
+ rq_dma_dir(req), 0);
if (dma_mapping_error(dma_dev, iter->addr)) {
iter->status = BLK_STS_RESOURCE;
return false;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c916176bd9f0..91a8965754f0 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -685,20 +685,20 @@ static void nvme_free_descriptors(struct request *req)
}
}
-static void nvme_free_prps(struct request *req)
+static void nvme_free_prps(struct request *req, unsigned int attrs)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
unsigned int i;
for (i = 0; i < iod->nr_dma_vecs; i++)
- dma_unmap_page(nvmeq->dev->dev, iod->dma_vecs[i].addr,
- iod->dma_vecs[i].len, rq_dma_dir(req));
+ dma_unmap_phys(nvmeq->dev->dev, iod->dma_vecs[i].addr,
+ iod->dma_vecs[i].len, rq_dma_dir(req), attrs);
mempool_free(iod->dma_vecs, nvmeq->dev->dmavec_mempool);
}
static void nvme_free_sgls(struct request *req, struct nvme_sgl_desc *sge,
- struct nvme_sgl_desc *sg_list)
+ struct nvme_sgl_desc *sg_list, unsigned int attrs)
{
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
enum dma_data_direction dir = rq_dma_dir(req);
@@ -707,13 +707,14 @@ static void nvme_free_sgls(struct request *req, struct nvme_sgl_desc *sge,
unsigned int i;
if (sge->type == (NVME_SGL_FMT_DATA_DESC << 4)) {
- dma_unmap_page(dma_dev, le64_to_cpu(sge->addr), len, dir);
+ dma_unmap_phys(dma_dev, le64_to_cpu(sge->addr), len, dir,
+ attrs);
return;
}
for (i = 0; i < len / sizeof(*sg_list); i++)
- dma_unmap_page(dma_dev, le64_to_cpu(sg_list[i].addr),
- le32_to_cpu(sg_list[i].length), dir);
+ dma_unmap_phys(dma_dev, le64_to_cpu(sg_list[i].addr),
+ le32_to_cpu(sg_list[i].length), dir, attrs);
}
static void nvme_unmap_metadata(struct request *req)
@@ -723,6 +724,7 @@ static void nvme_unmap_metadata(struct request *req)
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct device *dma_dev = nvmeq->dev->dev;
struct nvme_sgl_desc *sge = iod->meta_descriptor;
+ unsigned int attrs = 0;
if (iod->flags & IOD_SINGLE_META_SEGMENT) {
dma_unmap_page(dma_dev, iod->meta_dma,
@@ -734,10 +736,10 @@ static void nvme_unmap_metadata(struct request *req)
if (!blk_rq_integrity_dma_unmap(req, dma_dev, &iod->meta_dma_state,
iod->meta_total_len)) {
if (nvme_pci_cmd_use_meta_sgl(&iod->cmd))
- nvme_free_sgls(req, sge, &sge[1]);
+ nvme_free_sgls(req, sge, &sge[1], attrs);
else
- dma_unmap_page(dma_dev, iod->meta_dma,
- iod->meta_total_len, dir);
+ dma_unmap_phys(dma_dev, iod->meta_dma,
+ iod->meta_total_len, dir, attrs);
}
if (iod->meta_descriptor)
@@ -750,6 +752,7 @@ static void nvme_unmap_data(struct request *req)
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
struct device *dma_dev = nvmeq->dev->dev;
+ unsigned int attrs = 0;
if (iod->flags & IOD_SINGLE_SEGMENT) {
static_assert(offsetof(union nvme_data_ptr, prp1) ==
@@ -762,9 +765,9 @@ static void nvme_unmap_data(struct request *req)
if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len)) {
if (nvme_pci_cmd_use_sgl(&iod->cmd))
nvme_free_sgls(req, iod->descriptors[0],
- &iod->cmd.common.dptr.sgl);
+ &iod->cmd.common.dptr.sgl, attrs);
else
- nvme_free_prps(req);
+ nvme_free_prps(req, attrs);
}
if (iod->nr_descriptors)
diff --git a/include/linux/blk-mq-dma.h b/include/linux/blk-mq-dma.h
index 51829958d872..faf4dd574c62 100644
--- a/include/linux/blk-mq-dma.h
+++ b/include/linux/blk-mq-dma.h
@@ -16,6 +16,7 @@ struct blk_dma_iter {
/* Output address range for this iteration */
dma_addr_t addr;
u32 len;
+ unsigned int attrs;
/* Status code. Only valid when blk_rq_dma_map_iter_* returned false */
blk_status_t status;
--
2.51.0
From: Christoph Hellwig <hch@lst.de>

This actually has block and nvme bits, so the subject line should say
that.

> +	unsigned int attrs = 0;

attrs is always zero here, no need to start passing it for the
map_phys conversion alone.

> +	unsigned int attrs = 0;

Same here.

> +	unsigned int attrs;

And this is also entirely unused as far as I can tell.
On Wed, Oct 22, 2025 at 08:14:18AM +0200, Christoph Hellwig wrote:
> This actually has block and nvme bits, so the subject line should
> say that.
>
> > +	unsigned int attrs = 0;
>
> attrs is always zero here, no need to start passing it for the
> map_phys conversion alone.
>
> > +	unsigned int attrs = 0;
>
> Same here.

It gave me a cleaner second patch where I only added the new
attribute, but if it doesn't look right to you, let's change it.

> > +	unsigned int attrs;
>
> And this is also entirely unused as far as I can tell.

Right, it is used in the second patch, will fix.

Thanks
On Sun, Oct 26, 2025 at 02:38:04PM +0200, Leon Romanovsky wrote:
> On Wed, Oct 22, 2025 at 08:14:18AM +0200, Christoph Hellwig wrote:
> > This actually has block and nvme bits, so the subject line should
> > say that.
> >
> > > +	unsigned int attrs = 0;
> >
> > attrs is always zero here, no need to start passing it for the
> > map_phys conversion alone.
> >
> > > +	unsigned int attrs = 0;
> >
> > Same here.
>
> It gave me a cleaner second patch where I only added the new
> attribute, but if it doesn't look right to you, let's change it.

The usual rule is do one thing at a time. There might be an occasional
slight bend of the rule to make life easier, but I don't think that
really fits here.
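For reference, a minimal sketch of what that suggestion means for this
patch: keep the existing function signatures and pass zero attributes
at the dma_unmap_phys() call sites, deferring the attrs plumbing to
the patch that actually uses it (illustrative fragment based on the
nvme_free_prps() hunk above, not the final code):

	static void nvme_free_prps(struct request *req)
	{
		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
		unsigned int i;

		for (i = 0; i < iod->nr_dma_vecs; i++)
			dma_unmap_phys(nvmeq->dev->dev, iod->dma_vecs[i].addr,
				       iod->dma_vecs[i].len, rq_dma_dir(req),
				       0);
		mempool_free(iod->dma_vecs, nvmeq->dev->dmavec_mempool);
	}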
On 10/20/25 10:00, Leon Romanovsky wrote:
> From: Leon Romanovsky <leonro@nvidia.com>
>
> After introduction of dma_map_phys(), there is no need to convert
> from a physical address to a struct page in order to map a page. So
> let's use it directly.
>
> Reviewed-by: Keith Busch <kbusch@kernel.org>
> Reviewed-by: Christoph Hellwig <hch@lst.de>
> Signed-off-by: Leon Romanovsky <leonro@nvidia.com>

Looks good.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>

-ck