From: Leon Romanovsky <leonro@nvidia.com>
Combine the resource and page mapping routines into one function
and remove the .map_resource/.unmap_resource callbacks completely.
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
drivers/xen/swiotlb-xen.c | 63 ++++++++++++++++++---------------------
1 file changed, 29 insertions(+), 34 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index dd7747a2de879..48936179c940b 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -200,17 +200,32 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
* physical address to use is returned.
*
* Once the device is given the dma address, the device owns this memory until
- * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
+ * either xen_swiotlb_unmap_phys or xen_swiotlb_dma_sync_single is performed.
*/
-static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
+static dma_addr_t xen_swiotlb_map_phys(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- phys_addr_t map, phys = page_to_phys(page) + offset;
- dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);
+ dma_addr_t dev_addr;
+ phys_addr_t map;
BUG_ON(dir == DMA_NONE);
+
+ if (attrs & DMA_ATTR_MMIO) {
+ if (unlikely(!dma_capable(dev, phys, size, false))) {
+ dev_err_once(
+ dev,
+ "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
+ &dma_addr, size, *dev->dma_mask,
+ dev->bus_dma_limit);
+ WARN_ON_ONCE(1);
+ return DMA_MAPPING_ERROR;
+ }
+ return phys;
+ }
+
+ dev_addr = xen_phys_to_dma(dev, phys);
+
/*
* If the address happens to be in the device's DMA window,
* we can safely return the device addr and not worry about bounce
@@ -257,13 +272,13 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
/*
* Unmap a single streaming mode DMA translation. The dma_addr and size must
- * match what was provided for in a previous xen_swiotlb_map_page call. All
+ * match what was provided for in a previous xen_swiotlb_map_phys call. All
* other usages are undefined.
*
* After this call, reads by the cpu to the buffer are guaranteed to see
* whatever the device wrote there.
*/
-static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+static void xen_swiotlb_unmap_phys(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);
@@ -325,7 +340,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
/*
* Unmap a set of streaming mode DMA translations. Again, cpu read rules
- * concerning calls here are the same as for swiotlb_unmap_page() above.
+ * concerning calls here are the same as for swiotlb_unmap_phys() above.
*/
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
@@ -337,7 +352,7 @@ xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i)
- xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
+ xen_swiotlb_unmap_phys(hwdev, sg->dma_address, sg_dma_len(sg),
dir, attrs);
}
@@ -352,8 +367,8 @@ xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i) {
- sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
- sg->offset, sg->length, dir, attrs);
+ sg->dma_address = xen_swiotlb_map_phys(dev, sg_phys(sg),
+ sg->length, dir, attrs);
if (sg->dma_address == DMA_MAPPING_ERROR)
goto out_unmap;
sg_dma_len(sg) = sg->length;
@@ -392,25 +407,6 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
}
}
-static dma_addr_t xen_swiotlb_direct_map_resource(struct device *dev,
- phys_addr_t paddr,
- size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- dma_addr_t dma_addr = paddr;
-
- if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
- dev_err_once(dev,
- "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
- &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
- WARN_ON_ONCE(1);
- return DMA_MAPPING_ERROR;
- }
-
- return dma_addr;
-}
-
/*
* Return whether the given device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24-bits
@@ -437,13 +433,12 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
.map_sg = xen_swiotlb_map_sg,
.unmap_sg = xen_swiotlb_unmap_sg,
- .map_page = xen_swiotlb_map_page,
- .unmap_page = xen_swiotlb_unmap_page,
+ .map_phys = xen_swiotlb_map_phys,
+ .unmap_phys = xen_swiotlb_unmap_phys,
.dma_supported = xen_swiotlb_dma_supported,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
.alloc_pages_op = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
.max_mapping_size = swiotlb_max_mapping_size,
- .map_resource = xen_swiotlb_direct_map_resource,
};
--
2.51.0
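For context, a minimal caller-side sketch of how both kinds of mappings are
expected to reach the unified xen_swiotlb_map_phys() after this change. It
assumes the core behavior of the phys-based DMA API rework this patch belongs
to: dma_map_page() and dma_map_resource() both route into the driver's
.map_phys callback, with DMA_ATTR_MMIO set internally for the resource (MMIO)
case. The function and variable names below are made up for illustration and
are not part of the patch.

/*
 * Hedged sketch, not part of the patch: a driver mapping both a RAM page
 * and an MMIO BAR.  Under this series both calls are assumed to end up in
 * the single .map_phys callback; only the MMIO path carries DMA_ATTR_MMIO.
 */
#include <linux/dma-mapping.h>

static int example_map(struct device *dev, struct page *page,
		       phys_addr_t bar_phys, size_t len)
{
	dma_addr_t ram_dma, mmio_dma;

	/* RAM page: may be bounced through the Xen swiotlb if needed. */
	ram_dma = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, ram_dma))
		return -ENOMEM;

	/* MMIO BAR: never bounced; only the dma_capable() check applies. */
	mmio_dma = dma_map_resource(dev, bar_phys, len, DMA_TO_DEVICE, 0);
	if (dma_mapping_error(dev, mmio_dma)) {
		dma_unmap_page(dev, ram_dma, len, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	/* ... program the device with ram_dma / mmio_dma ... */

	dma_unmap_resource(dev, mmio_dma, len, DMA_TO_DEVICE, 0);
	dma_unmap_page(dev, ram_dma, len, DMA_TO_DEVICE);
	return 0;
}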
On Thu, Sep 18, 2025 at 05:09:28PM +0300, Leon Romanovsky wrote:
> From: Leon Romanovsky <leonro@nvidia.com>
>
> Combine the resource and page mapping routines into one function
> and remove the .map_resource/.unmap_resource callbacks completely.
>
> Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
> ---
>  drivers/xen/swiotlb-xen.c | 63 ++++++++++++++++++---------------------
>  1 file changed, 29 insertions(+), 34 deletions(-)

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>

Jason
On Thu, Sep 18, 2025 at 05:09:28PM +0300, Leon Romanovsky wrote:
> From: Leon Romanovsky <leonro@nvidia.com>
>
> Combine the resource and page mapping routines into one function
> and remove the .map_resource/.unmap_resource callbacks completely.
>
> Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
> ---
>  drivers/xen/swiotlb-xen.c | 63 ++++++++++++++++++---------------------
>  1 file changed, 29 insertions(+), 34 deletions(-)

<...>

> +	if (attrs & DMA_ATTR_MMIO) {
> +		if (unlikely(!dma_capable(dev, phys, size, false))) {
> +			dev_err_once(
> +				dev,
> +				"DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
> +				&dma_addr, size, *dev->dma_mask,
> +				dev->bus_dma_limit);
> +			WARN_ON_ONCE(1);
> +			return DMA_MAPPING_ERROR;
> +		}
> +		return phys;
> +	}

This needs to be fixed by the following change (dma_addr -> phys):

diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 48936179c940b..ccf25027bec19 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -215,8 +215,8 @@ static dma_addr_t xen_swiotlb_map_phys(struct device *dev, phys_addr_t phys,
 		if (unlikely(!dma_capable(dev, phys, size, false))) {
 			dev_err_once(
 				dev,
-				"DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
-				&dma_addr, size, *dev->dma_mask,
+				"DMA addr %pa+%zu overflow (mask %llx, bus limit %llx).\n",
+				&phys, size, *dev->dma_mask,
 				dev->bus_dma_limit);
 			WARN_ON_ONCE(1);
 			return DMA_MAPPING_ERROR;

Thanks
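For context, a minimal sketch of the format-specifier issue the fixup above
addresses: %pad prints a dma_addr_t passed by reference, while %pa prints a
phys_addr_t passed by reference. In the DMA_ATTR_MMIO branch only 'phys'
exists, so the message has to use %pa with &phys. The function below is a
made-up illustration, not code from the patch.

/*
 * Hedged sketch of the correct printk pairings:
 *   %pa  <-> pointer to a phys_addr_t
 *   %pad <-> pointer to a dma_addr_t
 * Both specifiers take the value by reference, never by value.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>

static void example_report(struct device *dev, phys_addr_t phys,
			   dma_addr_t dev_addr, size_t size)
{
	dev_err_once(dev, "phys %pa+%zu, dma %pad+%zu\n",
		     &phys, size, &dev_addr, size);
}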