From: Leon Romanovsky <leonro@nvidia.com>
The existing .map_page() callback provides both allocation of an IOVA
and linking of DMA pages. That combination works great for most of the
callers who use it in control paths, but is less effective in fast
paths where there may be multiple calls to map_page().

These advanced callers already manage their data in some sort of
database and can perform IOVA allocation in advance, leaving the range
linkage operation in the fast path.

Provide an interface to allocate/deallocate IOVA; the next patch adds
the ability to link/unlink DMA ranges to that specific IOVA.

In the new API a DMA mapping transaction is identified by a
struct dma_iova_state, which holds some precomputed information
for the transaction that does not change for each page being
mapped, so add a check whether IOVA can be used for the specific
transaction.

The API is exported from dma-iommu as it is the only supported
implementation; the namespace is clearly different from that of the
iommu_* functions, which are not allowed to be used directly. This code
layout allows us to save a function call per API call used in the
datapath, as well as a lot of boilerplate code.
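
As a rough sketch of the intended usage (illustrative only: dma_iova_link()
is introduced by a follow-up patch, so its signature and the omitted
sync/unlink steps below are assumptions, not part of this patch):

	/* Sketch: reserve the IOVA once, then link ranges in the fast path. */
	static int example_map(struct device *dev, struct dma_iova_state *state,
			       phys_addr_t phys, size_t len,
			       enum dma_data_direction dir)
	{
		int ret;

		/* Control path: try to reserve the IOVA range up front. */
		if (!dma_iova_try_alloc(dev, state, phys, len))
			return -EOPNOTSUPP;	/* use the regular DMA API instead */

		/* Fast path: link the range(s) into the preallocated IOVA. */
		ret = dma_iova_link(dev, state, phys, 0, len, dir, 0);
		if (ret)
			/* nothing linked yet, so the IOVA can be freed directly */
			dma_iova_free(dev, state);
		return ret;
	}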
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Jens Axboe <axboe@kernel.dk>
Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
drivers/iommu/dma-iommu.c | 86 +++++++++++++++++++++++++++++++++++++
include/linux/dma-mapping.h | 48 +++++++++++++++++++++
2 files changed, 134 insertions(+)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 9ba8d8bc0ce9..d3211a8d755e 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -1723,6 +1723,92 @@ size_t iommu_dma_max_mapping_size(struct device *dev)
return SIZE_MAX;
}
+/**
+ * dma_iova_try_alloc - Try to allocate an IOVA space
+ * @dev: Device to allocate the IOVA space for
+ * @state: IOVA state
+ * @phys: physical address
+ * @size: IOVA size
+ *
+ * Check if @dev supports the IOVA-based DMA API, and if so allocate IOVA space
+ * for the given base address and size.
+ *
+ * Note: @phys is only used to calculate the IOVA alignment. Callers that always
+ * do PAGE_SIZE aligned transfers can safely pass 0 here.
+ *
+ * Returns %true if the IOVA-based DMA API can be used and IOVA space has been
+ * allocated, or %false if the regular DMA API should be used.
+ */
+bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
+ phys_addr_t phys, size_t size)
+{
+ struct iommu_dma_cookie *cookie;
+ struct iommu_domain *domain;
+ struct iova_domain *iovad;
+ size_t iova_off;
+ dma_addr_t addr;
+
+ memset(state, 0, sizeof(*state));
+ if (!use_dma_iommu(dev))
+ return false;
+
+ domain = iommu_get_dma_domain(dev);
+ cookie = domain->iova_cookie;
+ iovad = &cookie->iovad;
+ iova_off = iova_offset(iovad, phys);
+
+ if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
+ iommu_deferred_attach(dev, iommu_get_domain_for_dev(dev)))
+ return false;
+
+ if (WARN_ON_ONCE(!size))
+ return false;
+
+ /*
+ * DMA_IOVA_USE_SWIOTLB is a flag set by the dma-iommu internals;
+ * make sure that the caller didn't set it and/or didn't use this
+ * interface to map SIZE_MAX.
+ */
+ if (WARN_ON_ONCE((u64)size & DMA_IOVA_USE_SWIOTLB))
+ return false;
+
+ addr = iommu_dma_alloc_iova(domain,
+ iova_align(iovad, size + iova_off),
+ dma_get_mask(dev), dev);
+ if (!addr)
+ return false;
+
+ state->addr = addr + iova_off;
+ state->__size = size;
+ return true;
+}
+EXPORT_SYMBOL_GPL(dma_iova_try_alloc);
+
+/**
+ * dma_iova_free - Free an IOVA space
+ * @dev: Device to free the IOVA space for
+ * @state: IOVA state
+ *
+ * Undoes a successful dma_iova_try_alloc().
+ *
+ * Note that all dma_iova_link() calls need to be undone first. For callers
+ * that never call dma_iova_unlink(), dma_iova_destroy() can be used instead
+ * which unlinks all ranges and frees the IOVA space in a single efficient
+ * operation.
+ */
+void dma_iova_free(struct device *dev, struct dma_iova_state *state)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ size_t iova_start_pad = iova_offset(iovad, state->addr);
+ size_t size = dma_iova_size(state);
+
+ iommu_dma_free_iova(domain, state->addr - iova_start_pad,
+ iova_align(iovad, size + iova_start_pad), NULL);
+}
+EXPORT_SYMBOL_GPL(dma_iova_free);
+
void iommu_setup_dma_ops(struct device *dev)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index b79925b1c433..de7f73810d54 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -72,6 +72,22 @@
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+struct dma_iova_state {
+ dma_addr_t addr;
+ u64 __size;
+};
+
+/*
+ * Use the high bit to mark if we used swiotlb for one or more ranges.
+ */
+#define DMA_IOVA_USE_SWIOTLB (1ULL << 63)
+
+static inline size_t dma_iova_size(struct dma_iova_state *state)
+{
+ /* Casting is needed for 32-bit systems */
+ return (size_t)(state->__size & ~DMA_IOVA_USE_SWIOTLB);
+}
+
#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
@@ -277,6 +293,38 @@ static inline int dma_mmap_noncontiguous(struct device *dev,
}
#endif /* CONFIG_HAS_DMA */
+#ifdef CONFIG_IOMMU_DMA
+/**
+ * dma_use_iova - check if the IOVA API is used for this state
+ * @state: IOVA state
+ *
+ * Return %true if the DMA transfer uses the dma_iova_*() calls, or %false if
+ * they can't be used.
+ */
+static inline bool dma_use_iova(struct dma_iova_state *state)
+{
+ return state->__size != 0;
+}
+
+bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
+ phys_addr_t phys, size_t size);
+void dma_iova_free(struct device *dev, struct dma_iova_state *state);
+#else /* CONFIG_IOMMU_DMA */
+static inline bool dma_use_iova(struct dma_iova_state *state)
+{
+ return false;
+}
+static inline bool dma_iova_try_alloc(struct device *dev,
+ struct dma_iova_state *state, phys_addr_t phys, size_t size)
+{
+ return false;
+}
+static inline void dma_iova_free(struct device *dev,
+ struct dma_iova_state *state)
+{
+}
+#endif /* CONFIG_IOMMU_DMA */
+
#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir);
--
2.49.0
On 4/28/25 17:22, Leon Romanovsky wrote:
> From: Leon Romanovsky<leonro@nvidia.com>
>
> The existing .map_page() callback provides both allocating of IOVA
.map_pages()
> and linking DMA pages. That combination works great for most of the
> callers who use it in control paths, but is less effective in fast
> paths where there may be multiple calls to map_page().
>
> These advanced callers already manage their data in some sort of
> database and can perform IOVA allocation in advance, leaving range
> linkage operation to be in fast path.
>
> Provide an interface to allocate/deallocate IOVA and next patch
> link/unlink DMA ranges to that specific IOVA.
>
> In the new API a DMA mapping transaction is identified by a
> struct dma_iova_state, which holds some recomputed information
> for the transaction which does not change for each page being
> mapped, so add a check if IOVA can be used for the specific
> transaction.
>
> The API is exported from dma-iommu as it is the only implementation
> supported, the namespace is clearly different from iommu_* functions
> which are not allowed to be used. This code layout allows us to save
> function call per API call used in datapath as well as a lot of boilerplate
> code.
>
> Reviewed-by: Christoph Hellwig<hch@lst.de>
> Tested-by: Jens Axboe<axboe@kernel.dk>
> Reviewed-by: Luis Chamberlain<mcgrof@kernel.org>
> Signed-off-by: Leon Romanovsky<leonro@nvidia.com>
> ---
> drivers/iommu/dma-iommu.c | 86 +++++++++++++++++++++++++++++++++++++
> include/linux/dma-mapping.h | 48 +++++++++++++++++++++
> 2 files changed, 134 insertions(+)
>
> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> index 9ba8d8bc0ce9..d3211a8d755e 100644
> --- a/drivers/iommu/dma-iommu.c
> +++ b/drivers/iommu/dma-iommu.c
> @@ -1723,6 +1723,92 @@ size_t iommu_dma_max_mapping_size(struct device *dev)
> return SIZE_MAX;
> }
>
> +/**
> + * dma_iova_try_alloc - Try to allocate an IOVA space
> + * @dev: Device to allocate the IOVA space for
> + * @state: IOVA state
> + * @phys: physical address
> + * @size: IOVA size
> + *
> + * Check if @dev supports the IOVA-based DMA API, and if yes allocate IOVA space
> + * for the given base address and size.
> + *
> + * Note: @phys is only used to calculate the IOVA alignment. Callers that always
> + * do PAGE_SIZE aligned transfers can safely pass 0 here.
Have you considered adding a direct alignment parameter to
dma_iova_try_alloc()? '0' would simply mean the default PAGE_SIZE alignment.
I'm imagining that some devices might have particular alignment needs
for better performance, especially for ATS cache efficiency. This
would allow those device drivers to express their requirements directly
during IOVA allocation.
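
Something along these lines, purely as a hypothetical prototype to
illustrate the idea (not part of this patch):

	/* align == 0 keeps the current default behaviour */
	bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
				phys_addr_t phys, size_t size,
				unsigned long align);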
> + *
> + * Returns %true if the IOVA-based DMA API can be used and IOVA space has been
> + * allocated, or %false if the regular DMA API should be used.
> + */
> +bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
> + phys_addr_t phys, size_t size)
> +{
> + struct iommu_dma_cookie *cookie;
> + struct iommu_domain *domain;
> + struct iova_domain *iovad;
> + size_t iova_off;
> + dma_addr_t addr;
> +
> + memset(state, 0, sizeof(*state));
> + if (!use_dma_iommu(dev))
> + return false;
> +
> + domain = iommu_get_dma_domain(dev);
> + cookie = domain->iova_cookie;
> + iovad = &cookie->iovad;
> + iova_off = iova_offset(iovad, phys);
> +
> + if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
> + iommu_deferred_attach(dev, iommu_get_domain_for_dev(dev)))
> + return false;
> +
> + if (WARN_ON_ONCE(!size))
> + return false;
> +
> + /*
> + * DMA_IOVA_USE_SWIOTLB is flag which is set by dma-iommu
> + * internals, make sure that caller didn't set it and/or
> + * didn't use this interface to map SIZE_MAX.
> + */
> + if (WARN_ON_ONCE((u64)size & DMA_IOVA_USE_SWIOTLB))
I'm a little concerned that device drivers might inadvertently misuse
state->__size by forgetting that the high bit is used for
DMA_IOVA_USE_SWIOTLB. Perhaps a separate flag within struct
dma_iova_state could prevent such issues?
> + return false;
> +
> + addr = iommu_dma_alloc_iova(domain,
> + iova_align(iovad, size + iova_off),
> + dma_get_mask(dev), dev);
> + if (!addr)
> + return false;
> +
> + state->addr = addr + iova_off;
> + state->__size = size;
> + return true;
> +}
> +EXPORT_SYMBOL_GPL(dma_iova_try_alloc);
Thanks,
baolu
On Tue, Apr 29, 2025 at 11:10:54AM +0800, Baolu Lu wrote:
> On 4/28/25 17:22, Leon Romanovsky wrote:
> > From: Leon Romanovsky<leonro@nvidia.com>
> >
> > The existing .map_page() callback provides both allocating of IOVA
>
> .map_pages()
Changed, thanks
>
> > and linking DMA pages. That combination works great for most of the
> > callers who use it in control paths, but is less effective in fast
> > paths where there may be multiple calls to map_page().
> >
> > These advanced callers already manage their data in some sort of
> > database and can perform IOVA allocation in advance, leaving range
> > linkage operation to be in fast path.
> >
> > Provide an interface to allocate/deallocate IOVA and next patch
> > link/unlink DMA ranges to that specific IOVA.
> >
> > In the new API a DMA mapping transaction is identified by a
> > struct dma_iova_state, which holds some recomputed information
> > for the transaction which does not change for each page being
> > mapped, so add a check if IOVA can be used for the specific
> > transaction.
> >
> > The API is exported from dma-iommu as it is the only implementation
> > supported, the namespace is clearly different from iommu_* functions
> > which are not allowed to be used. This code layout allows us to save
> > function call per API call used in datapath as well as a lot of boilerplate
> > code.
> >
> > Reviewed-by: Christoph Hellwig<hch@lst.de>
> > Tested-by: Jens Axboe<axboe@kernel.dk>
> > Reviewed-by: Luis Chamberlain<mcgrof@kernel.org>
> > Signed-off-by: Leon Romanovsky<leonro@nvidia.com>
> > ---
> > drivers/iommu/dma-iommu.c | 86 +++++++++++++++++++++++++++++++++++++
> > include/linux/dma-mapping.h | 48 +++++++++++++++++++++
> > 2 files changed, 134 insertions(+)
> >
> > diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> > index 9ba8d8bc0ce9..d3211a8d755e 100644
> > --- a/drivers/iommu/dma-iommu.c
> > +++ b/drivers/iommu/dma-iommu.c
> > @@ -1723,6 +1723,92 @@ size_t iommu_dma_max_mapping_size(struct device *dev)
> > return SIZE_MAX;
> > }
> > +/**
> > + * dma_iova_try_alloc - Try to allocate an IOVA space
> > + * @dev: Device to allocate the IOVA space for
> > + * @state: IOVA state
> > + * @phys: physical address
> > + * @size: IOVA size
> > + *
> > + * Check if @dev supports the IOVA-based DMA API, and if yes allocate IOVA space
> > + * for the given base address and size.
> > + *
> > + * Note: @phys is only used to calculate the IOVA alignment. Callers that always
> > + * do PAGE_SIZE aligned transfers can safely pass 0 here.
>
> Have you considered adding a direct alignment parameter to
> dma_iova_try_alloc()? '0' simply means the default PAGE_SIZE alignment.
>
> I'm imagining that some devices might have particular alignment needs
> for better performance, especially for the ATS cache efficiency. This
> would allow those device drivers to express the requirements directly
> during iova allocation.
This is actually what is happening now; take a look at the
blk_rq_dma_map_iter_start() implementation, which uses a custom alignment.
>
> > + *
> > + * Returns %true if the IOVA-based DMA API can be used and IOVA space has been
> > + * allocated, or %false if the regular DMA API should be used.
> > + */
> > +bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
> > + phys_addr_t phys, size_t size)
> > +{
> > + struct iommu_dma_cookie *cookie;
> > + struct iommu_domain *domain;
> > + struct iova_domain *iovad;
> > + size_t iova_off;
> > + dma_addr_t addr;
> > +
> > + memset(state, 0, sizeof(*state));
> > + if (!use_dma_iommu(dev))
> > + return false;
> > +
> > + domain = iommu_get_dma_domain(dev);
> > + cookie = domain->iova_cookie;
> > + iovad = &cookie->iovad;
> > + iova_off = iova_offset(iovad, phys);
> > +
> > + if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
> > + iommu_deferred_attach(dev, iommu_get_domain_for_dev(dev)))
> > + return false;
> > +
> > + if (WARN_ON_ONCE(!size))
> > + return false;
> > +
> > + /*
> > + * DMA_IOVA_USE_SWIOTLB is flag which is set by dma-iommu
> > + * internals, make sure that caller didn't set it and/or
> > + * didn't use this interface to map SIZE_MAX.
> > + */
> > + if (WARN_ON_ONCE((u64)size & DMA_IOVA_USE_SWIOTLB))
>
> I'm a little concerned that device drivers might inadvertently misuse
> the state->__size by forgetting about the high bit being used for
> DMA_IOVA_USE_SWIOTLB. Perhaps adding a separate flag within struct
> dma_iova_state to prevent such issues?
Device drivers are not supposed to use this DMA API interface directly;
the vision is that subsystems will provide wrappers specific to them. See
the HMM and block changes as examples. The VFIO mlx5 implementation is a
temporary measure until we convert another VFIO LM driver and get an
understanding of what type of abstraction we will need.

The high bit is used to save memory.
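
And callers that need the length are expected to go through the accessors
added in this patch rather than touch __size directly, e.g. (sketch):

	size_t len = 0;

	/* dma_iova_size() masks DMA_IOVA_USE_SWIOTLB out of __size */
	if (dma_use_iova(&state))
		len = dma_iova_size(&state);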
>
> > + return false;
> > +
> > + addr = iommu_dma_alloc_iova(domain,
> > + iova_align(iovad, size + iova_off),
> > + dma_get_mask(dev), dev);
> > + if (!addr)
> > + return false;
> > +
> > + state->addr = addr + iova_off;
> > + state->__size = size;
> > + return true;
> > +}
> > +EXPORT_SYMBOL_GPL(dma_iova_try_alloc);
>
> Thanks,
> baolu
>