Introduce a new iterator type backed by a pre-mapped dmabuf represented
by struct dma_token. The token is specific to the file for which it was
created, and the user must not apply the token or the iterator to any
other file. This limitation will be softened in the future.
Suggested-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
include/linux/uio.h | 10 ++++++++++
lib/iov_iter.c | 30 ++++++++++++++++++++++++------
2 files changed, 34 insertions(+), 6 deletions(-)
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 5b127043a151..1b22594ca35b 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -29,6 +29,7 @@ enum iter_type {
ITER_FOLIOQ,
ITER_XARRAY,
ITER_DISCARD,
+ ITER_DMA_TOKEN,
};
#define ITER_SOURCE 1 // == WRITE
@@ -71,6 +72,7 @@ struct iov_iter {
const struct folio_queue *folioq;
struct xarray *xarray;
void __user *ubuf;
+ struct dma_token *dma_token;
};
size_t count;
};
@@ -155,6 +157,11 @@ static inline bool iov_iter_is_xarray(const struct iov_iter *i)
return iov_iter_type(i) == ITER_XARRAY;
}
+static inline bool iov_iter_is_dma_token(const struct iov_iter *i)
+{
+ return iov_iter_type(i) == ITER_DMA_TOKEN;
+}
+
static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
return i->data_source ? WRITE : READ;
@@ -300,6 +307,9 @@ void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction,
unsigned int first_slot, unsigned int offset, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
loff_t start, size_t count);
+void iov_iter_dma_token(struct iov_iter *i, unsigned int direction,
+ struct dma_token *token,
+ loff_t off, size_t count);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 2fe66a6b8789..26fa8f8f13c0 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -563,7 +563,8 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
{
if (unlikely(i->count < size))
size = i->count;
- if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
+ if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i)) ||
+ unlikely(iov_iter_is_dma_token(i))) {
i->iov_offset += size;
i->count -= size;
} else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
@@ -619,7 +620,8 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
return;
}
unroll -= i->iov_offset;
- if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
+ if (iov_iter_is_xarray(i) || iter_is_ubuf(i) ||
+ iov_iter_is_dma_token(i)) {
BUG(); /* We should never go beyond the start of the specified
* range since we might then be straying into pages that
* aren't pinned.
@@ -763,6 +765,21 @@ void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
}
EXPORT_SYMBOL(iov_iter_xarray);
+void iov_iter_dma_token(struct iov_iter *i, unsigned int direction,
+ struct dma_token *token,
+ loff_t off, size_t count)
+{
+ WARN_ON(direction & ~(READ | WRITE));
+ *i = (struct iov_iter){
+ .iter_type = ITER_DMA_TOKEN,
+ .data_source = direction,
+ .dma_token = token,
+ .iov_offset = 0,
+ .count = count,
+ .iov_offset = off,
+ };
+}
+
/**
* iov_iter_discard - Initialise an I/O iterator that discards data
* @i: The iterator to initialise.
@@ -829,7 +846,7 @@ static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
- if (likely(iter_is_ubuf(i))) {
+ if (likely(iter_is_ubuf(i)) || iov_iter_is_dma_token(i)) {
size_t size = i->count;
if (size)
return ((unsigned long)i->ubuf + i->iov_offset) | size;
@@ -860,7 +877,7 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
size_t size = i->count;
unsigned k;
- if (iter_is_ubuf(i))
+ if (iter_is_ubuf(i) || iov_iter_is_dma_token(i))
return 0;
if (WARN_ON(!iter_is_iovec(i)))
@@ -1457,11 +1474,12 @@ EXPORT_SYMBOL_GPL(import_ubuf);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
- !iter_is_ubuf(i)) && !iov_iter_is_kvec(i))
+ !iter_is_ubuf(i) && !iov_iter_is_kvec(i) &&
+ !iov_iter_is_dma_token(i)))
return;
i->iov_offset = state->iov_offset;
i->count = state->count;
- if (iter_is_ubuf(i))
+ if (iter_is_ubuf(i) || iov_iter_is_dma_token(i))
return;
/*
* For the *vec iters, nr_segs + iov is constant - if we increment
--
2.52.0
> +void iov_iter_dma_token(struct iov_iter *i, unsigned int direction,
> + struct dma_token *token,
> + loff_t off, size_t count)
> +{
> + WARN_ON(direction & ~(READ | WRITE));
> + *i = (struct iov_iter){
> + .iter_type = ITER_DMA_TOKEN,
> + .data_source = direction,
> + .dma_token = token,
> + .iov_offset = 0,
nit: iov_offset is assigned again below (to off), so this zero-initialization is redundant and can be dropped.
> + .count = count,
> + .iov_offset = off,
> + };
On Sun, Nov 23, 2025 at 10:51:22PM +0000, Pavel Begunkov wrote:
> diff --git a/include/linux/uio.h b/include/linux/uio.h
> index 5b127043a151..1b22594ca35b 100644
> --- a/include/linux/uio.h
> +++ b/include/linux/uio.h
> @@ -29,6 +29,7 @@ enum iter_type {
> ITER_FOLIOQ,
> ITER_XARRAY,
> ITER_DISCARD,
> + ITER_DMA_TOKEN,
Please use DMABUF/dmabuf naming everywhere, this is about dmabufs and
not dma in general.
Otherwise this looks good.
On 12/4/25 10:43, Christoph Hellwig wrote:
> On Sun, Nov 23, 2025 at 10:51:22PM +0000, Pavel Begunkov wrote:
>> diff --git a/include/linux/uio.h b/include/linux/uio.h
>> index 5b127043a151..1b22594ca35b 100644
>> --- a/include/linux/uio.h
>> +++ b/include/linux/uio.h
>> @@ -29,6 +29,7 @@ enum iter_type {
>> ITER_FOLIOQ,
>> ITER_XARRAY,
>> ITER_DISCARD,
>> + ITER_DMA_TOKEN,
>
> Please use DMABUF/dmabuf naming everywhere, this is about dmabufs and
> not dma in general.
I guess I can do that (in all places) since so much of it has grown
around dmabuf, but for me it was always about passing dma
addresses. Dmabuf was just a way to pass buffers, even though it is
mandatory for the uapi.
--
Pavel Begunkov
© 2016 - 2026 Red Hat, Inc.