On Wed, 29 Apr 2026 16:25:48 +0100 Pavel Begunkov asml.silence@gmail.com wrote:
Introduce a new iterator type for dmabuf maps. The map is an opaque object with internals and format specific to the subsystem / driver, and only it can use that subsystem / driver for issuing IO. The task of the middle layers is to pass the map / iterator further down, maybe doing basic splitting and length checking. The iterator can only be used by operations of the file the associated map was created for.
Suggested-by: Keith Busch kbusch@kernel.org Signed-off-by: Pavel Begunkov asml.silence@gmail.com
include/linux/uio.h | 11 +++++++++++ lib/iov_iter.c | 29 +++++++++++++++++++++++------ 2 files changed, 34 insertions(+), 6 deletions(-)
diff --git a/include/linux/uio.h b/include/linux/uio.h index a9bc5b3067e3..75051aed70de 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -12,6 +12,7 @@ struct page; struct folio_queue; +struct io_dmabuf_map; typedef unsigned int __bitwise iov_iter_extraction_t; @@ -29,6 +30,7 @@ enum iter_type { ITER_FOLIOQ, ITER_XARRAY, ITER_DISCARD,
+	ITER_DMABUF_MAP,
}; #define ITER_SOURCE 1 // == WRITE @@ -71,6 +73,7 @@ struct iov_iter { const struct folio_queue *folioq; struct xarray *xarray; void __user *ubuf;
+		struct io_dmabuf_map *dmabuf_map;
 	};
 	size_t count;
@@ -155,6 +158,11 @@ static inline bool iov_iter_is_xarray(const struct iov_iter *i)
 	return iov_iter_type(i) == ITER_XARRAY;
 }
 
+static inline bool iov_iter_is_dmabuf_map(const struct iov_iter *i)
+{
+	return iov_iter_type(i) == ITER_DMABUF_MAP;
+}
static inline unsigned char iov_iter_rw(const struct iov_iter *i) { return i->data_source ? WRITE : READ; @@ -300,6 +308,9 @@ void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction, unsigned int first_slot, unsigned int offset, size_t count); void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray, loff_t start, size_t count); +void iov_iter_dmabuf_map(struct iov_iter *i, unsigned int direction,
+			 struct io_dmabuf_map *map, loff_t off, size_t count);
 ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
 			size_t maxsize, unsigned maxpages, size_t *start);
 ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 243662af1af7..e2253684b991 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -575,7 +575,8 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
 {
 	if (unlikely(i->count < size))
 		size = i->count;
-	if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
+	if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i)) ||
+	    unlikely(iov_iter_is_dmabuf_map(i))) {
Doesn't the extra check add more code to all the non-ubuf cases? This could be fixed by either making iter_type a bitmask (with one bit set) or writing an iter_is_one_of(i, ITER_xxx, ITER_yyy) define that uses '(1 << i->iter_type) & ((1 << ITER_xxx) | ...)' (look at the nolibc printf code for an example).
i->iov_offset += size; i->count -= size;} else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) { @@ -631,7 +632,8 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll) return; } unroll -= i->iov_offset;
-	if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
+	if (iov_iter_is_xarray(i) || iter_is_ubuf(i) ||
iter_is_ubuf() should have been first here.
-- David
+	    iov_iter_is_dmabuf_map(i)) {
 		BUG(); /* We should never go beyond the start of the specified
 			* range since we might then be straying into pages that
 			* aren't pinned.
 			*/
@@ -775,6 +777,20 @@ void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
 }
 EXPORT_SYMBOL(iov_iter_xarray);
 
+void iov_iter_dmabuf_map(struct iov_iter *i, unsigned int direction,
+			 struct io_dmabuf_map *map, loff_t off, size_t count)
+{
+	WARN_ON(direction & ~(READ | WRITE));
+	*i = (struct iov_iter){
+		.iter_type = ITER_DMABUF_MAP,
+		.data_source = direction,
+		.dmabuf_map = map,
+		.count = count,
+		.iov_offset = off,
+	};
+}
+EXPORT_SYMBOL(iov_iter_dmabuf_map);
/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
@@ -841,7 +857,7 @@ static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i) unsigned long iov_iter_alignment(const struct iov_iter *i) {
-	if (likely(iter_is_ubuf(i))) {
+	if (likely(iter_is_ubuf(i)) || iov_iter_is_dmabuf_map(i)) {
 		size_t size = i->count;
 
 		if (size)
 			return ((unsigned long)i->ubuf + i->iov_offset) | size;
@@ -872,7 +888,7 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i) size_t size = i->count; unsigned k;
-	if (iter_is_ubuf(i))
+	if (iter_is_ubuf(i) || iov_iter_is_dmabuf_map(i))
 		return 0;
 	if (WARN_ON(!iter_is_iovec(i)))
@@ -1469,11 +1485,12 @@ EXPORT_SYMBOL_GPL(import_ubuf);
 void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
 {
 	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
-			 !iter_is_ubuf(i) && !iov_iter_is_kvec(i)))
+			 !iter_is_ubuf(i) && !iov_iter_is_kvec(i) &&
+			 !iov_iter_is_dmabuf_map(i)))
 		return;
 	i->iov_offset = state->iov_offset;
 	i->count = state->count;
-	if (iter_is_ubuf(i))
+	if (iter_is_ubuf(i) || iov_iter_is_dmabuf_map(i))
 		return;
 	/*
 	 * For the *vec iters, nr_segs + iov is constant - if we increment