In preparation for the following changes, instead of passing an iovec for buffer registration, introduce a new structure. It'll be moved to uapi later, but for now it's initialised early from a user-provided iovec.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> --- io_uring/rsrc.c | 50 +++++++++++++++++++++++++++++++++---------------- 1 file changed, 34 insertions(+), 16 deletions(-)
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c index c4a7a77d1ee9..ba00238941ed 100644 --- a/io_uring/rsrc.c +++ b/io_uring/rsrc.c @@ -27,8 +27,14 @@ struct io_rsrc_update { u32 offset; };
+struct io_uring_regbuf_desc { + __u64 uaddr; + __u64 size; +}; + static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx, - struct iovec *iov, struct page **last_hpage); + struct io_uring_regbuf_desc *desc, + struct page **last_hpage);
/* only define max */ #define IORING_MAX_FIXED_FILES (1U << 20) @@ -36,6 +42,15 @@ static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
#define IO_CACHED_BVECS_SEGS 32
+static void io_iov_to_regbuf_desc(const struct iovec *iov, + struct io_uring_regbuf_desc *desc) +{ + *desc = (struct io_uring_regbuf_desc) { + .uaddr = (u64)iov->iov_base, + .size = iov->iov_len, + }; +} + int __io_account_mem(struct user_struct *user, unsigned long nr_pages) { unsigned long page_limit, cur_pages, new_pages; @@ -291,6 +306,7 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx, return -EINVAL;
for (done = 0; done < nr_args; done++) { + struct io_uring_regbuf_desc desc; struct io_rsrc_node *node; u64 tag = 0;
@@ -304,7 +320,9 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx, err = -EFAULT; break; } - node = io_sqe_buffer_register(ctx, iov, &last_hpage); + + io_iov_to_regbuf_desc(iov, &desc); + node = io_sqe_buffer_register(ctx, &desc, &last_hpage); if (IS_ERR(node)) { err = PTR_ERR(node); break; @@ -760,27 +778,27 @@ bool io_check_coalesce_buffer(struct page **page_array, int nr_pages, }
static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx, - struct iovec *iov, - struct page **last_hpage) + struct io_uring_regbuf_desc *desc, + struct page **last_hpage) { + unsigned long uaddr = (unsigned long)desc->uaddr; + size_t size = desc->size; struct io_mapped_ubuf *imu = NULL; struct page **pages = NULL; struct io_rsrc_node *node; unsigned long off; - size_t size; int ret, nr_pages, i; struct io_imu_folio_data data; bool coalesced = false;
- if (!iov->iov_base) { - if (iov->iov_len) + if (!uaddr) { + if (size) return ERR_PTR(-EFAULT); /* remove the buffer without installing a new one */ return NULL; }
- ret = io_validate_user_buf_range((unsigned long)iov->iov_base, - iov->iov_len); + ret = io_validate_user_buf_range(uaddr, size); if (ret) return ERR_PTR(ret);
@@ -789,8 +807,7 @@ static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx, return ERR_PTR(-ENOMEM);
ret = -ENOMEM; - pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len, - &nr_pages); + pages = io_pin_pages(uaddr, size, &nr_pages); if (IS_ERR(pages)) { ret = PTR_ERR(pages); pages = NULL; @@ -812,10 +829,9 @@ static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx, if (ret) goto done;
- size = iov->iov_len; /* store original address for later verification */ - imu->ubuf = (unsigned long) iov->iov_base; - imu->len = iov->iov_len; + imu->ubuf = uaddr; + imu->len = size; imu->folio_shift = PAGE_SHIFT; imu->release = io_release_ubuf; imu->priv = imu; @@ -825,7 +841,7 @@ static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx, imu->folio_shift = data.folio_shift; refcount_set(&imu->refs, 1);
- off = (unsigned long)iov->iov_base & ~PAGE_MASK; + off = uaddr & ~PAGE_MASK; if (coalesced) off += data.first_folio_page_idx << PAGE_SHIFT;
@@ -878,6 +894,7 @@ int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg, memset(iov, 0, sizeof(*iov));
for (i = 0; i < nr_args; i++) { + struct io_uring_regbuf_desc desc; struct io_rsrc_node *node; u64 tag = 0;
@@ -901,7 +918,8 @@ int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg, } }
- node = io_sqe_buffer_register(ctx, iov, &last_hpage); + io_iov_to_regbuf_desc(iov, &desc); + node = io_sqe_buffer_register(ctx, &desc, &last_hpage); if (IS_ERR(node)) { ret = PTR_ERR(node); break;