These io_uring patches recently failed to apply to the stable-6.4 tree.
Jens Axboe (1): io_uring: cleanup io_aux_cqe() API
Pavel Begunkov (2): io_uring/net: don't overflow multishot accept io_uring/net: don't overflow multishot recv
io_uring/io_uring.c | 4 +++- io_uring/io_uring.h | 2 +- io_uring/net.c | 9 ++++----- io_uring/poll.c | 4 ++-- io_uring/timeout.c | 4 ++-- 5 files changed, 12 insertions(+), 11 deletions(-)
From: Jens Axboe <axboe@kernel.dk>
[ upstream commit d86eaed185e9c6052d1ee2ca538f1936ff255887 ]
Everybody is passing in the request, so get rid of the io_ring_ctx and explicit user_data pass-in. Both the ctx and user_data can be deduced from the request at hand.
Signed-off-by: Jens Axboe axboe@kernel.dk Signed-off-by: Pavel Begunkov asml.silence@gmail.com --- io_uring/io_uring.c | 4 +++- io_uring/io_uring.h | 2 +- io_uring/net.c | 9 ++++----- io_uring/poll.c | 4 ++-- io_uring/timeout.c | 4 ++-- 5 files changed, 12 insertions(+), 11 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index d3b36197087a..d31765694d44 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -933,9 +933,11 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags return __io_post_aux_cqe(ctx, user_data, res, cflags, true); }
-bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags, +bool io_aux_cqe(const struct io_kiocb *req, bool defer, s32 res, u32 cflags, bool allow_overflow) { + struct io_ring_ctx *ctx = req->ctx; + u64 user_data = req->cqe.user_data; struct io_uring_cqe *cqe; unsigned int length;
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h index 97cfb3f2f06d..ad67bff51465 100644 --- a/io_uring/io_uring.h +++ b/io_uring/io_uring.h @@ -47,7 +47,7 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx); void io_req_defer_failed(struct io_kiocb *req, s32 res); void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags); bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags); -bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags, +bool io_aux_cqe(const struct io_kiocb *req, bool defer, s32 res, u32 cflags, bool allow_overflow); void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
diff --git a/io_uring/net.c b/io_uring/net.c index c8a4b2ac00f7..bd25c1adbf13 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -634,8 +634,8 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret, }
if (!mshot_finished) { - if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER, - req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) { + if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER, + *ret, cflags | IORING_CQE_F_MORE, true)) { io_recv_prep_retry(req); return false; } @@ -1308,7 +1308,6 @@ int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_accept(struct io_kiocb *req, unsigned int issue_flags) { - struct io_ring_ctx *ctx = req->ctx; struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept); bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0; @@ -1358,8 +1357,8 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags)
if (ret < 0) return ret; - if (io_aux_cqe(ctx, issue_flags & IO_URING_F_COMPLETE_DEFER, - req->cqe.user_data, ret, IORING_CQE_F_MORE, true)) + if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER, ret, + IORING_CQE_F_MORE, true)) goto retry;
return -ECANCELED; diff --git a/io_uring/poll.c b/io_uring/poll.c index a78b8af7d9ab..b57e5937573d 100644 --- a/io_uring/poll.c +++ b/io_uring/poll.c @@ -300,8 +300,8 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts) __poll_t mask = mangle_poll(req->cqe.res & req->apoll_events);
- if (!io_aux_cqe(req->ctx, ts->locked, req->cqe.user_data, - mask, IORING_CQE_F_MORE, false)) { + if (!io_aux_cqe(req, ts->locked, mask, + IORING_CQE_F_MORE, false)) { io_req_set_res(req, mask, 0); return IOU_POLL_REMOVE_POLL_USE_RES; } diff --git a/io_uring/timeout.c b/io_uring/timeout.c index 350eb830b485..fb0547b35dcd 100644 --- a/io_uring/timeout.c +++ b/io_uring/timeout.c @@ -73,8 +73,8 @@ static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
if (!io_timeout_finish(timeout, data)) { bool filled; - filled = io_aux_cqe(ctx, ts->locked, req->cqe.user_data, -ETIME, - IORING_CQE_F_MORE, false); + filled = io_aux_cqe(req, ts->locked, -ETIME, IORING_CQE_F_MORE, + false); if (filled) { /* re-arm timer */ spin_lock_irq(&ctx->timeout_lock);
[ upstream commit 1bfed23349716a7811645336a7ce42c4b8f250bc ]
Don't allow overflowing multishot accept CQEs, we want to limit the growth of the overflow list.
Cc: stable@vger.kernel.org Fixes: 4e86a2c980137 ("io_uring: implement multishot mode for accept") Signed-off-by: Pavel Begunkov asml.silence@gmail.com Link: https://lore.kernel.org/r/7d0d749649244873772623dd7747966f516fe6e2.169175766... Signed-off-by: Jens Axboe axboe@kernel.dk --- io_uring/net.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/io_uring/net.c b/io_uring/net.c index bd25c1adbf13..0aadbd72b7a9 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -1358,7 +1358,7 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags) if (ret < 0) return ret; if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER, ret, - IORING_CQE_F_MORE, true)) + IORING_CQE_F_MORE, false)) goto retry;
return -ECANCELED;
[ upstream commit b2e74db55dd93d6db22a813c9a775b5dbf87c560 ]
Don't allow overflowing multishot recv CQEs, it might get out of hand, hurt performance, and in the worst case scenario OOM the task.
Cc: stable@vger.kernel.org Fixes: b3fdea6ecb55c ("io_uring: multishot recv") Signed-off-by: Pavel Begunkov asml.silence@gmail.com Link: https://lore.kernel.org/r/0b295634e8f1b71aa764c984608c22d85f88f75c.169175766... Signed-off-by: Jens Axboe axboe@kernel.dk --- io_uring/net.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/io_uring/net.c b/io_uring/net.c index 0aadbd72b7a9..0e0cc8c8189e 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -635,7 +635,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
if (!mshot_finished) { if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER, - *ret, cflags | IORING_CQE_F_MORE, true)) { + *ret, cflags | IORING_CQE_F_MORE, false)) { io_recv_prep_retry(req); return false; }
On Tue, Sep 12, 2023 at 02:55:21PM +0100, Pavel Begunkov wrote:
Recently failed to apply io_uring stable-6.4 patches.
Jens Axboe (1): io_uring: cleanup io_aux_cqe() API
Pavel Begunkov (2): io_uring/net: don't overflow multishot accept io_uring/net: don't overflow multishot recv
io_uring/io_uring.c | 4 +++- io_uring/io_uring.h | 2 +- io_uring/net.c | 9 ++++----- io_uring/poll.c | 4 ++-- io_uring/timeout.c | 4 ++-- 5 files changed, 12 insertions(+), 11 deletions(-)
Thanks for this, but 6.4.y just went end-of-life.
I've queued up all the other backports you submitted, many thanks for those!
greg k-h
linux-stable-mirror@lists.linaro.org