[ upstream commit 6ae91ac9a6aa7d6005c3c6d0f4d263fbab9f377f ]
We currently only add a notification CQE when the send succeeded, i.e. cqe.res >= 0. However, it'd be more robust to post buffer notifications for failed requests as well, in case drivers decide to do something funky.
Always return a buffer notification after initial prep, don't hide it. This behaviour is better aligned with the documentation, and the patch also helps userspace to respect it.
Cc: stable@vger.kernel.org # 6.0
Suggested-by: Stefan Metzmacher <metze@samba.org>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/9c8bead87b2b980fcec441b8faef52188b4a6588.166429210...
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 io_uring/net.c | 22 ++++++----------------
 1 file changed, 6 insertions(+), 16 deletions(-)
diff --git a/io_uring/net.c b/io_uring/net.c
index 3dbb2bf99b4d..b0324775e6ce 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -875,7 +875,6 @@ void io_send_zc_cleanup(struct io_kiocb *req)
 {
 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
 
-	zc->notif->flags |= REQ_F_CQE_SKIP;
 	io_notif_flush(zc->notif);
 	zc->notif = NULL;
 }
@@ -992,7 +991,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 	struct msghdr msg;
 	struct iovec iov;
 	struct socket *sock;
-	unsigned msg_flags, cflags;
+	unsigned msg_flags;
 	int ret, min_ret = 0;
 
 	sock = sock_from_file(req->file);
@@ -1060,8 +1059,6 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 			req->flags |= REQ_F_PARTIAL_IO;
 			return io_setup_async_addr(req, addr, issue_flags);
 		}
-		if (ret < 0 && !zc->done_io)
-			zc->notif->flags |= REQ_F_CQE_SKIP;
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
 		req_set_fail(req);
@@ -1074,8 +1071,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 
 	io_notif_flush(zc->notif);
 	req->flags &= ~REQ_F_NEED_CLEANUP;
-	cflags = ret >= 0 ? IORING_CQE_F_MORE : 0;
-	io_req_set_res(req, ret, cflags);
+	io_req_set_res(req, ret, IORING_CQE_F_MORE);
 	return IOU_OK;
 }
 
@@ -1092,17 +1088,11 @@ void io_sendrecv_fail(struct io_kiocb *req)
 void io_send_zc_fail(struct io_kiocb *req)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	int res = req->cqe.res;
 
-	if (req->flags & REQ_F_PARTIAL_IO) {
-		if (req->flags & REQ_F_NEED_CLEANUP) {
-			io_notif_flush(sr->notif);
-			sr->notif = NULL;
-			req->flags &= ~REQ_F_NEED_CLEANUP;
-		}
-		res = sr->done_io;
-	}
-	io_req_set_res(req, res, req->cqe.flags);
+	if (req->flags & REQ_F_PARTIAL_IO)
+		req->cqe.res = sr->done_io;
+	if (req->flags & REQ_F_NEED_CLEANUP)
+		req->cqe.flags |= IORING_CQE_F_MORE;
 }
 
 int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
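For reference, the userspace-visible effect of the change above: once a zerocopy send has made it through initial prep, its main completion now always carries IORING_CQE_F_MORE and is later followed by a buffer-notification CQE flagged IORING_CQE_F_NOTIF, even when the send itself failed. Below is a minimal sketch (not part of the patch) of how a consumer might reap both CQEs; it assumes liburing >= 2.3 for io_uring_prep_send_zc(), the helper name is made up, and error handling is trimmed.

/* Illustrative userspace sketch, not part of the patch. */
#include <liburing.h>
#include <stdbool.h>

static int send_zc_and_reap(struct io_uring *ring, int sockfd,
			    const void *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	bool more;
	int ret, res;

	io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, 0);
	io_uring_submit(ring);

	/*
	 * Main completion: carries the send result; with this patch it is
	 * flagged IORING_CQE_F_MORE whenever the request got past prep,
	 * even if res is negative.
	 */
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret)
		return ret;
	res = cqe->res;
	more = cqe->flags & IORING_CQE_F_MORE;
	io_uring_cqe_seen(ring, cqe);

	/*
	 * Buffer notification: posted once the kernel is done with the
	 * buffer, now also for failed sends. Only skip waiting for it if
	 * the main CQE did not advertise F_MORE.
	 */
	if (more) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret)
			return ret;
		/* cqe->flags has IORING_CQE_F_NOTIF set for this CQE */
		io_uring_cqe_seen(ring, cqe);
	}
	return res;
}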