From: Jens Axboe <axboe@kernel.dk>
commit 8a3e8ee56417f5e0e66580d93941ed9d6f4c8274 upstream.
If we need to continue doing this IO, then we don't want a potentially selected buffer recycled. Add a flag for that.
Set this for recv/recvmsg if they do partial IO.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 io_uring/io_uring.c | 5 +++++
 1 file changed, 5 insertions(+)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 04441e981624..2350d43aa782 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -740,6 +740,7 @@ enum {
 	REQ_F_CREDS_BIT,
 	REQ_F_REFCOUNT_BIT,
 	REQ_F_ARM_LTIMEOUT_BIT,
+	REQ_F_PARTIAL_IO_BIT,
 	/* keep async read/write and isreg together and in order */
 	REQ_F_NOWAIT_READ_BIT,
 	REQ_F_NOWAIT_WRITE_BIT,
@@ -795,6 +796,8 @@
 	REQ_F_REFCOUNT		= BIT(REQ_F_REFCOUNT_BIT),
 	/* there is a linked timeout that has to be armed */
 	REQ_F_ARM_LTIMEOUT	= BIT(REQ_F_ARM_LTIMEOUT_BIT),
+	/* request has already done partial IO */
+	REQ_F_PARTIAL_IO	= BIT(REQ_F_PARTIAL_IO_BIT),
 };
 
 struct async_poll {
@@ -5123,6 +5126,7 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 			ret = -EINTR;
 		if (ret > 0 && io_net_retry(sock, flags)) {
 			sr->done_io += ret;
+			req->flags |= REQ_F_PARTIAL_IO;
 			return io_setup_async_msg(req, kmsg);
 		}
 		req_set_fail(req);
@@ -5196,6 +5200,7 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 			sr->len -= ret;
 			sr->buf += ret;
 			sr->done_io += ret;
+			req->flags |= REQ_F_PARTIAL_IO;
 			return -EAGAIN;
 		}
 		req_set_fail(req);
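
Note: this patch only defines REQ_F_PARTIAL_IO and sets it when recv/recvmsg
make partial progress; the commit message implies a consumer that skips
buffer recycling once the flag is set, which lands separately. A rough sketch
of what such a check could look like is below; the helper name
io_recycle_selected_buf is invented here purely for illustration and is not
part of this patch.

/*
 * Illustrative sketch only, not from this patch: a hypothetical helper in
 * the buffer-recycle path. Once a request has done partial IO, the selected
 * provided buffer has to stay attached to the request so the retry can keep
 * filling it, rather than being handed back to the provided-buffer pool.
 */
static void io_recycle_selected_buf(struct io_kiocb *req)
{
	/* partial IO already done: keep the buffer for the retry */
	if (req->flags & REQ_F_PARTIAL_IO)
		return;

	if (req->flags & REQ_F_BUFFER_SELECTED) {
		/* ... return the buffer to the provided-buffer list ... */
	}
}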