From: Ignacio Encinas <ignacio@iencinas.com>
[ Upstream commit fbc0283fbeae27b88448c90305e738991457fee2 ]
Writes to the error value of a connection are spinlock-protected inside p9_conn_cancel, but lockless reads are done elsewhere to avoid performing unnecessary work after an error has occurred.
Mark the write and the lockless reads to make KCSAN happy. While at it, mark the write as exclusive, following the recommendation in "Lock-Protected Writes with Lockless Reads" in tools/memory-model/Documentation/access-marking.txt.
For stylistic reasons, also mark the m->err reads in p9_fd_request and p9_conn_cancel even though they do not race with concurrent writes.
Reported-by: syzbot+d69a7cc8c683c2cb7506@syzkaller.appspotmail.com
Reported-by: syzbot+483d6c9b9231ea7e1851@syzkaller.appspotmail.com
Signed-off-by: Ignacio Encinas <ignacio@iencinas.com>
Message-ID: <20250318-p9_conn_err_benign_data_race-v3-1-290bb18335cc@iencinas.com>
Signed-off-by: Dominique Martinet <asmadeus@codewreck.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 net/9p/trans_fd.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)
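
For context, a minimal sketch of the "Lock-Protected Writes with Lockless Reads" pattern from tools/memory-model/Documentation/access-marking.txt that this patch applies to m->err. The struct and function names below are illustrative only and are not taken from the 9p code; the accessors (READ_ONCE(), WRITE_ONCE(), ASSERT_EXCLUSIVE_WRITER() and the spinlock API) are the real kernel ones.

#include <linux/spinlock.h>
#include <linux/compiler.h>	/* READ_ONCE()/WRITE_ONCE() */
#include <linux/kcsan-checks.h>	/* ASSERT_EXCLUSIVE_WRITER() */

struct conn {
	spinlock_t lock;
	int err;	/* written under lock, read locklessly */
};

/*
 * Writer side: serialized by the lock, so there is exactly one
 * (marked) writer at a time; ASSERT_EXCLUSIVE_WRITER() lets KCSAN
 * verify that assumption.
 */
static void conn_set_err(struct conn *c, int err)
{
	spin_lock(&c->lock);
	if (READ_ONCE(c->err)) {	/* already failed, keep the first error */
		spin_unlock(&c->lock);
		return;
	}
	WRITE_ONCE(c->err, err);
	ASSERT_EXCLUSIVE_WRITER(c->err);
	spin_unlock(&c->lock);
}

/*
 * Reader side: lockless fast path; a plain load here would be
 * flagged by KCSAN as a data race against the locked write.
 */
static bool conn_failed(struct conn *c)
{
	return READ_ONCE(c->err) < 0;
}
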
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 196060dc6138a..791e4868f2d4e 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -191,12 +191,13 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
 
 	spin_lock(&m->req_lock);
 
-	if (m->err) {
+	if (READ_ONCE(m->err)) {
 		spin_unlock(&m->req_lock);
 		return;
 	}
 
-	m->err = err;
+	WRITE_ONCE(m->err, err);
+	ASSERT_EXCLUSIVE_WRITER(m->err);
 
 	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
 		list_move(&req->req_list, &cancel_list);
@@ -283,7 +284,7 @@ static void p9_read_work(struct work_struct *work)
 
 	m = container_of(work, struct p9_conn, rq);
 
-	if (m->err < 0)
+	if (READ_ONCE(m->err) < 0)
 		return;
 
 	p9_debug(P9_DEBUG_TRANS, "start mux %p pos %zd\n", m, m->rc.offset);
@@ -450,7 +451,7 @@ static void p9_write_work(struct work_struct *work)
 
 	m = container_of(work, struct p9_conn, wq);
 
-	if (m->err < 0) {
+	if (READ_ONCE(m->err) < 0) {
 		clear_bit(Wworksched, &m->wsched);
 		return;
 	}
@@ -622,7 +623,7 @@ static void p9_poll_mux(struct p9_conn *m)
 	__poll_t n;
 	int err = -ECONNRESET;
 
-	if (m->err < 0)
+	if (READ_ONCE(m->err) < 0)
 		return;
 
 	n = p9_fd_poll(m->client, NULL, &err);
@@ -665,6 +666,7 @@ static void p9_poll_mux(struct p9_conn *m)
 static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
 {
 	__poll_t n;
+	int err;
 	struct p9_trans_fd *ts = client->trans;
 	struct p9_conn *m = &ts->conn;
 
@@ -673,9 +675,10 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
 
 	spin_lock(&m->req_lock);
 
-	if (m->err < 0) {
+	err = READ_ONCE(m->err);
+	if (err < 0) {
 		spin_unlock(&m->req_lock);
-		return m->err;
+		return err;
 	}
 
 	WRITE_ONCE(req->status, REQ_STATUS_UNSENT);
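
One detail the commit message does not spell out: in the p9_fd_request hunk the error is loaded once into a local variable, so the check and the return value come from the same READ_ONCE() load rather than reading m->err twice. A minimal sketch of that shape follows; the helper name is hypothetical, and only the m->err handling mirrors the patch.

/* Hypothetical helper; only the m->err handling mirrors the patch. */
static int p9_fd_check_err(struct p9_conn *m)
{
	int err;

	spin_lock(&m->req_lock);

	err = READ_ONCE(m->err);	/* load once into a local ... */
	if (err < 0) {
		spin_unlock(&m->req_lock);
		return err;		/* ... and return that same value */
	}

	/* ... otherwise continue while still holding the lock ... */
	spin_unlock(&m->req_lock);
	return 0;
}
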