The patch below does not apply to the 5.15-stable tree. If someone wants it applied there, or to any other stable or longterm tree, then please email the backport, including the original git commit id, to stable@vger.kernel.org.
To reproduce the conflict and resubmit, you may use the following commands:
git fetch https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/ linux-5.15.y
git checkout FETCH_HEAD
git cherry-pick -x def5b7b2643ebba696fc60ddf675dca13f073486
# <resolve conflicts, build, test, etc.>
git commit -s
git send-email --to 'stable@vger.kernel.org' --in-reply-to '2025072102-broadness-retreat-41e9@gregkh' --subject-prefix 'PATCH 5.15.y' HEAD^..
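For the "<resolve conflicts, build, test, etc.>" step, a typical cycle could look like the following (illustrative only; your config and test environment may differ):

make -j"$(nproc)"
make -C tools/testing/selftests TARGETS=net/mptcp run_tests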
Possible dependencies:
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From def5b7b2643ebba696fc60ddf675dca13f073486 Mon Sep 17 00:00:00 2001
From: Paolo Abeni <pabeni@redhat.com>
Date: Mon, 14 Jul 2025 18:41:45 +0200
Subject: [PATCH] mptcp: plug races between subflow fail and subflow creation
We have races, similar to the one addressed by the previous patch, between subflow failure and additional subflow creation. They are just harder to trigger.
The solution is similar: use a separate flag to track the condition 'the socket state prevents any additional subflow creation', protected by the fallback lock.
Socket fallback makes this condition true, and so does receiving or sending an MP_FAIL option.
Since the field 'allow_infinite_fallback' is now always touched under the relevant lock, we can drop the ONCE annotations on its writes.
Fixes: 478d770008b0 ("mptcp: send out MP_FAIL when data checksum fails")
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Link: https://patch.msgid.link/20250714-net-mptcp-fallback-races-v1-2-391aff963322...
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
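For anyone doing the backport, the idea behind the fix can be modeled in a few lines of plain C. The sketch below is not kernel code: it is a hypothetical userspace illustration, with a pthread mutex standing in for the fallback spinlock and field/function names only mirroring the kernel ones, showing how the fail path and the join path serialize on the same lock:

/* Illustrative userspace model of the patch's locking scheme.
 * The names (msk, fallback_lock, allow_subflows, allow_infinite_fallback)
 * mirror the kernel fields; a pthread mutex replaces the spinlock.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct msk {
	pthread_mutex_t fallback_lock;
	bool allow_infinite_fallback;
	bool allow_subflows;
};

/* Fail path (cf. mptcp_subflow_fail below): proceed only while infinite
 * fallback is still possible, and atomically forbid any later join.
 */
static bool subflow_fail(struct msk *msk)
{
	pthread_mutex_lock(&msk->fallback_lock);
	if (!msk->allow_infinite_fallback) {
		pthread_mutex_unlock(&msk->fallback_lock);
		return false;	/* too late: caller must reset the subflow */
	}
	msk->allow_subflows = false;
	pthread_mutex_unlock(&msk->fallback_lock);
	return true;
}

/* Join path (cf. __mptcp_finish_join below): accept a new subflow only
 * while creation is still allowed; a successful join in turn rules out
 * infinite fallback.
 */
static bool finish_join(struct msk *msk)
{
	bool ok;

	pthread_mutex_lock(&msk->fallback_lock);
	ok = msk->allow_subflows;
	if (ok)
		msk->allow_infinite_fallback = false;
	pthread_mutex_unlock(&msk->fallback_lock);
	return ok;
}

int main(void)
{
	struct msk msk = {
		.fallback_lock = PTHREAD_MUTEX_INITIALIZER,
		.allow_infinite_fallback = true,
		.allow_subflows = true,
	};

	printf("fail accepted: %d\n", subflow_fail(&msk));	/* 1 */
	printf("join allowed:  %d\n", finish_join(&msk));	/* 0 */
	return 0;
}

Whichever side takes the lock first wins: a fail disables all later joins, while a join disables infinite fallback, which makes a later MP_FAIL bail out instead of racing.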
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index feb01747d7d8..420d416e2603 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -765,8 +765,14 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
 
 	pr_debug("fail_seq=%llu\n", fail_seq);
 
-	if (!READ_ONCE(msk->allow_infinite_fallback))
+	/* After accepting the fail, we can't create any other subflows */
+	spin_lock_bh(&msk->fallback_lock);
+	if (!msk->allow_infinite_fallback) {
+		spin_unlock_bh(&msk->fallback_lock);
 		return;
+	}
+	msk->allow_subflows = false;
+	spin_unlock_bh(&msk->fallback_lock);
 
 	if (!subflow->fail_tout) {
 		pr_debug("send MP_FAIL response and infinite map\n");
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index b08a42fcbb65..bf92cee9b5ce 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -791,7 +791,7 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
 static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk)
 {
 	mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq);
-	WRITE_ONCE(msk->allow_infinite_fallback, false);
+	msk->allow_infinite_fallback = false;
 	mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
 }
 
@@ -803,7 +803,7 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
 		return false;
 
 	spin_lock_bh(&msk->fallback_lock);
-	if (__mptcp_check_fallback(msk)) {
+	if (!msk->allow_subflows) {
 		spin_unlock_bh(&msk->fallback_lock);
 		return false;
 	}
@@ -2625,7 +2625,7 @@ static void __mptcp_retrans(struct sock *sk)
 		len = max(copied, len);
 		tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
 			 info.size_goal);
-		WRITE_ONCE(msk->allow_infinite_fallback, false);
+		msk->allow_infinite_fallback = false;
 	}
 	spin_unlock_bh(&msk->fallback_lock);
 
@@ -2753,7 +2753,8 @@ static void __mptcp_init_sock(struct sock *sk)
 	WRITE_ONCE(msk->first, NULL);
 	inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
 	WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
-	WRITE_ONCE(msk->allow_infinite_fallback, true);
+	msk->allow_infinite_fallback = true;
+	msk->allow_subflows = true;
 	msk->recovery = false;
 	msk->subflow_id = 1;
 	msk->last_data_sent = tcp_jiffies32;
@@ -3549,7 +3550,7 @@ bool mptcp_finish_join(struct sock *ssk)
 	/* active subflow, already present inside the conn_list */
 	if (!list_empty(&subflow->node)) {
 		spin_lock_bh(&msk->fallback_lock);
-		if (__mptcp_check_fallback(msk)) {
+		if (!msk->allow_subflows) {
 			spin_unlock_bh(&msk->fallback_lock);
 			return false;
 		}
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 2a60c3c71651..6ec245fd2778 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -346,13 +346,15 @@ struct mptcp_sock {
 			u64	rtt_us; /* last maximum rtt of subflows */
 		} rcvq_space;
 	u8		scaling_ratio;
+	bool		allow_subflows;
 
 	u32		subflow_id;
 	u32		setsockopt_seq;
 	char		ca_name[TCP_CA_NAME_MAX];
 
-	spinlock_t	fallback_lock;	/* protects fallback and
-					 * allow_infinite_fallback
+	spinlock_t	fallback_lock;	/* protects fallback,
+					 * allow_infinite_fallback and
+					 * allow_join
 					 */
 };
 
@@ -1232,6 +1234,7 @@ static inline bool __mptcp_try_fallback(struct mptcp_sock *msk)
 		return false;
 	}
 
+	msk->allow_subflows = false;
 	set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
 	spin_unlock_bh(&msk->fallback_lock);
 	return true;
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index a6a35985e551..1802bc5435a1 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1302,20 +1302,29 @@ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
 		mptcp_schedule_work(sk);
 }
 
-static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
+static bool mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
 {
 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
 	unsigned long fail_tout;
 
+	/* we are really failing, prevent any later subflow join */
+	spin_lock_bh(&msk->fallback_lock);
+	if (!msk->allow_infinite_fallback) {
+		spin_unlock_bh(&msk->fallback_lock);
+		return false;
+	}
+	msk->allow_subflows = false;
+	spin_unlock_bh(&msk->fallback_lock);
+
 	/* graceful failure can happen only on the MPC subflow */
 	if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
-		return;
+		return false;
 
 	/* since the close timeout take precedence on the fail one,
 	 * no need to start the latter when the first is already set
 	 */
 	if (sock_flag((struct sock *)msk, SOCK_DEAD))
-		return;
+		return true;
 
 	/* we don't need extreme accuracy here, use a zero fail_tout as special
 	 * value meaning no fail timeout at all;
@@ -1327,6 +1336,7 @@ static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
 	tcp_send_ack(ssk);
 
 	mptcp_reset_tout_timer(msk, subflow->fail_tout);
+	return true;
 }
 
 static bool subflow_check_data_avail(struct sock *ssk)
@@ -1387,12 +1397,11 @@ static bool subflow_check_data_avail(struct sock *ssk)
 		    (subflow->mp_join || subflow->valid_csum_seen)) {
 			subflow->send_mp_fail = 1;
 
-			if (!READ_ONCE(msk->allow_infinite_fallback)) {
+			if (!mptcp_subflow_fail(msk, ssk)) {
 				subflow->reset_transient = 0;
 				subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
 				goto reset;
 			}
-			mptcp_subflow_fail(msk, ssk);
 			WRITE_ONCE(subflow->data_avail, true);
 			return true;
 		}