Backport the fix commit ("tls: fix race between async notify and socket close") for CVE-2024-26583 [1]. It depends on three tls commits that simplify and factor out the async waiting logic. Those dependencies also allow a clean backport of the fix commit ("net: tls: handle backlogging of crypto requests") for CVE-2024-26584 [2], so add them as well:
Jakub Kicinski (4):
  tls: rx: simplify async wait
  net: tls: factor out tls_*crypt_async_wait()
  tls: fix race between async notify and socket close
  net: tls: handle backlogging of crypto requests
Sabrina Dubroca (1):
  tls: extract context alloc/initialization out of tls_set_sw_offload
Please review and consider applying these patches.
[1] https://lore.kernel.org/all/2024022146-traction-unjustly-f451@gregkh/
[2] https://lore.kernel.org/all/2024022148-showpiece-yanking-107c@gregkh/
 include/net/tls.h |   6 --
 net/tls/tls_sw.c  | 199 ++++++++++++++++++++++++----------------------
 2 files changed, 106 insertions(+), 99 deletions(-)
From: Jakub Kicinski <kuba@kernel.org>
commit 37943f047bfb88ba4dfc7a522563f57c86d088a0 upstream.
Since we are protected from async completions by decrypt_compl_lock, we can drop the async_notify flag and reinit the completion before we start waiting.
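Condensed from the hunks below (an illustrative sketch, not the literal diff), the completion/wait handshake after this change becomes:

        /* tls_decrypt_done(): signal unconditionally when the last request finishes */
        spin_lock_bh(&ctx->decrypt_compl_lock);
        if (!atomic_dec_return(&ctx->decrypt_pending))
                complete(&ctx->async_wait.completion);
        spin_unlock_bh(&ctx->decrypt_compl_lock);

        /* tls_sw_recvmsg(): re-arm the completion before checking for pending work */
        spin_lock_bh(&ctx->decrypt_compl_lock);
        reinit_completion(&ctx->async_wait.completion);
        pending = atomic_read(&ctx->decrypt_pending);
        spin_unlock_bh(&ctx->decrypt_compl_lock);
        if (pending)
                crypto_wait_req(-EINPROGRESS, &ctx->async_wait);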
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Stable-dep-of: aec7961916f3 ("tls: fix race between async notify and socket close")
Cc: stable@vger.kernel.org # 5.15
Signed-off-by: Shaoying Xu <shaoyi@amazon.com>
---
 include/net/tls.h |  1 -
 net/tls/tls_sw.c  | 14 ++------------
 2 files changed, 2 insertions(+), 13 deletions(-)
diff --git a/include/net/tls.h b/include/net/tls.h
index ea0aeae26cf7..dcd6aa08c067 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -150,7 +150,6 @@ struct tls_sw_context_rx {
         atomic_t decrypt_pending;
         /* protect crypto_wait with decrypt_pending*/
         spinlock_t decrypt_compl_lock;
-        bool async_notify;
 };
 struct tls_record_info {
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index fc55b65695e5..9c443646417e 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -174,7 +174,6 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
         struct scatterlist *sg;
         struct sk_buff *skb;
         unsigned int pages;
-        int pending;
         skb = (struct sk_buff *)req->data;
         tls_ctx = tls_get_ctx(skb->sk);
@@ -222,9 +221,7 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
         kfree(aead_req);
         spin_lock_bh(&ctx->decrypt_compl_lock);
-        pending = atomic_dec_return(&ctx->decrypt_pending);
-
-        if (!pending && ctx->async_notify)
+        if (!atomic_dec_return(&ctx->decrypt_pending))
                 complete(&ctx->async_wait.completion);
         spin_unlock_bh(&ctx->decrypt_compl_lock);
 }
@@ -1917,7 +1914,7 @@ int tls_sw_recvmsg(struct sock *sk,
                 /* Wait for all previously submitted records to be decrypted */
                 spin_lock_bh(&ctx->decrypt_compl_lock);
-                ctx->async_notify = true;
+                reinit_completion(&ctx->async_wait.completion);
                 pending = atomic_read(&ctx->decrypt_pending);
                 spin_unlock_bh(&ctx->decrypt_compl_lock);
                 if (pending) {
@@ -1929,15 +1926,8 @@ int tls_sw_recvmsg(struct sock *sk,
                                 decrypted = 0;
                                 goto end;
                         }
-                } else {
-                        reinit_completion(&ctx->async_wait.completion);
                 }
-                /* There can be no concurrent accesses, since we have no
-                 * pending decrypt operations
-                 */
-                WRITE_ONCE(ctx->async_notify, false);
-
                 /* Drain records from the rx_list & copy if required */
                 if (is_peek || is_kvec)
                         err = process_rx_list(ctx, msg, &control, copied,
From: Sabrina Dubroca <sd@queasysnail.net>
commit 615580cbc99af0da2d1c7226fab43a3d5003eb97 upstream.
Simplify tls_set_sw_offload a bit.
Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Stable-dep-of: aec7961916f3 ("tls: fix race between async notify and socket close")
[v5.15: fixed contextual conflicts from unavailable init_waitqueue_head and skb_queue_head_init calls in tls_set_sw_offload and init_ctx_rx]
Cc: stable@vger.kernel.org # 5.15
Signed-off-by: Shaoying Xu <shaoyi@amazon.com>
---
 net/tls/tls_sw.c | 82 +++++++++++++++++++++++++++++-------------------
 1 file changed, 49 insertions(+), 33 deletions(-)
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 9c443646417e..40d1f205c92f 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -2291,6 +2291,46 @@ void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
         strp_check_rcv(&rx_ctx->strp);
 }
+static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct sock *sk)
+{
+        struct tls_sw_context_tx *sw_ctx_tx;
+
+        if (!ctx->priv_ctx_tx) {
+                sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
+                if (!sw_ctx_tx)
+                        return NULL;
+        } else {
+                sw_ctx_tx = ctx->priv_ctx_tx;
+        }
+
+        crypto_init_wait(&sw_ctx_tx->async_wait);
+        spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
+        INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
+        INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
+        sw_ctx_tx->tx_work.sk = sk;
+
+        return sw_ctx_tx;
+}
+
+static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx)
+{
+        struct tls_sw_context_rx *sw_ctx_rx;
+
+        if (!ctx->priv_ctx_rx) {
+                sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
+                if (!sw_ctx_rx)
+                        return NULL;
+        } else {
+                sw_ctx_rx = ctx->priv_ctx_rx;
+        }
+
+        crypto_init_wait(&sw_ctx_rx->async_wait);
+        spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
+        skb_queue_head_init(&sw_ctx_rx->rx_list);
+
+        return sw_ctx_rx;
+}
+
 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 {
         struct tls_context *tls_ctx = tls_get_ctx(sk);
@@ -2317,46 +2357,22 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
         }
         if (tx) {
-                if (!ctx->priv_ctx_tx) {
-                        sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
-                        if (!sw_ctx_tx) {
-                                rc = -ENOMEM;
-                                goto out;
-                        }
-                        ctx->priv_ctx_tx = sw_ctx_tx;
-                } else {
-                        sw_ctx_tx =
-                                (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
-                }
-        } else {
-                if (!ctx->priv_ctx_rx) {
-                        sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
-                        if (!sw_ctx_rx) {
-                                rc = -ENOMEM;
-                                goto out;
-                        }
-                        ctx->priv_ctx_rx = sw_ctx_rx;
-                } else {
-                        sw_ctx_rx =
-                                (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
-                }
-        }
+                ctx->priv_ctx_tx = init_ctx_tx(ctx, sk);
+                if (!ctx->priv_ctx_tx)
+                        return -ENOMEM;
-        if (tx) {
-                crypto_init_wait(&sw_ctx_tx->async_wait);
-                spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
+                sw_ctx_tx = ctx->priv_ctx_tx;
                 crypto_info = &ctx->crypto_send.info;
                 cctx = &ctx->tx;
                 aead = &sw_ctx_tx->aead_send;
-                INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
-                INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
-                sw_ctx_tx->tx_work.sk = sk;
         } else {
-                crypto_init_wait(&sw_ctx_rx->async_wait);
-                spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
+                ctx->priv_ctx_rx = init_ctx_rx(ctx);
+                if (!ctx->priv_ctx_rx)
+                        return -ENOMEM;
+
+                sw_ctx_rx = ctx->priv_ctx_rx;
                 crypto_info = &ctx->crypto_recv.info;
                 cctx = &ctx->rx;
-                skb_queue_head_init(&sw_ctx_rx->rx_list);
                 aead = &sw_ctx_rx->aead_recv;
         }
From: Jakub Kicinski <kuba@kernel.org>
commit c57ca512f3b68ddcd62bda9cc24a8f5584ab01b1 upstream.
Factor out waiting for async encrypt and decrypt to finish. There are already multiple copies and a subsequent fix will need more. No functional changes.
Note that crypto_wait_req() returns wait->err.
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Simon Horman <horms@kernel.org>
Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Stable-dep-of: aec7961916f3 ("tls: fix race between async notify and socket close")
[v5.15: removed changes in tls_sw_splice_eof and adjusted the factored-out waiting for async decrypt in tls_sw_recvmsg]
Cc: stable@vger.kernel.org # 5.15
Signed-off-by: Shaoying Xu <shaoyi@amazon.com>
---
 net/tls/tls_sw.c | 90 ++++++++++++++++++++++++++----------------------
 1 file changed, 49 insertions(+), 41 deletions(-)
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 40d1f205c92f..614cb30dae13 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -226,6 +226,20 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
         spin_unlock_bh(&ctx->decrypt_compl_lock);
 }
+static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
+{
+        int pending;
+
+        spin_lock_bh(&ctx->decrypt_compl_lock);
+        reinit_completion(&ctx->async_wait.completion);
+        pending = atomic_read(&ctx->decrypt_pending);
+        spin_unlock_bh(&ctx->decrypt_compl_lock);
+        if (pending)
+                crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+
+        return ctx->async_wait.err;
+}
+
 static int tls_do_decryption(struct sock *sk,
                              struct sk_buff *skb,
                              struct scatterlist *sgin,
@@ -496,6 +510,28 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
                 schedule_delayed_work(&ctx->tx_work.work, 1);
 }
+static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
+{
+        int pending;
+
+        spin_lock_bh(&ctx->encrypt_compl_lock);
+        ctx->async_notify = true;
+
+        pending = atomic_read(&ctx->encrypt_pending);
+        spin_unlock_bh(&ctx->encrypt_compl_lock);
+        if (pending)
+                crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+        else
+                reinit_completion(&ctx->async_wait.completion);
+
+        /* There can be no concurrent accesses, since we have no
+         * pending encrypt operations
+         */
+        WRITE_ONCE(ctx->async_notify, false);
+
+        return ctx->async_wait.err;
+}
+
 static int tls_do_encryption(struct sock *sk,
                              struct tls_context *tls_ctx,
                              struct tls_sw_context_tx *ctx,
@@ -946,7 +982,6 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
         int num_zc = 0;
         int orig_size;
         int ret = 0;
-        int pending;
         if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
                                MSG_CMSG_COMPAT))
@@ -1115,24 +1150,12 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
         if (!num_async) {
                 goto send_end;
         } else if (num_zc) {
-                /* Wait for pending encryptions to get completed */
-                spin_lock_bh(&ctx->encrypt_compl_lock);
-                ctx->async_notify = true;
-
-                pending = atomic_read(&ctx->encrypt_pending);
-                spin_unlock_bh(&ctx->encrypt_compl_lock);
-                if (pending)
-                        crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
-                else
-                        reinit_completion(&ctx->async_wait.completion);
-
-                /* There can be no concurrent accesses, since we have no
-                 * pending encrypt operations
-                 */
-                WRITE_ONCE(ctx->async_notify, false);
+                int err;
-                if (ctx->async_wait.err) {
-                        ret = ctx->async_wait.err;
+                /* Wait for pending encryptions to get completed */
+                err = tls_encrypt_async_wait(ctx);
+                if (err) {
+                        ret = err;
                         copied = 0;
                 }
         }
@@ -1910,22 +1933,14 @@ int tls_sw_recvmsg(struct sock *sk,
 recv_end:
         if (async) {
-                int pending;
-
                 /* Wait for all previously submitted records to be decrypted */
-                spin_lock_bh(&ctx->decrypt_compl_lock);
-                reinit_completion(&ctx->async_wait.completion);
-                pending = atomic_read(&ctx->decrypt_pending);
-                spin_unlock_bh(&ctx->decrypt_compl_lock);
-                if (pending) {
-                        err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
-                        if (err) {
-                                /* one of async decrypt failed */
-                                tls_err_abort(sk, err);
-                                copied = 0;
-                                decrypted = 0;
-                                goto end;
-                        }
+                err = tls_decrypt_async_wait(ctx);
+                if (err) {
+                        /* one of async decrypt failed */
+                        tls_err_abort(sk, err);
+                        copied = 0;
+                        decrypted = 0;
+                        goto end;
                 }
                 /* Drain records from the rx_list & copy if required */
@@ -2144,16 +2159,9 @@ void tls_sw_release_resources_tx(struct sock *sk)
         struct tls_context *tls_ctx = tls_get_ctx(sk);
         struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
         struct tls_rec *rec, *tmp;
-        int pending;
         /* Wait for any pending async encryptions to complete */
-        spin_lock_bh(&ctx->encrypt_compl_lock);
-        ctx->async_notify = true;
-        pending = atomic_read(&ctx->encrypt_pending);
-        spin_unlock_bh(&ctx->encrypt_compl_lock);
-
-        if (pending)
-                crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+        tls_encrypt_async_wait(ctx);
tls_tx_records(sk, -1);
From: Jakub Kicinski <kuba@kernel.org>
commit aec7961916f3f9e88766e2688992da6980f11b8d upstream.
The submitting thread (one which called recvmsg/sendmsg) may exit as soon as the async crypto handler calls complete(), so any code past that point risks touching already freed data.
Try to avoid the locking and extra flags altogether. Have the main thread hold an extra reference; this way we can depend solely on the atomic ref counter for synchronization.
Don't futz with reiniting the completion, either; we are now tightly controlling when the completion fires.
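In v5.15 terms the resulting scheme looks roughly like this (a sketch pieced together from the hunks below, shown for the decrypt side; the encrypt side is symmetric, and this is illustrative rather than the literal diff):

        /* init_ctx_rx(): the counter starts at 1, acting as the main thread's reference */
        atomic_set(&sw_ctx_rx->decrypt_pending, 1);

        /* tls_do_decryption(): one extra reference per in-flight async request */
        atomic_inc(&ctx->decrypt_pending);

        /* tls_decrypt_done(): drop the request's reference; complete() only
         * fires once the waiter has dropped its own reference as well
         */
        if (atomic_dec_and_test(&ctx->decrypt_pending))
                complete(&ctx->async_wait.completion);

        /* tls_decrypt_async_wait(): drop the main reference, sleep if requests
         * are still pending, then restore the reference for the next caller
         */
        if (!atomic_dec_and_test(&ctx->decrypt_pending))
                crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
        atomic_inc(&ctx->decrypt_pending);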
Reported-by: valis <sec@valis.email>
Fixes: 0cada33241d9 ("net/tls: fix race condition causing kernel panic")
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Simon Horman <horms@kernel.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
[v5.15: fixed contextual conflicts in struct tls_sw_context_rx and func init_ctx_rx; replaced DEBUG_NET_WARN_ON_ONCE with BUILD_BUG_ON_INVALID since they're equivalent when DEBUG_NET is not defined]
Cc: stable@vger.kernel.org # 5.15
Signed-off-by: Shaoying Xu <shaoyi@amazon.com>
---
 include/net/tls.h |  5 -----
 net/tls/tls_sw.c  | 43 ++++++++++---------------------------------
 2 files changed, 10 insertions(+), 38 deletions(-)
diff --git a/include/net/tls.h b/include/net/tls.h
index dcd6aa08c067..59ff5c901ab5 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -128,9 +128,6 @@ struct tls_sw_context_tx {
         struct tls_rec *open_rec;
         struct list_head tx_list;
         atomic_t encrypt_pending;
-        /* protect crypto_wait with encrypt_pending */
-        spinlock_t encrypt_compl_lock;
-        int async_notify;
         u8 async_capable:1;
 #define BIT_TX_SCHEDULED	0
@@ -148,8 +145,6 @@ struct tls_sw_context_rx {
         struct sk_buff *recv_pkt;
         u8 async_capable:1;
         atomic_t decrypt_pending;
-        /* protect crypto_wait with decrypt_pending*/
-        spinlock_t decrypt_compl_lock;
 };
 struct tls_record_info {
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 614cb30dae13..40b96780e13d 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -220,22 +220,15 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
kfree(aead_req);
-        spin_lock_bh(&ctx->decrypt_compl_lock);
-        if (!atomic_dec_return(&ctx->decrypt_pending))
+        if (atomic_dec_and_test(&ctx->decrypt_pending))
                 complete(&ctx->async_wait.completion);
-        spin_unlock_bh(&ctx->decrypt_compl_lock);
 }
 static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
 {
-        int pending;
-
-        spin_lock_bh(&ctx->decrypt_compl_lock);
-        reinit_completion(&ctx->async_wait.completion);
-        pending = atomic_read(&ctx->decrypt_pending);
-        spin_unlock_bh(&ctx->decrypt_compl_lock);
-        if (pending)
+        if (!atomic_dec_and_test(&ctx->decrypt_pending))
                 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+        atomic_inc(&ctx->decrypt_pending);
         return ctx->async_wait.err;
 }
@@ -271,6 +264,7 @@ static int tls_do_decryption(struct sock *sk,
                 aead_request_set_callback(aead_req,
                                           CRYPTO_TFM_REQ_MAY_BACKLOG,
                                           tls_decrypt_done, skb);
+                BUILD_BUG_ON_INVALID(atomic_read(&ctx->decrypt_pending) < 1);
                 atomic_inc(&ctx->decrypt_pending);
         } else {
                 aead_request_set_callback(aead_req,
@@ -460,7 +454,6 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
         struct sk_msg *msg_en;
         struct tls_rec *rec;
         bool ready = false;
-        int pending;
         rec = container_of(aead_req, struct tls_rec, aead_req);
         msg_en = &rec->msg_encrypted;
@@ -495,12 +488,8 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
                 ready = true;
         }
-        spin_lock_bh(&ctx->encrypt_compl_lock);
-        pending = atomic_dec_return(&ctx->encrypt_pending);
-
-        if (!pending && ctx->async_notify)
+        if (atomic_dec_and_test(&ctx->encrypt_pending))
                 complete(&ctx->async_wait.completion);
-        spin_unlock_bh(&ctx->encrypt_compl_lock);
         if (!ready)
                 return;
@@ -512,22 +501,9 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
 static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
 {
-        int pending;
-
-        spin_lock_bh(&ctx->encrypt_compl_lock);
-        ctx->async_notify = true;
-
-        pending = atomic_read(&ctx->encrypt_pending);
-        spin_unlock_bh(&ctx->encrypt_compl_lock);
-        if (pending)
+        if (!atomic_dec_and_test(&ctx->encrypt_pending))
                 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
-        else
-                reinit_completion(&ctx->async_wait.completion);
-
-        /* There can be no concurrent accesses, since we have no
-         * pending encrypt operations
-         */
-        WRITE_ONCE(ctx->async_notify, false);
+        atomic_inc(&ctx->encrypt_pending);
         return ctx->async_wait.err;
 }
@@ -571,6 +547,7 @@ static int tls_do_encryption(struct sock *sk,
         /* Add the record in tx_list */
         list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
+        BUILD_BUG_ON_INVALID(atomic_read(&ctx->encrypt_pending) < 1);
         atomic_inc(&ctx->encrypt_pending);
         rc = crypto_aead_encrypt(aead_req);
@@ -2312,7 +2289,7 @@ static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct soc
         }
         crypto_init_wait(&sw_ctx_tx->async_wait);
-        spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
+        atomic_set(&sw_ctx_tx->encrypt_pending, 1);
         INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
         INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
         sw_ctx_tx->tx_work.sk = sk;
@@ -2333,7 +2310,7 @@ static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx)
         }
         crypto_init_wait(&sw_ctx_rx->async_wait);
-        spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
+        atomic_set(&sw_ctx_rx->decrypt_pending, 1);
         skb_queue_head_init(&sw_ctx_rx->rx_list);
return sw_ctx_rx;
From: Jakub Kicinski <kuba@kernel.org>
commit 8590541473188741055d27b955db0777569438e3 upstream.
Since we're setting the CRYPTO_TFM_REQ_MAY_BACKLOG flag on our requests to the crypto API, crypto_aead_{encrypt,decrypt} can return -EBUSY instead of -EINPROGRESS in valid situations. For example, when the cryptd queue for AESNI is full (easy to trigger with an artificially low cryptd.cryptd_max_cpu_qlen), requests will be enqueued to the backlog but still processed. In that case, the async callback will also be called twice: first with err == -EINPROGRESS, which it seems we can just ignore, then with err == 0.
Compared to Sabrina's original patch this version uses the new tls_*crypt_async_wait() helpers and converts the EBUSY to EINPROGRESS to avoid having to modify all the error handling paths. The handling is identical.
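The submit-side handling this adds boils down to the following (condensed from the tls_do_decryption() hunk below; tls_do_encryption() gets the same treatment):

        ret = crypto_aead_decrypt(aead_req);
        if (ret == -EBUSY) {
                /* backlogged: wait for the backlog to flush, then report the
                 * usual async status so callers need no new error paths
                 */
                ret = tls_decrypt_async_wait(ctx);
                ret = ret ?: -EINPROGRESS;
        }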
Fixes: a54667f6728c ("tls: Add support for encryption using async offload accelerator")
Fixes: 94524d8fc965 ("net/tls: Add support for async decryption of tls records")
Co-developed-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
Link: https://lore.kernel.org/netdev/9681d1febfec295449a62300938ed2ae66983f28.1694...
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Simon Horman <horms@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
[v5.15: fixed contextual merge conflicts in tls_decrypt_done and tls_encrypt_done]
Cc: stable@vger.kernel.org # 5.15
Signed-off-by: Shaoying Xu <shaoyi@amazon.com>
---
 net/tls/tls_sw.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 40b96780e13d..90f6cbe5cd5d 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -175,6 +175,17 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
         struct sk_buff *skb;
         unsigned int pages;
+        /* If requests get too backlogged crypto API returns -EBUSY and calls
+         * ->complete(-EINPROGRESS) immediately followed by ->complete(0)
+         * to make waiting for backlog to flush with crypto_wait_req() easier.
+         * First wait converts -EBUSY -> -EINPROGRESS, and the second one
+         * -EINPROGRESS -> 0.
+         * We have a single struct crypto_async_request per direction, this
+         * scheme doesn't help us, so just ignore the first ->complete().
+         */
+        if (err == -EINPROGRESS)
+                return;
+
         skb = (struct sk_buff *)req->data;
         tls_ctx = tls_get_ctx(skb->sk);
         ctx = tls_sw_ctx_rx(tls_ctx);
@@ -273,6 +284,10 @@ static int tls_do_decryption(struct sock *sk,
         }
         ret = crypto_aead_decrypt(aead_req);
+        if (ret == -EBUSY) {
+                ret = tls_decrypt_async_wait(ctx);
+                ret = ret ?: -EINPROGRESS;
+        }
         if (ret == -EINPROGRESS) {
                 if (darg->async)
                         return 0;
@@ -455,6 +470,9 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
         struct tls_rec *rec;
         bool ready = false;
+        if (err == -EINPROGRESS) /* see the comment in tls_decrypt_done() */
+                return;
+
         rec = container_of(aead_req, struct tls_rec, aead_req);
         msg_en = &rec->msg_encrypted;
@@ -551,6 +569,10 @@ static int tls_do_encryption(struct sock *sk,
         atomic_inc(&ctx->encrypt_pending);
         rc = crypto_aead_encrypt(aead_req);
+        if (rc == -EBUSY) {
+                rc = tls_encrypt_async_wait(ctx);
+                rc = rc ?: -EINPROGRESS;
+        }
         if (!rc || rc != -EINPROGRESS) {
                 atomic_dec(&ctx->encrypt_pending);
                 sge->offset -= prot->prepend_size;
On Tue, May 07, 2024 at 10:18:01PM +0000, Shaoying Xu wrote:
> Backport the fix commit ("tls: fix race between async notify and socket close") for CVE-2024-26583 [1]. It depends on three tls commits that simplify and factor out the async waiting logic. Those dependencies also allow a clean backport of the fix commit ("net: tls: handle backlogging of crypto requests") for CVE-2024-26584 [2], so add them as well:
>
> Jakub Kicinski (4):
>   tls: rx: simplify async wait
>   net: tls: factor out tls_*crypt_async_wait()
>   tls: fix race between async notify and socket close
>   net: tls: handle backlogging of crypto requests
>
> Sabrina Dubroca (1):
>   tls: extract context alloc/initialization out of tls_set_sw_offload
>
> Please review and consider applying these patches.
>
> [1] https://lore.kernel.org/all/2024022146-traction-unjustly-f451@gregkh/
> [2] https://lore.kernel.org/all/2024022148-showpiece-yanking-107c@gregkh/
>
>  include/net/tls.h |   6 --
>  net/tls/tls_sw.c  | 199 ++++++++++++++++++++++++----------------------
>  2 files changed, 106 insertions(+), 99 deletions(-)
>
> --
> 2.40.1
All now queued up, thanks.
greg k-h