[ Upstream commit 432061b3da64e488be3403124a72a9250bbe96d4 ]
There's an XFS-on-dm-crypt deadlock: the stack recurses back into itself because of the crypto subsystem's use of GFP_KERNEL. Reported here: https://bugzilla.kernel.org/show_bug.cgi?id=200835
* dm-crypt calls crypt_convert in xts mode
* init_crypt from xts.c calls kmalloc(GFP_KERNEL)
* kmalloc(GFP_KERNEL) enters direct reclaim and recurses into the XFS
  filesystem; the filesystem tries to submit some bios and wait for
  them, causing a deadlock (see the sketch below)
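For context, the crypto layer picks its allocation mask from the request
flags, so CRYPTO_TFM_REQ_MAY_SLEEP is what turns these allocations into
GFP_KERNEL ones. A minimal sketch of that pattern (simplified, not the
verbatim crypto/xts.c code):

    /*
     * Sketch of the flag-to-GFP mapping used by the crypto layer;
     * simplified, not the verbatim upstream code.
     */
    gfp_t gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                GFP_KERNEL : GFP_ATOMIC;

    /*
     * With GFP_KERNEL this kmalloc() may enter direct reclaim and
     * write back dirty pages, i.e. submit bios to the very dm-crypt
     * device that is waiting on this request.
     */
    buf = kmalloc(len, gfp);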
Fix this by updating both the DM crypt and integrity targets to no longer use the CRYPTO_TFM_REQ_MAY_SLEEP flag, which changes the crypto allocations from GFP_KERNEL to GFP_ATOMIC so that they cannot recurse into a filesystem. A GFP_ATOMIC allocation can fail, but init_crypt() in xts.c handles the allocation failure gracefully: it falls back to a preallocated buffer if the allocation fails.
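As an illustration of that fallback, a condensed sketch of the
init_crypt() logic (abridged from the crypto/xts.c of that era, not the
verbatim source; rctx->buf is the preallocated XTS_BUFFER_SIZE scratch
buffer):

    rctx->ext = NULL;
    subreq->cryptlen = XTS_BUFFER_SIZE;
    if (req->cryptlen > XTS_BUFFER_SIZE) {
            unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);

            /* GFP_ATOMIC once CRYPTO_TFM_REQ_MAY_SLEEP is cleared */
            rctx->ext = kmalloc(n, gfp);
            if (rctx->ext)
                    subreq->cryptlen = n;
            /*
             * On failure rctx->ext stays NULL and the request proceeds
             * in XTS_BUFFER_SIZE chunks via the preallocated buffer;
             * no error is returned to dm-crypt.
             */
    }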
The crypto API maintainer says that the crypto API only needs to allocate memory when dealing with unaligned buffers, and that turning CRYPTO_TFM_REQ_MAY_SLEEP off is therefore safe (see this discussion: https://www.redhat.com/archives/dm-devel/2018-August/msg00195.html).
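In other words, the allocation sits only on the misaligned bounce path;
a hedged sketch of that shape (crypt_in_place() and the surrounding
names are illustrative, not the actual skcipher walk API):

    /* Illustrative only; not the real crypto walk code. */
    if (IS_ALIGNED((unsigned long)addr, alignmask + 1))
            return crypt_in_place(req);   /* common case: no allocation */

    /*
     * Misaligned caller buffer: bounce through a temporary buffer.
     * Without CRYPTO_TFM_REQ_MAY_SLEEP this is a GFP_ATOMIC kmalloc(),
     * which cannot recurse into the filesystem.
     */
    bounce = kmalloc(nbytes, GFP_ATOMIC);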
Cc: stable@vger.kernel.org
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Sasha Levin (Microsoft) <sashal@kernel.org>
---
 drivers/md/dm-crypt.c     | 10 +++++-----
 drivers/md/dm-integrity.c |  4 ++--
 2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 0d2005e5b24c..94b8d81f6020 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -334,7 +334,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
 
 	sg_init_one(&sg, cc->key, cc->key_size);
 	ahash_request_set_tfm(req, essiv->hash_tfm);
-	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+	ahash_request_set_callback(req, 0, NULL, NULL);
 	ahash_request_set_crypt(req, &sg, essiv->salt, cc->key_size);
 
 	err = crypto_ahash_digest(req);
@@ -609,7 +609,7 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
 	int i, r;
 
 	desc->tfm = lmk->hash_tfm;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+	desc->flags = 0;
 
 	r = crypto_shash_init(desc);
 	if (r)
@@ -771,7 +771,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
 
 	/* calculate crc32 for every 32bit part and xor it */
 	desc->tfm = tcw->crc32_tfm;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+	desc->flags = 0;
 	for (i = 0; i < 4; i++) {
 		r = crypto_shash_init(desc);
 		if (r)
@@ -1254,7 +1254,7 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
 	 * requests if driver request queue is full.
 	 */
 	skcipher_request_set_callback(ctx->r.req,
-	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+	    CRYPTO_TFM_REQ_MAY_BACKLOG,
 	    kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
 }
 
@@ -1271,7 +1271,7 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
 	 * requests if driver request queue is full.
 	 */
 	aead_request_set_callback(ctx->r.req_aead,
-	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+	    CRYPTO_TFM_REQ_MAY_BACKLOG,
 	    kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
 }
 
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index da4baea9cf83..036379a23499 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -493,7 +493,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
 	unsigned j, size;
 
 	desc->tfm = ic->journal_mac;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+	desc->flags = 0;
 
 	r = crypto_shash_init(desc);
 	if (unlikely(r)) {
@@ -637,7 +637,7 @@ static void complete_journal_encrypt(struct crypto_async_request *req, int err)
 static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
 {
 	int r;
-	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 		complete_journal_encrypt, comp);
 	if (likely(encrypt))
 		r = crypto_skcipher_encrypt(req);