From: Eric Biggers <ebiggers@kernel.org>
[ Upstream commit 3d9eb180fbe8828cce43bce4c370124685b205c3 ]
The skcipher_walk functions can allocate memory and can fail, so checking for errors is necessary.
Fixes: 1d373d4e8e15 ("crypto: x86 - Add optimized AEGIS implementations")
Cc: stable@vger.kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/x86/crypto/aegis128-aesni-glue.c | 36 +++++++++++++++++++--------
 1 file changed, 25 insertions(+), 11 deletions(-)
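[ Note for context, not part of the upstream change: the fragment below is a
  minimal, hypothetical sketch of the error-propagation pattern this patch
  introduces. example_aead_encrypt() and process_chunk() are made-up names
  used only for illustration; skcipher_walk_aead_encrypt() and
  skcipher_walk_done() are the real kernel APIs whose return values the
  driver previously ignored, since both can allocate memory and fail. ]

/*
 * Minimal sketch: check and propagate errors from the skcipher_walk
 * helpers instead of ignoring them.
 */
#include <linux/string.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>

/* Hypothetical stand-in for the real per-chunk cipher routine. */
static inline void process_chunk(const u8 *src, u8 *dst, unsigned int nbytes)
{
	memcpy(dst, src, nbytes);	/* placeholder for real cipher work */
}

static int example_aead_encrypt(struct aead_request *req)
{
	struct skcipher_walk walk;
	int err;

	/* Walk setup may need to allocate; can fail with e.g. -ENOMEM. */
	err = skcipher_walk_aead_encrypt(&walk, req, false);
	if (err)
		return err;

	while (walk.nbytes) {
		process_chunk(walk.src.virt.addr, walk.dst.virt.addr,
			      walk.nbytes);
		/* Advancing the walk is also fallible; propagate the error. */
		err = skcipher_walk_done(&walk, 0);
		if (err)
			return err;
	}
	return 0;
}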
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index de0aab6997d4..f5bbf274a54e 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -108,10 +108,12 @@ static void crypto_aegis128_aesni_process_ad(
 	}
 }
 
-static __always_inline void
+static __always_inline int
 crypto_aegis128_aesni_process_crypt(struct aegis_state *state,
 				    struct skcipher_walk *walk, bool enc)
 {
+	int err = 0;
+
 	while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
 		if (enc)
 			aegis128_aesni_enc(state, walk->src.virt.addr,
@@ -124,7 +126,8 @@ crypto_aegis128_aesni_process_crypt(struct aegis_state *state,
 					   round_down(walk->nbytes,
 						      AEGIS128_BLOCK_SIZE));
 		kernel_fpu_end();
-		skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
+		err = skcipher_walk_done(walk,
+					 walk->nbytes % AEGIS128_BLOCK_SIZE);
 		kernel_fpu_begin();
 	}
 
@@ -138,9 +141,10 @@ crypto_aegis128_aesni_process_crypt(struct aegis_state *state,
 						walk->dst.virt.addr,
 						walk->nbytes);
 		kernel_fpu_end();
-		skcipher_walk_done(walk, 0);
+		err = skcipher_walk_done(walk, 0);
 		kernel_fpu_begin();
 	}
+	return err;
 }
 
 static struct aegis_ctx *crypto_aegis128_aesni_ctx(struct crypto_aead *aead)
@@ -173,7 +177,7 @@ static int crypto_aegis128_aesni_setauthsize(struct crypto_aead *tfm,
 	return 0;
 }
 
-static __always_inline void
+static __always_inline int
 crypto_aegis128_aesni_crypt(struct aead_request *req,
 			    struct aegis_block *tag_xor,
 			    unsigned int cryptlen, bool enc)
@@ -182,20 +186,24 @@ crypto_aegis128_aesni_crypt(struct aead_request *req,
 	struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm);
 	struct skcipher_walk walk;
 	struct aegis_state state;
+	int err;
 
 	if (enc)
-		skcipher_walk_aead_encrypt(&walk, req, false);
+		err = skcipher_walk_aead_encrypt(&walk, req, false);
 	else
-		skcipher_walk_aead_decrypt(&walk, req, false);
+		err = skcipher_walk_aead_decrypt(&walk, req, false);
+	if (err)
+		return err;
 
 	kernel_fpu_begin();
 
 	aegis128_aesni_init(&state, &ctx->key, req->iv);
 	crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen);
-	crypto_aegis128_aesni_process_crypt(&state, &walk, enc);
-	aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
-
+	err = crypto_aegis128_aesni_process_crypt(&state, &walk, enc);
+	if (err == 0)
+		aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
 	kernel_fpu_end();
+	return err;
 }
 
 static int crypto_aegis128_aesni_encrypt(struct aead_request *req)
@@ -204,8 +212,11 @@ static int crypto_aegis128_aesni_encrypt(struct aead_request *req)
 	struct aegis_block tag = {};
 	unsigned int authsize = crypto_aead_authsize(tfm);
 	unsigned int cryptlen = req->cryptlen;
+	int err;
 
-	crypto_aegis128_aesni_crypt(req, &tag, cryptlen, true);
+	err = crypto_aegis128_aesni_crypt(req, &tag, cryptlen, true);
+	if (err)
+		return err;
 
 	scatterwalk_map_and_copy(tag.bytes, req->dst,
 				 req->assoclen + cryptlen, authsize, 1);
@@ -220,11 +231,14 @@ static int crypto_aegis128_aesni_decrypt(struct aead_request *req)
 	struct aegis_block tag;
 	unsigned int authsize = crypto_aead_authsize(tfm);
 	unsigned int cryptlen = req->cryptlen - authsize;
+	int err;
 
 	scatterwalk_map_and_copy(tag.bytes, req->src,
 				 req->assoclen + cryptlen, authsize, 0);
 
-	crypto_aegis128_aesni_crypt(req, &tag, cryptlen, false);
+	err = crypto_aegis128_aesni_crypt(req, &tag, cryptlen, false);
+	if (err)
+		return err;
 
 	return crypto_memneq(tag.bytes, zeros.bytes, authsize) ? -EBADMSG : 0;
 }