Commit 8991f1af authored by Mikulas Patocka, committed by Greg Kroah-Hartman

dm: disable CRYPTO_TFM_REQ_MAY_SLEEP to fix a GFP_KERNEL recursion deadlock

[ Upstream commit 432061b3 ]

There's an XFS on dm-crypt deadlock, recursing back to itself due to the
crypto subsystem's use of GFP_KERNEL, reported here:
https://bugzilla.kernel.org/show_bug.cgi?id=200835

* dm-crypt calls crypt_convert in xts mode
* init_crypt from xts.c calls kmalloc(GFP_KERNEL)
* kmalloc(GFP_KERNEL) recurses into the XFS filesystem; the filesystem
	tries to submit some bios and wait for them, causing a deadlock
	(sketched below)
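
For context, the allocation in question looks roughly like the
following. This is a condensed sketch of the pre-patch behavior of
init_crypt() in crypto/xts.c, not the verbatim upstream code; the gfp
mask is derived from the request flags that dm-crypt sets:

	/* With dm-crypt passing CRYPTO_TFM_REQ_MAY_SLEEP, the flag
	 * check below selects GFP_KERNEL.
	 */
	gfp_t gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		    GFP_KERNEL : GFP_ATOMIC;

	/* GFP_KERNEL may enter direct reclaim; reclaim can trigger XFS
	 * writeback, which submits bios to this same dm-crypt device and
	 * waits for them - but the thread that must service those bios
	 * is the one blocked in the allocator, so the I/O stack
	 * deadlocks.
	 */
	rctx->ext = kmalloc(n, gfp);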

Fix this by updating both the DM crypt and integrity targets to no
longer use the CRYPTO_TFM_REQ_MAY_SLEEP flag, which changes the
crypto allocations from GFP_KERNEL to GFP_ATOMIC, so they cannot
recurse into a filesystem.  A GFP_ATOMIC allocation can fail, but
init_crypt() in xts.c handles the allocation failure gracefully - it
falls back to a preallocated buffer if the allocation fails.
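
Concretely, the fallback path looks roughly like this (a hedged sketch
following the xts.c code of this era; rctx->ext and XTS_BUFFER_SIZE
match that file, but the snippet is condensed):

	/* The heap buffer is only an optimization: if the now-GFP_ATOMIC
	 * kmalloc() fails, rctx->ext stays NULL and the request is
	 * processed in XTS_BUFFER_SIZE chunks via the buffer
	 * preallocated in the request context.
	 */
	subreq->cryptlen = XTS_BUFFER_SIZE;
	if (req->cryptlen > XTS_BUFFER_SIZE) {
		unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);

		rctx->ext = kmalloc(n, gfp);	/* best effort; may fail */
		if (rctx->ext)
			subreq->cryptlen = n;
	}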

The crypto API maintainer says that the crypto API only needs to
allocate memory when dealing with unaligned buffers, and that turning
CRYPTO_TFM_REQ_MAY_SLEEP off is therefore safe (see this discussion:
https://www.redhat.com/archives/dm-devel/2018-August/msg00195.html )
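
The pattern the maintainer refers to, sketched with a hypothetical
is_aligned() helper standing in for the real alignmask checks in the
generic walk code:

	/* The generic crypto walk code only bounces data through a
	 * temporary buffer when the caller's buffer violates the
	 * algorithm's alignmask; dm-crypt submits properly aligned
	 * buffers, so this allocation is not needed in practice and
	 * dropping CRYPTO_TFM_REQ_MAY_SLEEP is safe.
	 */
	if (!is_aligned(addr, alignmask))	/* hypothetical helper */
		bounce = kmalloc(size, gfp);	/* gfp is now GFP_ATOMIC */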

Cc: stable@vger.kernel.org
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Sasha Levin (Microsoft) <sashal@kernel.org>
parent 02c2de9b
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -334,7 +334,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
 
 	sg_init_one(&sg, cc->key, cc->key_size);
 	ahash_request_set_tfm(req, essiv->hash_tfm);
-	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+	ahash_request_set_callback(req, 0, NULL, NULL);
 	ahash_request_set_crypt(req, &sg, essiv->salt, cc->key_size);
 
 	err = crypto_ahash_digest(req);
@@ -609,7 +609,7 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
 	int i, r;
 
 	desc->tfm = lmk->hash_tfm;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+	desc->flags = 0;
 
 	r = crypto_shash_init(desc);
 	if (r)
@@ -771,7 +771,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
 
 	/* calculate crc32 for every 32bit part and xor it */
 	desc->tfm = tcw->crc32_tfm;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+	desc->flags = 0;
 	for (i = 0; i < 4; i++) {
 		r = crypto_shash_init(desc);
 		if (r)
@@ -1254,7 +1254,7 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
 	 * requests if driver request queue is full.
 	 */
 	skcipher_request_set_callback(ctx->r.req,
-	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+	    CRYPTO_TFM_REQ_MAY_BACKLOG,
 	    kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
 }
 
@@ -1271,7 +1271,7 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
 	 * requests if driver request queue is full.
 	 */
 	aead_request_set_callback(ctx->r.req_aead,
-	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+	    CRYPTO_TFM_REQ_MAY_BACKLOG,
 	    kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
 }
 
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -493,7 +493,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
 	unsigned j, size;
 
 	desc->tfm = ic->journal_mac;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+	desc->flags = 0;
 
 	r = crypto_shash_init(desc);
 	if (unlikely(r)) {
@@ -637,7 +637,7 @@ static void complete_journal_encrypt(struct crypto_async_request *req, int err)
 static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
 {
 	int r;
-	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 				      complete_journal_encrypt, comp);
 	if (likely(encrypt))
 		r = crypto_skcipher_encrypt(req);