Commit 501ec62e authored by Jan Glauber, committed by Ben Hutchings

s390/crypto: Don't panic after crypto instruction failures

commit 36eb2caa upstream.

Remove the BUG_ONs that check for failure or incomplete
results of the s390 hardware crypto instructions.
Instead, report the errors as -EIO to the crypto layer.
Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
[bwh: Backported to 3.2: adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
parent 6b1246d9
...@@ -324,7 +324,8 @@ static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param, ...@@ -324,7 +324,8 @@ static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
u8 *in = walk->src.virt.addr; u8 *in = walk->src.virt.addr;
ret = crypt_s390_km(func, param, out, in, n); ret = crypt_s390_km(func, param, out, in, n);
BUG_ON((ret < 0) || (ret != n)); if (ret < 0 || ret != n)
return -EIO;
nbytes &= AES_BLOCK_SIZE - 1; nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes); ret = blkcipher_walk_done(desc, walk, nbytes);
...@@ -463,7 +464,8 @@ static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, ...@@ -463,7 +464,8 @@ static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
u8 *in = walk->src.virt.addr; u8 *in = walk->src.virt.addr;
ret = crypt_s390_kmc(func, &param, out, in, n); ret = crypt_s390_kmc(func, &param, out, in, n);
BUG_ON((ret < 0) || (ret != n)); if (ret < 0 || ret != n)
return -EIO;
nbytes &= AES_BLOCK_SIZE - 1; nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes); ret = blkcipher_walk_done(desc, walk, nbytes);
...@@ -636,7 +638,8 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func, ...@@ -636,7 +638,8 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak)); memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
memcpy(pcc_param.key, xts_ctx->pcc_key, 32); memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
ret = crypt_s390_pcc(func, &pcc_param.key[offset]); ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
BUG_ON(ret < 0); if (ret < 0)
return -EIO;
memcpy(xts_param.key, xts_ctx->key, 32); memcpy(xts_param.key, xts_ctx->key, 32);
memcpy(xts_param.init, pcc_param.xts, 16); memcpy(xts_param.init, pcc_param.xts, 16);
...@@ -647,7 +650,8 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func, ...@@ -647,7 +650,8 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
in = walk->src.virt.addr; in = walk->src.virt.addr;
ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n); ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
BUG_ON(ret < 0 || ret != n); if (ret < 0 || ret != n)
return -EIO;
nbytes &= AES_BLOCK_SIZE - 1; nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes); ret = blkcipher_walk_done(desc, walk, nbytes);
...@@ -781,7 +785,8 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func, ...@@ -781,7 +785,8 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
crypto_inc(ctrblk + i, AES_BLOCK_SIZE); crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
} }
ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk); ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
BUG_ON(ret < 0 || ret != n); if (ret < 0 || ret != n)
return -EIO;
if (n > AES_BLOCK_SIZE) if (n > AES_BLOCK_SIZE)
memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE, memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
AES_BLOCK_SIZE); AES_BLOCK_SIZE);
...@@ -800,7 +805,8 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func, ...@@ -800,7 +805,8 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
in = walk->src.virt.addr; in = walk->src.virt.addr;
ret = crypt_s390_kmctr(func, sctx->key, buf, in, ret = crypt_s390_kmctr(func, sctx->key, buf, in,
AES_BLOCK_SIZE, ctrblk); AES_BLOCK_SIZE, ctrblk);
BUG_ON(ret < 0 || ret != AES_BLOCK_SIZE); if (ret < 0 || ret != AES_BLOCK_SIZE)
return -EIO;
memcpy(out, buf, nbytes); memcpy(out, buf, nbytes);
crypto_inc(ctrblk, AES_BLOCK_SIZE); crypto_inc(ctrblk, AES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0); ret = blkcipher_walk_done(desc, walk, 0);
......
...@@ -95,7 +95,8 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func, ...@@ -95,7 +95,8 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
u8 *in = walk->src.virt.addr; u8 *in = walk->src.virt.addr;
ret = crypt_s390_km(func, key, out, in, n); ret = crypt_s390_km(func, key, out, in, n);
BUG_ON((ret < 0) || (ret != n)); if (ret < 0 || ret != n)
return -EIO;
nbytes &= DES_BLOCK_SIZE - 1; nbytes &= DES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes); ret = blkcipher_walk_done(desc, walk, nbytes);
...@@ -121,7 +122,8 @@ static int cbc_desall_crypt(struct blkcipher_desc *desc, long func, ...@@ -121,7 +122,8 @@ static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
u8 *in = walk->src.virt.addr; u8 *in = walk->src.virt.addr;
ret = crypt_s390_kmc(func, iv, out, in, n); ret = crypt_s390_kmc(func, iv, out, in, n);
BUG_ON((ret < 0) || (ret != n)); if (ret < 0 || ret != n)
return -EIO;
nbytes &= DES_BLOCK_SIZE - 1; nbytes &= DES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes); ret = blkcipher_walk_done(desc, walk, nbytes);
...@@ -394,7 +396,8 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func, ...@@ -394,7 +396,8 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
crypto_inc(ctrblk + i, DES_BLOCK_SIZE); crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
} }
ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk); ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
BUG_ON((ret < 0) || (ret != n)); if (ret < 0 || ret != n)
return -EIO;
if (n > DES_BLOCK_SIZE) if (n > DES_BLOCK_SIZE)
memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE, memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
DES_BLOCK_SIZE); DES_BLOCK_SIZE);
...@@ -412,7 +415,8 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func, ...@@ -412,7 +415,8 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
in = walk->src.virt.addr; in = walk->src.virt.addr;
ret = crypt_s390_kmctr(func, ctx->key, buf, in, ret = crypt_s390_kmctr(func, ctx->key, buf, in,
DES_BLOCK_SIZE, ctrblk); DES_BLOCK_SIZE, ctrblk);
BUG_ON(ret < 0 || ret != DES_BLOCK_SIZE); if (ret < 0 || ret != DES_BLOCK_SIZE)
return -EIO;
memcpy(out, buf, nbytes); memcpy(out, buf, nbytes);
crypto_inc(ctrblk, DES_BLOCK_SIZE); crypto_inc(ctrblk, DES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0); ret = blkcipher_walk_done(desc, walk, 0);
......
...@@ -72,14 +72,16 @@ static int ghash_update(struct shash_desc *desc, ...@@ -72,14 +72,16 @@ static int ghash_update(struct shash_desc *desc,
if (!dctx->bytes) { if (!dctx->bytes) {
ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
GHASH_BLOCK_SIZE); GHASH_BLOCK_SIZE);
BUG_ON(ret != GHASH_BLOCK_SIZE); if (ret != GHASH_BLOCK_SIZE)
return -EIO;
} }
} }
n = srclen & ~(GHASH_BLOCK_SIZE - 1); n = srclen & ~(GHASH_BLOCK_SIZE - 1);
if (n) { if (n) {
ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n); ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
BUG_ON(ret != n); if (ret != n)
return -EIO;
src += n; src += n;
srclen -= n; srclen -= n;
} }
...@@ -92,7 +94,7 @@ static int ghash_update(struct shash_desc *desc, ...@@ -92,7 +94,7 @@ static int ghash_update(struct shash_desc *desc,
return 0; return 0;
} }
static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx) static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
{ {
u8 *buf = dctx->buffer; u8 *buf = dctx->buffer;
int ret; int ret;
...@@ -103,21 +105,24 @@ static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx) ...@@ -103,21 +105,24 @@ static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
memset(pos, 0, dctx->bytes); memset(pos, 0, dctx->bytes);
ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE); ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
BUG_ON(ret != GHASH_BLOCK_SIZE); if (ret != GHASH_BLOCK_SIZE)
return -EIO;
} }
dctx->bytes = 0; dctx->bytes = 0;
return 0;
} }
static int ghash_final(struct shash_desc *desc, u8 *dst) static int ghash_final(struct shash_desc *desc, u8 *dst)
{ {
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
int ret;
ghash_flush(ctx, dctx); ret = ghash_flush(ctx, dctx);
memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE); if (!ret)
memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
return 0; return ret;
} }
static struct shash_alg ghash_alg = { static struct shash_alg ghash_alg = {
......
...@@ -36,7 +36,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len) ...@@ -36,7 +36,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
if (index) { if (index) {
memcpy(ctx->buf + index, data, bsize - index); memcpy(ctx->buf + index, data, bsize - index);
ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, bsize); ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, bsize);
BUG_ON(ret != bsize); if (ret != bsize)
return -EIO;
data += bsize - index; data += bsize - index;
len -= bsize - index; len -= bsize - index;
index = 0; index = 0;
...@@ -46,7 +47,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len) ...@@ -46,7 +47,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
if (len >= bsize) { if (len >= bsize) {
ret = crypt_s390_kimd(ctx->func, ctx->state, data, ret = crypt_s390_kimd(ctx->func, ctx->state, data,
len & ~(bsize - 1)); len & ~(bsize - 1));
BUG_ON(ret != (len & ~(bsize - 1))); if (ret != (len & ~(bsize - 1)))
return -EIO;
data += ret; data += ret;
len -= ret; len -= ret;
} }
...@@ -88,7 +90,8 @@ int s390_sha_final(struct shash_desc *desc, u8 *out) ...@@ -88,7 +90,8 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
memcpy(ctx->buf + end - 8, &bits, sizeof(bits)); memcpy(ctx->buf + end - 8, &bits, sizeof(bits));
ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, end); ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, end);
BUG_ON(ret != end); if (ret != end)
return -EIO;
/* copy digest to out */ /* copy digest to out */
memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm)); memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment