Commit b548493c authored by David S. Miller

Merge branch 'chcr-Fixing-issues-in-dma-mapping-and-driver-removal'

Ayush Sawal says:

====================
Fixing issues in dma mapping and driver removal

Patch 1: This fixes a kernel panic that occurs when a zero-length
scatterlist (sg) entry is accessed during DMA mapping.

Patch 2: Avoid unregistering an algorithm if its cra_refcnt is not 1,
i.e. while the algorithm is still in use.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 934e36ec 8b9914cd
...@@ -2590,11 +2590,22 @@ int chcr_aead_dma_map(struct device *dev, ...@@ -2590,11 +2590,22 @@ int chcr_aead_dma_map(struct device *dev,
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned int authsize = crypto_aead_authsize(tfm); unsigned int authsize = crypto_aead_authsize(tfm);
int dst_size; int src_len, dst_len;
dst_size = req->assoclen + req->cryptlen + (op_type ? /* calculate and handle src and dst sg length separately
* for inplace and out-of place operations
*/
if (req->src == req->dst) {
src_len = req->assoclen + req->cryptlen + (op_type ?
0 : authsize); 0 : authsize);
if (!req->cryptlen || !dst_size) dst_len = src_len;
} else {
src_len = req->assoclen + req->cryptlen;
dst_len = req->assoclen + req->cryptlen + (op_type ?
-authsize : authsize);
}
if (!req->cryptlen || !src_len || !dst_len)
return 0; return 0;
reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len), reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
...@@ -2606,19 +2617,22 @@ int chcr_aead_dma_map(struct device *dev, ...@@ -2606,19 +2617,22 @@ int chcr_aead_dma_map(struct device *dev,
reqctx->b0_dma = 0; reqctx->b0_dma = 0;
if (req->src == req->dst) { if (req->src == req->dst) {
error = dma_map_sg(dev, req->src, error = dma_map_sg(dev, req->src,
sg_nents_for_len(req->src, dst_size), sg_nents_for_len(req->src, src_len),
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
if (!error) if (!error)
goto err; goto err;
} else { } else {
error = dma_map_sg(dev, req->src, sg_nents(req->src), error = dma_map_sg(dev, req->src,
sg_nents_for_len(req->src, src_len),
DMA_TO_DEVICE); DMA_TO_DEVICE);
if (!error) if (!error)
goto err; goto err;
error = dma_map_sg(dev, req->dst, sg_nents(req->dst), error = dma_map_sg(dev, req->dst,
sg_nents_for_len(req->dst, dst_len),
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
if (!error) { if (!error) {
dma_unmap_sg(dev, req->src, sg_nents(req->src), dma_unmap_sg(dev, req->src,
sg_nents_for_len(req->src, src_len),
DMA_TO_DEVICE); DMA_TO_DEVICE);
goto err; goto err;
} }
...@@ -2637,23 +2651,36 @@ void chcr_aead_dma_unmap(struct device *dev, ...@@ -2637,23 +2651,36 @@ void chcr_aead_dma_unmap(struct device *dev,
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned int authsize = crypto_aead_authsize(tfm); unsigned int authsize = crypto_aead_authsize(tfm);
int dst_size; int src_len, dst_len;
dst_size = req->assoclen + req->cryptlen + (op_type ? /* calculate and handle src and dst sg length separately
* for inplace and out-of place operations
*/
if (req->src == req->dst) {
src_len = req->assoclen + req->cryptlen + (op_type ?
0 : authsize); 0 : authsize);
if (!req->cryptlen || !dst_size) dst_len = src_len;
} else {
src_len = req->assoclen + req->cryptlen;
dst_len = req->assoclen + req->cryptlen + (op_type ?
-authsize : authsize);
}
if (!req->cryptlen || !src_len || !dst_len)
return; return;
dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len), dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
if (req->src == req->dst) { if (req->src == req->dst) {
dma_unmap_sg(dev, req->src, dma_unmap_sg(dev, req->src,
sg_nents_for_len(req->src, dst_size), sg_nents_for_len(req->src, src_len),
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
} else { } else {
dma_unmap_sg(dev, req->src, sg_nents(req->src), dma_unmap_sg(dev, req->src,
sg_nents_for_len(req->src, src_len),
DMA_TO_DEVICE); DMA_TO_DEVICE);
dma_unmap_sg(dev, req->dst, sg_nents(req->dst), dma_unmap_sg(dev, req->dst,
sg_nents_for_len(req->dst, dst_len),
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
} }
} }
...@@ -4364,22 +4391,32 @@ static int chcr_unregister_alg(void) ...@@ -4364,22 +4391,32 @@ static int chcr_unregister_alg(void)
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) { switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_SKCIPHER: case CRYPTO_ALG_TYPE_SKCIPHER:
if (driver_algs[i].is_registered) if (driver_algs[i].is_registered && refcount_read(
&driver_algs[i].alg.skcipher.base.cra_refcnt)
== 1) {
crypto_unregister_skcipher( crypto_unregister_skcipher(
&driver_algs[i].alg.skcipher); &driver_algs[i].alg.skcipher);
driver_algs[i].is_registered = 0;
}
break; break;
case CRYPTO_ALG_TYPE_AEAD: case CRYPTO_ALG_TYPE_AEAD:
if (driver_algs[i].is_registered) if (driver_algs[i].is_registered && refcount_read(
&driver_algs[i].alg.aead.base.cra_refcnt) == 1) {
crypto_unregister_aead( crypto_unregister_aead(
&driver_algs[i].alg.aead); &driver_algs[i].alg.aead);
driver_algs[i].is_registered = 0;
}
break; break;
case CRYPTO_ALG_TYPE_AHASH: case CRYPTO_ALG_TYPE_AHASH:
if (driver_algs[i].is_registered) if (driver_algs[i].is_registered && refcount_read(
&driver_algs[i].alg.hash.halg.base.cra_refcnt)
== 1) {
crypto_unregister_ahash( crypto_unregister_ahash(
&driver_algs[i].alg.hash); &driver_algs[i].alg.hash);
driver_algs[i].is_registered = 0;
}
break; break;
} }
driver_algs[i].is_registered = 0;
} }
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment