Commit 49186a7d authored by Peter Harliman Liem, committed by Herbert Xu

crypto: inside_secure - Avoid dma map if size is zero

Since commit d03c5441 ("dma-mapping: disallow .map_sg
operations from returning zero on error"), dma_map_sg()
produces a warning when the scatterlist entry count is zero.
This results in visible warnings whenever the crypto request
length is zero. To avoid that, skip the dma_map_sg() call
when the size is zero.
Signed-off-by: Peter Harliman Liem <pliem@maxlinear.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 072d36ee
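For context, the warning this patch avoids can be reproduced with a small
standalone sketch. dma_map_sg_stub() below is a hypothetical stand-in for the
real DMA API, written so the example compiles as plain userspace C; it only
models the post-d03c5441 rule that a zero-entry mapping is an error.

#include <stdio.h>

/*
 * Hypothetical stand-in for dma_map_sg(): returns the number of mapped
 * entries, and models the post-d03c5441 behaviour where a zero-entry
 * map is an error that triggers a warning.
 */
static int dma_map_sg_stub(int nents)
{
	if (nents <= 0) {
		fprintf(stderr, "WARNING: map_sg called with nents == 0\n");
		return 0;
	}
	return nents;
}

int main(void)
{
	int nr_src = 0;	/* a zero-length crypto request */

	/* Unguarded call, as before the patch: warns on empty input. */
	dma_map_sg_stub(nr_src);

	/*
	 * Guarded call, the pattern this patch applies with
	 * sreq->nr_src / sreq->nr_dst: skip the map entirely.
	 */
	if (nr_src > 0)
		dma_map_sg_stub(nr_src);

	return 0;
}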
@@ -642,10 +642,16 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 	safexcel_complete(priv, ring);
 
 	if (src == dst) {
-		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
+		if (sreq->nr_src > 0)
+			dma_unmap_sg(priv->dev, src, sreq->nr_src,
+				     DMA_BIDIRECTIONAL);
 	} else {
-		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
-		dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
+		if (sreq->nr_src > 0)
+			dma_unmap_sg(priv->dev, src, sreq->nr_src,
+				     DMA_TO_DEVICE);
+		if (sreq->nr_dst > 0)
+			dma_unmap_sg(priv->dev, dst, sreq->nr_dst,
+				     DMA_FROM_DEVICE);
 	}
 
 	/*
@@ -737,23 +743,29 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 			       max(totlen_src, totlen_dst));
 			return -EINVAL;
 		}
-		dma_map_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
+
+		if (sreq->nr_src > 0)
+			dma_map_sg(priv->dev, src, sreq->nr_src,
+				   DMA_BIDIRECTIONAL);
 	} else {
 		if (unlikely(totlen_src && (sreq->nr_src <= 0))) {
 			dev_err(priv->dev, "Source buffer not large enough (need %d bytes)!",
 				totlen_src);
 			return -EINVAL;
 		}
-		dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
+		if (sreq->nr_src > 0)
+			dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
 
 		if (unlikely(totlen_dst && (sreq->nr_dst <= 0))) {
 			dev_err(priv->dev, "Dest buffer not large enough (need %d bytes)!",
 				totlen_dst);
-			dma_unmap_sg(priv->dev, src, sreq->nr_src,
-				     DMA_TO_DEVICE);
-			return -EINVAL;
+			ret = -EINVAL;
+			goto unmap;
 		}
-		dma_map_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
+
+		if (sreq->nr_dst > 0)
+			dma_map_sg(priv->dev, dst, sreq->nr_dst,
+				   DMA_FROM_DEVICE);
 	}
 
 	memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
@@ -883,12 +895,18 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 cdesc_rollback:
 	for (i = 0; i < n_cdesc; i++)
 		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
-
+unmap:
 	if (src == dst) {
-		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
+		if (sreq->nr_src > 0)
+			dma_unmap_sg(priv->dev, src, sreq->nr_src,
+				     DMA_BIDIRECTIONAL);
 	} else {
-		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
-		dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
+		if (sreq->nr_src > 0)
+			dma_unmap_sg(priv->dev, src, sreq->nr_src,
+				     DMA_TO_DEVICE);
+		if (sreq->nr_dst > 0)
+			dma_unmap_sg(priv->dev, dst, sreq->nr_dst,
+				     DMA_FROM_DEVICE);
 	}
 
 	return ret;
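Beyond the zero-size guards, the second hunk also reroutes the
destination-buffer error path: instead of unmapping the source inline and
returning, it sets ret and jumps to the new unmap label shared with
cdesc_rollback. A simplified standalone sketch of that control flow follows;
map_src() and unmap_src() are illustrative stand-ins for the driver's DMA
calls, not its real helpers.

#include <errno.h>
#include <stdio.h>

struct req {
	int nr_src;
	int nr_dst;
};

/* Illustrative stand-ins for the driver's dma_(un)map_sg() calls. */
static void map_src(struct req *r)   { printf("map src, nents=%d\n", r->nr_src); }
static void unmap_src(struct req *r) { printf("unmap src, nents=%d\n", r->nr_src); }

static int send_req(struct req *r, int dst_too_small)
{
	int ret = 0;

	if (r->nr_src > 0)	/* zero-size guard, as in the patch */
		map_src(r);

	if (dst_too_small) {
		/*
		 * The old code unmapped inline and returned; the new code
		 * funnels every failure through one shared cleanup label.
		 */
		ret = -EINVAL;
		goto unmap;
	}

	return 0;

unmap:
	if (r->nr_src > 0)	/* cleanup guard mirrors the map guard */
		unmap_src(r);
	return ret;
}

int main(void)
{
	struct req r = { .nr_src = 2, .nr_dst = 0 };

	printf("send_req -> %d\n", send_req(&r, 1));
	return 0;
}

Centralizing cleanup this way keeps the map and unmap guards in one place, so
a later change to the mapping conditions cannot leave a stale inline unmap on
some error path.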