Commit a38acd23 authored by Horia Geantă, committed by Herbert Xu

crypto: caam - fix DMA mapping dir for generated IV

In case of GIVCIPHER, the IV is generated by the device, i.e. the hardware writes it back to memory instead of reading it. Fix the DMA mapping direction: map the generated IV with DMA_FROM_DEVICE rather than DMA_TO_DEVICE, and record the direction in the extended descriptor so that unmapping uses the same direction the buffer was mapped with.

Cc: <stable@vger.kernel.org> # 3.19+
Fixes: 7222d1a3 ("crypto: caam - add support for givencrypt cbc(aes) and rfc3686(ctr(aes))")
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent eea0d3ea
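
For context, a minimal sketch of the DMA API rule the fix applies (illustration only, not part of the patch; the helper name map_generated_iv is hypothetical): a buffer that the device writes, such as a generated IV, must be mapped with DMA_FROM_DEVICE, and dma_unmap_single() must be called with the same direction that was used for mapping. This is why the patch records the direction in the new iv_dir field instead of hardcoding DMA_TO_DEVICE in caam_unmap().

#include <linux/dma-mapping.h>

/* Hypothetical helper, for illustration only. */
static int map_generated_iv(struct device *dev, void *iv, size_t ivsize)
{
	dma_addr_t iv_dma;

	/* The device writes the IV, so map the buffer DMA_FROM_DEVICE. */
	iv_dma = dma_map_single(dev, iv, ivsize, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, iv_dma))
		return -ENOMEM;

	/* ... submit the job; the hardware fills the IV buffer ... */

	/* Unmap with the same direction the buffer was mapped with. */
	dma_unmap_single(dev, iv_dma, ivsize, DMA_FROM_DEVICE);
	return 0;
}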
@@ -769,6 +769,7 @@ struct aead_edesc {
  * @src_nents: number of segments in input s/w scatterlist
  * @dst_nents: number of segments in output s/w scatterlist
  * @iv_dma: dma address of iv for checking continuity and link table
+ * @iv_dir: DMA mapping direction for IV
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
  * @sec4_sg_dma: bus physical mapped address of h/w link table
  * @sec4_sg: pointer to h/w link table
@@ -778,6 +779,7 @@ struct ablkcipher_edesc {
 	int src_nents;
 	int dst_nents;
 	dma_addr_t iv_dma;
+	enum dma_data_direction iv_dir;
 	int sec4_sg_bytes;
 	dma_addr_t sec4_sg_dma;
 	struct sec4_sg_entry *sec4_sg;
@@ -787,7 +789,8 @@ struct ablkcipher_edesc {
 static void caam_unmap(struct device *dev, struct scatterlist *src,
 		       struct scatterlist *dst, int src_nents,
 		       int dst_nents,
-		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
+		       dma_addr_t iv_dma, int ivsize,
+		       enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
 		       int sec4_sg_bytes)
 {
 	if (dst != src) {
@@ -799,7 +802,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
 	}
 	if (iv_dma)
-		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
 	if (sec4_sg_bytes)
 		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
 				 DMA_TO_DEVICE);
@@ -810,7 +813,7 @@ static void aead_unmap(struct device *dev,
 		       struct aead_edesc *edesc,
 		       struct aead_request *req)
 {
 	caam_unmap(dev, req->src, req->dst,
-		   edesc->src_nents, edesc->dst_nents, 0, 0,
+		   edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
@@ -823,7 +826,7 @@ static void ablkcipher_unmap(struct device *dev,
 	caam_unmap(dev, req->src, req->dst,
 		   edesc->src_nents, edesc->dst_nents,
-		   edesc->iv_dma, ivsize,
+		   edesc->iv_dma, ivsize, edesc->iv_dir,
 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
@@ -1287,7 +1290,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 			GFP_DMA | flags);
 	if (!edesc) {
 		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1550,7 +1553,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	if (dma_mapping_error(jrdev, iv_dma)) {
 		dev_err(jrdev, "unable to map IV\n");
 		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1572,7 +1575,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
 		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
+			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1581,6 +1584,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
 			 desc_bytes;
+	edesc->iv_dir = DMA_TO_DEVICE;
 
 	if (!in_contig) {
 		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
@@ -1598,7 +1602,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
 		dev_err(jrdev, "unable to map S/G table\n");
 		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
+			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
 		kfree(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1756,11 +1760,11 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 	 * Check if iv can be contiguous with source and destination.
 	 * If so, include it. If not, create scatterlist.
 	 */
-	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_FROM_DEVICE);
 	if (dma_mapping_error(jrdev, iv_dma)) {
 		dev_err(jrdev, "unable to map IV\n");
 		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1781,7 +1785,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
 		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
+			   iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1790,6 +1794,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
 			 desc_bytes;
+	edesc->iv_dir = DMA_FROM_DEVICE;
 
 	if (mapped_src_nents > 1)
 		sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
@@ -1807,7 +1812,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
 		dev_err(jrdev, "unable to map S/G table\n");
 		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
+			   iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
 		kfree(edesc);
 		return ERR_PTR(-ENOMEM);
 	}