Commit eff9771d authored by Iuliana Prodan, committed by Herbert Xu

crypto: caam - use mapped_{src,dst}_nents for descriptor

The mapped_{src,dst}_nents _returned_ from the dma_map_sg
call (which could be less than src/dst_nents) have to be
used to generate the job descriptors.
Signed-off-by: Iuliana Prodan <iuliana.prodan@nxp.com>
Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 554c42b4
...@@ -252,9 +252,9 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, ...@@ -252,9 +252,9 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC; GFP_KERNEL : GFP_ATOMIC;
int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0; int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
int sgc;
int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
int src_nents, dst_nents; int src_nents, dst_nents;
int mapped_src_nents, mapped_dst_nents;
unsigned int diff_size = 0; unsigned int diff_size = 0;
int lzeros; int lzeros;
...@@ -285,13 +285,27 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, ...@@ -285,13 +285,27 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
req_ctx->fixup_src_len); req_ctx->fixup_src_len);
dst_nents = sg_nents_for_len(req->dst, req->dst_len); dst_nents = sg_nents_for_len(req->dst, req->dst_len);
if (!diff_size && src_nents == 1) mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
DMA_TO_DEVICE);
if (unlikely(!mapped_src_nents)) {
dev_err(dev, "unable to map source\n");
return ERR_PTR(-ENOMEM);
}
mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
DMA_FROM_DEVICE);
if (unlikely(!mapped_dst_nents)) {
dev_err(dev, "unable to map destination\n");
goto src_fail;
}
if (!diff_size && mapped_src_nents == 1)
sec4_sg_len = 0; /* no need for an input hw s/g table */ sec4_sg_len = 0; /* no need for an input hw s/g table */
else else
sec4_sg_len = src_nents + !!diff_size; sec4_sg_len = mapped_src_nents + !!diff_size;
sec4_sg_index = sec4_sg_len; sec4_sg_index = sec4_sg_len;
if (dst_nents > 1)
sec4_sg_len += pad_sg_nents(dst_nents); if (mapped_dst_nents > 1)
sec4_sg_len += pad_sg_nents(mapped_dst_nents);
else else
sec4_sg_len = pad_sg_nents(sec4_sg_len); sec4_sg_len = pad_sg_nents(sec4_sg_len);
...@@ -301,19 +315,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, ...@@ -301,19 +315,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
GFP_DMA | flags); GFP_DMA | flags);
if (!edesc) if (!edesc)
return ERR_PTR(-ENOMEM);
sgc = dma_map_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
if (unlikely(!sgc)) {
dev_err(dev, "unable to map source\n");
goto src_fail;
}
sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
if (unlikely(!sgc)) {
dev_err(dev, "unable to map destination\n");
goto dst_fail; goto dst_fail;
}
edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen; edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
if (diff_size) if (diff_size)
...@@ -324,7 +326,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, ...@@ -324,7 +326,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len, sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
edesc->sec4_sg + !!diff_size, 0); edesc->sec4_sg + !!diff_size, 0);
if (dst_nents > 1) if (mapped_dst_nents > 1)
sg_to_sec4_sg_last(req->dst, req->dst_len, sg_to_sec4_sg_last(req->dst, req->dst_len,
edesc->sec4_sg + sec4_sg_index, 0); edesc->sec4_sg + sec4_sg_index, 0);
...@@ -335,6 +337,9 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, ...@@ -335,6 +337,9 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
if (!sec4_sg_bytes) if (!sec4_sg_bytes)
return edesc; return edesc;
edesc->mapped_src_nents = mapped_src_nents;
edesc->mapped_dst_nents = mapped_dst_nents;
edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg, edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE); sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(dev, edesc->sec4_sg_dma)) { if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
...@@ -351,11 +356,11 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, ...@@ -351,11 +356,11 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
return edesc; return edesc;
sec4_sg_fail: sec4_sg_fail:
dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE); kfree(edesc);
dst_fail: dst_fail:
dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE); dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
src_fail: src_fail:
kfree(edesc); dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
...@@ -383,15 +388,15 @@ static int set_rsa_pub_pdb(struct akcipher_request *req, ...@@ -383,15 +388,15 @@ static int set_rsa_pub_pdb(struct akcipher_request *req,
return -ENOMEM; return -ENOMEM;
} }
if (edesc->src_nents > 1) { if (edesc->mapped_src_nents > 1) {
pdb->sgf |= RSA_PDB_SGF_F; pdb->sgf |= RSA_PDB_SGF_F;
pdb->f_dma = edesc->sec4_sg_dma; pdb->f_dma = edesc->sec4_sg_dma;
sec4_sg_index += edesc->src_nents; sec4_sg_index += edesc->mapped_src_nents;
} else { } else {
pdb->f_dma = sg_dma_address(req_ctx->fixup_src); pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
} }
if (edesc->dst_nents > 1) { if (edesc->mapped_dst_nents > 1) {
pdb->sgf |= RSA_PDB_SGF_G; pdb->sgf |= RSA_PDB_SGF_G;
pdb->g_dma = edesc->sec4_sg_dma + pdb->g_dma = edesc->sec4_sg_dma +
sec4_sg_index * sizeof(struct sec4_sg_entry); sec4_sg_index * sizeof(struct sec4_sg_entry);
...@@ -428,17 +433,18 @@ static int set_rsa_priv_f1_pdb(struct akcipher_request *req, ...@@ -428,17 +433,18 @@ static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
return -ENOMEM; return -ENOMEM;
} }
if (edesc->src_nents > 1) { if (edesc->mapped_src_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_G; pdb->sgf |= RSA_PRIV_PDB_SGF_G;
pdb->g_dma = edesc->sec4_sg_dma; pdb->g_dma = edesc->sec4_sg_dma;
sec4_sg_index += edesc->src_nents; sec4_sg_index += edesc->mapped_src_nents;
} else { } else {
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
pdb->g_dma = sg_dma_address(req_ctx->fixup_src); pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
} }
if (edesc->dst_nents > 1) { if (edesc->mapped_dst_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_F; pdb->sgf |= RSA_PRIV_PDB_SGF_F;
pdb->f_dma = edesc->sec4_sg_dma + pdb->f_dma = edesc->sec4_sg_dma +
sec4_sg_index * sizeof(struct sec4_sg_entry); sec4_sg_index * sizeof(struct sec4_sg_entry);
...@@ -493,17 +499,17 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req, ...@@ -493,17 +499,17 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
goto unmap_tmp1; goto unmap_tmp1;
} }
if (edesc->src_nents > 1) { if (edesc->mapped_src_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_G; pdb->sgf |= RSA_PRIV_PDB_SGF_G;
pdb->g_dma = edesc->sec4_sg_dma; pdb->g_dma = edesc->sec4_sg_dma;
sec4_sg_index += edesc->src_nents; sec4_sg_index += edesc->mapped_src_nents;
} else { } else {
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
pdb->g_dma = sg_dma_address(req_ctx->fixup_src); pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
} }
if (edesc->dst_nents > 1) { if (edesc->mapped_dst_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_F; pdb->sgf |= RSA_PRIV_PDB_SGF_F;
pdb->f_dma = edesc->sec4_sg_dma + pdb->f_dma = edesc->sec4_sg_dma +
sec4_sg_index * sizeof(struct sec4_sg_entry); sec4_sg_index * sizeof(struct sec4_sg_entry);
...@@ -582,17 +588,17 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req, ...@@ -582,17 +588,17 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
goto unmap_tmp1; goto unmap_tmp1;
} }
if (edesc->src_nents > 1) { if (edesc->mapped_src_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_G; pdb->sgf |= RSA_PRIV_PDB_SGF_G;
pdb->g_dma = edesc->sec4_sg_dma; pdb->g_dma = edesc->sec4_sg_dma;
sec4_sg_index += edesc->src_nents; sec4_sg_index += edesc->mapped_src_nents;
} else { } else {
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
pdb->g_dma = sg_dma_address(req_ctx->fixup_src); pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
} }
if (edesc->dst_nents > 1) { if (edesc->mapped_dst_nents > 1) {
pdb->sgf |= RSA_PRIV_PDB_SGF_F; pdb->sgf |= RSA_PRIV_PDB_SGF_F;
pdb->f_dma = edesc->sec4_sg_dma + pdb->f_dma = edesc->sec4_sg_dma +
sec4_sg_index * sizeof(struct sec4_sg_entry); sec4_sg_index * sizeof(struct sec4_sg_entry);
......
...@@ -112,8 +112,10 @@ struct caam_rsa_req_ctx { ...@@ -112,8 +112,10 @@ struct caam_rsa_req_ctx {
/** /**
* rsa_edesc - s/w-extended rsa descriptor * rsa_edesc - s/w-extended rsa descriptor
* @src_nents : number of segments in input scatterlist * @src_nents : number of segments in input s/w scatterlist
* @dst_nents : number of segments in output scatterlist * @dst_nents : number of segments in output s/w scatterlist
* @mapped_src_nents: number of segments in input h/w link table
* @mapped_dst_nents: number of segments in output h/w link table
* @sec4_sg_bytes : length of h/w link table * @sec4_sg_bytes : length of h/w link table
* @sec4_sg_dma : dma address of h/w link table * @sec4_sg_dma : dma address of h/w link table
* @sec4_sg : pointer to h/w link table * @sec4_sg : pointer to h/w link table
...@@ -123,6 +125,8 @@ struct caam_rsa_req_ctx { ...@@ -123,6 +125,8 @@ struct caam_rsa_req_ctx {
struct rsa_edesc { struct rsa_edesc {
int src_nents; int src_nents;
int dst_nents; int dst_nents;
int mapped_src_nents;
int mapped_dst_nents;
int sec4_sg_bytes; int sec4_sg_bytes;
dma_addr_t sec4_sg_dma; dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg; struct sec4_sg_entry *sec4_sg;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment