Commit c3f7394e authored by Iuliana Prodan, committed by Herbert Xu

crypto: caam - refactor ahash_done callbacks

Create two common ahash_done_* functions that take the DMA
direction as a parameter. The existing callbacks then delegate
to them, passing the proper direction for the unmap.
Signed-off-by: Iuliana Prodan <iuliana.prodan@nxp.com>
Reviewed-by: Horia Geanta <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent b7f17fe2
...@@ -565,8 +565,8 @@ static inline void ahash_unmap_ctx(struct device *dev, ...@@ -565,8 +565,8 @@ static inline void ahash_unmap_ctx(struct device *dev,
ahash_unmap(dev, edesc, req, dst_len); ahash_unmap(dev, edesc, req, dst_len);
} }
static void ahash_done(struct device *jrdev, u32 *desc, u32 err, static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
void *context) void *context, enum dma_data_direction dir)
{ {
struct ahash_request *req = context; struct ahash_request *req = context;
struct ahash_edesc *edesc; struct ahash_edesc *edesc;
...@@ -582,7 +582,7 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err, ...@@ -582,7 +582,7 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
if (err) if (err)
ecode = caam_jr_strstatus(jrdev, err); ecode = caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
memcpy(req->result, state->caam_ctx, digestsize); memcpy(req->result, state->caam_ctx, digestsize);
kfree(edesc); kfree(edesc);
...@@ -593,76 +593,20 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err, ...@@ -593,76 +593,20 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
req->base.complete(&req->base, ecode); req->base.complete(&req->base, ecode);
} }
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
void *context) void *context)
{ {
struct ahash_request *req = context; ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
int digestsize = crypto_ahash_digestsize(ahash);
int ecode = 0;
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
ecode = caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
kfree(edesc);
scatterwalk_map_and_copy(state->buf, req->src,
req->nbytes - state->next_buflen,
state->next_buflen, 0);
state->buflen = state->next_buflen;
print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
state->buflen, 1);
print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
if (req->result)
print_hex_dump_debug("result@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
digestsize, 1);
req->base.complete(&req->base, ecode);
} }
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
void *context) void *context)
{ {
struct ahash_request *req = context; ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
int digestsize = crypto_ahash_digestsize(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int ecode = 0;
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
ecode = caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
memcpy(req->result, state->caam_ctx, digestsize);
kfree(edesc);
print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
req->base.complete(&req->base, ecode);
} }
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
void *context) void *context, enum dma_data_direction dir)
{ {
struct ahash_request *req = context; struct ahash_request *req = context;
struct ahash_edesc *edesc; struct ahash_edesc *edesc;
...@@ -678,7 +622,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, ...@@ -678,7 +622,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
if (err) if (err)
ecode = caam_jr_strstatus(jrdev, err); ecode = caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
kfree(edesc); kfree(edesc);
scatterwalk_map_and_copy(state->buf, req->src, scatterwalk_map_and_copy(state->buf, req->src,
...@@ -701,6 +645,18 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, ...@@ -701,6 +645,18 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
req->base.complete(&req->base, ecode); req->base.complete(&req->base, ecode);
} }
/*
 * Job ring completion callback: delegates to the common ahash_done_switch()
 * handler, unmapping the CAAM context with DMA_BIDIRECTIONAL (the context
 * is both read and written by the device for this request type).
 */
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}
/*
 * Job ring completion callback: delegates to the common ahash_done_switch()
 * handler, unmapping the CAAM context with DMA_FROM_DEVICE (the context is
 * only written by the device for this request type).
 */
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
}
/* /*
* Allocate an enhanced descriptor, which contains the hardware descriptor * Allocate an enhanced descriptor, which contains the hardware descriptor
* and space for hardware scatter table containing sg_num entries. * and space for hardware scatter table containing sg_num entries.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment