Commit ccba2f11 authored by Hadar Gat, committed by Herbert Xu

crypto: ccree - improve error handling

Pass the error code returned by the lower-level helpers up to the higher level functions instead of overriding it with -ENOMEM.
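
For example (a before/after sketch taken from the dst-SGL mapping in cc_map_cipher_request below), the old code dropped the value returned by cc_map_sg() and reported -ENOMEM unconditionally:

	if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
		      &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
		      &dummy, &mapped_nents)) {
		rc = -ENOMEM;
		goto cipher_exit;
	}

whereas the code now propagates whatever cc_map_sg() returned:

	rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
		       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
		       &dummy, &mapped_nents);
	if (rc)
		goto cipher_exit;
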
Signed-off-by: Hadar Gat <hadar.gat@arm.com>
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent a0d608ee
@@ -511,10 +511,8 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 	/* Map the src SGL */
 	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
 		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
-	if (rc) {
-		rc = -ENOMEM;
+	if (rc)
 		goto cipher_exit;
-	}
 	if (mapped_nents > 1)
 		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
@@ -528,12 +526,11 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 		}
 	} else {
 		/* Map the dst sg */
-		if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
-			      &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
-			      &dummy, &mapped_nents)) {
-			rc = -ENOMEM;
+		rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+			       &dummy, &mapped_nents);
+		if (rc)
 			goto cipher_exit;
-		}
 		if (mapped_nents > 1)
 			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
@@ -1078,10 +1075,8 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 				&areq_ctx->dst.nents,
 				LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
 				&dst_mapped_nents);
-		if (rc) {
-			rc = -ENOMEM;
+		if (rc)
 			goto chain_data_exit;
-		}
 	}
 	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
@@ -1235,11 +1230,10 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
 		}
 		areq_ctx->ccm_iv0_dma_addr = dma_addr;
-		if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
-					 &sg_data, req->assoclen)) {
-			rc = -ENOMEM;
+		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
+					  &sg_data, req->assoclen);
+		if (rc)
 			goto aead_map_failure;
-		}
 	}
 	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
@@ -1299,10 +1293,8 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
 			       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
 				LLI_MAX_NUM_OF_DATA_ENTRIES),
 			       &dummy, &mapped_nents);
-		if (rc) {
-			rc = -ENOMEM;
+		if (rc)
 			goto aead_map_failure;
-		}
 		if (areq_ctx->is_single_pass) {
 			/*
@@ -1386,6 +1378,7 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
 	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
 	struct buffer_array sg_data;
 	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+	int rc = 0;
 	u32 dummy = 0;
 	u32 mapped_nents = 0;
@@ -1405,18 +1398,18 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
 	/*TODO: copy data in case that buffer is enough for operation */
 	/* map the previous buffer */
 	if (*curr_buff_cnt) {
-		if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
-				    &sg_data)) {
-			return -ENOMEM;
-		}
+		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+				     &sg_data);
+		if (rc)
+			return rc;
 	}
 	if (src && nbytes > 0 && do_update) {
-		if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
-			      &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
-			      &dummy, &mapped_nents)) {
+		rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
+			       &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+			       &dummy, &mapped_nents);
+		if (rc)
 			goto unmap_curr_buff;
-		}
 		if (src && mapped_nents == 1 &&
 		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
 			memcpy(areq_ctx->buff_sg, src,
@@ -1435,7 +1428,8 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
 		/* add the src data to the sg_data */
 		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
 				0, true, &areq_ctx->mlli_nents);
-		if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+		if (rc)
 			goto fail_unmap_din;
 	}
 	/* change the buffer index for the unmap function */
@@ -1451,7 +1445,7 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
 	if (*curr_buff_cnt)
 		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
-	return -ENOMEM;
+	return rc;
 }

 int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
@@ -1470,6 +1464,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
 	struct buffer_array sg_data;
 	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 	unsigned int swap_index = 0;
+	int rc = 0;
 	u32 dummy = 0;
 	u32 mapped_nents = 0;
@@ -1514,21 +1509,21 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
 	}
 	if (*curr_buff_cnt) {
-		if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
-				    &sg_data)) {
-			return -ENOMEM;
-		}
+		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+				     &sg_data);
+		if (rc)
+			return rc;
 		/* change the buffer index for next operation */
 		swap_index = 1;
 	}
 	if (update_data_len > *curr_buff_cnt) {
-		if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
-			      DMA_TO_DEVICE, &areq_ctx->in_nents,
-			      LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
-			      &mapped_nents)) {
+		rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
+			       DMA_TO_DEVICE, &areq_ctx->in_nents,
+			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+			       &mapped_nents);
+		if (rc)
 			goto unmap_curr_buff;
-		}
 		if (mapped_nents == 1 &&
 		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
 			/* only one entry in the SG and no previous data */
@@ -1548,7 +1543,8 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
 		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
 				(update_data_len - *curr_buff_cnt), 0, true,
 				&areq_ctx->mlli_nents);
-		if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+		if (rc)
 			goto fail_unmap_din;
 	}
 	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
@@ -1562,7 +1558,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
 	if (*curr_buff_cnt)
 		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
-	return -ENOMEM;
+	return rc;
 }

 void cc_unmap_hash_request(struct device *dev, void *ctx,