Commit 059b1494 authored by Ryder Lee, committed by Herbert Xu

crypto: mediatek - fix typo and indentation

Dummy patch to fix typo and indentation.
Signed-off-by: Ryder Lee <ryder.lee@mediatek.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 0abc2714
@@ -314,8 +314,8 @@ static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
 		aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
 					     aes->dst.nents, DMA_FROM_DEVICE);
 		if (unlikely(!aes->dst.sg_len)) {
-			dma_unmap_sg(cryp->dev, aes->src.sg,
-				     aes->src.nents, DMA_TO_DEVICE);
+			dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
+				     DMA_TO_DEVICE);
 			goto sg_map_err;
 		}
 	}
@@ -484,7 +484,7 @@ static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
 			  const u8 *key, u32 keylen)
 {
 	struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
-	const u32 *key_tmp = (const u32 *)key;
+	const u32 *aes_key = (const u32 *)key;
 	u32 *key_state = ctx->tfm.state;
 	int i;
@@ -498,7 +498,7 @@ static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
 	ctx->keylen = SIZE_IN_WORDS(keylen);
 
 	for (i = 0; i < ctx->keylen; i++)
-		key_state[i] = cpu_to_le32(key_tmp[i]);
+		key_state[i] = cpu_to_le32(aes_key[i]);
 
 	return 0;
 }
@@ -512,26 +512,26 @@ static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode)
 	rctx = ablkcipher_request_ctx(req);
 	rctx->mode = mode;
 
-	return mtk_aes_handle_queue(ctx->cryp,
-				    !(mode & AES_FLAGS_ENCRYPT), &req->base);
+	return mtk_aes_handle_queue(ctx->cryp, !(mode & AES_FLAGS_ENCRYPT),
+				    &req->base);
 }
 
-static int mtk_ecb_encrypt(struct ablkcipher_request *req)
+static int mtk_aes_ecb_encrypt(struct ablkcipher_request *req)
 {
 	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
 }
 
-static int mtk_ecb_decrypt(struct ablkcipher_request *req)
+static int mtk_aes_ecb_decrypt(struct ablkcipher_request *req)
 {
 	return mtk_aes_crypt(req, AES_FLAGS_ECB);
 }
 
-static int mtk_cbc_encrypt(struct ablkcipher_request *req)
+static int mtk_aes_cbc_encrypt(struct ablkcipher_request *req)
 {
 	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
 }
 
-static int mtk_cbc_decrypt(struct ablkcipher_request *req)
+static int mtk_aes_cbc_decrypt(struct ablkcipher_request *req)
 {
 	return mtk_aes_crypt(req, AES_FLAGS_CBC);
 }
@@ -554,44 +554,44 @@ static int mtk_aes_cra_init(struct crypto_tfm *tfm)
 static struct crypto_alg aes_algs[] = {
 {
 	.cra_name = "cbc(aes)",
 	.cra_driver_name = "cbc-aes-mtk",
 	.cra_priority = 400,
 	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
 		     CRYPTO_ALG_ASYNC,
 	.cra_init = mtk_aes_cra_init,
 	.cra_blocksize = AES_BLOCK_SIZE,
 	.cra_ctxsize = sizeof(struct mtk_aes_ctx),
-	.cra_alignmask = 15,
+	.cra_alignmask = 0xf,
 	.cra_type = &crypto_ablkcipher_type,
 	.cra_module = THIS_MODULE,
 	.cra_u.ablkcipher = {
 		.min_keysize = AES_MIN_KEY_SIZE,
 		.max_keysize = AES_MAX_KEY_SIZE,
 		.setkey = mtk_aes_setkey,
-		.encrypt = mtk_cbc_encrypt,
-		.decrypt = mtk_cbc_decrypt,
+		.encrypt = mtk_aes_cbc_encrypt,
+		.decrypt = mtk_aes_cbc_decrypt,
 		.ivsize = AES_BLOCK_SIZE,
 	}
 },
 {
 	.cra_name = "ecb(aes)",
 	.cra_driver_name = "ecb-aes-mtk",
 	.cra_priority = 400,
 	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
 		     CRYPTO_ALG_ASYNC,
 	.cra_init = mtk_aes_cra_init,
 	.cra_blocksize = AES_BLOCK_SIZE,
 	.cra_ctxsize = sizeof(struct mtk_aes_ctx),
-	.cra_alignmask = 15,
+	.cra_alignmask = 0xf,
 	.cra_type = &crypto_ablkcipher_type,
 	.cra_module = THIS_MODULE,
 	.cra_u.ablkcipher = {
 		.min_keysize = AES_MIN_KEY_SIZE,
 		.max_keysize = AES_MAX_KEY_SIZE,
 		.setkey = mtk_aes_setkey,
-		.encrypt = mtk_ecb_encrypt,
-		.decrypt = mtk_ecb_decrypt,
+		.encrypt = mtk_aes_ecb_encrypt,
+		.decrypt = mtk_aes_ecb_decrypt,
 	}
 },
 };
...
@@ -124,7 +124,7 @@ typedef int (*mtk_aes_fn)(struct mtk_cryp *cryp, struct mtk_aes_rec *aes);
 /**
  * struct mtk_aes_rec - AES operation record
  * @queue: crypto request queue
- * @req: pointer to async request
+ * @areq: pointer to async request
  * @task: the tasklet is use in AES interrupt
  * @ctx: pointer to current context
  * @src: the structure that holds source sg list info
...
@@ -317,9 +317,9 @@ static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
  * Update input data length field of transform information and
  * map it to DMA region.
  */
-static int mtk_sha_info_map(struct mtk_cryp *cryp,
-			    struct mtk_sha_rec *sha,
-			    size_t len)
+static int mtk_sha_info_update(struct mtk_cryp *cryp,
+			       struct mtk_sha_rec *sha,
+			       size_t len)
 {
 	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
 	struct mtk_sha_info *info = &ctx->info;
@@ -338,7 +338,7 @@ static int mtk_sha_info_map(struct mtk_cryp *cryp,
 	ctx->digcnt += len;
 
 	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
-					DMA_BIDIRECTIONAL);
+				     DMA_BIDIRECTIONAL);
 	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) {
 		dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info));
 		return -EINVAL;
@@ -430,20 +430,15 @@ static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha,
 	struct mtk_desc *res = ring->res_base + ring->res_pos;
 	int err;
 
-	err = mtk_sha_info_map(cryp, sha, len);
+	err = mtk_sha_info_update(cryp, sha, len);
 	if (err)
 		return err;
 
 	/* Fill in the command/result descriptors */
-	res->hdr = MTK_DESC_FIRST |
-		   MTK_DESC_LAST |
-		   MTK_DESC_BUF_LEN(len);
+	res->hdr = MTK_DESC_FIRST | MTK_DESC_LAST | MTK_DESC_BUF_LEN(len);
 	res->buf = cpu_to_le32(cryp->tmp_dma);
 
-	cmd->hdr = MTK_DESC_FIRST |
-		   MTK_DESC_LAST |
-		   MTK_DESC_BUF_LEN(len) |
+	cmd->hdr = MTK_DESC_FIRST | MTK_DESC_LAST | MTK_DESC_BUF_LEN(len) |
 		   MTK_DESC_CT_LEN(ctx->ct_size);
 	cmd->buf = cpu_to_le32(addr);
@@ -477,7 +472,7 @@ static int mtk_sha_xmit2(struct mtk_cryp *cryp,
 	struct mtk_desc *res = ring->res_base + ring->res_pos;
 	int err;
 
-	err = mtk_sha_info_map(cryp, sha, len1 + len2);
+	err = mtk_sha_info_update(cryp, sha, len1 + len2);
 	if (err)
 		return err;
@@ -485,8 +480,7 @@ static int mtk_sha_xmit2(struct mtk_cryp *cryp,
 	res->hdr = MTK_DESC_BUF_LEN(len1) | MTK_DESC_FIRST;
 	res->buf = cpu_to_le32(cryp->tmp_dma);
 
-	cmd->hdr = MTK_DESC_BUF_LEN(len1) |
-		   MTK_DESC_FIRST |
+	cmd->hdr = MTK_DESC_BUF_LEN(len1) | MTK_DESC_FIRST |
 		   MTK_DESC_CT_LEN(ctx->ct_size);
 	cmd->buf = cpu_to_le32(sg_dma_address(ctx->sg));
 	cmd->ct = cpu_to_le32(ctx->ct_dma);
@@ -530,7 +524,7 @@ static int mtk_sha_dma_map(struct mtk_cryp *cryp,
 			   size_t count)
 {
 	ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
-					SHA_BUF_SIZE, DMA_TO_DEVICE);
+				       SHA_BUF_SIZE, DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
 		dev_err(cryp->dev, "dma map error\n");
 		return -EINVAL;
@@ -619,7 +613,7 @@ static int mtk_sha_update_start(struct mtk_cryp *cryp,
 		mtk_sha_fill_padding(ctx, len);
 
 		ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
-						SHA_BUF_SIZE, DMA_TO_DEVICE);
+					       SHA_BUF_SIZE, DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
 			dev_err(cryp->dev, "dma map bytes error\n");
 			return -EINVAL;
@@ -658,8 +652,7 @@ static int mtk_sha_update_start(struct mtk_cryp *cryp,
 static int mtk_sha_final_req(struct mtk_cryp *cryp,
 			     struct mtk_sha_rec *sha)
 {
-	struct ahash_request *req = sha->req;
-	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
+	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
 	size_t count;
 
 	mtk_sha_fill_padding(ctx, 0);
@@ -690,7 +683,8 @@ static int mtk_sha_finish(struct ahash_request *req)
 }
 
 static void mtk_sha_finish_req(struct mtk_cryp *cryp,
-			       struct mtk_sha_rec *sha, int err)
+			       struct mtk_sha_rec *sha,
+			       int err)
 {
 	if (likely(!err && (SHA_FLAGS_FINAL & sha->flags)))
 		err = mtk_sha_finish(sha->req);
@@ -850,8 +844,8 @@ static int mtk_sha_digest(struct ahash_request *req)
 	return mtk_sha_init(req) ?: mtk_sha_finup(req);
 }
 
-static int mtk_sha_setkey(struct crypto_ahash *tfm,
-			  const unsigned char *key, u32 keylen)
+static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
+			  u32 keylen)
 {
 	struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
 	struct mtk_sha_hmac_ctx *bctx = tctx->base;
@@ -863,7 +857,7 @@ static int mtk_sha_setkey(struct crypto_ahash *tfm,
 	shash->tfm = bctx->shash;
 	shash->flags = crypto_shash_get_flags(bctx->shash) &
-			CRYPTO_TFM_REQ_MAY_SLEEP;
+		       CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	if (keylen > bs) {
 		err = crypto_shash_digest(shash, key, keylen, bctx->ipad);
...