Commit 93c7f4d3 authored by Corentin Labbe, committed by Herbert Xu

crypto: sun8i-ce - enable working on big endian

On big endian kernels, the sun8i-ce crypto driver does not work.
This patch makes the modifications necessary for it to work on BE
kernels (declaring descriptor entries as __le32 and adding the missing
cpu_to_le32() conversions).

Fixes: 06f751b6 ("crypto: allwinner - Add sun8i-ce Crypto Engine")
Signed-off-by: Corentin Labbe <clabbe.montjoie@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 660eda8d
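
The whole patch follows one pattern: descriptor fields that the Crypto Engine reads become __le32, values are still assembled in ordinary CPU-endian u32 locals, and a single cpu_to_le32() is applied when the value is written into the descriptor. A minimal sketch of that idea, using a hypothetical descriptor layout rather than the driver's own structures:

/*
 * Illustrative only: build a control word in CPU byte order, then
 * convert once when storing it into the little-endian descriptor.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

struct my_desc {
        __le32 common_ctl;      /* hardware reads this as little endian */
};

static void fill_common_ctl(struct my_desc *desc, u32 algo, u32 dir)
{
        u32 common;             /* CPU byte order while we build it */

        common = algo | dir | (1U << 31);       /* e.g. an interrupt-enable bit */
        desc->common_ctl = cpu_to_le32(common); /* converted exactly once */
}

On little endian hosts cpu_to_le32() is a no-op, which is why the driver already worked there; on big endian hosts it byte-swaps, which is the fix.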
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -90,7 +90,9 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
         struct ce_task *cet;
         struct scatterlist *sg;
         unsigned int todo, len, offset, ivsize;
+        dma_addr_t addr_iv = 0, addr_key = 0;
         void *backup_iv = NULL;
+        u32 common, sym;
         int flow, i;
         int nr_sgs = 0;
         int nr_sgd = 0;
@@ -115,28 +117,31 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
         cet = chan->tl;
         memset(cet, 0, sizeof(struct ce_task));

-        cet->t_id = flow;
-        cet->t_common_ctl = ce->variant->alg_cipher[algt->ce_algo_id];
-        cet->t_common_ctl |= rctx->op_dir | CE_COMM_INT;
-        cet->t_dlen = areq->cryptlen / 4;
+        cet->t_id = cpu_to_le32(flow);
+        common = ce->variant->alg_cipher[algt->ce_algo_id];
+        common |= rctx->op_dir | CE_COMM_INT;
+        cet->t_common_ctl = cpu_to_le32(common);
         /* CTS and recent CE (H6) need length in bytes, in word otherwise */
         if (ce->variant->has_t_dlen_in_bytes)
-                cet->t_dlen = areq->cryptlen;
+                cet->t_dlen = cpu_to_le32(areq->cryptlen);
+        else
+                cet->t_dlen = cpu_to_le32(areq->cryptlen / 4);

-        cet->t_sym_ctl = ce->variant->op_mode[algt->ce_blockmode];
+        sym = ce->variant->op_mode[algt->ce_blockmode];
         len = op->keylen;
         switch (len) {
         case 128 / 8:
-                cet->t_sym_ctl |= CE_AES_128BITS;
+                sym |= CE_AES_128BITS;
                 break;
         case 192 / 8:
-                cet->t_sym_ctl |= CE_AES_192BITS;
+                sym |= CE_AES_192BITS;
                 break;
         case 256 / 8:
-                cet->t_sym_ctl |= CE_AES_256BITS;
+                sym |= CE_AES_256BITS;
                 break;
         }
+        cet->t_sym_ctl = cpu_to_le32(sym);
         cet->t_asym_ctl = 0;

         chan->op_mode = ce->variant->op_mode[algt->ce_blockmode];
@@ -144,9 +149,9 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
         chan->method = ce->variant->alg_cipher[algt->ce_algo_id];
         chan->keylen = op->keylen;

-        cet->t_key = dma_map_single(ce->dev, op->key, op->keylen,
-                                    DMA_TO_DEVICE);
-        if (dma_mapping_error(ce->dev, cet->t_key)) {
+        addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
+        cet->t_key = cpu_to_le32(addr_key);
+        if (dma_mapping_error(ce->dev, addr_key)) {
                 dev_err(ce->dev, "Cannot DMA MAP KEY\n");
                 err = -EFAULT;
                 goto theend;
@@ -171,9 +176,10 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
                                             ivsize, 0);
                 }
                 memcpy(chan->bounce_iv, areq->iv, ivsize);
-                cet->t_iv = dma_map_single(ce->dev, chan->bounce_iv,
-                                           chan->ivlen, DMA_TO_DEVICE);
-                if (dma_mapping_error(ce->dev, cet->t_iv)) {
+                addr_iv = dma_map_single(ce->dev, chan->bounce_iv, chan->ivlen,
+                                         DMA_TO_DEVICE);
+                cet->t_iv = cpu_to_le32(addr_iv);
+                if (dma_mapping_error(ce->dev, addr_iv)) {
                         dev_err(ce->dev, "Cannot DMA MAP IV\n");
                         err = -ENOMEM;
                         goto theend_iv;
@@ -208,9 +214,9 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
         len = areq->cryptlen;
         for_each_sg(areq->src, sg, nr_sgs, i) {
-                cet->t_src[i].addr = sg_dma_address(sg);
+                cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
                 todo = min(len, sg_dma_len(sg));
-                cet->t_src[i].len = todo / 4;
+                cet->t_src[i].len = cpu_to_le32(todo / 4);
                 dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
                         areq->cryptlen, i, cet->t_src[i].len, sg->offset, todo);
                 len -= todo;
@@ -223,9 +229,9 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
         len = areq->cryptlen;
         for_each_sg(areq->dst, sg, nr_sgd, i) {
-                cet->t_dst[i].addr = sg_dma_address(sg);
+                cet->t_dst[i].addr = cpu_to_le32(sg_dma_address(sg));
                 todo = min(len, sg_dma_len(sg));
-                cet->t_dst[i].len = todo / 4;
+                cet->t_dst[i].len = cpu_to_le32(todo / 4);
                 dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
                         areq->cryptlen, i, cet->t_dst[i].len, sg->offset, todo);
                 len -= todo;
@@ -250,8 +256,8 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
 theend_iv:
         if (areq->iv && ivsize > 0) {
-                if (cet->t_iv)
-                        dma_unmap_single(ce->dev, cet->t_iv, chan->ivlen,
+                if (addr_iv)
+                        dma_unmap_single(ce->dev, addr_iv, chan->ivlen,
                                          DMA_TO_DEVICE);
                 offset = areq->cryptlen - ivsize;
                 if (rctx->op_dir & CE_DECRYPTION) {
@@ -265,7 +271,7 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
         }
 theend_key:
-        dma_unmap_single(ce->dev, cet->t_key, op->keylen, DMA_TO_DEVICE);
+        dma_unmap_single(ce->dev, addr_key, op->keylen, DMA_TO_DEVICE);
 theend:
         return err;
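
One detail worth noting in the hunks above: the error check is done on the raw dma_addr_t returned by dma_map_single(), and only the converted value is stored in the descriptor; that is why the patch introduces the addr_key and addr_iv locals. A hedged sketch of the same pattern, with hypothetical names rather than the driver's:

/*
 * Illustrative only: keep the dma_addr_t in a CPU-order local,
 * error-check it, and store only the little-endian form in the
 * hardware descriptor.
 */
#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct my_desc {
        __le32 key_addr;        /* hypothetical fields, not the driver's */
        __le32 key_len;
};

static int map_key(struct device *dev, struct my_desc *desc,
                   void *key, size_t keylen)
{
        dma_addr_t addr;

        addr = dma_map_single(dev, key, keylen, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, addr))
                return -EFAULT;

        /* this engine takes 32-bit addresses, hence the narrowing here */
        desc->key_addr = cpu_to_le32(addr);
        desc->key_len = cpu_to_le32(keylen);
        return 0;
}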
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -103,8 +103,8 @@ struct ce_variant {
 };

 struct sginfo {
-        u32 addr;
-        u32 len;
+        __le32 addr;
+        __le32 len;
 } __packed;

 /*
@@ -112,18 +112,18 @@ struct sginfo {
  * The structure of this descriptor could be found in the datasheet
  */
 struct ce_task {
-        u32 t_id;
-        u32 t_common_ctl;
-        u32 t_sym_ctl;
-        u32 t_asym_ctl;
-        u32 t_key;
-        u32 t_iv;
-        u32 t_ctr;
-        u32 t_dlen;
+        __le32 t_id;
+        __le32 t_common_ctl;
+        __le32 t_sym_ctl;
+        __le32 t_asym_ctl;
+        __le32 t_key;
+        __le32 t_iv;
+        __le32 t_ctr;
+        __le32 t_dlen;
         struct sginfo t_src[MAX_SG];
         struct sginfo t_dst[MAX_SG];
-        u32 next;
-        u32 reserved[3];
+        __le32 next;
+        __le32 reserved[3];
 } __packed __aligned(8);

 /*
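
A side benefit of switching the struct members to __le32, which is a sparse "bitwise" type: running the checker (for example, building the directory with make C=1) will now flag any remaining store of a plain CPU-order value into a descriptor field. A small hypothetical example of the kind of code it catches:

#include <linux/types.h>
#include <asm/byteorder.h>

struct my_desc {
        __le32 t_dlen;          /* hypothetical field mirroring ce_task */
};

static void fill_len(struct my_desc *desc, u32 len_words)
{
        desc->t_dlen = len_words;               /* sparse: incorrect type in assignment */
        desc->t_dlen = cpu_to_le32(len_words);  /* correct */
}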