Commit 86671abb authored by Antoine Ténart, committed by Herbert Xu

crypto: inside-secure - use one queue per hw ring

Update the inside-secure safexcel driver from using one global queue to
one queue per hw ring. This eases request management and keeps the hw in
sync with what's done in sw.
Signed-off-by: Antoine Tenart <antoine.tenart@free-electrons.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 97858434
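
Illustration (not part of the commit): a minimal, standalone sketch of the idea behind the change, compilable as userspace C. The names (toy_priv, toy_ring, toy_enqueue, toy_dequeue, TOY_MAX_RINGS) are hypothetical, and a pthread mutex stands in for the kernel spinlock; the point is only that each ring owns its own queue, lock and need_dequeue flag instead of sharing global ones.

/*
 * Sketch only: per-ring queues instead of one global queue.
 * Compile with: cc -pthread toy.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define TOY_MAX_RINGS 4

struct toy_req {
	int id;
	struct toy_req *next;
};

struct toy_ring {
	struct toy_req *head, *tail;	/* this ring's request queue */
	pthread_mutex_t queue_lock;	/* protects only this ring's queue */
	bool need_dequeue;		/* this ring still has backlogged work */
};

struct toy_priv {
	struct toy_ring ring[TOY_MAX_RINGS];
};

/* Queue a request on one specific ring; other rings are never contended. */
static void toy_enqueue(struct toy_priv *priv, int ring, struct toy_req *req)
{
	struct toy_ring *r = &priv->ring[ring];

	pthread_mutex_lock(&r->queue_lock);
	req->next = NULL;
	if (r->tail)
		r->tail->next = req;
	else
		r->head = req;
	r->tail = req;
	pthread_mutex_unlock(&r->queue_lock);
}

/* Drain one ring, mirroring the new safexcel_dequeue(priv, ring) shape. */
static void toy_dequeue(struct toy_priv *priv, int ring)
{
	struct toy_ring *r = &priv->ring[ring];
	struct toy_req *req;

	for (;;) {
		pthread_mutex_lock(&r->queue_lock);
		req = r->head;
		if (req) {
			r->head = req->next;
			if (!r->head)
				r->tail = NULL;
		}
		pthread_mutex_unlock(&r->queue_lock);

		if (!req)
			break;
		printf("ring %d: submitting request %d\n", ring, req->id);
	}
	r->need_dequeue = false;
}

int main(void)
{
	struct toy_priv priv = { 0 };
	struct toy_req a = { .id = 1 }, b = { .id = 2 };
	int i;

	for (i = 0; i < TOY_MAX_RINGS; i++)
		pthread_mutex_init(&priv.ring[i].queue_lock, NULL);

	toy_enqueue(&priv, 0, &a);
	toy_enqueue(&priv, 1, &b);
	toy_dequeue(&priv, 0);
	toy_dequeue(&priv, 1);
	return 0;
}

Keeping the queue, its lock and the need_dequeue flag inside the per-ring structure means two rings never contend on the same lock, and the order in which requests are queued in software for a ring matches the order in which that ring's hardware processes them, which is what the commit message means by keeping the hw in sync with the sw.
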
@@ -422,20 +422,18 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
 	return 0;
 }
 
-void safexcel_dequeue(struct safexcel_crypto_priv *priv)
+void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
 {
 	struct crypto_async_request *req, *backlog;
 	struct safexcel_context *ctx;
 	struct safexcel_request *request;
-	int i, ret, n = 0, nreq[EIP197_MAX_RINGS] = {0};
-	int cdesc[EIP197_MAX_RINGS] = {0}, rdesc[EIP197_MAX_RINGS] = {0};
-	int commands, results;
+	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
 
 	do {
-		spin_lock_bh(&priv->lock);
-		req = crypto_dequeue_request(&priv->queue);
-		backlog = crypto_get_backlog(&priv->queue);
-		spin_unlock_bh(&priv->lock);
+		spin_lock_bh(&priv->ring[ring].queue_lock);
+		req = crypto_dequeue_request(&priv->ring[ring].queue);
+		backlog = crypto_get_backlog(&priv->ring[ring].queue);
+		spin_unlock_bh(&priv->ring[ring].queue_lock);
 
 		if (!req)
 			goto finalize;
@@ -445,58 +443,51 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv)
 			goto requeue;
 
 		ctx = crypto_tfm_ctx(req->tfm);
-		ret = ctx->send(req, ctx->ring, request, &commands, &results);
+		ret = ctx->send(req, ring, request, &commands, &results);
 		if (ret) {
 			kfree(request);
 requeue:
-			spin_lock_bh(&priv->lock);
-			crypto_enqueue_request(&priv->queue, req);
-			spin_unlock_bh(&priv->lock);
+			spin_lock_bh(&priv->ring[ring].queue_lock);
+			crypto_enqueue_request(&priv->ring[ring].queue, req);
+			spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-			priv->need_dequeue = true;
+			priv->ring[ring].need_dequeue = true;
 			continue;
 		}
 
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
 
-		spin_lock_bh(&priv->ring[ctx->ring].egress_lock);
-		list_add_tail(&request->list, &priv->ring[ctx->ring].list);
-		spin_unlock_bh(&priv->ring[ctx->ring].egress_lock);
+		spin_lock_bh(&priv->ring[ring].egress_lock);
+		list_add_tail(&request->list, &priv->ring[ring].list);
+		spin_unlock_bh(&priv->ring[ring].egress_lock);
 
-		cdesc[ctx->ring] += commands;
-		rdesc[ctx->ring] += results;
-		nreq[ctx->ring]++;
-	} while (n++ < EIP197_MAX_BATCH_SZ);
+		cdesc += commands;
+		rdesc += results;
+	} while (nreq++ < EIP197_MAX_BATCH_SZ);
 
 finalize:
-	if (n == EIP197_MAX_BATCH_SZ)
-		priv->need_dequeue = true;
-	else if (!n)
+	if (nreq == EIP197_MAX_BATCH_SZ)
+		priv->ring[ring].need_dequeue = true;
+	else if (!nreq)
 		return;
 
-	for (i = 0; i < priv->config.rings; i++) {
-		if (!nreq[i])
-			continue;
-
-		spin_lock_bh(&priv->ring[i].lock);
-
-		/* Configure when we want an interrupt */
-		writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
-		       EIP197_HIA_RDR_THRESH_PROC_PKT(nreq[i]),
-		       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_THRESH);
-
-		/* let the RDR know we have pending descriptors */
-		writel((rdesc[i] * priv->config.rd_offset) << 2,
-		       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_COUNT);
-
-		/* let the CDR know we have pending descriptors */
-		writel((cdesc[i] * priv->config.cd_offset) << 2,
-		       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_COUNT);
-
-		spin_unlock_bh(&priv->ring[i].lock);
-	}
+	spin_lock_bh(&priv->ring[ring].lock);
+
+	/* Configure when we want an interrupt */
+	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
+	       EIP197_HIA_RDR_THRESH_PROC_PKT(nreq),
+	       priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_THRESH);
+
+	/* let the RDR know we have pending descriptors */
+	writel((rdesc * priv->config.rd_offset) << 2,
+	       priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PREP_COUNT);
+
+	/* let the CDR know we have pending descriptors */
+	writel((cdesc * priv->config.cd_offset) << 2,
+	       priv->base + EIP197_HIA_CDR(ring) + EIP197_HIA_xDR_PREP_COUNT);
+
+	spin_unlock_bh(&priv->ring[ring].lock);
 }
 
 void safexcel_free_context(struct safexcel_crypto_priv *priv,
@@ -638,9 +629,9 @@ static void safexcel_handle_result_work(struct work_struct *work)
 
 	safexcel_handle_result_descriptor(priv, data->ring);
 
-	if (priv->need_dequeue) {
-		priv->need_dequeue = false;
-		safexcel_dequeue(data->priv);
+	if (priv->ring[data->ring].need_dequeue) {
+		priv->ring[data->ring].need_dequeue = false;
+		safexcel_dequeue(data->priv, data->ring);
 	}
 }
@@ -864,17 +855,18 @@ static int safexcel_probe(struct platform_device *pdev)
 			goto err_clk;
 		}
 
+		crypto_init_queue(&priv->ring[i].queue,
+				  EIP197_DEFAULT_RING_SIZE);
+
 		INIT_LIST_HEAD(&priv->ring[i].list);
 		spin_lock_init(&priv->ring[i].lock);
 		spin_lock_init(&priv->ring[i].egress_lock);
+		spin_lock_init(&priv->ring[i].queue_lock);
 	}
 
 	platform_set_drvdata(pdev, priv);
 	atomic_set(&priv->ring_used, 0);
 
-	spin_lock_init(&priv->lock);
-	crypto_init_queue(&priv->queue, EIP197_DEFAULT_RING_SIZE);
-
 	ret = safexcel_hw_init(priv);
 	if (ret) {
 		dev_err(dev, "EIP h/w init failed (%d)\n", ret);
...
@@ -469,11 +469,6 @@ struct safexcel_crypto_priv {
 	struct clk *clk;
 	struct safexcel_config config;
 
-	spinlock_t lock;
-	struct crypto_queue queue;
-
-	bool need_dequeue;
-
 	/* context DMA pool */
 	struct dma_pool *context_pool;
@@ -490,6 +485,11 @@ struct safexcel_crypto_priv {
 		/* command/result rings */
 		struct safexcel_ring cdr;
 		struct safexcel_ring rdr;
+
+		/* queue */
+		struct crypto_queue queue;
+		spinlock_t queue_lock;
+		bool need_dequeue;
 	} ring[EIP197_MAX_RINGS];
 };
@@ -533,7 +533,7 @@ struct safexcel_inv_result {
 	int error;
 };
 
-void safexcel_dequeue(struct safexcel_crypto_priv *priv);
+void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring);
 void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
 void safexcel_free_context(struct safexcel_crypto_priv *priv,
 			   struct crypto_async_request *req,
...
@@ -339,18 +339,21 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 		return ndesc;
 	}
 
+	ring = safexcel_select_ring(priv);
+	ctx->base.ring = ring;
 	ctx->base.needs_inv = false;
-	ctx->base.ring = safexcel_select_ring(priv);
 	ctx->base.send = safexcel_aes_send;
 
-	spin_lock_bh(&priv->lock);
-	enq_ret = crypto_enqueue_request(&priv->queue, async);
-	spin_unlock_bh(&priv->lock);
+	spin_lock_bh(&priv->ring[ring].queue_lock);
+	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
+	spin_unlock_bh(&priv->ring[ring].queue_lock);
 
 	if (enq_ret != -EINPROGRESS)
 		*ret = enq_ret;
 
-	priv->need_dequeue = true;
+	if (!priv->ring[ring].need_dequeue)
+		safexcel_dequeue(priv, ring);
 
 	*should_complete = false;
 
 	return ndesc;
@@ -384,6 +387,7 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	struct skcipher_request req;
 	struct safexcel_inv_result result = { 0 };
+	int ring = ctx->base.ring;
 
 	memset(&req, 0, sizeof(struct skcipher_request));
@@ -397,12 +401,12 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
 	ctx->base.exit_inv = true;
 	ctx->base.send = safexcel_cipher_send_inv;
 
-	spin_lock_bh(&priv->lock);
-	crypto_enqueue_request(&priv->queue, &req.base);
-	spin_unlock_bh(&priv->lock);
+	spin_lock_bh(&priv->ring[ring].queue_lock);
+	crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+	spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-	if (!priv->need_dequeue)
-		safexcel_dequeue(priv);
+	if (!priv->ring[ring].need_dequeue)
+		safexcel_dequeue(priv, ring);
 
 	wait_for_completion_interruptible(&result.completion);
@@ -421,7 +425,7 @@ static int safexcel_aes(struct skcipher_request *req,
 {
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
-	int ret;
+	int ret, ring;
 
 	ctx->direction = dir;
 	ctx->mode = mode;
@@ -440,12 +444,14 @@ static int safexcel_aes(struct skcipher_request *req,
 		return -ENOMEM;
 	}
 
-	spin_lock_bh(&priv->lock);
-	ret = crypto_enqueue_request(&priv->queue, &req->base);
-	spin_unlock_bh(&priv->lock);
+	ring = ctx->base.ring;
+
+	spin_lock_bh(&priv->ring[ring].queue_lock);
+	ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
+	spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-	if (!priv->need_dequeue)
-		safexcel_dequeue(priv);
+	if (!priv->ring[ring].need_dequeue)
+		safexcel_dequeue(priv, ring);
 
 	return ret;
 }
...
@@ -374,18 +374,21 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 		return 1;
 	}
 
-	ctx->base.ring = safexcel_select_ring(priv);
+	ring = safexcel_select_ring(priv);
+	ctx->base.ring = ring;
 	ctx->base.needs_inv = false;
 	ctx->base.send = safexcel_ahash_send;
 
-	spin_lock_bh(&priv->lock);
-	enq_ret = crypto_enqueue_request(&priv->queue, async);
-	spin_unlock_bh(&priv->lock);
+	spin_lock_bh(&priv->ring[ring].queue_lock);
+	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
+	spin_unlock_bh(&priv->ring[ring].queue_lock);
 
 	if (enq_ret != -EINPROGRESS)
 		*ret = enq_ret;
 
-	priv->need_dequeue = true;
+	if (!priv->ring[ring].need_dequeue)
+		safexcel_dequeue(priv, ring);
 
 	*should_complete = false;
 
 	return 1;
@@ -417,6 +420,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	struct ahash_request req;
 	struct safexcel_inv_result result = { 0 };
+	int ring = ctx->base.ring;
 
 	memset(&req, 0, sizeof(struct ahash_request));
@@ -430,12 +434,12 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
 	ctx->base.exit_inv = true;
 	ctx->base.send = safexcel_ahash_send_inv;
 
-	spin_lock_bh(&priv->lock);
-	crypto_enqueue_request(&priv->queue, &req.base);
-	spin_unlock_bh(&priv->lock);
+	spin_lock_bh(&priv->ring[ring].queue_lock);
+	crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+	spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-	if (!priv->need_dequeue)
-		safexcel_dequeue(priv);
+	if (!priv->ring[ring].need_dequeue)
+		safexcel_dequeue(priv, ring);
 
 	wait_for_completion_interruptible(&result.completion);
@@ -477,7 +481,7 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
 	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
 	struct safexcel_crypto_priv *priv = ctx->priv;
-	int ret;
+	int ret, ring;
 
 	ctx->base.send = safexcel_ahash_send;
@@ -496,12 +500,14 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
 		return -ENOMEM;
 	}
 
-	spin_lock_bh(&priv->lock);
-	ret = crypto_enqueue_request(&priv->queue, &areq->base);
-	spin_unlock_bh(&priv->lock);
+	ring = ctx->base.ring;
+
+	spin_lock_bh(&priv->ring[ring].queue_lock);
+	ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
+	spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-	if (!priv->need_dequeue)
-		safexcel_dequeue(priv);
+	if (!priv->ring[ring].need_dequeue)
+		safexcel_dequeue(priv, ring);
 
 	return ret;
 }
...