Commit c65a52f8 authored by Tom Lendacky, committed by Herbert Xu

crypto: ccp - Account for CCP backlog processing

When the crypto layer is able to queue up a command for processing
by the CCP on the initial call to ccp_crypto_enqueue_request and
the CCP returns -EBUSY, then, if the backlog flag is not set, the
command must be freed rather than added to the active command list.
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 950b10ba
@@ -205,6 +205,7 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
 {
 	struct ccp_crypto_cmd *active = NULL, *tmp;
 	unsigned long flags;
+	bool free_cmd = true;
 	int ret;
 
 	spin_lock_irqsave(&req_queue_lock, flags);
@@ -231,7 +232,10 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
 	if (!active) {
 		ret = ccp_enqueue_cmd(crypto_cmd->cmd);
 		if (!ccp_crypto_success(ret))
-			goto e_lock;
+			goto e_lock;	/* Error, don't queue it */
+		if ((ret == -EBUSY) &&
+		    !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
+			goto e_lock;	/* Not backlogging, don't queue it */
 	}
 
 	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
@@ -244,9 +248,14 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
 	req_queue.cmd_count++;
 	list_add_tail(&crypto_cmd->entry, &req_queue.cmds);
 
+	free_cmd = false;
+
 e_lock:
 	spin_unlock_irqrestore(&req_queue_lock, flags);
 
+	if (free_cmd)
+		kfree(crypto_cmd);
+
 	return ret;
 }
 
@@ -262,7 +271,6 @@ int ccp_crypto_enqueue_request(struct crypto_async_request *req,
 {
 	struct ccp_crypto_cmd *crypto_cmd;
 	gfp_t gfp;
-	int ret;
 
 	gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
 
@@ -287,11 +295,7 @@ int ccp_crypto_enqueue_request(struct crypto_async_request *req,
 	else
 		cmd->flags &= ~CCP_CMD_MAY_BACKLOG;
 
-	ret = ccp_crypto_enqueue_cmd(crypto_cmd);
-	if (!ccp_crypto_success(ret))
-		kfree(crypto_cmd);
-
-	return ret;
+	return ccp_crypto_enqueue_cmd(crypto_cmd);
 }
 
 struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
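
Read on its own, the patch encodes one decision at enqueue time: a command that the CCP rejects outright, or that it reports as -EBUSY while the command is not marked CCP_CMD_MAY_BACKLOG, must be freed instead of being added to the active command list. Below is a minimal, self-contained sketch of that decision in plain C; it is not the driver source, and the struct, flag value, and helper names are invented stand-ins for illustration.

/*
 * Sketch of the enqueue decision this patch implements (stand-in names,
 * not the real ccp driver code).
 */
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

#define CMD_MAY_BACKLOG 0x1		/* stand-in for CCP_CMD_MAY_BACKLOG */

struct sketch_cmd {
	unsigned int flags;
};

/* Stand-in for ccp_enqueue_cmd(): returns 0, -EBUSY, or another -errno. */
static int sketch_enqueue(struct sketch_cmd *cmd)
{
	(void)cmd;
	return -EBUSY;
}

/* Mirrors ccp_crypto_success(): 0 and -EBUSY both count as "accepted". */
static bool sketch_success(int err)
{
	return err == 0 || err == -EBUSY;
}

int sketch_enqueue_or_free(struct sketch_cmd *cmd)
{
	bool free_cmd = true;
	int ret = sketch_enqueue(cmd);

	if (!sketch_success(ret))
		goto out;	/* hard error: caller still owns it, free it */
	if (ret == -EBUSY && !(cmd->flags & CMD_MAY_BACKLOG))
		goto out;	/* device busy, no backlog allowed: free it */

	/* ...only here would the command join the active command list... */
	free_cmd = false;
out:
	if (free_cmd)
		free(cmd);
	return ret;
}

With freeing handled inside the enqueue path, ccp_crypto_enqueue_request() can simply return the result of ccp_crypto_enqueue_cmd() instead of duplicating the kfree() on failure, which is the second half of the diff above.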