Commit 38682383 authored by Giovanni Cabiddu, committed by Herbert Xu

crypto: qat - add backlog mechanism

The implementations of the crypto algorithms (aead, skcipher, etc.) in
the QAT driver do not properly support requests with the
CRYPTO_TFM_REQ_MAY_BACKLOG flag set. If the HW queue is full, the driver
returns -EBUSY but does not enqueue the request. This can result in
applications like dm-crypt waiting indefinitely for the completion of a
request that was never submitted to the hardware.

Fix this by adding a software backlog queue: if the ring buffer is more
than eighty percent full, the request is enqueued on a backlog list and
-EBUSY is returned to the caller.
Requests on the backlog list are resubmitted at a later time, in the
context of the callback of a previously submitted request.
A request for which -EBUSY was returned is then completed with the
error code -EINPROGRESS once it is submitted to the HW queues.

The submission loop inside qat_alg_send_message() has been modified to
choose the submission policy based on the request flags. If the request
does not have the CRYPTO_TFM_REQ_MAY_BACKLOG flag set, the previous
retry behaviour is preserved.
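
(Illustration, not part of the patch: the contract a synchronous caller
relies on, using only the generic crypto wait helpers from the kernel
crypto API; "req" is assumed to be a fully set up skcipher_request.)

	/* With CRYPTO_TFM_REQ_MAY_BACKLOG set, crypto_wait_req() treats
	 * -EBUSY as "queued in the backlog" and sleeps until the final
	 * completion; crypto_req_done() skips the intermediate
	 * -EINPROGRESS notification generated when the request leaves
	 * the backlog for the HW ring.
	 */
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	skcipher_request_set_callback(req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);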

Based on a patch by
Vishnu Das Ramachandran <vishnu.dasx.ramachandran@intel.com>

Cc: stable@vger.kernel.org
Fixes: d370cec3 ("crypto: qat - Intel(R) QAT crypto interface")
Reported-by: Mikulas Patocka <mpatocka@redhat.com>
Reported-by: Kyle Sanderson <kyle.leet@gmail.com>
Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Marco Chiappero <marco.chiappero@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent af88d3c1
@@ -8,6 +8,9 @@
 #include "adf_cfg.h"
 #include "adf_common_drv.h"
 
+#define ADF_MAX_RING_THRESHOLD		80
+#define ADF_PERCENT(tot, percent)	(((tot) * (percent)) / 100)
+
 static inline u32 adf_modulo(u32 data, u32 shift)
 {
 	u32 div = data >> shift;
@@ -77,6 +80,11 @@ static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
 				      bank->irq_mask);
 }
 
+bool adf_ring_nearly_full(struct adf_etr_ring_data *ring)
+{
+	return atomic_read(ring->inflights) > ring->threshold;
+}
+
 int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
 {
 	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
@@ -217,6 +225,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
 	struct adf_etr_bank_data *bank;
 	struct adf_etr_ring_data *ring;
 	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+	int max_inflights;
 	u32 ring_num;
 	int ret;
@@ -263,6 +272,8 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
 	ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
 	ring->head = 0;
 	ring->tail = 0;
+	max_inflights = ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size);
+	ring->threshold = ADF_PERCENT(max_inflights, ADF_MAX_RING_THRESHOLD);
 	atomic_set(ring->inflights, 0);
 	ret = adf_init_ring(ring);
 	if (ret)
...
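
(Worked example, values hypothetical: suppose
ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size) evaluates to 512 for
a given ring geometry.)

	/* ring->threshold = ADF_PERCENT(512, ADF_MAX_RING_THRESHOLD)
	 *                 = (512 * 80) / 100
	 *                 = 409
	 *
	 * adf_ring_nearly_full() then returns true once more than 409
	 * requests are in flight, leaving roughly 20% of the ring as
	 * headroom for requests that already passed the check.
	 */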
@@ -14,6 +14,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
 		    const char *ring_name, adf_callback_fn callback,
 		    int poll_mode, struct adf_etr_ring_data **ring_ptr);
 
+bool adf_ring_nearly_full(struct adf_etr_ring_data *ring);
 int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg);
 void adf_remove_ring(struct adf_etr_ring_data *ring);
 #endif
...
@@ -22,6 +22,7 @@ struct adf_etr_ring_data {
 	spinlock_t lock;	/* protects ring data struct */
 	u16 head;
 	u16 tail;
+	u32 threshold;
 	u8 ring_number;
 	u8 ring_size;
 	u8 msg_size;
...
@@ -935,19 +935,25 @@ void qat_alg_callback(void *resp)
 	struct icp_qat_fw_la_resp *qat_resp = resp;
 	struct qat_crypto_request *qat_req =
 		(void *)(__force long)qat_resp->opaque_data;
+	struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
 
 	qat_req->cb(qat_resp, qat_req);
+
+	qat_alg_send_backlog(backlog);
 }
 
 static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req,
-				    struct qat_crypto_instance *inst)
+				    struct qat_crypto_instance *inst,
+				    struct crypto_async_request *base)
 {
-	struct qat_alg_req req;
+	struct qat_alg_req *alg_req = &qat_req->alg_req;
 
-	req.fw_req = (u32 *)&qat_req->req;
-	req.tx_ring = inst->sym_tx;
+	alg_req->fw_req = (u32 *)&qat_req->req;
+	alg_req->tx_ring = inst->sym_tx;
+	alg_req->base = base;
+	alg_req->backlog = &inst->backlog;
 
-	return qat_alg_send_message(&req);
+	return qat_alg_send_message(alg_req);
 }
 
 static int qat_alg_aead_dec(struct aead_request *areq)
@@ -987,7 +993,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
 	auth_param->auth_off = 0;
 	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
 
-	ret = qat_alg_send_sym_message(qat_req, ctx->inst);
+	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
 	if (ret == -ENOSPC)
 		qat_alg_free_bufl(ctx->inst, qat_req);
@@ -1031,7 +1037,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
 	auth_param->auth_off = 0;
 	auth_param->auth_len = areq->assoclen + areq->cryptlen;
 
-	ret = qat_alg_send_sym_message(qat_req, ctx->inst);
+	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
 	if (ret == -ENOSPC)
 		qat_alg_free_bufl(ctx->inst, qat_req);
@@ -1212,7 +1218,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
 	qat_alg_set_req_iv(qat_req);
 
-	ret = qat_alg_send_sym_message(qat_req, ctx->inst);
+	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
 	if (ret == -ENOSPC)
 		qat_alg_free_bufl(ctx->inst, qat_req);
@@ -1278,7 +1284,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
 	qat_alg_set_req_iv(qat_req);
 	qat_alg_update_iv(qat_req);
 
-	ret = qat_alg_send_sym_message(qat_req, ctx->inst);
+	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
 	if (ret == -ENOSPC)
 		qat_alg_free_bufl(ctx->inst, qat_req);
...
@@ -6,7 +6,7 @@
 
 #define ADF_MAX_RETRIES		20
 
-int qat_alg_send_message(struct qat_alg_req *req)
+static int qat_alg_send_message_retry(struct qat_alg_req *req)
 {
 	int ret = 0, ctr = 0;
 
@@ -19,3 +19,68 @@ int qat_alg_send_message(struct qat_alg_req *req)
 
 	return -EINPROGRESS;
 }
+
+void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
+{
+	struct qat_alg_req *req, *tmp;
+
+	spin_lock_bh(&backlog->lock);
+	list_for_each_entry_safe(req, tmp, &backlog->list, list) {
+		if (adf_send_message(req->tx_ring, req->fw_req)) {
+			/* The HW ring is full. Do nothing.
+			 * qat_alg_send_backlog() will be invoked again by
+			 * another callback.
+			 */
+			break;
+		}
+		list_del(&req->list);
+		req->base->complete(req->base, -EINPROGRESS);
+	}
+	spin_unlock_bh(&backlog->lock);
+}
+
+static void qat_alg_backlog_req(struct qat_alg_req *req,
+				struct qat_instance_backlog *backlog)
+{
+	INIT_LIST_HEAD(&req->list);
+
+	spin_lock_bh(&backlog->lock);
+	list_add_tail(&req->list, &backlog->list);
+	spin_unlock_bh(&backlog->lock);
+}
+
+static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
+{
+	struct qat_instance_backlog *backlog = req->backlog;
+	struct adf_etr_ring_data *tx_ring = req->tx_ring;
+	u32 *fw_req = req->fw_req;
+
+	/* If any request is already backlogged, then add to backlog list */
+	if (!list_empty(&backlog->list))
+		goto enqueue;
+
+	/* If ring is nearly full, then add to backlog list */
+	if (adf_ring_nearly_full(tx_ring))
+		goto enqueue;
+
+	/* If adding request to HW ring fails, then add to backlog list */
+	if (adf_send_message(tx_ring, fw_req))
+		goto enqueue;
+
+	return -EINPROGRESS;
+
+enqueue:
+	qat_alg_backlog_req(req, backlog);
+
+	return -EBUSY;
+}
+
+int qat_alg_send_message(struct qat_alg_req *req)
+{
+	u32 flags = req->base->flags;
+
+	if (flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+		return qat_alg_send_message_maybacklog(req);
+	else
+		return qat_alg_send_message_retry(req);
+}
...
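
Two properties of the send path are worth noting. The list_empty()
check makes any backlogged request act as a barrier: later MAY_BACKLOG
submissions queue up behind it rather than overtaking it, so submission
order is preserved. The resulting return-code contract for callers
looks like this (illustrative sketch mirroring the aead/skcipher call
sites; not part of the patch):

	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
	if (ret == -ENOSPC)
		/* retry path gave up (no MAY_BACKLOG): nothing was
		 * queued, so per-request buffers are released here */
		qat_alg_free_bufl(ctx->inst, qat_req);
	/* -EBUSY: parked on inst->backlog, resubmitted later from a
	 * completion callback and then completed with -EINPROGRESS;
	 * -EINPROGRESS: already placed on the HW ring. */
	return ret;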
@@ -6,5 +6,6 @@
 #include "qat_crypto.h"
 
 int qat_alg_send_message(struct qat_alg_req *req);
+void qat_alg_send_backlog(struct qat_instance_backlog *backlog);
 
 #endif
...
@@ -136,17 +136,21 @@ struct qat_asym_request {
 	} areq;
 	int err;
 	void (*cb)(struct icp_qat_fw_pke_resp *resp);
+	struct qat_alg_req alg_req;
 } __aligned(64);
 
 static int qat_alg_send_asym_message(struct qat_asym_request *qat_req,
-				     struct qat_crypto_instance *inst)
+				     struct qat_crypto_instance *inst,
+				     struct crypto_async_request *base)
 {
-	struct qat_alg_req req;
+	struct qat_alg_req *alg_req = &qat_req->alg_req;
 
-	req.fw_req = (u32 *)&qat_req->req;
-	req.tx_ring = inst->pke_tx;
+	alg_req->fw_req = (u32 *)&qat_req->req;
+	alg_req->tx_ring = inst->pke_tx;
+	alg_req->base = base;
+	alg_req->backlog = &inst->backlog;
 
-	return qat_alg_send_message(&req);
+	return qat_alg_send_message(alg_req);
 }
 
 static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
@@ -350,7 +354,7 @@ static int qat_dh_compute_value(struct kpp_request *req)
 	msg->input_param_count = n_input_params;
 	msg->output_param_count = 1;
 
-	ret = qat_alg_send_asym_message(qat_req, ctx->inst);
+	ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
 	if (ret == -ENOSPC)
 		goto unmap_all;
@@ -557,8 +561,11 @@ void qat_alg_asym_callback(void *_resp)
 {
 	struct icp_qat_fw_pke_resp *resp = _resp;
 	struct qat_asym_request *areq = (void *)(__force long)resp->opaque;
+	struct qat_instance_backlog *backlog = areq->alg_req.backlog;
 
 	areq->cb(resp);
+
+	qat_alg_send_backlog(backlog);
 }
 
 #define PKE_RSA_EP_512 0x1c161b21
@@ -748,7 +755,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
 	msg->input_param_count = 3;
 	msg->output_param_count = 1;
 
-	ret = qat_alg_send_asym_message(qat_req, ctx->inst);
+	ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
 	if (ret == -ENOSPC)
 		goto unmap_all;
@@ -901,7 +908,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
 	msg->output_param_count = 1;
 
-	ret = qat_alg_send_asym_message(qat_req, ctx->inst);
+	ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
 	if (ret == -ENOSPC)
 		goto unmap_all;
...
@@ -353,6 +353,9 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
 				      &inst->pke_rx);
 		if (ret)
 			goto err;
+
+		INIT_LIST_HEAD(&inst->backlog.list);
+		spin_lock_init(&inst->backlog.lock);
 	}
 	return 0;
 err:
...
@@ -9,9 +9,17 @@
 #include "adf_accel_devices.h"
 #include "icp_qat_fw_la.h"
 
+struct qat_instance_backlog {
+	struct list_head list;
+	spinlock_t lock; /* protects backlog list */
+};
+
 struct qat_alg_req {
 	u32 *fw_req;
 	struct adf_etr_ring_data *tx_ring;
+	struct crypto_async_request *base;
+	struct list_head list;
+	struct qat_instance_backlog *backlog;
 };
 
 struct qat_crypto_instance {
@@ -24,6 +32,7 @@ struct qat_crypto_instance {
 	unsigned long state;
 	int id;
 	atomic_t refctr;
+	struct qat_instance_backlog backlog;
 };
 
 #define QAT_MAX_BUFF_DESC	4
@@ -82,6 +91,7 @@ struct qat_crypto_request {
 		u8 iv[AES_BLOCK_SIZE];
 	};
 	bool encryption;
+	struct qat_alg_req alg_req;
 };
 
 static inline bool adf_hw_dev_has_crypto(struct adf_accel_dev *accel_dev)
...
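
One design note on the new structures: the backlog list head and the
per-request list node are embedded in existing objects, so the submit
path never allocates memory (it may run in atomic context). A rough
ownership sketch, for orientation only:

	/*
	 * qat_crypto_instance
	 *   sym_tx / pke_tx  -> HW TX rings (adf_etr_ring_data)
	 *   backlog          -> one qat_instance_backlog per instance,
	 *                       shared by the sym and asym paths
	 *
	 * qat_crypto_request / qat_asym_request
	 *   alg_req          -> embedded qat_alg_req carrying fw_req,
	 *                       tx_ring, base and the list node used
	 *                       while parked on inst->backlog
	 */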