Commit 3cc43a0a authored by Tadeusz Struk, committed by Herbert Xu

crypto: qat - Add load balancing across devices

Load balancing of crypto instances only used a single device.
That was not a problem on the PF, but since there are only
one or two instances per VF we need to load balance across devices.
Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 859e5805
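For orientation before the diff, a minimal userspace sketch of the policy this patch adds: walk every started device, restrict to the requested NUMA node (or devices with no node affinity), keep the one with the smallest reference count, and bump that count on selection. The dev_info type and pick_dev() helper are hypothetical stand-ins for the driver's adf_accel_dev list walk, not its actual API.

/*
 * Toy model of least-loaded device selection, assuming a flat array
 * instead of the driver's linked list. Illustrative names only.
 */
#include <stdio.h>

struct dev_info {
	const char *name;
	int node;            /* NUMA node the device is attached to */
	unsigned long refs;  /* outstanding references == current load */
};

static struct dev_info *pick_dev(struct dev_info *devs, int n, int node)
{
	struct dev_info *best_dev = NULL;
	unsigned long best = ~0UL;
	int i;

	for (i = 0; i < n; i++) {
		/* node < 0 means "no NUMA affinity": accept any request */
		if (devs[i].node != node && devs[i].node >= 0)
			continue;
		if (devs[i].refs < best) {
			best = devs[i].refs;
			best_dev = &devs[i];
		}
	}
	return best_dev;
}

int main(void)
{
	struct dev_info devs[] = {
		{ "vf0", 0, 3 }, { "vf1", 0, 1 }, { "vf2", 1, 0 },
	};
	struct dev_info *d = pick_dev(devs, 3, 0);

	if (d) {
		d->refs++;                      /* mirror adf_dev_get() */
		printf("picked %s\n", d->name); /* picks vf1: node 0, least busy */
	}
	return 0;
}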
@@ -60,8 +60,8 @@ static struct service_hndl qat_crypto;
 
 void qat_crypto_put_instance(struct qat_crypto_instance *inst)
 {
-	if (atomic_sub_return(1, &inst->refctr) == 0)
-		adf_dev_put(inst->accel_dev);
+	atomic_dec(&inst->refctr);
+	adf_dev_put(inst->accel_dev);
 }
 
 static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
@@ -97,19 +97,26 @@ static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
 struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
 {
 	struct adf_accel_dev *accel_dev = NULL;
-	struct qat_crypto_instance *inst_best = NULL;
+	struct qat_crypto_instance *inst = NULL;
 	struct list_head *itr;
 	unsigned long best = ~0;
 
 	list_for_each(itr, adf_devmgr_get_head()) {
-		accel_dev = list_entry(itr, struct adf_accel_dev, list);
-		if ((node == dev_to_node(&GET_DEV(accel_dev)) ||
-		     dev_to_node(&GET_DEV(accel_dev)) < 0) &&
-		    adf_dev_started(accel_dev) &&
-		    !list_empty(&accel_dev->crypto_list))
-			break;
-		accel_dev = NULL;
+		struct adf_accel_dev *tmp_dev;
+		unsigned long ctr;
+
+		tmp_dev = list_entry(itr, struct adf_accel_dev, list);
+
+		if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
+		     dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
+		    adf_dev_started(tmp_dev) &&
+		    !list_empty(&tmp_dev->crypto_list)) {
+			ctr = atomic_read(&tmp_dev->ref_count);
+			if (best > ctr) {
+				accel_dev = tmp_dev;
+				best = ctr;
+			}
+		}
 	}
 	if (!accel_dev) {
 		pr_err("QAT: Could not find a device on node %d\n", node);
@@ -118,28 +125,26 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
 	if (!accel_dev || !adf_dev_started(accel_dev))
 		return NULL;
 
+	best = ~0;
 	list_for_each(itr, &accel_dev->crypto_list) {
-		struct qat_crypto_instance *inst;
-		unsigned long cur;
+		struct qat_crypto_instance *tmp_inst;
+		unsigned long ctr;
 
-		inst = list_entry(itr, struct qat_crypto_instance, list);
-		cur = atomic_read(&inst->refctr);
-		if (best > cur) {
-			inst_best = inst;
-			best = cur;
+		tmp_inst = list_entry(itr, struct qat_crypto_instance, list);
+		ctr = atomic_read(&tmp_inst->refctr);
+		if (best > ctr) {
+			inst = tmp_inst;
+			best = ctr;
 		}
 	}
-	if (inst_best) {
-		if (atomic_add_return(1, &inst_best->refctr) == 1) {
-			if (adf_dev_get(accel_dev)) {
-				atomic_dec(&inst_best->refctr);
-				dev_err(&GET_DEV(accel_dev),
-					"Could not increment dev refctr\n");
-				return NULL;
-			}
+	if (inst) {
+		if (adf_dev_get(accel_dev)) {
+			dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
+			return NULL;
 		}
+		atomic_inc(&inst->refctr);
 	}
-	return inst_best;
+	return inst;
 }
 
 static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
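A note on why qat_crypto_put_instance() changed as well: previously only the first user of an instance took a device reference (the atomic_add_return(...) == 1 path), so the put side released the device only when the instance refctr fell back to zero. After this patch every successful get calls adf_dev_get(), whose ref_count is exactly the load metric read in the device-selection loop, so each put must pair one adf_dev_put() with the instance decrement. A toy sketch of that pairing, with hypothetical dev_busy/inst_refs counters standing in for the driver's atomics:

#include <assert.h>

/* Hypothetical counters standing in for the driver's atomics. */
static unsigned long dev_busy;  /* cf. accel_dev->ref_count */
static unsigned long inst_refs; /* cf. inst->refctr         */

static void get_instance(void)
{
	dev_busy++;  /* cf. adf_dev_get(): charged on every get now */
	inst_refs++;
}

static void put_instance(void)
{
	inst_refs--;
	dev_busy--;  /* cf. adf_dev_put(): unconditional 1:1 pairing */
}

int main(void)
{
	get_instance();
	get_instance();
	put_instance();
	put_instance();
	assert(dev_busy == 0 && inst_refs == 0); /* counters stay balanced */
	return 0;
}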