Commit ac5d15b4 authored by Horia Geantă, committed by Herbert Xu

crypto: caam/qi2 - use affine DPIOs

Driver was relying on an older DPIO API, which provided a CPU-affine DPIO
in case it was called with preemption disabled.

Since this is no longer the case, save the CPU-affine DPIO in per-cpu
private structure during setup and further use it on the hot path.

Note that preemption is no longer disabled while trying to enqueue an
FD. Thus it might be possible to run the enqueue on a different CPU
(due to migration, when in process context); however, this wouldn't be
a functionality issue.
Since we allow for all cores to enqueue, we take care of data
structures setup to handle the case when number of (Rx, Tx) queue pairs
is smaller than number of cores.
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 7d220dab
...@@ -4502,7 +4502,8 @@ static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv) ...@@ -4502,7 +4502,8 @@ static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
nctx->cb = dpaa2_caam_fqdan_cb; nctx->cb = dpaa2_caam_fqdan_cb;
/* Register notification callbacks */ /* Register notification callbacks */
err = dpaa2_io_service_register(NULL, nctx, dev); ppriv->dpio = dpaa2_io_service_select(cpu);
err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
if (unlikely(err)) { if (unlikely(err)) {
dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu); dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
nctx->cb = NULL; nctx->cb = NULL;
...@@ -4535,7 +4536,7 @@ static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv) ...@@ -4535,7 +4536,7 @@ static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
ppriv = per_cpu_ptr(priv->ppriv, cpu); ppriv = per_cpu_ptr(priv->ppriv, cpu);
if (!ppriv->nctx.cb) if (!ppriv->nctx.cb)
break; break;
dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev); dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
} }
for_each_online_cpu(cpu) { for_each_online_cpu(cpu) {
...@@ -4555,7 +4556,8 @@ static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv) ...@@ -4555,7 +4556,8 @@ static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
for_each_online_cpu(cpu) { for_each_online_cpu(cpu) {
ppriv = per_cpu_ptr(priv->ppriv, cpu); ppriv = per_cpu_ptr(priv->ppriv, cpu);
dpaa2_io_service_deregister(NULL, &ppriv->nctx, priv->dev); dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
priv->dev);
dpaa2_io_store_destroy(ppriv->store); dpaa2_io_store_destroy(ppriv->store);
if (++i == priv->num_pairs) if (++i == priv->num_pairs)
...@@ -4653,7 +4655,7 @@ static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv) ...@@ -4653,7 +4655,7 @@ static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
/* Retry while portal is busy */ /* Retry while portal is busy */
do { do {
err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid, err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
ppriv->store); ppriv->store);
} while (err == -EBUSY); } while (err == -EBUSY);
...@@ -4721,7 +4723,7 @@ static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget) ...@@ -4721,7 +4723,7 @@ static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
if (cleaned < budget) { if (cleaned < budget) {
napi_complete_done(napi, cleaned); napi_complete_done(napi, cleaned);
err = dpaa2_io_service_rearm(NULL, &ppriv->nctx); err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
if (unlikely(err)) if (unlikely(err))
dev_err(priv->dev, "Notification rearm failed: %d\n", dev_err(priv->dev, "Notification rearm failed: %d\n",
err); err);
...@@ -4862,21 +4864,31 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) ...@@ -4862,21 +4864,31 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
i = 0; i = 0;
for_each_online_cpu(cpu) { for_each_online_cpu(cpu) {
dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", i, u8 j;
priv->rx_queue_attr[i].fqid,
priv->tx_queue_attr[i].fqid); j = i % priv->num_pairs;
ppriv = per_cpu_ptr(priv->ppriv, cpu); ppriv = per_cpu_ptr(priv->ppriv, cpu);
ppriv->req_fqid = priv->tx_queue_attr[i].fqid; ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
ppriv->prio = i; /*
* Allow all cores to enqueue, while only some of them
* will take part in dequeuing.
*/
if (++i > priv->num_pairs)
continue;
ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
ppriv->prio = j;
dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
priv->rx_queue_attr[j].fqid,
priv->tx_queue_attr[j].fqid);
ppriv->net_dev.dev = *dev; ppriv->net_dev.dev = *dev;
INIT_LIST_HEAD(&ppriv->net_dev.napi_list); INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll, netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
DPAA2_CAAM_NAPI_WEIGHT); DPAA2_CAAM_NAPI_WEIGHT);
if (++i == priv->num_pairs)
break;
} }
return 0; return 0;
...@@ -5228,7 +5240,8 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req) ...@@ -5228,7 +5240,8 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
{ {
struct dpaa2_fd fd; struct dpaa2_fd fd;
struct dpaa2_caam_priv *priv = dev_get_drvdata(dev); struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
int err = 0, i, id; struct dpaa2_caam_priv_per_cpu *ppriv;
int err = 0, i;
if (IS_ERR(req)) if (IS_ERR(req))
return PTR_ERR(req); return PTR_ERR(req);
...@@ -5258,20 +5271,13 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req) ...@@ -5258,20 +5271,13 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1])); dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
dpaa2_fd_set_flc(&fd, req->flc_dma); dpaa2_fd_set_flc(&fd, req->flc_dma);
/* ppriv = this_cpu_ptr(priv->ppriv);
* There is no guarantee that preemption is disabled here,
* thus take action.
*/
preempt_disable();
id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) { for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
err = dpaa2_io_service_enqueue_fq(NULL, err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
priv->tx_queue_attr[id].fqid,
&fd); &fd);
if (err != -EBUSY) if (err != -EBUSY)
break; break;
} }
preempt_enable();
if (unlikely(err)) { if (unlikely(err)) {
dev_err(dev, "Error enqueuing frame: %d\n", err); dev_err(dev, "Error enqueuing frame: %d\n", err);
......
...@@ -76,6 +76,7 @@ struct dpaa2_caam_priv { ...@@ -76,6 +76,7 @@ struct dpaa2_caam_priv {
* @nctx: notification context of response FQ * @nctx: notification context of response FQ
* @store: where dequeued frames are stored * @store: where dequeued frames are stored
* @priv: backpointer to dpaa2_caam_priv * @priv: backpointer to dpaa2_caam_priv
* @dpio: portal used for data path operations
*/ */
struct dpaa2_caam_priv_per_cpu { struct dpaa2_caam_priv_per_cpu {
struct napi_struct napi; struct napi_struct napi;
...@@ -86,6 +87,7 @@ struct dpaa2_caam_priv_per_cpu { ...@@ -86,6 +87,7 @@ struct dpaa2_caam_priv_per_cpu {
struct dpaa2_io_notification_ctx nctx; struct dpaa2_io_notification_ctx nctx;
struct dpaa2_io_store *store; struct dpaa2_io_store *store;
struct dpaa2_caam_priv *priv; struct dpaa2_caam_priv *priv;
struct dpaa2_io *dpio;
}; };
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment