Commit 88e69af0 authored by Ratheesh Kannoth, committed by David S. Miller

octeontx2-pf: Fix page pool cache index corruption.

Access to the page pool's `cache' array and `count' variable is not
locked. Page pool cache access is fine as long as there is only one
consumer per pool.

The octeontx2 driver fills rx buffers from the page pool in NAPI
context. If the system is under stress and buffers cannot be allocated,
the refilling work is delegated to a delayed workqueue. This means
there are two consumers of the page pool cache.

The workqueue and IRQ/NAPI can run on different CPUs, so the pool cache
is accessed concurrently without locking, corrupting the cache indexes.
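
In miniature, the unsafe pattern looks like the sketch below. The
struct and helper are illustrative stand-ins modelled on the kernel's
pp_alloc_cache fast path, not the exact kernel code:

    #include <stddef.h>

    /* Stand-in for the kernel's struct page; only pointer identity
     * matters for this illustration.
     */
    struct page;

    /* Modelled loosely on pp_alloc_cache: a plain array plus a count,
     * with no locking. Safe only while a single consumer (the NAPI
     * softirq) touches it.
     */
    struct pp_alloc_cache {
            unsigned int count;
            struct page *cache[128];
    };

    static struct page *cache_pop(struct pp_alloc_cache *c)
    {
            if (c->count == 0)
                    return NULL;
            /* If NAPI on one CPU and the refill workqueue on another
             * execute this concurrently, both can read the same count,
             * hand out the same page, and leave count/cache[] corrupted.
             */
            return c->cache[--c->count];
    }

The page pool API relies on this single-consumer invariant (allocations
are expected to come from one NAPI/softirq context), which is exactly
what the delayed workqueue broke.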

To fix this issue, the workqueue now reschedules NAPI instead of
touching the pool itself, so rx buffers are always refilled in NAPI
context.
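
In short: otx2_alloc_buffer() now just returns -ENOMEM on failure,
refill_pool_ptrs() returns how many pointers it actually freed back to
the pool, and when the NAPI handler sees a refill attempt that freed
nothing it schedules the delayed work. The worker no longer touches the
pool at all; it only clears refill_task_sched and napi_schedule()s the
saved NAPI instance so the refill is retried in NAPI context.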

Fixes: b2e3406a ("octeontx2-pf: Add support for page pool")
Signed-off-by: Ratheesh Kannoth <rkannoth@marvell.com>
Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 281f65d2
@@ -107,12 +107,13 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
 }
 
 #define NPA_MAX_BURST 16
-void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
+int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
 {
 	struct otx2_nic *pfvf = dev;
+	int cnt = cq->pool_ptrs;
 	u64 ptrs[NPA_MAX_BURST];
-	int num_ptrs = 1;
 	dma_addr_t bufptr;
+	int num_ptrs = 1;
 
 	/* Refill pool with new buffers */
 	while (cq->pool_ptrs) {
@@ -131,6 +132,7 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
 			num_ptrs = 1;
 		}
 	}
+	return cnt - cq->pool_ptrs;
 }
 
 void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
......
@@ -24,7 +24,7 @@ static inline int mtu_to_dwrr_weight(struct otx2_nic *pfvf, int mtu)
 	return weight;
 }
 
-void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
 void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
 int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
 int cn10k_lmtst_init(struct otx2_nic *pfvf);
......
@@ -574,20 +574,8 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
 int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
 		      dma_addr_t *dma)
 {
-	if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) {
-		struct refill_work *work;
-		struct delayed_work *dwork;
-
-		work = &pfvf->refill_wrk[cq->cq_idx];
-		dwork = &work->pool_refill_work;
-		/* Schedule a task if no other task is running */
-		if (!cq->refill_task_sched) {
-			cq->refill_task_sched = true;
-			schedule_delayed_work(dwork,
-					      msecs_to_jiffies(100));
-		}
+	if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma)))
 		return -ENOMEM;
-	}
 
 	return 0;
 }
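
otx2_alloc_buffer() no longer schedules the refill work itself; it only
reports the failure. The deferral decision, and the record of which
NAPI instance to wake, move into otx2_napi_handler() (see the hunks
further below).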
@@ -1082,39 +1070,20 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
 static void otx2_pool_refill_task(struct work_struct *work)
 {
 	struct otx2_cq_queue *cq;
-	struct otx2_pool *rbpool;
 	struct refill_work *wrk;
-	int qidx, free_ptrs = 0;
 	struct otx2_nic *pfvf;
-	dma_addr_t bufptr;
+	int qidx;
 
 	wrk = container_of(work, struct refill_work, pool_refill_work.work);
 	pfvf = wrk->pf;
 	qidx = wrk - pfvf->refill_wrk;
 	cq = &pfvf->qset.cq[qidx];
-	rbpool = cq->rbpool;
-	free_ptrs = cq->pool_ptrs;
 
-	while (cq->pool_ptrs) {
-		if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
-			/* Schedule a WQ if we fails to free atleast half of the
-			 * pointers else enable napi for this RQ.
-			 */
-			if (!((free_ptrs - cq->pool_ptrs) > free_ptrs / 2)) {
-				struct delayed_work *dwork;
-
-				dwork = &wrk->pool_refill_work;
-				schedule_delayed_work(dwork,
-						      msecs_to_jiffies(100));
-			} else {
-				cq->refill_task_sched = false;
-			}
-			return;
-		}
-		pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
-		cq->pool_ptrs--;
-	}
 	cq->refill_task_sched = false;
+
+	local_bh_disable();
+	napi_schedule(wrk->napi);
+	local_bh_enable();
 }
 
 int otx2_config_nix_queues(struct otx2_nic *pfvf)
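
Worth noting: the worker runs in process context, where napi_schedule()
only raises NET_RX_SOFTIRQ. Wrapping the call in local_bh_disable()/
local_bh_enable() guarantees the pending softirq is processed when
bottom halves are re-enabled, instead of being left pending until some
later interrupt.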
......
@@ -302,6 +302,7 @@ struct flr_work {
 struct refill_work {
 	struct delayed_work pool_refill_work;
 	struct otx2_nic *pf;
+	struct napi_struct *napi;
 };
 
 /* PTPv2 originTimestamp structure */
@@ -370,7 +371,7 @@ struct dev_hw_ops {
 	int	(*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
 	void	(*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
 			     int size, int qidx);
-	void	(*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
+	int	(*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
 	void	(*aura_freeptr)(void *dev, int aura, u64 buf);
 };
......
@@ -1943,6 +1943,10 @@ int otx2_stop(struct net_device *netdev)
 
 	netif_tx_disable(netdev);
 
+	for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
+		cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
+	devm_kfree(pf->dev, pf->refill_wrk);
+
 	otx2_free_hw_resources(pf);
 	otx2_free_cints(pf, pf->hw.cint_cnt);
 	otx2_disable_napi(pf);
@@ -1950,9 +1954,6 @@ int otx2_stop(struct net_device *netdev)
 
 	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
 		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
 
-	for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
-		cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
-	devm_kfree(pf->dev, pf->refill_wrk);
 	kfree(qset->sq);
 	kfree(qset->cq);
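
The cancel/free block moves earlier in otx2_stop(), ahead of
otx2_free_hw_resources() and otx2_disable_napi(). This reads as a
natural consequence of the worker now holding a pointer to a NAPI
instance: it must be flushed before the NAPI contexts are disabled and
the queue resources are freed.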
......
@@ -424,9 +424,10 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
 	return processed_cqe;
 }
 
-void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
+int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
 {
 	struct otx2_nic *pfvf = dev;
+	int cnt = cq->pool_ptrs;
 	dma_addr_t bufptr;
 
 	while (cq->pool_ptrs) {
@@ -435,6 +436,8 @@ void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
 		otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
 		cq->pool_ptrs--;
 	}
+
+	return cnt - cq->pool_ptrs;
 }
 
 static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
@@ -521,6 +524,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
 	struct otx2_cq_queue *cq;
 	struct otx2_qset *qset;
 	struct otx2_nic *pfvf;
+	int filled_cnt = -1;
 
 	cq_poll = container_of(napi, struct otx2_cq_poll, napi);
 	pfvf = (struct otx2_nic *)cq_poll->dev;
@@ -541,7 +545,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
 	}
 
 	if (rx_cq && rx_cq->pool_ptrs)
-		pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
+		filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
 
 	/* Clear the IRQ */
 	otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
@@ -561,10 +565,26 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
 				otx2_config_irq_coalescing(pfvf, i);
 		}
 
-		/* Re-enable interrupts */
-		otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
-			     BIT_ULL(0));
+		if (unlikely(!filled_cnt)) {
+			struct refill_work *work;
+			struct delayed_work *dwork;
+
+			work = &pfvf->refill_wrk[cq->cq_idx];
+			dwork = &work->pool_refill_work;
+			/* Schedule a task if no other task is running */
+			if (!cq->refill_task_sched) {
+				work->napi = napi;
+				cq->refill_task_sched = true;
+				schedule_delayed_work(dwork,
+						      msecs_to_jiffies(100));
+			}
+		} else {
+			/* Re-enable interrupts */
+			otx2_write64(pfvf,
+				     NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
+				     BIT_ULL(0));
+		}
 	}
 	return workdone;
 }
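
Note the filled_cnt = -1 initializer: the deferral branch
(if (unlikely(!filled_cnt))) fires only when a refill was actually
attempted and freed zero pointers. If no refill was needed, filled_cnt
stays -1 and the interrupt is re-enabled as before.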
......
@@ -170,6 +170,6 @@ void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq,
 		     int size, int qidx);
 void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
 		    int size, int qidx);
-void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
-void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
 #endif /* OTX2_TXRX_H */