Commit c774169d authored by Manish Chopra, committed by David S. Miller

qede: qede_poll refactoring

This patch cleans up the qede_poll() routine a bit and makes
qede_poll() handle TX completions in a single iteration. [Under
heavy TX load, qede_poll() might otherwise run for an indefinite
time in the while(1) loop processing TX completions and cause the
CPU to get stuck.]
Signed-off-by: Manish <manish.chopra@qlogic.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c72a6125
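
For readers less familiar with NAPI, the refactored routine in the diff below follows the standard single-pass poll shape: service TX completions once, receive at most 'budget' packets, and only call napi_complete() and re-enable interrupts when no work is left; otherwise the full budget is reported so the NAPI core keeps polling and the time spent per poll call stays bounded. The following fragment is only a sketch of that generic pattern under assumed names (struct my_fastpath and the my_*() helpers are hypothetical), not the qede code itself:

#include <linux/netdevice.h>

/* Hypothetical fastpath context used only for this sketch. */
struct my_fastpath {
	struct napi_struct napi;
	/* TX/RX queues and status block would live here */
};

/* Sketch of a single-pass NAPI poll handler (not the qede code;
 * my_tx_clean(), my_rx_poll(), my_has_work() and my_irq_enable()
 * are assumed helpers).
 */
static int my_napi_poll(struct napi_struct *napi, int budget)
{
	struct my_fastpath *fp = container_of(napi, struct my_fastpath, napi);
	int rx_done;

	my_tx_clean(fp);			/* one bounded pass over TX completions */
	rx_done = my_rx_poll(fp, budget);	/* handle at most 'budget' RX packets */

	if (rx_done < budget) {
		if (my_has_work(fp))		/* work raced in after the pass */
			return budget;		/* keep polling, don't re-arm IRQs */

		napi_complete(napi);		/* no work left: leave polling mode */
		my_irq_enable(fp);		/* ack and re-enable the interrupt */
	}

	return rx_done;				/* rx_done == budget => poll again */
}
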
@@ -1597,56 +1597,49 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 static int qede_poll(struct napi_struct *napi, int budget)
 {
-	int work_done = 0;
 	struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
 						 napi);
 	struct qede_dev *edev = fp->edev;
+	int rx_work_done = 0;
+	u8 tc;
 
-	while (1) {
-		u8 tc;
-
-		for (tc = 0; tc < edev->num_tc; tc++)
-			if (qede_txq_has_work(&fp->txqs[tc]))
-				qede_tx_int(edev, &fp->txqs[tc]);
-
-		if (qede_has_rx_work(fp->rxq)) {
-			work_done += qede_rx_int(fp, budget - work_done);
-
-			/* must not complete if we consumed full budget */
-			if (work_done >= budget)
-				break;
-		}
+	for (tc = 0; tc < edev->num_tc; tc++)
+		if (qede_txq_has_work(&fp->txqs[tc]))
+			qede_tx_int(edev, &fp->txqs[tc]);
 
-		/* Fall out from the NAPI loop if needed */
-		if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) {
-			qed_sb_update_sb_idx(fp->sb_info);
-			/* *_has_*_work() reads the status block,
-			 * thus we need to ensure that status block indices
-			 * have been actually read (qed_sb_update_sb_idx)
-			 * prior to this check (*_has_*_work) so that
-			 * we won't write the "newer" value of the status block
-			 * to HW (if there was a DMA right after
-			 * qede_has_rx_work and if there is no rmb, the memory
-			 * reading (qed_sb_update_sb_idx) may be postponed
-			 * to right before *_ack_sb). In this case there
-			 * will never be another interrupt until there is
-			 * another update of the status block, while there
-			 * is still unhandled work.
-			 */
-			rmb();
-
-			if (!(qede_has_rx_work(fp->rxq) ||
-			      qede_has_tx_work(fp))) {
-				napi_complete(napi);
-
-				/* Update and reenable interrupts */
-				qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
-					   1 /*update*/);
-				break;
-			}
+	rx_work_done = qede_has_rx_work(fp->rxq) ?
+			qede_rx_int(fp, budget) : 0;
+
+	if (rx_work_done < budget) {
+		qed_sb_update_sb_idx(fp->sb_info);
+		/* *_has_*_work() reads the status block,
+		 * thus we need to ensure that status block indices
+		 * have been actually read (qed_sb_update_sb_idx)
+		 * prior to this check (*_has_*_work) so that
+		 * we won't write the "newer" value of the status block
+		 * to HW (if there was a DMA right after
+		 * qede_has_rx_work and if there is no rmb, the memory
+		 * reading (qed_sb_update_sb_idx) may be postponed
+		 * to right before *_ack_sb). In this case there
+		 * will never be another interrupt until there is
+		 * another update of the status block, while there
+		 * is still unhandled work.
+		 */
+		rmb();
+
+		/* Fall out from the NAPI loop if needed */
+		if (!(qede_has_rx_work(fp->rxq) ||
+		      qede_has_tx_work(fp))) {
+			napi_complete(napi);
+
+			/* Update and reenable interrupts */
+			qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
+				   1 /*update*/);
+		} else {
+			rx_work_done = budget;
 		}
 	}
 
-	return work_done;
+	return rx_work_done;
 }
 
 static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
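
One detail both versions keep is the qed_sb_update_sb_idx()/rmb() sequence before deciding to complete. Restated as a sketch, using the same hypothetical my_*() names as above rather than the real qed/qede API: refresh the driver's copy of the hardware status-block indices, make sure that read has actually happened, and only then re-check for work, so the subsequent ack never writes a stale index back to hardware and silently loses the interrupt for work that arrived in between.

/* Sketch only; my_refresh_sb_copy() and my_has_work() are assumed helpers.
 * Returns true when it is safe to ack the status block and re-enable the
 * interrupt, i.e. when no work slipped in after the last processing pass.
 */
static bool my_poll_can_complete(struct my_fastpath *fp)
{
	my_refresh_sb_copy(fp);	/* re-read the HW status-block indices */
	rmb();			/* order that read before the re-check below */

	if (my_has_work(fp))	/* new work arrived: keep polling */
		return false;	/* do not ack with a possibly stale index */

	return true;		/* indices are current: ack + enable IRQ */
}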