Commit a505cce6 authored by Dust Li, committed by David S. Miller

net/smc: don't req_notify until all CQEs drained

While we are handling the softirq workload, an enabled hardirq may
interrupt the current softirq routine and then try to raise the
softirq again. This only wastes CPU cycles and brings no real gain.

Since IB_CQ_REPORT_MISSED_EVENTS already makes sure that, when
ib_req_notify_cq() returns 0, it is safe to wait for the next event,
there is no need to poll the CQ again in this case.

This patch disables hardirq during the processing of softirq, and
re-arms the CQ after the softirq is done, similar to NAPI.

Co-developed-by: Guangguan Wang <guangguan.wang@linux.alibaba.com>
Signed-off-by: Guangguan Wang <guangguan.wang@linux.alibaba.com>
Signed-off-by: Dust Li <dust.li@linux.alibaba.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6bf536eb
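For reference, here is a minimal, hedged sketch of the drain-then-re-arm pattern the commit message describes, separate from the actual patch shown in the diff below. MAX_POLL, process_cqe() and drain_and_rearm() are placeholder names introduced only for illustration; the real code uses SMC_WR_MAX_POLL_CQE and the smc_wr_{tx,rx}_process_cqe* helpers on the SMC send/recv CQs.

#include <rdma/ib_verbs.h>

#define MAX_POLL 16                     /* placeholder for SMC_WR_MAX_POLL_CQE */

/* hypothetical per-CQE handler, stubbed for this sketch */
static void process_cqe(struct ib_wc *wc) { }

static void drain_and_rearm(struct ib_cq *cq)
{
        struct ib_wc wc[MAX_POLL];
        int i, rc;

again:
        /* Drain: keep polling while each poll fills a whole batch. */
        do {
                rc = ib_poll_cq(cq, MAX_POLL, wc);
                for (i = 0; i < rc; i++)
                        process_cqe(&wc[i]);
        } while (rc >= MAX_POLL);

        /* Re-arm only after the CQ is drained. With IB_CQ_REPORT_MISSED_EVENTS,
         * a non-zero return means completions may have arrived since the last
         * poll, so drain again instead of waiting for the next hardirq.
         */
        if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS))
                goto again;
}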
@@ -137,25 +137,28 @@ static void smc_wr_tx_tasklet_fn(struct tasklet_struct *t)
 {
        struct smc_ib_device *dev = from_tasklet(dev, t, send_tasklet);
        struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
-       int i = 0, rc;
-       int polled = 0;
+       int i, rc;
 
 again:
-       polled++;
        do {
                memset(&wc, 0, sizeof(wc));
                rc = ib_poll_cq(dev->roce_cq_send, SMC_WR_MAX_POLL_CQE, wc);
-               if (polled == 1) {
-                       ib_req_notify_cq(dev->roce_cq_send,
-                                        IB_CQ_NEXT_COMP |
-                                        IB_CQ_REPORT_MISSED_EVENTS);
-               }
-               if (!rc)
-                       break;
                for (i = 0; i < rc; i++)
                        smc_wr_tx_process_cqe(&wc[i]);
+               if (rc < SMC_WR_MAX_POLL_CQE)
+                       /* If < SMC_WR_MAX_POLL_CQE, the CQ should have been
+                        * drained, no need to poll again. --Guangguan Wang
+                        */
+                       break;
        } while (rc > 0);
-       if (polled == 1)
+
+       /* IB_CQ_REPORT_MISSED_EVENTS make sure if ib_req_notify_cq() returns
+        * 0, it is safe to wait for the next event.
+        * Else we must poll the CQ again to make sure we won't miss any event
+        */
+       if (ib_req_notify_cq(dev->roce_cq_send,
+                            IB_CQ_NEXT_COMP |
+                            IB_CQ_REPORT_MISSED_EVENTS))
                goto again;
 }
@@ -478,24 +481,28 @@ static void smc_wr_rx_tasklet_fn(struct tasklet_struct *t)
 {
        struct smc_ib_device *dev = from_tasklet(dev, t, recv_tasklet);
        struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
-       int polled = 0;
        int rc;
 
 again:
-       polled++;
        do {
                memset(&wc, 0, sizeof(wc));
                rc = ib_poll_cq(dev->roce_cq_recv, SMC_WR_MAX_POLL_CQE, wc);
-               if (polled == 1) {
-                       ib_req_notify_cq(dev->roce_cq_recv,
-                                        IB_CQ_SOLICITED_MASK
-                                        | IB_CQ_REPORT_MISSED_EVENTS);
-               }
-               if (!rc)
-                       break;
-               smc_wr_rx_process_cqes(&wc[0], rc);
+               if (rc > 0)
+                       smc_wr_rx_process_cqes(&wc[0], rc);
+               if (rc < SMC_WR_MAX_POLL_CQE)
+                       /* If < SMC_WR_MAX_POLL_CQE, the CQ should have been
+                        * drained, no need to poll again. --Guangguan Wang
+                        */
+                       break;
        } while (rc > 0);
-       if (polled == 1)
+
+       /* IB_CQ_REPORT_MISSED_EVENTS make sure if ib_req_notify_cq() returns
+        * 0, it is safe to wait for the next event.
+        * Else we must poll the CQ again to make sure we won't miss any event
+        */
+       if (ib_req_notify_cq(dev->roce_cq_recv,
+                            IB_CQ_SOLICITED_MASK |
+                            IB_CQ_REPORT_MISSED_EVENTS))
                goto again;
 }