Commit 925a2421 authored by Dust Li, committed by David S. Miller

Revert "net/smc: don't req_notify until all CQEs drained"

This reverts commit a505cce6.

Leon says:
  We already discussed that. SMC should be changed to use
  RDMA CQ pool API
  drivers/infiniband/core/cq.c.
  ib_poll_handler() has much better implementation (tracing,
  IRQ rescheduling, proper error handling) than this SMC variant.

Since we will switch to ib_poll_handler() in the future,
revert this patch.

Link: https://lore.kernel.org/netdev/20220301105332.GA9417@linux.alibaba.com/
Suggested-by: Leon Romanovsky <leon@kernel.org>
Suggested-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: Dust Li <dust.li@linux.alibaba.com>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d59e3cba
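
For context, the CQ pool API Leon points to lives in drivers/infiniband/core/cq.c: a ULP allocates a completion queue through the pool, and the core's ib_poll_handler() polls it, dispatching each completion to the struct ib_cqe done callback embedded in the work request. The sketch below shows roughly what that usage could look like; the smc_example_* names are hypothetical and not part of this patch, only the ib_* calls are real API.

/* Sketch only: roughly how SMC could use the RDMA CQ pool API instead
 * of a hand-rolled tasklet.  The smc_example_* names are hypothetical;
 * ib_cq_pool_get()/ib_cq_pool_put() and struct ib_cqe are the real API
 * from drivers/infiniband/core/cq.c and <rdma/ib_verbs.h>.
 */
#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

struct smc_example_tx_pend {
	struct ib_cqe cqe;	/* the posted send_wr's wr_cqe points here */
	/* ... per-request state ... */
};

/* Invoked by the core's ib_poll_handler() for every completion,
 * replacing the manual ib_poll_cq()/ib_req_notify_cq() dance.
 */
static void smc_example_tx_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smc_example_tx_pend *pend =
		container_of(wc->wr_cqe, struct smc_example_tx_pend, cqe);

	/* handle one send completion for pend, e.g. wake the waiter */
}

static struct ib_cq *smc_example_get_send_cq(struct ib_device *ibdev,
					     unsigned int nr_cqe)
{
	/* IB_POLL_SOFTIRQ keeps completion handling in softirq context,
	 * matching the tasklet the current code uses; -1 means no
	 * completion-vector affinity hint.
	 */
	return ib_cq_pool_get(ibdev, nr_cqe, -1, IB_POLL_SOFTIRQ);
}

Each send WR would then be posted with wr.wr_cqe = &pend->cqe and pend->cqe.done = smc_example_tx_done, and the CQ handed back via ib_cq_pool_put(cq, nr_cqe) when the link goes away.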
net/smc/smc_wr.c
@@ -137,28 +137,25 @@ static void smc_wr_tx_tasklet_fn(struct tasklet_struct *t)
 {
 	struct smc_ib_device *dev = from_tasklet(dev, t, send_tasklet);
 	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
-	int i, rc;
+	int i = 0, rc;
+	int polled = 0;
 
 again:
+	polled++;
 	do {
 		memset(&wc, 0, sizeof(wc));
 		rc = ib_poll_cq(dev->roce_cq_send, SMC_WR_MAX_POLL_CQE, wc);
+		if (polled == 1) {
+			ib_req_notify_cq(dev->roce_cq_send,
+					 IB_CQ_NEXT_COMP |
+					 IB_CQ_REPORT_MISSED_EVENTS);
+		}
 		if (!rc)
 			break;
 		for (i = 0; i < rc; i++)
 			smc_wr_tx_process_cqe(&wc[i]);
-		if (rc < SMC_WR_MAX_POLL_CQE)
-			/* If < SMC_WR_MAX_POLL_CQE, the CQ should have been
-			 * drained, no need to poll again. --Guangguan Wang
-			 */
-			break;
 	} while (rc > 0);
-	/* IB_CQ_REPORT_MISSED_EVENTS make sure if ib_req_notify_cq() returns
-	 * 0, it is safe to wait for the next event.
-	 * Else we must poll the CQ again to make sure we won't miss any event
-	 */
-	if (ib_req_notify_cq(dev->roce_cq_send,
-			     IB_CQ_NEXT_COMP |
-			     IB_CQ_REPORT_MISSED_EVENTS))
+	if (polled == 1)
 		goto again;
 }
@@ -481,28 +478,24 @@ static void smc_wr_rx_tasklet_fn(struct tasklet_struct *t)
 {
 	struct smc_ib_device *dev = from_tasklet(dev, t, recv_tasklet);
 	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
+	int polled = 0;
 	int rc;
 
 again:
+	polled++;
 	do {
 		memset(&wc, 0, sizeof(wc));
 		rc = ib_poll_cq(dev->roce_cq_recv, SMC_WR_MAX_POLL_CQE, wc);
-		if (rc > 0)
-			smc_wr_rx_process_cqes(&wc[0], rc);
-		if (rc < SMC_WR_MAX_POLL_CQE)
-			/* If < SMC_WR_MAX_POLL_CQE, the CQ should have been
-			 * drained, no need to poll again. --Guangguan Wang
-			 */
+		if (polled == 1) {
+			ib_req_notify_cq(dev->roce_cq_recv,
+					 IB_CQ_SOLICITED_MASK
+					 | IB_CQ_REPORT_MISSED_EVENTS);
+		}
+		if (!rc)
 			break;
+		smc_wr_rx_process_cqes(&wc[0], rc);
 	} while (rc > 0);
-	/* IB_CQ_REPORT_MISSED_EVENTS make sure if ib_req_notify_cq() returns
-	 * 0, it is safe to wait for the next event.
-	 * Else we must poll the CQ again to make sure we won't miss any event
-	 */
-	if (ib_req_notify_cq(dev->roce_cq_recv,
-			     IB_CQ_SOLICITED_MASK |
-			     IB_CQ_REPORT_MISSED_EVENTS))
+	if (polled == 1)
 		goto again;
 }
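
The comments deleted above capture the ib_req_notify_cq() contract: with IB_CQ_REPORT_MISSED_EVENTS, a return of 0 means the CQ is armed and it is safe to wait for the next event, while a positive return means completions arrived between the final poll and the re-arm, so the CQ must be polled again. As a self-contained sketch of that canonical drain-then-rearm loop (example_* names are hypothetical, not SMC code):

/* Self-contained sketch of the drain-then-rearm pattern the removed
 * comments describe.  example_* names are hypothetical, not SMC code.
 */
#include <rdma/ib_verbs.h>

static void example_handle_one(struct ib_wc *wc)
{
	/* consume a single completion (stub) */
}

static void example_drain_and_rearm(struct ib_cq *cq)
{
	struct ib_wc wc;

again:
	/* Drain everything currently queued on the CQ. */
	while (ib_poll_cq(cq, 1, &wc) > 0)
		example_handle_one(&wc);

	/* Re-arm.  A positive return reports events that slipped in
	 * between the final poll and the re-arm; without another pass
	 * they would never raise an interrupt, so poll again.
	 */
	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				 IB_CQ_REPORT_MISSED_EVENTS) > 0)
		goto again;
}

The restored code above guards against the same race differently: it re-arms inside the first polling pass (polled == 1) and then unconditionally runs one more pass via goto again.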