Commit dccb23f6 authored by Bob Pearson, committed by Jason Gunthorpe

RDMA/rxe: Split rxe_run_task() into two subroutines

Split rxe_run_task(task, sched) into rxe_run_task(task) and
rxe_sched_task(task).

Link: https://lore.kernel.org/r/20221021200118.2163-5-rpearsonhpe@gmail.com
Signed-off-by: Ian Ziemba <ian.ziemba@hpe.com>
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent de669ae8
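The call-site conversion in the hunks below is mechanical. As a quick orientation, here is a minimal sketch (not part of the patch; example_callers() and its parameters are purely illustrative) of how the old two-argument calls map onto the new pair of functions:

```c
/* Sketch only: example_callers() is illustrative, not from the patch.
 * qp and must_sched stand in for whatever the real caller has in scope.
 */
static void example_callers(struct rxe_qp *qp, int must_sched)
{
	/* was: rxe_run_task(&qp->req.task, 0);  -- run inline in the caller */
	rxe_run_task(&qp->req.task);

	/* was: rxe_run_task(&qp->req.task, 1);  -- defer to the tasklet */
	rxe_sched_task(&qp->req.task);

	/* was: rxe_run_task(&qp->comp.task, must_sched);
	 * call sites that passed a computed flag now branch explicitly,
	 * as rxe_comp_queue_pkt() and rxe_resp_queue_pkt() do below
	 */
	if (must_sched)
		rxe_sched_task(&qp->comp.task);
	else
		rxe_run_task(&qp->comp.task);
}
```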
@@ -118,7 +118,7 @@ void retransmit_timer(struct timer_list *t)
 	if (qp->valid) {
 		qp->comp.timeout = 1;
-		rxe_run_task(&qp->comp.task, 1);
+		rxe_sched_task(&qp->comp.task);
 	}
 }
@@ -132,7 +132,10 @@ void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
 	if (must_sched != 0)
 		rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED);
 
-	rxe_run_task(&qp->comp.task, must_sched);
+	if (must_sched)
+		rxe_sched_task(&qp->comp.task);
+	else
+		rxe_run_task(&qp->comp.task);
 }
 
 static inline enum comp_state get_wqe(struct rxe_qp *qp,
@@ -313,7 +316,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
 				qp->comp.psn = pkt->psn;
 				if (qp->req.wait_psn) {
 					qp->req.wait_psn = 0;
-					rxe_run_task(&qp->req.task, 0);
+					rxe_run_task(&qp->req.task);
 				}
 			}
 			return COMPST_ERROR_RETRY;
@@ -460,7 +463,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 	 */
 	if (qp->req.wait_fence) {
 		qp->req.wait_fence = 0;
-		rxe_run_task(&qp->req.task, 0);
+		rxe_run_task(&qp->req.task);
 	}
 }
@@ -474,7 +477,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
 		if (qp->req.need_rd_atomic) {
 			qp->comp.timeout_retry = 0;
 			qp->req.need_rd_atomic = 0;
-			rxe_run_task(&qp->req.task, 0);
+			rxe_run_task(&qp->req.task);
 		}
 	}
@@ -520,7 +523,7 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp,
 		if (qp->req.wait_psn) {
 			qp->req.wait_psn = 0;
-			rxe_run_task(&qp->req.task, 1);
+			rxe_sched_task(&qp->req.task);
 		}
 	}
@@ -654,7 +657,7 @@ int rxe_completer(void *arg)
 			if (qp->req.wait_psn) {
 				qp->req.wait_psn = 0;
-				rxe_run_task(&qp->req.task, 1);
+				rxe_sched_task(&qp->req.task);
 			}
 
 			state = COMPST_DONE;
@@ -722,7 +725,7 @@ int rxe_completer(void *arg)
 						RXE_CNT_COMP_RETRY);
 				qp->req.need_retry = 1;
 				qp->comp.started_retry = 1;
-				rxe_run_task(&qp->req.task, 0);
+				rxe_run_task(&qp->req.task);
 			}
 			goto done;
...
@@ -345,7 +345,7 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb)
 	if (unlikely(qp->need_req_skb &&
 		     skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
-		rxe_run_task(&qp->req.task, 1);
+		rxe_sched_task(&qp->req.task);
 
 	rxe_put(qp);
 }
@@ -429,7 +429,7 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
 	if ((qp_type(qp) != IB_QPT_RC) &&
 	    (pkt->mask & RXE_END_MASK)) {
 		pkt->wqe->state = wqe_state_done;
-		rxe_run_task(&qp->comp.task, 1);
+		rxe_sched_task(&qp->comp.task);
 	}
 
 	rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
...
@@ -536,10 +536,10 @@ static void rxe_qp_drain(struct rxe_qp *qp)
 		if (qp->req.state != QP_STATE_DRAINED) {
 			qp->req.state = QP_STATE_DRAIN;
 			if (qp_type(qp) == IB_QPT_RC)
-				rxe_run_task(&qp->comp.task, 1);
+				rxe_sched_task(&qp->comp.task);
 			else
 				__rxe_do_task(&qp->comp.task);
-			rxe_run_task(&qp->req.task, 1);
+			rxe_sched_task(&qp->req.task);
 		}
 	}
 }
@@ -553,13 +553,13 @@ void rxe_qp_error(struct rxe_qp *qp)
 	qp->attr.qp_state = IB_QPS_ERR;
 
 	/* drain work and packet queues */
-	rxe_run_task(&qp->resp.task, 1);
+	rxe_sched_task(&qp->resp.task);
 
 	if (qp_type(qp) == IB_QPT_RC)
-		rxe_run_task(&qp->comp.task, 1);
+		rxe_sched_task(&qp->comp.task);
 	else
 		__rxe_do_task(&qp->comp.task);
-	rxe_run_task(&qp->req.task, 1);
+	rxe_sched_task(&qp->req.task);
 }
 
 /* called by the modify qp verb */
...
@@ -105,7 +105,7 @@ void rnr_nak_timer(struct timer_list *t)
 	/* request a send queue retry */
 	qp->req.need_retry = 1;
 	qp->req.wait_for_rnr_timer = 0;
-	rxe_run_task(&qp->req.task, 1);
+	rxe_sched_task(&qp->req.task);
 }
 
 static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
@@ -608,7 +608,7 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 	 * which can lead to a deadlock. So go ahead and complete
 	 * it now.
 	 */
-	rxe_run_task(&qp->comp.task, 1);
+	rxe_sched_task(&qp->comp.task);
 
 	return 0;
 }
@@ -733,7 +733,7 @@ int rxe_requester(void *arg)
 						       qp->req.wqe_index);
 			wqe->state = wqe_state_done;
 			wqe->status = IB_WC_SUCCESS;
-			rxe_run_task(&qp->comp.task, 0);
+			rxe_run_task(&qp->comp.task);
 			goto done;
 		}
 		payload = mtu;
@@ -795,7 +795,7 @@ int rxe_requester(void *arg)
 		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
 
 		if (err == -EAGAIN) {
-			rxe_run_task(&qp->req.task, 1);
+			rxe_sched_task(&qp->req.task);
 			goto exit;
 		}
@@ -817,7 +817,7 @@ int rxe_requester(void *arg)
 	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
 	wqe->state = wqe_state_error;
 	qp->req.state = QP_STATE_ERROR;
-	rxe_run_task(&qp->comp.task, 0);
+	rxe_run_task(&qp->comp.task);
 exit:
 	ret = -EAGAIN;
 out:
...
@@ -91,7 +91,10 @@ void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
 	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
 			(skb_queue_len(&qp->req_pkts) > 1);
 
-	rxe_run_task(&qp->resp.task, must_sched);
+	if (must_sched)
+		rxe_sched_task(&qp->resp.task);
+	else
+		rxe_run_task(&qp->resp.task);
 }
 
 static inline enum resp_states get_req(struct rxe_qp *qp,
...
@@ -123,17 +123,22 @@ void rxe_cleanup_task(struct rxe_task *task)
 	tasklet_kill(&task->tasklet);
 }
 
-void rxe_run_task(struct rxe_task *task, int sched)
+void rxe_run_task(struct rxe_task *task)
 {
 	if (task->destroyed)
 		return;
 
-	if (sched)
-		tasklet_schedule(&task->tasklet);
-	else
-		rxe_do_task(&task->tasklet);
+	rxe_do_task(&task->tasklet);
+}
+
+void rxe_sched_task(struct rxe_task *task)
+{
+	if (task->destroyed)
+		return;
+
+	tasklet_schedule(&task->tasklet);
 }
 
 void rxe_disable_task(struct rxe_task *task)
 {
 	tasklet_disable(&task->tasklet);
...
@@ -52,10 +52,9 @@ int __rxe_do_task(struct rxe_task *task);
  */
 void rxe_do_task(struct tasklet_struct *t);
 
-/* run a task, else schedule it to run as a tasklet, The decision
- * to run or schedule tasklet is based on the parameter sched.
- */
-void rxe_run_task(struct rxe_task *task, int sched);
+void rxe_run_task(struct rxe_task *task);
+
+void rxe_sched_task(struct rxe_task *task);
 
 /* keep a task from scheduling */
 void rxe_disable_task(struct rxe_task *task);
...
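The header hunk drops the old combined comment without documenting the two new declarations. Below is a hedged sketch of how they could be annotated, inferred from the rxe_task.c hunk above (rxe_run_task() executes the work inline via rxe_do_task(), while rxe_sched_task() defers it via tasklet_schedule()); these comments are not part of the patch:

```c
/* run the task immediately, in the calling context (no-op if the task
 * has been destroyed)
 */
void rxe_run_task(struct rxe_task *task);

/* schedule the task to run later from its tasklet (softirq context) */
void rxe_sched_task(struct rxe_task *task);
```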
@@ -695,9 +695,9 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
 		wr = next;
 	}
 
-	rxe_run_task(&qp->req.task, 1);
+	rxe_sched_task(&qp->req.task);
 	if (unlikely(qp->req.state == QP_STATE_ERROR))
-		rxe_run_task(&qp->comp.task, 1);
+		rxe_sched_task(&qp->comp.task);
 
 	return err;
 }
@@ -719,7 +719,7 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 	if (qp->is_user) {
 		/* Utilize process context to do protocol processing */
-		rxe_run_task(&qp->req.task, 0);
+		rxe_run_task(&qp->req.task);
 		return 0;
 	} else
 		return rxe_post_send_kernel(qp, wr, bad_wr);
@@ -759,7 +759,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 	spin_unlock_irqrestore(&rq->producer_lock, flags);
 
 	if (qp->resp.state == QP_STATE_ERROR)
-		rxe_run_task(&qp->resp.task, 1);
+		rxe_sched_task(&qp->resp.task);
 
 err1:
 	return err;
...