Commit 960ebe97 authored by Bob Pearson, committed by Jason Gunthorpe

RDMA/rxe: Remove __rxe_do_task()

The subroutine __rxe_do_task is not thread safe and has no way to
guarantee that the tasks, which are designed to be non-reentrant, are
not reentered. None of its uses are performance critical.
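
For example (an illustrative interleaving, not taken from a real trace),
nothing in __rxe_do_task prevents two contexts from entering the same
task function on the same qp at the same time:

	CPU0: __rxe_do_task(&qp->req.task);	/* runs rxe_requester(qp) */
	CPU1: __rxe_do_task(&qp->req.task);	/* reenters rxe_requester(qp) */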

This patch replaces calls to __rxe_do_task with calls to
rxe_sched_task. It also removes irrelevant or unneeded if tests.

Instead of calling the task machinery, a single call to the tasklet
function (rxe_requester, etc.) is sufficient to drain the queues if task
execution has been disabled or stopped.
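
For example (a sketch of the pattern, drawn from the rxe_qp_do_cleanup
hunk below), code of the form

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}

becomes, once the tasks have been disabled or stopped, direct calls to
the tasklet functions:

	rxe_requester(qp);
	rxe_completer(qp);
	rxe_responder(qp);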

Together these changes allow the removal of __rxe_do_task.

Link: https://lore.kernel.org/r/20230304174533.11296-7-rpearsonhpe@gmail.com
Signed-off-by: Ian Ziemba <ian.ziemba@hpe.com>
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent a246aa2e
@@ -473,29 +473,23 @@ static void rxe_qp_reset(struct rxe_qp *qp)
 {
 	/* stop tasks from running */
 	rxe_disable_task(&qp->resp.task);
-
-	/* stop request/comp */
-	if (qp->sq.queue) {
-		if (qp_type(qp) == IB_QPT_RC)
-			rxe_disable_task(&qp->comp.task);
-		rxe_disable_task(&qp->req.task);
-	}
+	rxe_disable_task(&qp->comp.task);
+	rxe_disable_task(&qp->req.task);
 
 	/* move qp to the reset state */
 	qp->req.state = QP_STATE_RESET;
 	qp->comp.state = QP_STATE_RESET;
 	qp->resp.state = QP_STATE_RESET;
 
-	/* let state machines reset themselves drain work and packet queues
-	 * etc.
-	 */
-	__rxe_do_task(&qp->resp.task);
+	/* drain work and packet queues */
+	rxe_requester(qp);
+	rxe_completer(qp);
+	rxe_responder(qp);
 
-	if (qp->sq.queue) {
-		__rxe_do_task(&qp->comp.task);
-		__rxe_do_task(&qp->req.task);
+	if (qp->rq.queue)
+		rxe_queue_reset(qp->rq.queue);
+	if (qp->sq.queue)
 		rxe_queue_reset(qp->sq.queue);
-	}
 
 	/* cleanup attributes */
 	atomic_set(&qp->ssn, 0);
@@ -518,13 +512,8 @@ static void rxe_qp_reset(struct rxe_qp *qp)
 	/* reenable tasks */
 	rxe_enable_task(&qp->resp.task);
-
-	if (qp->sq.queue) {
-		if (qp_type(qp) == IB_QPT_RC)
-			rxe_enable_task(&qp->comp.task);
-		rxe_enable_task(&qp->req.task);
-	}
+	rxe_enable_task(&qp->comp.task);
+	rxe_enable_task(&qp->req.task);
 }
 
 /* drain the send queue */
@@ -533,10 +522,7 @@ static void rxe_qp_drain(struct rxe_qp *qp)
 	if (qp->sq.queue) {
 		if (qp->req.state != QP_STATE_DRAINED) {
 			qp->req.state = QP_STATE_DRAIN;
-			if (qp_type(qp) == IB_QPT_RC)
-				rxe_sched_task(&qp->comp.task);
-			else
-				__rxe_do_task(&qp->comp.task);
+			rxe_sched_task(&qp->comp.task);
 			rxe_sched_task(&qp->req.task);
 		}
 	}
@@ -552,11 +538,7 @@ void rxe_qp_error(struct rxe_qp *qp)
 	/* drain work and packet queues */
 	rxe_sched_task(&qp->resp.task);
-
-	if (qp_type(qp) == IB_QPT_RC)
-		rxe_sched_task(&qp->comp.task);
-	else
-		__rxe_do_task(&qp->comp.task);
+	rxe_sched_task(&qp->comp.task);
 	rxe_sched_task(&qp->req.task);
 }
@@ -773,24 +755,20 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
 	qp->valid = 0;
 	qp->qp_timeout_jiffies = 0;
-	rxe_cleanup_task(&qp->resp.task);
 
 	if (qp_type(qp) == IB_QPT_RC) {
 		del_timer_sync(&qp->retrans_timer);
 		del_timer_sync(&qp->rnr_nak_timer);
 	}
 
+	rxe_cleanup_task(&qp->resp.task);
 	rxe_cleanup_task(&qp->req.task);
 	rxe_cleanup_task(&qp->comp.task);
 
 	/* flush out any receive wr's or pending requests */
-	if (qp->req.task.func)
-		__rxe_do_task(&qp->req.task);
-
-	if (qp->sq.queue) {
-		__rxe_do_task(&qp->comp.task);
-		__rxe_do_task(&qp->req.task);
-	}
+	rxe_requester(qp);
+	rxe_completer(qp);
+	rxe_responder(qp);
 
 	if (qp->sq.queue)
 		rxe_queue_cleanup(qp->sq.queue);
@@ -6,19 +6,6 @@
 #include "rxe.h"
 
-int __rxe_do_task(struct rxe_task *task)
-{
-	int ret;
-
-	while ((ret = task->func(task->qp)) == 0)
-		;
-
-	task->ret = ret;
-
-	return ret;
-}
-
 /*
  * this locking is due to a potential race where
  * a second caller finds the task already running
@@ -39,12 +39,6 @@ int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
 /* cleanup task */
 void rxe_cleanup_task(struct rxe_task *task);
 
-/*
- * raw call to func in loop without any checking
- * can call when tasklets are disabled
- */
-int __rxe_do_task(struct rxe_task *task);
-
 void rxe_run_task(struct rxe_task *task);
 
 void rxe_sched_task(struct rxe_task *task);