Commit c3aedd22 authored by James Smart, committed by Sagi Grimberg

nvme_fc: cleanup io completion

There was some old code that dealt with complete_rq being called
prior to the LLDD returning the io completion. This is garbage code.
The complete_rq routine was being called after eh_timeouts fired,
which was due to the eh_timeouts not being handled properly. The
timeouts were fixed in prior patches so that, in general, a timeout
initiates an abort and the reset timer is restarted, as the abort
operation will take care of completing things. With the reset timer
restarted, the erroneous complete_rq calls are eliminated.

So remove the work that was synchronizing complete_rq with io
completion.
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
parent 3efd6e8e
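
For context before the diff: the earlier timeout fixes that this cleanup depends on amount to the timeout handler kicking off an abort of the outstanding FCP op and asking the block layer to restart the timer, with the abort's completion finishing the request through the normal done path. A minimal sketch of that shape, paraphrased from the driver rather than taken from this patch (state checks and error handling omitted):

static enum blk_eh_timer_return
nvme_fc_timeout(struct request *rq, bool reserved)
{
        struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
        struct nvme_fc_ctrl *ctrl = op->ctrl;

        /* initiate an abort (ABTS) of the outstanding exchange */
        __nvme_fc_abort_op(ctrl, op);

        /*
         * the abort's completion comes back through nvme_fc_fcpio_done()
         * and ends the request, so just rearm the reset timer rather
         * than failing the io here.
         */
        return BLK_EH_RESET_TIMER;
}

With the timeout path behaving this way, complete_rq only runs after the transport has completed the request, which is what lets the RELEASED/COMPLETE handshake in the diff below be deleted.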
@@ -55,9 +55,7 @@ struct nvme_fc_queue {
 
 enum nvme_fcop_flags {
         FCOP_FLAGS_TERMIO       = (1 << 0),
-        FCOP_FLAGS_RELEASED     = (1 << 1),
-        FCOP_FLAGS_COMPLETE     = (1 << 2),
-        FCOP_FLAGS_AEN          = (1 << 3),
+        FCOP_FLAGS_AEN          = (1 << 1),
 };
 
 struct nvmefc_ls_req_op {
@@ -1470,7 +1468,6 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
 
 /* *********************** NVME Ctrl Routines **************************** */
 
-static void __nvme_fc_final_op_cleanup(struct request *rq);
 static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
 
 static int
@@ -1544,25 +1541,20 @@ nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
                 __nvme_fc_abort_op(ctrl, aen_op);
 }
 
-static inline int
+static inline void
 __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
                 struct nvme_fc_fcp_op *op, int opstate)
 {
         unsigned long flags;
-        bool complete_rq = false;
 
-        spin_lock_irqsave(&ctrl->lock, flags);
-        if (opstate == FCPOP_STATE_ABORTED && ctrl->flags & FCCTRL_TERMIO) {
-                if (!--ctrl->iocnt)
-                        wake_up(&ctrl->ioabort_wait);
+        if (opstate == FCPOP_STATE_ABORTED) {
+                spin_lock_irqsave(&ctrl->lock, flags);
+                if (ctrl->flags & FCCTRL_TERMIO) {
+                        if (!--ctrl->iocnt)
+                                wake_up(&ctrl->ioabort_wait);
+                }
+                spin_unlock_irqrestore(&ctrl->lock, flags);
         }
-        if (op->flags & FCOP_FLAGS_RELEASED)
-                complete_rq = true;
-        else
-                op->flags |= FCOP_FLAGS_COMPLETE;
-        spin_unlock_irqrestore(&ctrl->lock, flags);
-
-        return complete_rq;
 }
 
 static void
@@ -1704,10 +1696,8 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
                         ctrl->ctrl.state == NVME_CTRL_CONNECTING))
                 status |= cpu_to_le16(NVME_SC_DNR << 1);
 
-        if (__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate))
-                __nvme_fc_final_op_cleanup(rq);
-        else
-                nvme_end_request(rq, status, result);
+        __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
+        nvme_end_request(rq, status, result);
 
 check_error:
         if (terminate_assoc)
@@ -2394,45 +2384,16 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg)
 }
 
 static void
-__nvme_fc_final_op_cleanup(struct request *rq)
+nvme_fc_complete_rq(struct request *rq)
 {
         struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
         struct nvme_fc_ctrl *ctrl = op->ctrl;
 
         atomic_set(&op->state, FCPOP_STATE_IDLE);
-        op->flags &= ~(FCOP_FLAGS_RELEASED | FCOP_FLAGS_COMPLETE);
 
         nvme_fc_unmap_data(ctrl, rq, op);
         nvme_complete_rq(rq);
         nvme_fc_ctrl_put(ctrl);
-}
-
-static void
-nvme_fc_complete_rq(struct request *rq)
-{
-        struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
-        struct nvme_fc_ctrl *ctrl = op->ctrl;
-        unsigned long flags;
-        bool completed = false;
-
-        /*
-         * the core layer, on controller resets after calling
-         * nvme_shutdown_ctrl(), calls complete_rq without our
-         * calling blk_mq_complete_request(), thus there may still
-         * be live i/o outstanding with the LLDD. Means transport has
-         * to track complete calls vs fcpio_done calls to know what
-         * path to take on completes and dones.
-         */
-
-        spin_lock_irqsave(&ctrl->lock, flags);
-        if (op->flags & FCOP_FLAGS_COMPLETE)
-                completed = true;
-        else
-                op->flags |= FCOP_FLAGS_RELEASED;
-        spin_unlock_irqrestore(&ctrl->lock, flags);
-
-        if (completed)
-                __nvme_fc_final_op_cleanup(rq);
 }
 
 /*
...
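
For reference, the FCCTRL_TERMIO/iocnt accounting retained in __nvme_fc_fcpop_chk_teardowns() pairs with the association teardown path elsewhere in this driver, which this patch leaves alone. Roughly, paraphrased from the driver's nvme_fc_delete_association() and not part of this diff, the waiter side looks like:

        /* mark the controller as terminating io */
        spin_lock_irq(&ctrl->lock);
        ctrl->flags |= FCCTRL_TERMIO;
        ctrl->iocnt = 0;
        spin_unlock_irq(&ctrl->lock);

        /*
         * ... abort the AEN ops and all outstanding FCP ops; each abort
         * taken under FCCTRL_TERMIO bumps ctrl->iocnt ...
         */

        /* wait for every aborted io to complete back to the transport */
        spin_lock_irq(&ctrl->lock);
        wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
        ctrl->flags &= ~FCCTRL_TERMIO;
        spin_unlock_irq(&ctrl->lock);

The wake_up(&ctrl->ioabort_wait) kept in the new __nvme_fc_fcpop_chk_teardowns() is what releases this wait once the last aborted op reports its completion.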