Commit 5ddaabe8 authored by Christoph Hellwig, committed by Jens Axboe

nvme: refactor command completion

Lift all the code to decide the disposition of a completed command
from nvme_complete_rq and nvme_failover_req into a new helper, which
returns an enum of the potential actions.  nvme_complete_rq then
just switches on those and calls the proper helper for the action.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 2eb81a33
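
Before the hunks, a rough view of the change: the old nvme_complete_rq decided inline whether to retry, fail over, or end a request; this commit centralizes that choice in nvme_decide_disposition() and reduces nvme_complete_rq to a switch. The stand-alone sketch below shows the shape of the new flow; struct fake_req, decide(), and main() are illustrative stand-ins, not kernel code from this diff.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum nvme_disposition {
        COMPLETE,       /* end the request, successfully or with an error */
        RETRY,          /* requeue on the same path */
        FAILOVER,       /* multipath only: resubmit the bios on another path */
};

struct fake_req {
        uint16_t status;        /* NVMe status; DNR flag assumed at 0x4000 */
        unsigned int retries;
        bool mpath;             /* submitted through the multipath device */
        bool path_error;        /* status is in the path-related class */
        bool queue_dying;
};

/* Mirrors the ordering of the checks in nvme_decide_disposition(). */
static enum nvme_disposition decide(const struct fake_req *req,
                                    unsigned int max_retries)
{
        if (req->status == 0)
                return COMPLETE;
        if ((req->status & 0x4000) || req->retries >= max_retries)
                return COMPLETE;        /* DNR set or retries exhausted */
        if (req->mpath && req->path_error)
                return FAILOVER;
        if (req->queue_dying)
                return COMPLETE;
        return RETRY;
}

int main(void)
{
        struct fake_req ana = { .status = 0x303, .mpath = true,
                                .path_error = true };
        struct fake_req dnr = { .status = 0x4002 };

        printf("ANA transition -> %d (FAILOVER == 2)\n", decide(&ana, 5));
        printf("DNR error      -> %d (COMPLETE == 0)\n", decide(&dnr, 5));
        return 0;
}
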
drivers/nvme/host/core.c

@@ -241,17 +241,6 @@ static blk_status_t nvme_error_status(u16 status)
         }
 }

-static inline bool nvme_req_needs_retry(struct request *req)
-{
-        if (blk_noretry_request(req))
-                return false;
-        if (nvme_req(req)->status & NVME_SC_DNR)
-                return false;
-        if (nvme_req(req)->retries >= nvme_max_retries)
-                return false;
-        return true;
-}
-
 static void nvme_retry_req(struct request *req)
 {
         struct nvme_ns *ns = req->q->queuedata;
@@ -268,33 +257,65 @@ static void nvme_retry_req(struct request *req)
         blk_mq_delay_kick_requeue_list(req->q, delay);
 }

-void nvme_complete_rq(struct request *req)
+enum nvme_disposition {
+        COMPLETE,
+        RETRY,
+        FAILOVER,
+};
+
+static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
+{
+        if (likely(nvme_req(req)->status == 0))
+                return COMPLETE;
+
+        if (blk_noretry_request(req) ||
+            (nvme_req(req)->status & NVME_SC_DNR) ||
+            nvme_req(req)->retries >= nvme_max_retries)
+                return COMPLETE;
+
+        if (req->cmd_flags & REQ_NVME_MPATH) {
+                if (nvme_is_path_error(nvme_req(req)->status))
+                        return FAILOVER;
+        }
+
+        if (blk_queue_dying(req->q))
+                return COMPLETE;
+
+        return RETRY;
+}
+
+static inline void nvme_end_req(struct request *req)
 {
         blk_status_t status = nvme_error_status(nvme_req(req)->status);

-        trace_nvme_complete_rq(req);
+        if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+            req_op(req) == REQ_OP_ZONE_APPEND)
+                req->__sector = nvme_lba_to_sect(req->q->queuedata,
+                        le64_to_cpu(nvme_req(req)->result.u64));
+
+        nvme_trace_bio_complete(req, status);
+        blk_mq_end_request(req, status);
+}

+void nvme_complete_rq(struct request *req)
+{
+        trace_nvme_complete_rq(req);
         nvme_cleanup_cmd(req);

         if (nvme_req(req)->ctrl->kas)
                 nvme_req(req)->ctrl->comp_seen = true;

-        if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
-                if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req))
-                        return;
-
-                if (!blk_queue_dying(req->q)) {
-                        nvme_retry_req(req);
-                        return;
-                }
-        } else if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
-                   req_op(req) == REQ_OP_ZONE_APPEND) {
-                req->__sector = nvme_lba_to_sect(req->q->queuedata,
-                        le64_to_cpu(nvme_req(req)->result.u64));
+        switch (nvme_decide_disposition(req)) {
+        case COMPLETE:
+                nvme_end_req(req);
+                return;
+        case RETRY:
+                nvme_retry_req(req);
+                return;
+        case FAILOVER:
+                nvme_failover_req(req);
+                return;
         }
-
-        nvme_trace_bio_complete(req, status);
-        blk_mq_end_request(req, status);
 }
 EXPORT_SYMBOL_GPL(nvme_complete_rq);
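
A note on the masks used above: after the driver strips the CQE phase bit, the 16-bit status keeps the Status Code in bits 7:0 and the Status Code Type in bits 10:8, while NVME_SC_DNR (0x4000) is the shifted Do Not Retry flag. That is why nvme_decide_disposition() treats DNR as "complete immediately, never retry" and why nvme_failover_req() below masks with 0x7ff. A minimal user-space illustration; the sample value in main() is an assumption, the mask constants match the driver:

#include <stdint.h>
#include <stdio.h>

#define NVME_SC_DNR 0x4000      /* as defined in include/linux/nvme.h */

int main(void)
{
        /* e.g. an ANA transition status with DNR set by the controller */
        uint16_t status = 0x303 | NVME_SC_DNR;

        printf("raw status : 0x%04x\n", status);
        printf("SCT + SC   : 0x%03x\n", status & 0x7ff);   /* 0x303 */
        printf("DNR set    : %s\n", (status & NVME_SC_DNR) ? "yes" : "no");
        return 0;
}
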
drivers/nvme/host/multipath.c

@@ -65,51 +65,30 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
         }
 }

-bool nvme_failover_req(struct request *req)
+void nvme_failover_req(struct request *req)
 {
         struct nvme_ns *ns = req->q->queuedata;
-        u16 status = nvme_req(req)->status;
+        u16 status = nvme_req(req)->status & 0x7ff;
         unsigned long flags;

-        switch (status & 0x7ff) {
-        case NVME_SC_ANA_TRANSITION:
-        case NVME_SC_ANA_INACCESSIBLE:
-        case NVME_SC_ANA_PERSISTENT_LOSS:
-                /*
-                 * If we got back an ANA error we know the controller is alive,
-                 * but not ready to serve this namespaces.  The spec suggests
-                 * we should update our general state here, but due to the fact
-                 * that the admin and I/O queues are not serialized that is
-                 * fundamentally racy.  So instead just clear the current path,
-                 * mark the the path as pending and kick of a re-read of the ANA
-                 * log page ASAP.
-                 */
-                nvme_mpath_clear_current_path(ns);
-                if (ns->ctrl->ana_log_buf) {
-                        set_bit(NVME_NS_ANA_PENDING, &ns->flags);
-                        queue_work(nvme_wq, &ns->ctrl->ana_work);
-                }
-                break;
-        case NVME_SC_HOST_PATH_ERROR:
-        case NVME_SC_HOST_ABORTED_CMD:
-                /*
-                 * Temporary transport disruption in talking to the controller.
-                 * Try to send on a new path.
-                 */
-                nvme_mpath_clear_current_path(ns);
-                break;
-        default:
-                /* This was a non-ANA error so follow the normal error path. */
-                return false;
-        }
+        nvme_mpath_clear_current_path(ns);
+
+        /*
+         * If we got back an ANA error, we know the controller is alive but not
+         * ready to serve this namespace.  Kick off a re-read of the ANA
+         * information page, and just try any other available path for now.
+         */
+        if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) {
+                set_bit(NVME_NS_ANA_PENDING, &ns->flags);
+                queue_work(nvme_wq, &ns->ctrl->ana_work);
+        }

         spin_lock_irqsave(&ns->head->requeue_lock, flags);
         blk_steal_bios(&ns->head->requeue_list, req);
         spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
+        blk_mq_end_request(req, 0);

-        blk_mq_end_request(req, 0);
         kblockd_schedule_work(&ns->head->requeue_work);
-        return true;
 }

 void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
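
Note how failover completes the request without failing its bios: blk_steal_bios() moves them onto ns->head->requeue_list under requeue_lock, blk_mq_end_request(req, 0) then ends the now bio-less request, and the scheduled requeue_work resubmits the bios on another path. A rough user-space analogue of just the splice step, a sketch assuming a pthread mutex in place of the irq-safe spinlock and trivially simplified bio and list types:

#include <pthread.h>
#include <stdio.h>

struct bio { int id; struct bio *next; };

static struct bio *requeue_list;        /* stands in for ns->head->requeue_list */
static pthread_mutex_t requeue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Move every bio owned by a completed request onto the shared list. */
static void steal_bios(struct bio **req_bios)
{
        pthread_mutex_lock(&requeue_lock);
        while (*req_bios) {
                struct bio *b = *req_bios;

                *req_bios = b->next;
                b->next = requeue_list;
                requeue_list = b;
        }
        pthread_mutex_unlock(&requeue_lock);
}

int main(void)
{
        struct bio b2 = { 2, NULL }, b1 = { 1, &b2 };
        struct bio *req_bios = &b1;

        steal_bios(&req_bios);          /* the "request" now owns no bios */
        for (struct bio *b = requeue_list; b; b = b->next)
                printf("requeued bio %d\n", b->id);
        return 0;
}
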
drivers/nvme/host/nvme.h

@@ -523,6 +523,32 @@ static inline u32 nvme_bytes_to_numd(size_t len)
         return (len >> 2) - 1;
 }

+static inline bool nvme_is_ana_error(u16 status)
+{
+        switch (status & 0x7ff) {
+        case NVME_SC_ANA_TRANSITION:
+        case NVME_SC_ANA_INACCESSIBLE:
+        case NVME_SC_ANA_PERSISTENT_LOSS:
+                return true;
+        default:
+                return false;
+        }
+}
+
+static inline bool nvme_is_path_error(u16 status)
+{
+        switch (status & 0x7ff) {
+        case NVME_SC_HOST_PATH_ERROR:
+        case NVME_SC_HOST_ABORTED_CMD:
+        case NVME_SC_ANA_TRANSITION:
+        case NVME_SC_ANA_INACCESSIBLE:
+        case NVME_SC_ANA_PERSISTENT_LOSS:
+                return true;
+        default:
+                return false;
+        }
+}
+
 /*
  * Fill in the status and result information from the CQE, and then figure out
  * if blk-mq will need to use IPI magic to complete the request, and if yes do
@@ -635,7 +661,7 @@ void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
 void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
 void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
                         struct nvme_ctrl *ctrl, int *flags);
-bool nvme_failover_req(struct request *req);
+void nvme_failover_req(struct request *req);
 void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
 void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
@@ -694,9 +720,8 @@ static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
         sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
 }

-static inline bool nvme_failover_req(struct request *req)
+static inline void nvme_failover_req(struct request *req)
 {
-        return false;
 }

 static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
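
For reference, every status code matched by the two new helpers has Status Code Type 3h ("path related status"), so all five values share the 0x3xx prefix once masked with 0x7ff. The sketch below uses the values as defined in include/linux/nvme.h; the mask-based check() is a hypothetical broader test covering the whole SCT-3h class (a superset of the five explicit cases), not code from this commit:

#include <stdint.h>
#include <stdio.h>

enum {
        NVME_SC_ANA_PERSISTENT_LOSS = 0x301,
        NVME_SC_ANA_INACCESSIBLE    = 0x302,
        NVME_SC_ANA_TRANSITION      = 0x303,
        NVME_SC_HOST_PATH_ERROR     = 0x370,
        NVME_SC_HOST_ABORTED_CMD    = 0x371,
};

/* True for any status whose Status Code Type (bits 10:8) is 3h. */
static int check(uint16_t status)
{
        return (status & 0x700) == 0x300;
}

int main(void)
{
        const uint16_t codes[] = {
                NVME_SC_ANA_PERSISTENT_LOSS, NVME_SC_ANA_INACCESSIBLE,
                NVME_SC_ANA_TRANSITION, NVME_SC_HOST_PATH_ERROR,
                NVME_SC_HOST_ABORTED_CMD,
        };

        for (unsigned int i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
                printf("0x%03x -> path related: %d\n", codes[i], check(codes[i]));
        return 0;
}
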