Commit 2eb81a33 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Jens Axboe

nvme: rename and document nvme_end_request

nvme_end_request is a bit misnamed, as it wraps around the
blk_mq_complete_* API.  Its semantics also are non-trivial, so give it
a more descriptive name and add a comment explaining the semantics.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c41ad98b
...@@ -3,7 +3,7 @@ NVMe Fault Injection ...@@ -3,7 +3,7 @@ NVMe Fault Injection
Linux's fault injection framework provides a systematic way to support Linux's fault injection framework provides a systematic way to support
error injection via debugfs in the /sys/kernel/debug directory. When error injection via debugfs in the /sys/kernel/debug directory. When
enabled, the default NVME_SC_INVALID_OPCODE with no retry will be enabled, the default NVME_SC_INVALID_OPCODE with no retry will be
injected into the nvme_end_request. Users can change the default status injected into the nvme_try_complete_req. Users can change the default status
code and no retry flag via the debugfs. The list of Generic Command code and no retry flag via the debugfs. The list of Generic Command
Status can be found in include/linux/nvme.h Status can be found in include/linux/nvme.h
......
...@@ -2035,7 +2035,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) ...@@ -2035,7 +2035,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
} }
__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
if (!nvme_end_request(rq, status, result)) if (!nvme_try_complete_req(rq, status, result))
nvme_fc_complete_rq(rq); nvme_fc_complete_rq(rq);
check_error: check_error:
......
...@@ -523,7 +523,13 @@ static inline u32 nvme_bytes_to_numd(size_t len) ...@@ -523,7 +523,13 @@ static inline u32 nvme_bytes_to_numd(size_t len)
return (len >> 2) - 1; return (len >> 2) - 1;
} }
static inline bool nvme_end_request(struct request *req, __le16 status, /*
* Fill in the status and result information from the CQE, and then figure out
* if blk-mq will need to use IPI magic to complete the request, and if yes do
* so. If not let the caller complete the request without an indirect function
* call.
*/
static inline bool nvme_try_complete_req(struct request *req, __le16 status,
union nvme_result result) union nvme_result result)
{ {
struct nvme_request *rq = nvme_req(req); struct nvme_request *rq = nvme_req(req);
......
...@@ -961,7 +961,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) ...@@ -961,7 +961,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id); req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
if (!nvme_end_request(req, cqe->status, cqe->result)) if (!nvme_try_complete_req(req, cqe->status, cqe->result))
nvme_pci_complete_rq(req); nvme_pci_complete_rq(req);
} }
......
...@@ -1189,7 +1189,7 @@ static void nvme_rdma_end_request(struct nvme_rdma_request *req) ...@@ -1189,7 +1189,7 @@ static void nvme_rdma_end_request(struct nvme_rdma_request *req)
if (!refcount_dec_and_test(&req->ref)) if (!refcount_dec_and_test(&req->ref))
return; return;
if (!nvme_end_request(rq, req->status, req->result)) if (!nvme_try_complete_req(rq, req->status, req->result))
nvme_rdma_complete_rq(rq); nvme_rdma_complete_rq(rq);
} }
......
...@@ -481,7 +481,7 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue, ...@@ -481,7 +481,7 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
return -EINVAL; return -EINVAL;
} }
if (!nvme_end_request(rq, cqe->status, cqe->result)) if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
nvme_complete_rq(rq); nvme_complete_rq(rq);
queue->nr_cqe++; queue->nr_cqe++;
...@@ -672,7 +672,7 @@ static inline void nvme_tcp_end_request(struct request *rq, u16 status) ...@@ -672,7 +672,7 @@ static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{ {
union nvme_result res = {}; union nvme_result res = {};
if (!nvme_end_request(rq, cpu_to_le16(status << 1), res)) if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
nvme_complete_rq(rq); nvme_complete_rq(rq);
} }
......
...@@ -115,7 +115,7 @@ static void nvme_loop_queue_response(struct nvmet_req *req) ...@@ -115,7 +115,7 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
return; return;
} }
if (!nvme_end_request(rq, cqe->status, cqe->result)) if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
nvme_loop_complete_rq(rq); nvme_loop_complete_rq(rq);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment