Commit 16686f3a authored by Max Gurtovoy, committed by Jens Axboe

nvme: move common call to nvme_cleanup_cmd to core layer

nvme_cleanup_cmd should be called for each call to nvme_setup_cmd
(the two functions are symmetrical). Move the call to nvme_cleanup_cmd
into the common core layer and issue it from nvme_complete_rq for the
good flow. On error flows, each transport calls nvme_cleanup_cmd
independently. Also handle the special case of a path failure, where
nvme_complete_rq is called without a prior nvme_setup_cmd.
Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 2dc3947b
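The change boils down to a setup/cleanup pairing: whatever nvme_setup_cmd allocates for a request must be released exactly once, by the core on the normal completion path and by the transport itself when submission fails before the request can ever complete. A minimal, self-contained C sketch of that pairing follows; the function names mirror the kernel ones, but the bodies are simplified stand-ins (a plain malloc/free), not the actual driver code.

/* Illustrative model only: stand-ins for nvme_setup_cmd/nvme_cleanup_cmd,
 * showing where the cleanup call lives after this commit. */
#include <stdio.h>
#include <stdlib.h>

struct request {
	void *payload;			/* stands in for a special payload buffer */
};

static int nvme_setup_cmd(struct request *rq)
{
	rq->payload = malloc(64);	/* e.g. a discard range buffer */
	return rq->payload ? 0 : -1;
}

static void nvme_cleanup_cmd(struct request *rq)
{
	free(rq->payload);		/* must run exactly once per setup */
	rq->payload = NULL;
}

/* Core layer, good flow: completion now cleans up for every transport. */
static void nvme_complete_rq(struct request *rq)
{
	nvme_cleanup_cmd(rq);
	printf("request completed\n");
}

/* Transport queue_rq, error flow: the request never reaches completion,
 * so the transport cleans up before returning the error. */
static int transport_queue_rq(struct request *rq, int fail_submit)
{
	if (nvme_setup_cmd(rq))
		return -1;
	if (fail_submit) {
		nvme_cleanup_cmd(rq);	/* symmetric with nvme_setup_cmd */
		return -1;
	}
	nvme_complete_rq(rq);		/* normally asynchronous; inline here */
	return 0;
}

int main(void)
{
	struct request rq = { 0 };

	transport_queue_rq(&rq, 0);	/* good flow: core cleans up */
	transport_queue_rq(&rq, 1);	/* error flow: transport cleans up */
	return 0;
}

In the sketch, nvme_cleanup_cmd tolerates a request that was never set up (free(NULL) is a no-op), which mirrors the path-failure case noted in the commit message, where nvme_complete_rq runs without a prior nvme_setup_cmd.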
@@ -268,6 +268,8 @@ void nvme_complete_rq(struct request *req)
 	trace_nvme_complete_rq(req);
 
+	nvme_cleanup_cmd(req);
+
 	if (nvme_req(req)->ctrl->kas)
 		nvme_req(req)->ctrl->comp_seen = true;
...@@ -2173,8 +2173,6 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq, ...@@ -2173,8 +2173,6 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents, fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
rq_dma_dir(rq)); rq_dma_dir(rq));
nvme_cleanup_cmd(rq);
sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE); sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
freq->sg_cnt = 0; freq->sg_cnt = 0;
@@ -2305,6 +2303,7 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 		if (!(op->flags & FCOP_FLAGS_AEN))
 			nvme_fc_unmap_data(ctrl, op->rq, op);
 
+		nvme_cleanup_cmd(op->rq);
 		nvme_fc_ctrl_put(ctrl);
 
 		if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
@@ -924,7 +924,6 @@ static void nvme_pci_complete_rq(struct request *req)
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct nvme_dev *dev = iod->nvmeq->dev;
 
-	nvme_cleanup_cmd(req);
 	if (blk_integrity_rq(req))
 		dma_unmap_page(dev->dev, iod->meta_dma,
 			       rq_integrity_vec(req)->bv_len, rq_data_dir(req));
...@@ -1160,8 +1160,6 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue, ...@@ -1160,8 +1160,6 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
} }
ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq)); ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
nvme_cleanup_cmd(rq);
sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE); sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
} }
@@ -1760,7 +1758,6 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (unlikely(err < 0)) {
 		dev_err(queue->ctrl->ctrl.device,
 			     "Failed to map data (%d)\n", err);
-		nvme_cleanup_cmd(rq);
 		goto err;
 	}
@@ -1771,18 +1768,19 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
 			req->mr ? &req->reg_wr.wr : NULL);
-	if (unlikely(err)) {
-		nvme_rdma_unmap_data(queue, rq);
-		goto err;
-	}
+	if (unlikely(err))
+		goto err_unmap;
 
 	return BLK_STS_OK;
 
+err_unmap:
+	nvme_rdma_unmap_data(queue, rq);
 err:
 	if (err == -ENOMEM || err == -EAGAIN)
 		ret = BLK_STS_RESOURCE;
 	else
 		ret = BLK_STS_IOERR;
+	nvme_cleanup_cmd(rq);
 unmap_qe:
 	ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
 			    DMA_TO_DEVICE);
@@ -76,7 +76,6 @@ static void nvme_loop_complete_rq(struct request *req)
 {
 	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
 
-	nvme_cleanup_cmd(req);
 	sg_free_table_chained(&iod->sg_table, SG_CHUNK_SIZE);
 	nvme_complete_rq(req);
 }