Commit 4686af88 authored by Max Gurtovoy, committed by Christoph Hellwig

nvme-rdma: add helpers for mapping/unmapping request

Introduce nvme_rdma_dma_map_req/nvme_rdma_dma_unmap_req helper functions
to improve code readability and simplify the error flow.
Reviewed-by: Israel Rukshin <israelr@nvidia.com>
Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 44f331a6
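
This change applies a common refactoring pattern: a mapping helper that reports its results through output parameters and unwinds its own partial work, paired with a single unmapping helper, so the caller keeps one error label instead of a chain of them. The short, self-contained userspace C sketch below illustrates that pattern only; every identifier in it (demo_req, demo_map_req, demo_unmap_req, demo_issue) is an invented stand-in rather than nvme-rdma code, and the real helpers appear in the diff that follows.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the per-request state (data + optional metadata mappings). */
struct demo_req {
        void *data_buf;         /* plays the role of the data scatterlist mapping */
        void *meta_buf;         /* plays the role of the metadata (PI) mapping */
        int has_meta;           /* plays the role of blk_integrity_rq() */
};

/* Map everything the request needs, report sizes via output parameters,
 * and unwind partial work on failure (the nvme_rdma_dma_map_req role). */
static int demo_map_req(struct demo_req *req, int *count, int *pi_count)
{
        int ret;

        req->data_buf = malloc(64);
        if (!req->data_buf)
                return -ENOMEM;
        *count = 1;

        if (req->has_meta) {
                req->meta_buf = malloc(16);
                if (!req->meta_buf) {
                        ret = -ENOMEM;
                        goto out_free_data;
                }
                *pi_count = 1;
        }
        return 0;

out_free_data:
        free(req->data_buf);
        req->data_buf = NULL;
        return ret;
}

/* Single teardown path shared by error and completion flows
 * (the nvme_rdma_dma_unmap_req role). */
static void demo_unmap_req(struct demo_req *req)
{
        if (req->has_meta) {
                free(req->meta_buf);
                req->meta_buf = NULL;
        }
        free(req->data_buf);
        req->data_buf = NULL;
}

/* Caller shaped like the post-patch nvme_rdma_map_data: one helper call in,
 * one error label out. */
static int demo_issue(struct demo_req *req)
{
        int count = 0, pi_count = 0;
        int ret;

        ret = demo_map_req(req, &count, &pi_count);
        if (ret)
                return ret;

        /* Stand-in for building the command from the mapped counts. */
        ret = (count > 0 && (!req->has_meta || pi_count > 0)) ? 0 : -EIO;
        if (ret)
                goto out_unmap;

        return 0;       /* on success, teardown happens at "completion" */

out_unmap:
        demo_unmap_req(req);
        return ret;
}

int main(void)
{
        struct demo_req req = { .has_meta = 1 };
        int ret = demo_issue(&req);

        printf("demo_issue returned %d\n", ret);
        if (ret == 0)
                demo_unmap_req(&req);   /* mirrors unmap at request completion */
        return 0;
}

Passing count and pi_count back through pointers keeps those values with the caller, which still needs them for nvme_rdma_map_sg_pi() and nvme_rdma_map_sg_fr(), while the helpers own the scatterlist setup and teardown.
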
@@ -1282,6 +1282,22 @@ static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
         return ib_post_send(queue->qp, &wr, NULL);
 }
 
+static void nvme_rdma_dma_unmap_req(struct ib_device *ibdev, struct request *rq)
+{
+        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+
+        if (blk_integrity_rq(rq)) {
+                ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
+                                req->metadata_sgl->nents, rq_dma_dir(rq));
+                sg_free_table_chained(&req->metadata_sgl->sg_table,
+                                      NVME_INLINE_METADATA_SG_CNT);
+        }
+
+        ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
+                        rq_dma_dir(rq));
+        sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
+}
+
 static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
                 struct request *rq)
 {
@@ -1293,13 +1309,6 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
         if (!blk_rq_nr_phys_segments(rq))
                 return;
 
-        if (blk_integrity_rq(rq)) {
-                ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
-                                req->metadata_sgl->nents, rq_dma_dir(rq));
-                sg_free_table_chained(&req->metadata_sgl->sg_table,
-                                      NVME_INLINE_METADATA_SG_CNT);
-        }
-
         if (req->use_sig_mr)
                 pool = &queue->qp->sig_mrs;
 
@@ -1308,9 +1317,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
                 req->mr = NULL;
         }
 
-        ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
-                        rq_dma_dir(rq));
-        sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
+        nvme_rdma_dma_unmap_req(ibdev, rq);
 }
 
 static int nvme_rdma_set_sg_null(struct nvme_command *c)
@@ -1521,22 +1528,11 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
         return -EINVAL;
 }
 
-static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
-                struct request *rq, struct nvme_command *c)
+static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
+                int *count, int *pi_count)
 {
         struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
-        struct nvme_rdma_device *dev = queue->device;
-        struct ib_device *ibdev = dev->dev;
-        int pi_count = 0;
-        int count, ret;
-
-        req->num_sge = 1;
-        refcount_set(&req->ref, 2); /* send and recv completions */
-
-        c->common.flags |= NVME_CMD_SGL_METABUF;
-
-        if (!blk_rq_nr_phys_segments(rq))
-                return nvme_rdma_set_sg_null(c);
+        int ret;
 
         req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1);
         ret = sg_alloc_table_chained(&req->data_sgl.sg_table,
@@ -1548,9 +1544,9 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
         req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
                                             req->data_sgl.sg_table.sgl);
 
-        count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
-                              req->data_sgl.nents, rq_dma_dir(rq));
-        if (unlikely(count <= 0)) {
+        *count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
+                               req->data_sgl.nents, rq_dma_dir(rq));
+        if (unlikely(*count <= 0)) {
                 ret = -EIO;
                 goto out_free_table;
         }
@@ -1569,16 +1565,50 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
                 req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
                                 rq->bio, req->metadata_sgl->sg_table.sgl);
-                pi_count = ib_dma_map_sg(ibdev,
-                                         req->metadata_sgl->sg_table.sgl,
-                                         req->metadata_sgl->nents,
-                                         rq_dma_dir(rq));
-                if (unlikely(pi_count <= 0)) {
+                *pi_count = ib_dma_map_sg(ibdev,
+                                          req->metadata_sgl->sg_table.sgl,
+                                          req->metadata_sgl->nents,
+                                          rq_dma_dir(rq));
+                if (unlikely(*pi_count <= 0)) {
                         ret = -EIO;
                         goto out_free_pi_table;
                 }
         }
 
+        return 0;
+
+out_free_pi_table:
+        sg_free_table_chained(&req->metadata_sgl->sg_table,
+                              NVME_INLINE_METADATA_SG_CNT);
+out_unmap_sg:
+        ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
+                        rq_dma_dir(rq));
+out_free_table:
+        sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
+        return ret;
+}
+
+static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
+                struct request *rq, struct nvme_command *c)
+{
+        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+        struct nvme_rdma_device *dev = queue->device;
+        struct ib_device *ibdev = dev->dev;
+        int pi_count = 0;
+        int count, ret;
+
+        req->num_sge = 1;
+        refcount_set(&req->ref, 2); /* send and recv completions */
+
+        c->common.flags |= NVME_CMD_SGL_METABUF;
+
+        if (!blk_rq_nr_phys_segments(rq))
+                return nvme_rdma_set_sg_null(c);
+
+        ret = nvme_rdma_dma_map_req(ibdev, rq, &count, &pi_count);
+        if (unlikely(ret))
+                return ret;
+
         if (req->use_sig_mr) {
                 ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count);
                 goto out;
@@ -1602,23 +1632,12 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
         ret = nvme_rdma_map_sg_fr(queue, req, c, count);
 out:
         if (unlikely(ret))
-                goto out_unmap_pi_sg;
+                goto out_dma_unmap_req;
 
         return 0;
 
-out_unmap_pi_sg:
-        if (blk_integrity_rq(rq))
-                ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
-                                req->metadata_sgl->nents, rq_dma_dir(rq));
-out_free_pi_table:
-        if (blk_integrity_rq(rq))
-                sg_free_table_chained(&req->metadata_sgl->sg_table,
-                                      NVME_INLINE_METADATA_SG_CNT);
-out_unmap_sg:
-        ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
-                        rq_dma_dir(rq));
-out_free_table:
-        sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
+out_dma_unmap_req:
+        nvme_rdma_dma_unmap_req(ibdev, rq);
         return ret;
 }