Commit 38e18002 authored by Israel Rukshin, committed by Keith Busch

nvme-rdma: Avoid preallocating big SGL for data

nvme_rdma_alloc_tagset() preallocates a big buffer for the IO SGL based
on SG_CHUNK_SIZE.

Modern DMA engines are often capable of dealing with very big segments, so SG_CHUNK_SIZE is often far larger than needed. SG_CHUNK_SIZE results in a static 4KB
SGL allocation per command.

If a controller has lots of deep queues, preallocation for the SG list can
consume substantial amounts of memory. For nvme-rdma, nr_hw_queues can be
128 and each queue's depth can be 128. This means the resulting preallocation
for the data SGL is 128*128*4K = 64MB per controller.
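
As a rough check of those numbers, here is a minimal userspace sketch of the arithmetic. The values of SG_CHUNK_SIZE (128 entries) and sizeof(struct scatterlist) (32 bytes) are assumptions for a typical 64-bit build, not values stated in this commit:

#include <stdio.h>

/* Back-of-the-envelope check of the preallocation cost described above. */
int main(void)
{
	unsigned long sg_chunk_size = 128;	/* assumed SG_CHUNK_SIZE */
	unsigned long sg_entry_bytes = 32;	/* assumed sizeof(struct scatterlist) */
	unsigned long nr_hw_queues = 128, queue_depth = 128;

	unsigned long per_cmd = sg_chunk_size * sg_entry_bytes;		/* 4096 bytes */
	unsigned long per_ctrl = nr_hw_queues * queue_depth * per_cmd;	/* 64 MB */

	printf("per command:    %lu bytes\n", per_cmd);
	printf("per controller: %lu MB\n", per_ctrl >> 20);
	return 0;
}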

Switch to runtime SGL allocation for lists longer than 2 entries. This
is the approach used by NVMe PCI, so it should be reasonable for NVMeOF as
well. Runtime SGL allocation has always been the case for the legacy I/O
path, so this is nothing new.

The preallocated small SGL depends on SG_CHAIN, so if the architecture doesn't
support SG_CHAIN, use only runtime allocation for the SGL.
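
For illustration, a hedged sketch of the resulting pattern, pulled together in one place: a tiny inline SGL embedded in the per-command data, with sg_alloc_table_chained() falling back to a runtime slab allocation for anything longer. The my_request/my_map_data/my_unmap_data names are hypothetical; the scatterlist helpers and the NVME_INLINE_SG_CNT definition are the ones used by the patch below.

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define NVME_INLINE_SG_CNT	0	/* no SG_CHAIN: always allocate at runtime */
#else
#define NVME_INLINE_SG_CNT	2	/* small inline SGL, chained when needed */
#endif

struct my_request {
	struct sg_table		sg_table;
	/* space reserved per command, e.g. via blk_mq_tag_set->cmd_size */
	struct scatterlist	first_sgl[NVME_INLINE_SG_CNT];
};

static int my_map_data(struct my_request *req, struct request *rq)
{
	int ret;

	req->sg_table.sgl = req->first_sgl;
	/*
	 * Up to NVME_INLINE_SG_CNT segments use the inline entries;
	 * longer lists get a table allocated at runtime and chained
	 * onto the inline chunk.
	 */
	ret = sg_alloc_table_chained(&req->sg_table,
			blk_rq_nr_phys_segments(rq), req->sg_table.sgl,
			NVME_INLINE_SG_CNT);
	if (ret)
		return -ENOMEM;
	return 0;
}

static void my_unmap_data(struct my_request *req)
{
	/* pass the same inline count so only the chained part is freed */
	sg_free_table_chained(&req->sg_table, NVME_INLINE_SG_CNT);
}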

We did not notice a performance degradation, since small I/Os use the inline
SGL and, for bigger I/Os, the allocation of a larger SGL from the slab is fast
enough.

Suggested-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Israel Rukshin <israelr@mellanox.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
parent be2eca94
drivers/nvme/host/nvme.h
@@ -28,6 +28,12 @@ extern unsigned int admin_timeout;
 #define NVME_DEFAULT_KATO	5
 #define NVME_KATO_GRACE		10
 
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+#define NVME_INLINE_SG_CNT	0
+#else
+#define NVME_INLINE_SG_CNT	2
+#endif
+
 extern struct workqueue_struct *nvme_wq;
 extern struct workqueue_struct *nvme_reset_wq;
 extern struct workqueue_struct *nvme_delete_wq;
drivers/nvme/host/rdma.c
@@ -731,7 +731,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
 		set->reserved_tags = 2; /* connect + keep-alive */
 		set->numa_node = nctrl->numa_node;
 		set->cmd_size = sizeof(struct nvme_rdma_request) +
-			SG_CHUNK_SIZE * sizeof(struct scatterlist);
+			NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
 		set->driver_data = ctrl;
 		set->nr_hw_queues = 1;
 		set->timeout = ADMIN_TIMEOUT;
@@ -745,7 +745,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
 		set->numa_node = nctrl->numa_node;
 		set->flags = BLK_MQ_F_SHOULD_MERGE;
 		set->cmd_size = sizeof(struct nvme_rdma_request) +
-			SG_CHUNK_SIZE * sizeof(struct scatterlist);
+			NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
 		set->driver_data = ctrl;
 		set->nr_hw_queues = nctrl->queue_count - 1;
 		set->timeout = NVME_IO_TIMEOUT;
@@ -1160,7 +1160,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
 	}
 
 	ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
-	sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
+	sg_free_table_chained(&req->sg_table, NVME_INLINE_SG_CNT);
 }
 
 static int nvme_rdma_set_sg_null(struct nvme_command *c)
@@ -1276,7 +1276,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 	req->sg_table.sgl = req->first_sgl;
 	ret = sg_alloc_table_chained(&req->sg_table,
 			blk_rq_nr_phys_segments(rq), req->sg_table.sgl,
-			SG_CHUNK_SIZE);
+			NVME_INLINE_SG_CNT);
 	if (ret)
 		return -ENOMEM;
 
@@ -1314,7 +1314,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 
 out_unmap_sg:
 	ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
out_free_table:
-	sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
+	sg_free_table_chained(&req->sg_table, NVME_INLINE_SG_CNT);
 	return ret;
 }