Commit 4d50e084 authored by Gal Pressman, committed by Jason Gunthorpe

RDMA/efa: Use rdma block iterator in chunk list creation

When creating the chunks list, the rdma_for_each_block() iterator is used
to iterate over the payload in EFA_CHUNK_PAYLOAD_SIZE (device-defined)
strides.
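
For context, a minimal sketch of the block-iterator pattern this commit
switches to, assuming the kernel's <rdma/ib_verbs.h> API
(rdma_for_each_block() and rdma_block_iter_dma_address()); the
fill_dma_table() helper, table argument, and BLOCK_SIZE constant are
illustrative only and not part of this patch:

#include <rdma/ib_verbs.h>

/* Illustrative stride; the EFA driver passes EFA_CHUNK_PAYLOAD_SIZE. */
#define BLOCK_SIZE 4096

static void fill_dma_table(struct scatterlist *sglist, int nents,
			   u64 *table)
{
	struct ib_block_iter biter;
	unsigned int i = 0;

	/*
	 * The iterator splits each SG entry into aligned BLOCK_SIZE
	 * blocks, replacing the manual sg_dma_len() shift/mask
	 * arithmetic of an open-coded loop.
	 */
	rdma_for_each_block(sglist, &biter, nents, BLOCK_SIZE)
		table[i++] = rdma_block_iter_dma_address(&biter);
}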
Reviewed-by: Firas JahJah <firasj@amazon.com>
Reviewed-by: Yossi Leybovich <sleybo@amazon.com>
Reviewed-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Gal Pressman <galpress@amazon.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent e0e3f397
@@ -1085,14 +1085,14 @@ static struct scatterlist *efa_vmalloc_buf_to_sg(u64 *buf, int page_cnt)
  */
 static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl)
 {
-	unsigned int entry, payloads_in_sg, chunk_list_size, chunk_idx, payload_idx;
 	struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
 	int page_cnt = pbl->phys.indirect.pbl_buf_size_in_pages;
 	struct scatterlist *pages_sgl = pbl->phys.indirect.sgl;
+	unsigned int chunk_list_size, chunk_idx, payload_idx;
 	int sg_dma_cnt = pbl->phys.indirect.sg_dma_cnt;
 	struct efa_com_ctrl_buff_info *ctrl_buf;
 	u64 *cur_chunk_buf, *prev_chunk_buf;
-	struct scatterlist *sg;
+	struct ib_block_iter biter;
 	dma_addr_t dma_addr;
 	int i;

@@ -1126,18 +1126,15 @@ static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl)
 	chunk_idx = 0;
 	payload_idx = 0;
 	cur_chunk_buf = chunk_list->chunks[0].buf;
-	for_each_sg(pages_sgl, sg, sg_dma_cnt, entry) {
-		payloads_in_sg = sg_dma_len(sg) >> EFA_CHUNK_PAYLOAD_SHIFT;
-		for (i = 0; i < payloads_in_sg; i++) {
-			cur_chunk_buf[payload_idx++] =
-				(sg_dma_address(sg) & ~(EFA_CHUNK_PAYLOAD_SIZE - 1)) +
-				(EFA_CHUNK_PAYLOAD_SIZE * i);
-
-			if (payload_idx == EFA_PTRS_PER_CHUNK) {
-				chunk_idx++;
-				cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;
-				payload_idx = 0;
-			}
-		}
-	}
+	rdma_for_each_block(pages_sgl, &biter, sg_dma_cnt,
+			    EFA_CHUNK_PAYLOAD_SIZE) {
+		cur_chunk_buf[payload_idx++] =
+			rdma_block_iter_dma_address(&biter);
+
+		if (payload_idx == EFA_PTRS_PER_CHUNK) {
+			chunk_idx++;
+			cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;
+			payload_idx = 0;
+		}
+	}