Commit e57bb6be authored by Ram Amrani, committed by Doug Ledford

RDMA/qedr: Add 64KB PAGE_SIZE support to user-space queues

Add 64KB PAGE_SIZE support to user-space CQ, SQ and RQ queues.
In practice this means adding code that translates each 64KB
page into the smaller 4KB pages the FW can handle. Otherwise,
the FW would wrap (or jump to the next page) on reaching a 4KB
boundary while the user-space library continued on the same
large page.

Note that MR code remains as is since the FW supports larger pages
for MRs.
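
For illustration, a minimal stand-alone sketch of the split
arithmetic described above (the sample address and the loop are
hypothetical; only the shift math mirrors the patch):

  /* Illustration: one 64KB umem page expands into 16 4KB FW pages. */
  #include <stdint.h>
  #include <stdio.h>

  #define FW_PAGE_SHIFT 12  /* 4KB FW pages, as in the patch */

  int main(void)
  {
  	unsigned int umem_page_shift = 16;      /* 64KB user page */
  	uint64_t pg_addr = 0x7f0000000000ULL;   /* hypothetical DMA address */
  	/* 1 << (16 - 12) == 16 FW pages per umem page */
  	uint32_t fw_pg_per_umem_pg = 1U << (umem_page_shift - FW_PAGE_SHIFT);
  	uint32_t fw_pg_cnt;

  	for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg; fw_pg_cnt++) {
  		/* each PBE gets the address of one 4KB FW page */
  		printf("pbe[%u] = 0x%llx\n", fw_pg_cnt,
  		       (unsigned long long)pg_addr);
  		pg_addr += 1ULL << FW_PAGE_SHIFT;
  	}
  	return 0;
  }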
Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent dac27386
drivers/infiniband/hw/qedr/qedr.h
@@ -60,6 +60,9 @@
 
 #define QEDR_CQ_MAGIC_NUMBER	(0x11223344)
 
+#define FW_PAGE_SIZE	(RDMA_RING_PAGE_SIZE)
+#define FW_PAGE_SHIFT	(12)
+
 struct qedr_dev;
 
 struct qedr_cnq {
...
drivers/infiniband/hw/qedr/verbs.c
@@ -653,14 +653,15 @@ static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
 
 static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
 			       struct qedr_pbl *pbl,
-			       struct qedr_pbl_info *pbl_info)
+			       struct qedr_pbl_info *pbl_info, u32 pg_shift)
 {
 	int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
+	u32 fw_pg_cnt, fw_pg_per_umem_pg;
 	struct qedr_pbl *pbl_tbl;
 	struct scatterlist *sg;
 	struct regpair *pbe;
+	u64 pg_addr;
 	int entry;
-	u32 addr;
 
 	if (!pbl_info->num_pbes)
 		return;
@@ -683,15 +684,17 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
 
 	shift = umem->page_shift;
 
+	fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift);
+
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
 		pages = sg_dma_len(sg) >> shift;
+		pg_addr = sg_dma_address(sg);
 		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
-			/* store the page address in pbe */
-			pbe->lo = cpu_to_le32(sg_dma_address(sg) +
-					      (pg_cnt << shift));
-			addr = upper_32_bits(sg_dma_address(sg) +
-					     (pg_cnt << shift));
-			pbe->hi = cpu_to_le32(addr);
+			for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
+				pbe->lo = cpu_to_le32(pg_addr);
+				pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
 
+				pg_addr += BIT(pg_shift);
 				pbe_cnt++;
 				total_num_pbes++;
 				pbe++;
@@ -702,11 +705,15 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
-			/* If the given pbl is full storing the pbes,
-			 * move to next pbl.
-			 */
-			if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
-				pbl_tbl++;
-				pbe = (struct regpair *)pbl_tbl->va;
-				pbe_cnt = 0;
-			}
+				/* If the given pbl is full storing the pbes,
+				 * move to next pbl.
+				 */
+				if (pbe_cnt ==
+				    (pbl_info->pbl_size / sizeof(u64))) {
+					pbl_tbl++;
+					pbe = (struct regpair *)pbl_tbl->va;
+					pbe_cnt = 0;
+				}
+
+				fw_pg_cnt++;
+			}
 		}
 	}
 }
@@ -754,7 +761,7 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
 				       u64 buf_addr, size_t buf_len,
 				       int access, int dmasync)
 {
-	int page_cnt;
+	u32 fw_pages;
 	int rc;
 
 	q->buf_addr = buf_addr;
@@ -766,8 +773,10 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
 		return PTR_ERR(q->umem);
 	}
 
-	page_cnt = ib_umem_page_count(q->umem);
-	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
+	fw_pages = ib_umem_page_count(q->umem) <<
+	    (q->umem->page_shift - FW_PAGE_SHIFT);
+
+	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
 	if (rc)
 		goto err0;
 
@@ -777,7 +786,8 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
 		goto err0;
 	}
 
-	qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
+	qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
+			   FW_PAGE_SHIFT);
 
 	return 0;
 
@@ -2226,7 +2236,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
 		goto err1;
 
 	qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
-			   &mr->info.pbl_info);
+			   &mr->info.pbl_info, mr->umem->page_shift);
 
 	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
 	if (rc) {
...
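
A rough user-space mock of the reworked fill loop, for anyone who wants
to sanity-check the PBL rollover with the extra FW-page level of nesting
(the regpair layout is copied from the driver, but the PBL size, segment
addresses and counts below are all made up; only the loop structure
follows the patch):

  #include <stdint.h>
  #include <stdio.h>

  #define FW_PAGE_SHIFT	12
  #define PBL_SIZE	32	/* tiny PBL: 32 bytes == 4 PBEs, to force rollover */

  struct regpair {
  	uint32_t lo;
  	uint32_t hi;
  };

  int main(void)
  {
  	/* two fake DMA segments of one 64KB umem page each */
  	uint64_t seg_addr[] = { 0x100000, 0x200000 };
  	unsigned int umem_page_shift = 16;
  	uint32_t fw_pg_per_umem_pg = 1U << (umem_page_shift - FW_PAGE_SHIFT);
  	struct regpair pbls[64][PBL_SIZE / sizeof(struct regpair)];
  	struct regpair *pbe = &pbls[0][0];
  	unsigned int pbl_idx = 0, pbe_cnt = 0, total_num_pbes = 0, seg;
  	uint32_t fw_pg_cnt;

  	for (seg = 0; seg < 2; seg++) {
  		uint64_t pg_addr = seg_addr[seg];

  		for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
  			pbe->lo = (uint32_t)pg_addr;
  			pbe->hi = (uint32_t)(pg_addr >> 32);

  			pg_addr += 1ULL << FW_PAGE_SHIFT;
  			pbe_cnt++;
  			total_num_pbes++;
  			pbe++;

  			/* PBL full: move to the next one, as in the patch */
  			if (pbe_cnt == PBL_SIZE / sizeof(struct regpair)) {
  				pbl_idx++;
  				pbe = &pbls[pbl_idx][0];
  				pbe_cnt = 0;
  			}

  			fw_pg_cnt++;
  		}
  	}
  	printf("wrote %u PBEs, advanced through %u PBLs\n",
  	       total_num_pbes, pbl_idx);
  	return 0;
  }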