Commit d8558251 authored by Shiraz Saleem, committed by Jason Gunthorpe

RDMA/bnxt_re: Use core helpers to get aligned DMA address

Call the core helpers to retrieve the HW aligned address to use for the
MR, within a supported bnxt_re page size.

Remove checking the umem->hugetlb flag as it is no longer required. The
new DMA block iterator will return the 2M aligned address if the MR is
backed by 2M huge pages.
Acked-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
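For context, a minimal sketch (not driver code) of how the two core helpers referenced above fit together: ib_umem_find_best_pgsz() picks the largest HW-supported page size that keeps the MR aligned, and rdma_for_each_block() then yields DMA addresses aligned to that size. The wrapper name example_fill_pbl() and its parameters are hypothetical; only the core APIs are real.

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

/* Hypothetical helper: hw_pgsz_bitmap is a caller-provided bitmap of page
 * sizes the HW supports (e.g. 4K | 2M), virt_addr is the MR's virtual start. */
static int example_fill_pbl(struct ib_umem *umem, u64 *pbl_tbl,
			    unsigned long hw_pgsz_bitmap, u64 virt_addr)
{
	u64 *pbl = pbl_tbl;
	struct ib_block_iter biter;
	unsigned long pg_sz;

	/* Largest page size from the bitmap that keeps the MR's start and
	 * length aligned; returns 0 if none fits. */
	pg_sz = ib_umem_find_best_pgsz(umem, hw_pgsz_bitmap, virt_addr);
	if (!pg_sz)
		return -EINVAL;

	/* Each iteration returns a pg_sz-aligned DMA address, so no manual
	 * masking of huge-page offsets is needed. */
	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, pg_sz)
		*pbl++ = rdma_block_iter_dma_address(&biter);

	return pbl - pbl_tbl;
}

This aligned-by-construction iteration is why the hugetlb special-casing in the old code below becomes unnecessary: if the MR is backed by 2M huge pages, the block iterator already hands back 2M-aligned addresses.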
parent eb52c033
@@ -3507,17 +3507,12 @@ static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
 			     int page_shift)
 {
 	u64 *pbl_tbl = pbl_tbl_orig;
-	u64 paddr;
-	u64 page_mask = (1ULL << page_shift) - 1;
-	struct sg_dma_page_iter sg_iter;
+	u64 page_size = BIT_ULL(page_shift);
+	struct ib_block_iter biter;

-	for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
-		paddr = sg_page_iter_dma_address(&sg_iter);
-		if (pbl_tbl == pbl_tbl_orig)
-			*pbl_tbl++ = paddr & ~page_mask;
-		else if ((paddr & page_mask) == 0)
-			*pbl_tbl++ = paddr;
-	}
+	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, page_size)
+		*pbl_tbl++ = rdma_block_iter_dma_address(&biter);
+
 	return pbl_tbl - pbl_tbl_orig;
 }

@@ -3579,7 +3574,9 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 		goto free_umem;
 	}

-	page_shift = PAGE_SHIFT;
+	page_shift = __ffs(ib_umem_find_best_pgsz(umem,
+				BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M,
+				virt_addr));

 	if (!bnxt_re_page_size_ok(page_shift)) {
 		dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
@@ -3587,17 +3584,13 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 		goto fail;
 	}

-	if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
+	if (page_shift == BNXT_RE_PAGE_SHIFT_4K &&
+	    length > BNXT_RE_MAX_MR_SIZE_LOW) {
 		dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
 			length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
 		rc = -EINVAL;
 		goto fail;
 	}
-	if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
-		page_shift = BNXT_RE_PAGE_SHIFT_2M;
-		dev_warn(rdev_to_dev(rdev), "umem hugetlb set page_size %x",
-			 1 << page_shift);
-	}

 	/* Map umem buf ptrs to the PBL */
 	umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);