Commit eb52c033 authored by Shiraz Saleem, committed by Jason Gunthorpe

RDMA/i40iw: Use core helpers to get aligned DMA address within a supported page size

Call the core helpers to retrieve the HW-aligned address to use for the
MR, within a supported i40iw page size.

Remove the code in i40iw that determines when an MR is backed by 2M huge
pages, which involved checking the umem->hugetlb flag and inspecting the
VMA. The new DMA block iterator returns the 2M-aligned address when the
MR is backed by 2M pages.
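
For reference, a minimal sketch (against the v5.1-era umem API, not the
driver's exact code) of the core-helper pattern being switched to:
ib_umem_find_best_pgsz() picks the largest HW-supported page size that
fits the umem, and rdma_for_each_block() then walks the SG list in blocks
of that size, each block's DMA address already aligned. demo_fill_pbl()
and its pbl argument are hypothetical names used only for illustration.

  #include <linux/sizes.h>
  #include <rdma/ib_umem.h>
  #include <rdma/ib_verbs.h>

  /* Hypothetical helper: fill a flat PBL with block-aligned DMA addresses. */
  static void demo_fill_pbl(struct ib_umem *umem, u64 virt, u64 *pbl)
  {
          struct ib_block_iter biter;
          unsigned long pg_sz;

          /* Best page size supported by both the HW (4K or 2M here) and
           * the umem's physical layout / virtual address alignment.
           */
          pg_sz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_2M, virt);

          /* Each iteration yields a pg_sz-aligned DMA address, so the old
           * manual masking with iwmr->page_msk is no longer needed.
           */
          rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, pg_sz)
                  *pbl++ = rdma_block_iter_dma_address(&biter);
  }

With a bitmap of SZ_4K | SZ_2M, an MR backed by 2M huge pages naturally
resolves to a 2M block size without any hugetlb flag checks or VMA
inspection in the driver.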

Fixes: f26c7c83 ("i40iw: Add 2MB page support")
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent a808273a
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -1338,52 +1338,21 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
         struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
         struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
         struct i40iw_pble_info *pinfo;
-        struct sg_dma_page_iter sg_iter;
-        u64 pg_addr = 0;
+        struct ib_block_iter biter;
         u32 idx = 0;
-        bool first_pg = true;
 
         pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
 
         if (iwmr->type == IW_MEMREG_TYPE_QP)
                 iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);
 
-        for_each_sg_dma_page (region->sg_head.sgl, &sg_iter, region->nmap, 0) {
-                pg_addr = sg_page_iter_dma_address(&sg_iter);
-                if (first_pg)
-                        *pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
-                else if (!(pg_addr & ~iwmr->page_msk))
-                        *pbl = cpu_to_le64(pg_addr);
-                else
-                        continue;
-
-                first_pg = false;
+        rdma_for_each_block(region->sg_head.sgl, &biter, region->nmap,
+                            iwmr->page_size) {
+                *pbl = rdma_block_iter_dma_address(&biter);
                 pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
         }
 }
 
-/**
- * i40iw_set_hugetlb_params - set MR pg size and mask to huge pg values.
- * @addr: virtual address
- * @iwmr: mr pointer for this memory registration
- */
-static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr)
-{
-        struct vm_area_struct *vma;
-        struct hstate *h;
-
-        down_read(&current->mm->mmap_sem);
-        vma = find_vma(current->mm, addr);
-        if (vma && is_vm_hugetlb_page(vma)) {
-                h = hstate_vma(vma);
-                if (huge_page_size(h) == 0x200000) {
-                        iwmr->page_size = huge_page_size(h);
-                        iwmr->page_msk = huge_page_mask(h);
-                }
-        }
-        up_read(&current->mm->mmap_sem);
-}
-
 /**
  * i40iw_check_mem_contiguous - check if pbls stored in arr are contiguous
  * @arr: lvl1 pbl array
@@ -1839,10 +1808,9 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
         iwmr->ibmr.device = pd->device;
 
         iwmr->page_size = PAGE_SIZE;
-        iwmr->page_msk = PAGE_MASK;
-
-        if (region->hugetlb && (req.reg_type == IW_MEMREG_TYPE_MEM))
-                i40iw_set_hugetlb_values(start, iwmr);
+        if (req.reg_type == IW_MEMREG_TYPE_MEM)
+                iwmr->page_size = ib_umem_find_best_pgsz(region, SZ_4K | SZ_2M,
+                                                         virt);
 
         region_length = region->length + (start & (iwmr->page_size - 1));
         pg_shift = ffs(iwmr->page_size) - 1;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
@@ -94,8 +94,7 @@ struct i40iw_mr {
         struct ib_umem *region;
         u16 type;
         u32 page_cnt;
-        u32 page_size;
-        u64 page_msk;
+        u64 page_size;
         u32 npages;
         u32 stag;
         u64 length;