Commit 3e7e1193 authored by Artemy Kovalyov, committed by Doug Ledford

IB: Replace ib_umem page_size by page_shift

The size of pages is held by struct ib_umem in the page_size field.

It is better stored as an exponent, because a page size is by nature
always a power of two and is used as a factor, a divisor or an argument to ilog2().

Converting page_size to page_shift makes the code portable and avoids
the following error when compiling on ARM:

  ERROR: "__aeabi_uldivmod" [drivers/infiniband/core/ib_core.ko] undefined!
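
For illustration (a minimal sketch with hypothetical variable names, not code
taken from this patch): dividing a 64-bit quantity by a run-time page_size makes
the compiler emit a call to the __aeabi_uldivmod helper on 32-bit ARM, while a
right shift by page_shift needs no helper at all.

  	/* before: run-time 64-bit division, needs __aeabi_uldivmod on ARM */
  	u64 npages = umem_len / (u64)umem->page_size;

  	/* after: the page size is a power of two, so a shift is equivalent */
  	u64 npages = umem_len >> umem->page_shift;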

CC: Selvin Xavier <selvin.xavier@broadcom.com>
CC: Steve Wise <swise@chelsio.com>
CC: Lijun Ou <oulijun@huawei.com>
CC: Shiraz Saleem <shiraz.saleem@intel.com>
CC: Adit Ranadive <aditr@vmware.com>
CC: Dennis Dalessandro <dennis.dalessandro@intel.com>
CC: Ram Amrani <Ram.Amrani@Cavium.com>
Signed-off-by: Artemy Kovalyov <artemyko@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Acked-by: Ram Amrani <Ram.Amrani@cavium.com>
Acked-by: Shiraz Saleem <shiraz.saleem@intel.com>
Acked-by: Selvin Xavier <selvin.xavier@broadcom.com>
Acked-by: Adit Ranadive <aditr@vmware.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 8d2216be
@@ -118,7 +118,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	umem->context = context;
 	umem->length = size;
 	umem->address = addr;
-	umem->page_size = PAGE_SIZE;
+	umem->page_shift = PAGE_SHIFT;
 	umem->pid = get_task_pid(current, PIDTYPE_PID);
 	/*
 	 * We ask for writable memory if any of the following
@@ -315,7 +315,6 @@ EXPORT_SYMBOL(ib_umem_release);
 int ib_umem_page_count(struct ib_umem *umem)
 {
-	int shift;
 	int i;
 	int n;
 	struct scatterlist *sg;
@@ -323,11 +322,9 @@ int ib_umem_page_count(struct ib_umem *umem)
 	if (umem->odp_data)
 		return ib_umem_num_pages(umem);
-	shift = ilog2(umem->page_size);
 	n = 0;
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
-		n += sg_dma_len(sg) >> shift;
+		n += sg_dma_len(sg) >> umem->page_shift;
 	return n;
 }
@@ -257,7 +257,7 @@ struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
 	umem->context = context;
 	umem->length = size;
 	umem->address = addr;
-	umem->page_size = PAGE_SIZE;
+	umem->page_shift = PAGE_SHIFT;
 	umem->writable = 1;
 	odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
@@ -707,7 +707,7 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
 	 * invalidations, so we must make sure we free each page only
 	 * once. */
 	mutex_lock(&umem->odp_data->umem_mutex);
-	for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
+	for (addr = virt; addr < bound; addr += BIT(umem->page_shift)) {
 		idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
 		if (umem->odp_data->page_list[idx]) {
 			struct page *page = umem->odp_data->page_list[idx];
@@ -3016,7 +3016,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 	struct bnxt_re_mr *mr;
 	struct ib_umem *umem;
 	u64 *pbl_tbl, *pbl_tbl_orig;
-	int i, umem_pgs, pages, page_shift, rc;
+	int i, umem_pgs, pages, rc;
 	struct scatterlist *sg;
 	int entry;
@@ -3062,22 +3062,22 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 	}
 	pbl_tbl_orig = pbl_tbl;
-	page_shift = ilog2(umem->page_size);
 	if (umem->hugetlb) {
 		dev_err(rdev_to_dev(rdev), "umem hugetlb not supported!");
 		rc = -EFAULT;
 		goto fail;
 	}
-	if (umem->page_size != PAGE_SIZE) {
-		dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
+	if (umem->page_shift != PAGE_SHIFT) {
+		dev_err(rdev_to_dev(rdev), "umem page shift unsupported!");
 		rc = -EFAULT;
 		goto fail;
 	}
 	/* Map umem buf ptrs to the PBL */
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
-		pages = sg_dma_len(sg) >> page_shift;
+		pages = sg_dma_len(sg) >> umem->page_shift;
 		for (i = 0; i < pages; i++, pbl_tbl++)
-			*pbl_tbl = sg_dma_address(sg) + (i << page_shift);
+			*pbl_tbl = sg_dma_address(sg) + (i << umem->page_shift);
 	}
 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl_orig,
 			       umem_pgs, false);
@@ -581,7 +581,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		return ERR_PTR(err);
 	}
-	shift = ffs(mhp->umem->page_size) - 1;
+	shift = mhp->umem->page_shift;
 	n = mhp->umem->nmap;
@@ -601,7 +601,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		len = sg_dma_len(sg) >> shift;
 		for (k = 0; k < len; ++k) {
 			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
-						 mhp->umem->page_size * k);
+						 (k << shift));
 			if (i == PAGE_SIZE / sizeof *pages) {
 				err = iwch_write_pbl(mhp, pages, i, n);
 				if (err)
@@ -515,7 +515,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		return ERR_PTR(err);
 	}
-	shift = ffs(mhp->umem->page_size) - 1;
+	shift = mhp->umem->page_shift;
 	n = mhp->umem->nmap;
 	err = alloc_pbl(mhp, n);
@@ -534,7 +534,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		len = sg_dma_len(sg) >> shift;
 		for (k = 0; k < len; ++k) {
 			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
-						 mhp->umem->page_size * k);
+						 (k << shift));
 			if (i == PAGE_SIZE / sizeof *pages) {
 				err = write_pbl(&mhp->rhp->rdev,
 						pages,
@@ -219,8 +219,7 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
 		return PTR_ERR(*umem);
 	ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
-				ilog2((unsigned int)(*umem)->page_size),
-				&buf->hr_mtt);
+				(*umem)->page_shift, &buf->hr_mtt);
 	if (ret)
 		goto err_buf;
@@ -504,7 +504,8 @@ int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
 		len = sg_dma_len(sg) >> mtt->page_shift;
 		for (k = 0; k < len; ++k) {
-			pages[i++] = sg_dma_address(sg) + umem->page_size * k;
+			pages[i++] = sg_dma_address(sg) +
+				     (k << umem->page_shift);
 			if (i == PAGE_SIZE / sizeof(u64)) {
 				ret = hns_roce_write_mtt(hr_dev, mtt, n, i,
 							 pages);
@@ -564,9 +565,9 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	}
 	n = ib_umem_page_count(mr->umem);
-	if (mr->umem->page_size != HNS_ROCE_HEM_PAGE_SIZE) {
-		dev_err(dev, "Just support 4K page size but is 0x%x now!\n",
-			mr->umem->page_size);
+	if (mr->umem->page_shift != HNS_ROCE_HEM_PAGE_SHIFT) {
+		dev_err(dev, "Just support 4K page size but is 0x%lx now!\n",
+			BIT(mr->umem->page_shift));
 		ret = -EINVAL;
 		goto err_umem;
 	}
@@ -437,8 +437,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 		}
 		ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(hr_qp->umem),
-					ilog2((unsigned int)hr_qp->umem->page_size),
-					&hr_qp->mtt);
+					hr_qp->umem->page_shift, &hr_qp->mtt);
 		if (ret) {
 			dev_err(dev, "hns_roce_mtt_init error for create qp\n");
 			goto err_buf;
@@ -1345,7 +1345,7 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
 {
 	struct ib_umem *region = iwmr->region;
 	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
-	int chunk_pages, entry, pg_shift, i;
+	int chunk_pages, entry, i;
 	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
 	struct i40iw_pble_info *pinfo;
 	struct scatterlist *sg;
@@ -1354,14 +1354,14 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
 	pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
-	pg_shift = ffs(region->page_size) - 1;
 	for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
-		chunk_pages = sg_dma_len(sg) >> pg_shift;
+		chunk_pages = sg_dma_len(sg) >> region->page_shift;
 		if ((iwmr->type == IW_MEMREG_TYPE_QP) &&
 		    !iwpbl->qp_mr.sq_page)
 			iwpbl->qp_mr.sq_page = sg_page(sg);
 		for (i = 0; i < chunk_pages; i++) {
-			pg_addr = sg_dma_address(sg) + region->page_size * i;
+			pg_addr = sg_dma_address(sg) +
+				  (i << region->page_shift);
 			if ((entry + i) == 0)
 				*pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
@@ -1847,7 +1847,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
 	iwmr->ibmr.device = pd->device;
 	ucontext = to_ucontext(pd->uobject->context);
-	iwmr->page_size = region->page_size;
+	iwmr->page_size = PAGE_SIZE;
 	iwmr->page_msk = PAGE_MASK;
 	if (region->hugetlb && (req.reg_type == IW_MEMREG_TYPE_MEM))
@@ -147,7 +147,7 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *cont
 		return PTR_ERR(*umem);
 	err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
-			    ilog2((*umem)->page_size), &buf->mtt);
+			    (*umem)->page_shift, &buf->mtt);
 	if (err)
 		goto err_buf;
@@ -107,7 +107,7 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
 		len = sg_dma_len(sg) >> mtt->page_shift;
 		for (k = 0; k < len; ++k) {
 			pages[i++] = sg_dma_address(sg) +
-				     umem->page_size * k;
+				     (k << umem->page_shift);
 			/*
 			 * Be friendly to mlx4_write_mtt() and
 			 * pass it chunks of appropriate size.
@@ -155,7 +155,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	}
 	n = ib_umem_page_count(mr->umem);
-	shift = ilog2(mr->umem->page_size);
+	shift = mr->umem->page_shift;
 	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
 			    convert_access(access_flags), n, shift, &mr->mmr);
@@ -239,7 +239,7 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
 			goto release_mpt_entry;
 		}
 		n = ib_umem_page_count(mmr->umem);
-		shift = ilog2(mmr->umem->page_size);
+		shift = mmr->umem->page_shift;
 		err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
 					      virt_addr, length, n, shift,
@@ -745,7 +745,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		}
 		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
-				    ilog2(qp->umem->page_size), &qp->mtt);
+				    qp->umem->page_shift, &qp->mtt);
 		if (err)
 			goto err_buf;
@@ -122,7 +122,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
 		}
 		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
-				    ilog2(srq->umem->page_size), &srq->mtt);
+				    srq->umem->page_shift, &srq->mtt);
 		if (err)
 			goto err_buf;
@@ -59,7 +59,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
 	u64 pfn;
 	struct scatterlist *sg;
 	int entry;
-	unsigned long page_shift = ilog2(umem->page_size);
+	unsigned long page_shift = umem->page_shift;
 	/* With ODP we must always match OS page size. */
 	if (umem->odp_data) {
@@ -156,7 +156,7 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 			    int page_shift, size_t offset, size_t num_pages,
 			    __be64 *pas, int access_flags)
 {
-	unsigned long umem_page_shift = ilog2(umem->page_size);
+	unsigned long umem_page_shift = umem->page_shift;
 	int shift = page_shift - umem_page_shift;
 	int mask = (1 << shift) - 1;
 	int i, k, idx;
@@ -206,7 +206,7 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
 	 * but they will write 0s as well, so no difference in the end result.
 	 */
-	for (addr = start; addr < end; addr += (u64)umem->page_size) {
+	for (addr = start; addr < end; addr += BIT(umem->page_shift)) {
 		idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
 		/*
 		 * Strive to write the MTTs in chunks, but avoid overwriting
@@ -937,7 +937,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		goto err;
 	}
-	shift = ffs(mr->umem->page_size) - 1;
+	shift = mr->umem->page_shift;
 	n = mr->umem->nmap;
 	mr->mtt = mthca_alloc_mtt(dev, n);
@@ -959,8 +959,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
 		len = sg_dma_len(sg) >> shift;
 		for (k = 0; k < len; ++k) {
-			pages[i++] = sg_dma_address(sg) +
-				mr->umem->page_size * k;
+			pages[i++] = sg_dma_address(sg) + (k << shift);
 			/*
 			 * Be friendly to write_mtt and pass it chunks
 			 * of appropriate size.
@@ -2165,9 +2165,9 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	}
 	nes_debug(NES_DBG_MR, "User base = 0x%lX, Virt base = 0x%lX, length = %u,"
-			" offset = %u, page size = %u.\n",
+			" offset = %u, page size = %lu.\n",
 			(unsigned long int)start, (unsigned long int)virt, (u32)length,
-			ib_umem_offset(region), region->page_size);
+			ib_umem_offset(region), BIT(region->page_shift));
 	skip_pages = ((u32)ib_umem_offset(region)) >> 12;
@@ -914,21 +914,18 @@ static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
 	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
 	pbe_cnt = 0;
-	shift = ilog2(umem->page_size);
+	shift = umem->page_shift;
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
 		pages = sg_dma_len(sg) >> shift;
 		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
 			/* store the page address in pbe */
-			pbe->pa_lo =
-			    cpu_to_le32(sg_dma_address
-					(sg) +
-					(umem->page_size * pg_cnt));
-			pbe->pa_hi =
-			    cpu_to_le32(upper_32_bits
-					((sg_dma_address
-					  (sg) +
-					  umem->page_size * pg_cnt)));
+			pbe->pa_lo =
+			    cpu_to_le32(sg_dma_address(sg) +
+					(pg_cnt << shift));
+			pbe->pa_hi =
+			    cpu_to_le32(upper_32_bits(sg_dma_address(sg) +
+					(pg_cnt << shift)));
 			pbe_cnt += 1;
 			total_num_pbes += 1;
 			pbe++;
@@ -978,7 +975,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
 	if (status)
 		goto umem_err;
-	mr->hwmr.pbe_size = mr->umem->page_size;
+	mr->hwmr.pbe_size = BIT(mr->umem->page_shift);
 	mr->hwmr.fbo = ib_umem_offset(mr->umem);
 	mr->hwmr.va = usr_addr;
 	mr->hwmr.len = len;
@@ -681,16 +681,16 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
 	pbe_cnt = 0;
-	shift = ilog2(umem->page_size);
+	shift = umem->page_shift;
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
 		pages = sg_dma_len(sg) >> shift;
 		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
 			/* store the page address in pbe */
 			pbe->lo = cpu_to_le32(sg_dma_address(sg) +
-					      umem->page_size * pg_cnt);
+					      (pg_cnt << shift));
 			addr = upper_32_bits(sg_dma_address(sg) +
-					     umem->page_size * pg_cnt);
+					     (pg_cnt << shift));
 			pbe->hi = cpu_to_le32(addr);
 			pbe_cnt++;
 			total_num_pbes++;
@@ -2190,7 +2190,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
 	mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
 	mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
 	mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
-	mr->hw_mr.page_size_log = ilog2(mr->umem->page_size);
+	mr->hw_mr.page_size_log = mr->umem->page_shift;
 	mr->hw_mr.fbo = ib_umem_offset(mr->umem);
 	mr->hw_mr.length = len;
 	mr->hw_mr.vaddr = usr_addr;
@@ -194,7 +194,7 @@ int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir,
 		len = sg_dma_len(sg) >> PAGE_SHIFT;
 		for (j = 0; j < len; j++) {
 			dma_addr_t addr = sg_dma_address(sg) +
-					  umem->page_size * j;
+					  (j << umem->page_shift);
 			ret = pvrdma_page_dir_insert_dma(pdir, i, addr);
 			if (ret)
@@ -408,8 +408,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	mr->mr.access_flags = mr_access_flags;
 	mr->umem = umem;
-	if (is_power_of_2(umem->page_size))
-		mr->mr.page_shift = ilog2(umem->page_size);
+	mr->mr.page_shift = umem->page_shift;
 	m = 0;
 	n = 0;
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
@@ -421,8 +420,9 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 			goto bail_inval;
 		}
 		mr->mr.map[m]->segs[n].vaddr = vaddr;
-		mr->mr.map[m]->segs[n].length = umem->page_size;
-		trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, umem->page_size);
+		mr->mr.map[m]->segs[n].length = BIT(umem->page_shift);
+		trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr,
+				      BIT(umem->page_shift));
 		n++;
 		if (n == RVT_SEGSZ) {
 			m++;
@@ -191,10 +191,8 @@ int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
 		goto err1;
 	}
-	WARN_ON_ONCE(!is_power_of_2(umem->page_size));
-
-	mem->page_shift = ilog2(umem->page_size);
-	mem->page_mask = umem->page_size - 1;
+	mem->page_shift = umem->page_shift;
+	mem->page_mask = BIT(umem->page_shift) - 1;
 	num_buf = 0;
 	map = mem->map;
@@ -210,7 +208,7 @@ int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
 		}
 		buf->addr = (uintptr_t)vaddr;
-		buf->size = umem->page_size;
+		buf->size = BIT(umem->page_shift);
 		num_buf++;
 		buf++;
@@ -44,7 +44,7 @@ struct ib_umem {
 	struct ib_ucontext *context;
 	size_t length;
 	unsigned long address;
-	int page_size;
+	int page_shift;
 	int writable;
 	int hugetlb;
 	struct work_struct work;
@@ -60,7 +60,7 @@ struct ib_umem {
 /* Returns the offset of the umem start relative to the first page. */
 static inline int ib_umem_offset(struct ib_umem *umem)
 {
-	return umem->address & ((unsigned long)umem->page_size - 1);
+	return umem->address & (BIT(umem->page_shift) - 1);
 }
 /* Returns the first page of an ODP umem. */