Commit 0446cad9 authored by Jason Gunthorpe

RDMA/odp: Provide ib_umem_odp_release() to undo the allocs

Now that there are allocator APIs that return the ib_umem_odp directly,
it should be freed through a umem_odp free'er as well.
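
For callers that obtain the ib_umem_odp directly, the pairing now looks
roughly like the sketch below (illustrative only: it assumes the
ib_umem_odp_alloc_child() allocator from earlier in this series, and
do_driver_work() is a hypothetical placeholder, not part of the patch):

	#include <linux/err.h>
	#include <rdma/ib_umem_odp.h>

	/*
	 * Sketch: allocate a child ODP umem and undo the allocation with
	 * ib_umem_odp_release() rather than ib_umem_release().
	 */
	static int example_alloc_and_release(struct ib_umem_odp *root,
					     unsigned long addr, size_t size)
	{
		struct ib_umem_odp *odp;
		int ret;

		odp = ib_umem_odp_alloc_child(root, addr, size);
		if (IS_ERR(odp))
			return PTR_ERR(odp);

		ret = do_driver_work(odp);	/* hypothetical driver work */
		if (ret)
			/* frees the page lists, drops owning_mm, kfrees the umem_odp */
			ib_umem_odp_release(odp);

		return ret;
	}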

Link: https://lore.kernel.org/r/20190819111710.18440-8-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 261dc53f
@@ -326,15 +326,6 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 }
 EXPORT_SYMBOL(ib_umem_get);
 
-static void __ib_umem_release_tail(struct ib_umem *umem)
-{
-	mmdrop(umem->owning_mm);
-	if (umem->is_odp)
-		kfree(to_ib_umem_odp(umem));
-	else
-		kfree(umem);
-}
-
 /**
  * ib_umem_release - release memory pinned with ib_umem_get
  * @umem: umem struct to release
@@ -343,17 +334,14 @@ void ib_umem_release(struct ib_umem *umem)
 {
 	if (!umem)
 		return;
-
-	if (umem->is_odp) {
-		ib_umem_odp_release(to_ib_umem_odp(umem));
-		__ib_umem_release_tail(umem);
-		return;
-	}
+	if (umem->is_odp)
+		return ib_umem_odp_release(to_ib_umem_odp(umem));
 
 	__ib_umem_release(umem->context->device, umem, 1);
 
 	atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
-	__ib_umem_release_tail(umem);
+	mmdrop(umem->owning_mm);
+	kfree(umem);
 }
 EXPORT_SYMBOL(ib_umem_release);
 
...
@@ -523,7 +523,10 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
 		vfree(umem_odp->page_list);
 	}
 	put_per_mm(umem_odp);
+	mmdrop(umem_odp->umem.owning_mm);
+	kfree(umem_odp);
 }
+EXPORT_SYMBOL(ib_umem_odp_release);
 
 /*
  * Map for DMA and insert a single page into the on-demand paging page tables.
...
@@ -1629,7 +1629,7 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		 * so that there will not be any invalidations in
 		 * flight, looking at the *mr struct.
 		 */
-		ib_umem_release(umem);
+		ib_umem_odp_release(umem_odp);
 		atomic_sub(npages, &dev->mdev->priv.reg_pages);
 
 		/* Avoid double-freeing the umem. */
...
@@ -206,7 +206,7 @@ static void mr_leaf_free_action(struct work_struct *work)
 	mr->parent = NULL;
 	synchronize_srcu(&mr->dev->mr_srcu);
 
-	ib_umem_release(&odp->umem);
+	ib_umem_odp_release(odp);
 	if (imr->live)
 		mlx5_ib_update_xlt(imr, idx, 1, 0,
 				   MLX5_IB_UPD_XLT_INDIRECT |
@@ -472,7 +472,7 @@ static struct ib_umem_odp *implicit_mr_get_data(struct mlx5_ib_mr *mr,
 					mr->access_flags);
 		if (IS_ERR(mtt)) {
 			mutex_unlock(&odp_mr->umem_mutex);
-			ib_umem_release(&odp->umem);
+			ib_umem_odp_release(odp);
 			return ERR_CAST(mtt);
 		}
 
@@ -526,7 +526,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
 
 	imr = implicit_mr_alloc(&pd->ibpd, umem_odp, 1, access_flags);
 	if (IS_ERR(imr)) {
-		ib_umem_release(&umem_odp->umem);
+		ib_umem_odp_release(umem_odp);
 		return ERR_CAST(imr);
 	}
 
...