Commit 1769c4c5 authored by Jason Gunthorpe

RDMA/mlx5: Always remove MRs from the cache before destroying them

The cache bucket tracks the total number of MRs that exist, both inside
and outside of the cache. Removing an MR from the cache (by setting
cache_ent to NULL) without updating total_mrs causes the tracking to
leak and the count to become inflated.
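
A minimal sketch of the required bookkeeping (it mirrors the
detach_mr_from_cache() helper added in the diff below, with comments added
for clarity): any path that detaches an MR from its bucket must also drop
the bucket's total_mrs under the bucket lock:

	static void detach_mr_from_cache(struct mlx5_ib_mr *mr)
	{
		struct mlx5_cache_ent *ent = mr->cache_ent;

		/* The MR no longer belongs to the bucket... */
		mr->cache_ent = NULL;

		/*
		 * ...so the bucket's count of MRs it is responsible for
		 * must shrink with it, otherwise total_mrs is left inflated.
		 */
		spin_lock_irq(&ent->lock);
		ent->total_mrs--;
		spin_unlock_irq(&ent->lock);
	}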

Further, fix the rereg_mr path to always destroy the MR. reg_create will
always overwrite all of the MR data in mlx5_ib_mr, so the MR must be
completely destroyed, in all cases, before reg_create can be called.
Detach the MR from the cache and unconditionally destroy it to avoid
leaking HW mkeys.
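
A rough before/after of the rereg_mr teardown (condensed from the hunks
below; error handling elided) shows where the HW mkey could previously
leak:

	/*
	 * Before: a cached MR was only invalidated, never destroyed, so
	 * the HW mkey stayed allocated while reg_create() overwrote the
	 * mlx5_ib_mr that owned it.
	 */
	if (mr->cache_ent)
		err = mlx5_mr_cache_invalidate(mr);
	else
		err = destroy_mkey(dev, mr);

	/*
	 * After: detach from the bucket (keeping total_mrs consistent),
	 * then always destroy the HW mkey before reg_create() rebuilds
	 * the MR.
	 */
	if (mr->cache_ent)
		detach_mr_from_cache(mr);
	err = destroy_mkey(dev, mr);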

Fixes: afd14174 ("IB/mlx5: Use direct mkey destroy command upon UMR unreg failure")
Fixes: 56e11d62 ("IB/mlx5: Added support for re-registration of MRs")
Link: https://lore.kernel.org/r/20200310082238.239865-8-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent b91e1751
@@ -479,6 +479,16 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_cache_ent *req_ent)
 	return mr;
 }
 
+static void detach_mr_from_cache(struct mlx5_ib_mr *mr)
+{
+	struct mlx5_cache_ent *ent = mr->cache_ent;
+
+	mr->cache_ent = NULL;
+	spin_lock_irq(&ent->lock);
+	ent->total_mrs--;
+	spin_unlock_irq(&ent->lock);
+}
+
 void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
 	struct mlx5_cache_ent *ent = mr->cache_ent;
@@ -488,7 +498,7 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		return;
 
 	if (mlx5_mr_cache_invalidate(mr)) {
-		mr->cache_ent = NULL;
+		detach_mr_from_cache(mr);
 		destroy_mkey(dev, mr);
 		if (ent->available_mrs < ent->limit)
 			queue_work(dev->cache.wq, &ent->work);
@@ -1445,8 +1455,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 		 * UMR can't be used - MKey needs to be replaced.
 		 */
 		if (mr->cache_ent)
-			err = mlx5_mr_cache_invalidate(mr);
-		else
-			err = destroy_mkey(dev, mr);
+			detach_mr_from_cache(mr);
+		err = destroy_mkey(dev, mr);
 		if (err)
 			goto err;
@@ -1459,8 +1468,6 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 			mr = to_mmr(ib_mr);
 			goto err;
 		}
-
-		mr->cache_ent = NULL;
 	} else {
 		/*
 		 * Send a UMR WQE