Commit fc332570 authored by Jason Gunthorpe

RDMA/mlx5: Fix corruption of reg_pages in mlx5_ib_rereg_user_mr()

reg_pages should always contain mr->npages, since when the MR is finally
de-reg'd it is always subtracted out.

If mlx5_ib_rereg_user_mr() took any of its error exits it would leave
reg_pages already adjusted, and the same count would eventually be
subtracted a second time when the MR was de-registered.
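
To make the failure mode concrete, here is a minimal user-space sketch of
the buggy pattern. The names reg_pages, struct mr, rereg_buggy() and
dereg() are hypothetical stand-ins for the driver's structures, not the
kernel code itself:

/* Minimal sketch of the accounting bug; all names are hypothetical
 * stand-ins for the driver's structures. */
#include <stdio.h>

static int reg_pages;                  /* models dev->mdev->priv.reg_pages */

struct mr { int npages; };             /* models mlx5_ib_mr */

/* Buggy pattern: subtract up front, then take an error exit without
 * restoring the counter, leaving reg_pages short by mr->npages. */
static int rereg_buggy(struct mr *mr)
{
        reg_pages -= mr->npages;
        return -1;                     /* error exit: counter already adjusted */
}

/* Final de-registration unconditionally subtracts mr->npages, so the
 * pages already removed in rereg_buggy() are subtracted a second time. */
static void dereg(struct mr *mr)
{
        reg_pages -= mr->npages;
        mr->npages = 0;
}

int main(void)
{
        struct mr mr = { .npages = 8 };

        reg_pages = mr.npages;         /* registration added the pages once */
        rereg_buggy(&mr);              /* fails, but reg_pages is now 0 */
        dereg(&mr);                    /* subtracts again: reg_pages == -8 */
        printf("reg_pages = %d (should be 0)\n", reg_pages);
        return 0;
}

Running it prints reg_pages = -8: the pages are subtracted once by the
failed re-registration and once more at de-registration.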

The manipulation of reg_pages is inherently connected to the umem, so lift
it out of set_mr_fields() and only adjust it around creating/destroying a
umem.
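
Under the same stand-in model, a sketch of the fixed pairing: the counter
moves only together with the (mock) umem lifetime, so an error exit between
destroy and create leaves the count consistent. mock_umem_get() and
rereg_fixed() are likewise hypothetical:

/* Sketch of the fixed pattern; all names are hypothetical stand-ins. */
#include <stdio.h>

static int reg_pages;                  /* models dev->mdev->priv.reg_pages */

struct mr { int npages; };             /* models mlx5_ib_mr */

/* Stand-in for mr_umem_get(): fails when fail is non-zero. */
static int mock_umem_get(int fail, int *ncont)
{
        if (fail)
                return -1;
        *ncont = 16;
        return 0;
}

static int rereg_fixed(struct mr *mr, int fail)
{
        int ncont;

        /* Destroying the old umem: subtract and zero together. */
        reg_pages -= mr->npages;
        mr->npages = 0;

        if (mock_umem_get(fail, &ncont))
                return -1;             /* error exit: counter already consistent */

        /* The new umem exists: add its page count back. */
        mr->npages = ncont;
        reg_pages += mr->npages;
        return 0;
}

int main(void)
{
        struct mr mr = { .npages = 8 };

        reg_pages = mr.npages;
        rereg_fixed(&mr, 1);           /* fails; reg_pages == 0, npages == 0 */
        reg_pages -= mr.npages;        /* final dereg subtracts 0: still 0 */
        printf("reg_pages = %d (consistent)\n", reg_pages);
        return 0;
}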

reg_pages is only used for diagnostics in sysfs.

Fixes: 7d0cc6ed ("IB/mlx5: Add MR cache for large UMR regions")
Link: https://lore.kernel.org/r/20201026131936.1335664-3-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent b4d031cd
drivers/infiniband/hw/mlx5/mr.c
@@ -1248,10 +1248,8 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 }
 
 static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
-                          int npages, u64 length, int access_flags)
+                          u64 length, int access_flags)
 {
-        mr->npages = npages;
-        atomic_add(npages, &dev->mdev->priv.reg_pages);
         mr->ibmr.lkey = mr->mmkey.key;
         mr->ibmr.rkey = mr->mmkey.key;
         mr->ibmr.length = length;
@@ -1291,8 +1289,7 @@ static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
 
         kfree(in);
 
-        mr->umem = NULL;
-        set_mr_fields(dev, mr, 0, length, acc);
+        set_mr_fields(dev, mr, length, acc);
 
         return &mr->ibmr;
 
@@ -1420,7 +1417,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
         mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
 
         mr->umem = umem;
-        set_mr_fields(dev, mr, npages, length, access_flags);
+        mr->npages = npages;
+        atomic_add(mr->npages, &dev->mdev->priv.reg_pages);
+        set_mr_fields(dev, mr, length, access_flags);
 
         if (xlt_with_umr && !(access_flags & IB_ACCESS_ON_DEMAND)) {
                 /*
@@ -1532,8 +1531,6 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
         mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
                     start, virt_addr, length, access_flags);
 
-        atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
-
         if (!mr->umem)
                 return -EINVAL;
 
@@ -1554,12 +1551,17 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
                  * used.
                  */
                 flags |= IB_MR_REREG_TRANS;
+                atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
+                mr->npages = 0;
                 ib_umem_release(mr->umem);
                 mr->umem = NULL;
                 err = mr_umem_get(dev, addr, len, access_flags, &mr->umem,
                                   &npages, &page_shift, &ncont, &order);
                 if (err)
                         goto err;
+
+                mr->npages = ncont;
+                atomic_add(mr->npages, &dev->mdev->priv.reg_pages);
         }
 
         if (!mlx5_ib_can_reconfig_with_umr(dev, mr->access_flags,
@@ -1610,7 +1612,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
                 goto err;
         }
 
-        set_mr_fields(dev, mr, npages, len, access_flags);
+        set_mr_fields(dev, mr, len, access_flags);
 
         return 0;
 