Commit b91e1751 authored by Jason Gunthorpe

RDMA/mlx5: Simplify how the MR cache bucket is located

There are many bad APIs here that accept a cache bucket index instead of
a bucket pointer. Many of the callers already have a bucket pointer, so
this results in a lot of confusing uses of order2idx().

Pass the struct mlx5_cache_ent into add_keys(), remove_keys(), and
alloc_cached_mr().
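
To make the shape of that change concrete, here is a minimal,
self-contained sketch; the struct layouts and parameter lists are
stand-ins for illustration, not the driver's exact types or signatures,
and the two variants are named _old/_new only to show them side by side:

  /* Illustrative stand-ins; only the fields the example needs. */
  struct cache_ent {
          unsigned int order;
          unsigned int available;
  };

  struct mr_cache {
          struct cache_ent ent[32];
  };

  /* Before: take a bucket index and re-derive the bucket inside. */
  static int add_keys_old(struct mr_cache *cache, int idx, unsigned int num)
  {
          struct cache_ent *ent = &cache->ent[idx];

          ent->available += num;  /* stand-in for creating 'num' cached mkeys */
          return 0;
  }

  /* After: take the bucket pointer the caller already holds. */
  static int add_keys_new(struct cache_ent *ent, unsigned int num)
  {
          ent->available += num;  /* stand-in for creating 'num' cached mkeys */
          return 0;
  }

Callers that walk the cache already hold the bucket, so the pointer form
removes the round trip through order2idx() and back.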

Once the MR is in the cache, store the cache bucket pointer directly in
the MR, replacing the 'bool allocated_from_cache'.
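
A single pointer then both answers "did this MR come from the cache?"
and names the bucket to return it to; compare the dma_fence_odp_mr()
hunk below. A rough sketch with stand-in names (destroy_mkey() stands in
for mlx5_core_destroy_mkey(), push_to_bucket() for returning the MR to
its bucket):

  struct cache_ent;               /* bucket type; opaque in this sketch */

  struct mr {
          /* NULL means the MR was not allocated from the cache. */
          struct cache_ent *cache_ent;
  };

  static void destroy_mkey(struct mr *mr)
  {
          /* stand-in for destroying the mkey outright */
  }

  static void push_to_bucket(struct cache_ent *ent, struct mr *mr)
  {
          /* stand-in for handing the MR back to its cache bucket */
  }

  static void release_mr(struct mr *mr)
  {
          if (!mr->cache_ent)     /* was: if (!mr->allocated_from_cache) */
                  destroy_mkey(mr);
          else
                  push_to_bucket(mr->cache_ent, mr);
  }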

In the end there is only one place that needs to form an index from an
order, alloc_mr_from_cache(). Increase the safety of this function by
disallowing it from accessing cache entries in the ODP special area.
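
A sketch of the kind of check this implies; the order-to-index mapping
and the LAST_STD_ENTRY constant are illustrative, but the idea is that
the low indices are the per-order buckets and the entries past them are
reserved for ODP's special mkeys:

  #include <stddef.h>

  #define LAST_STD_ENTRY  20      /* illustrative bound; higher entries form the ODP special area */

  struct cache_ent {
          unsigned int order;
  };

  struct mr_cache {
          struct cache_ent ent[LAST_STD_ENTRY + 3];
  };

  /* Only this path still maps an order to an index, and it refuses to
   * hand back a bucket from the ODP special area. */
  static struct cache_ent *bucket_for_order(struct mr_cache *cache,
                                            unsigned int order)
  {
          unsigned int idx = order - 2;   /* illustrative order->index mapping */

          if (idx > LAST_STD_ENTRY)
                  return NULL;
          return &cache->ent[idx];
  }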

Link: https://lore.kernel.org/r/20200310082238.239865-7-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 7c8691a3
@@ -617,8 +617,8 @@ struct mlx5_ib_mr {
 	struct ib_umem *umem;
 	struct mlx5_shared_mr_info *smr_info;
 	struct list_head list;
-	int order;
-	bool allocated_from_cache;
+	unsigned int order;
+	struct mlx5_cache_ent *cache_ent;
 	int npages;
 	struct mlx5_ib_dev *dev;
 	u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
@@ -1274,7 +1274,8 @@ int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
 int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
 int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
-struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry);
+struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
+				       unsigned int entry);
 void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr);
@@ -197,7 +197,7 @@ static void dma_fence_odp_mr(struct mlx5_ib_mr *mr)
 	odp->private = NULL;
 	mutex_unlock(&odp->umem_mutex);
-	if (!mr->allocated_from_cache) {
+	if (!mr->cache_ent) {
 		mlx5_core_destroy_mkey(mr->dev->mdev, &mr->mmkey);
 		WARN_ON(mr->descs);
 	}