Commit b4d031cd authored by Jason Gunthorpe

RDMA/mlx5: Remove mlx5_ib_mr->order

The order is only ever set to non-zero if the MR is from the cache, and if it is
cached then the order is in cached_ent->order.

Make it clearer that use_umr_mtt_update() only returns true for cached MRs
and remove the redundant data.

Link: https://lore.kernel.org/r/20201026131936.1335664-2-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent e28bf1f0
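
To make the arithmetic behind the updated check concrete, below is a small standalone user-space sketch, not the kernel code: it assumes MLX5_ADAPTER_PAGE_SIZE is 4096 and uses made-up sample values. It shows how a cached MR whose cache entry has a given order covers (1 << order) adapter pages, and why the start offset within the first page is counted against that capacity, mirroring the expression in mlx5_ib_pas_fits_in_mr() after this change.

/* Illustrative sketch only; values and helper name are hypothetical. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MLX5_ADAPTER_PAGE_SIZE 4096

static bool pas_fits(unsigned int cache_order, uint64_t start, uint64_t length)
{
	/* Capacity of the cached MR in bytes: 2^order adapter pages. */
	uint64_t capacity =
		((uint64_t)1 << cache_order) * MLX5_ADAPTER_PAGE_SIZE;

	/* The offset of 'start' inside its first page also consumes capacity. */
	return capacity >= length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

int main(void)
{
	/* order 3 -> 8 pages = 32 KiB; 30 KiB starting 2 KiB into a page fits. */
	printf("%d\n", pas_fits(3, 2048, 30 * 1024));	/* prints 1 */
	/* The same length starting 3 KiB into a page no longer fits. */
	printf("%d\n", pas_fits(3, 3072, 30 * 1024));	/* prints 0 */
	return 0;
}

In the kernel, an MR that did not come from the cache has no cache entry at all, which is why the patch makes the check bail out early when mr->cache_ent is NULL instead of reading a stale order field.
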
@@ -601,7 +601,6 @@ struct mlx5_ib_mr {
 	struct ib_umem *umem;
 	struct mlx5_shared_mr_info *smr_info;
 	struct list_head list;
-	unsigned int order;
 	struct mlx5_cache_ent *cache_ent;
 	int npages;
 	struct mlx5_ib_dev *dev;
@@ -126,7 +126,9 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 static inline bool mlx5_ib_pas_fits_in_mr(struct mlx5_ib_mr *mr, u64 start,
 					   u64 length)
 {
-	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
+	if (!mr->cache_ent)
+		return false;
+	return ((u64)1 << mr->cache_ent->order) * MLX5_ADAPTER_PAGE_SIZE >=
 		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
 }
@@ -172,7 +174,6 @@ static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
 		return NULL;
-	mr->order = ent->order;
 	mr->cache_ent = ent;
 	mr->dev = ent->dev;