Commit 38f8ff5b authored by Jason Gunthorpe

RDMA/mlx5: Reorganize mlx5_ib_reg_user_mr()

This function handles an ODP and regular MR flow all mushed together, even
though the two flows are quite different. Split them into two dedicated
functions.

Link: https://lore.kernel.org/r/20201130075839.278575-5-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 6e0954b1
drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1340,7 +1340,7 @@ void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
 int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
 			       enum ib_uverbs_advise_mr_advice advice,
 			       u32 flags, struct ib_sge *sg_list, u32 num_sge);
-int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr, bool enable);
+int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr);
 #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
 static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
 {
@@ -1362,7 +1362,7 @@ mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
 {
 	return -EOPNOTSUPP;
 }
-static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr, bool enable)
+static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
 {
 	return -EOPNOTSUPP;
 }
[The diff for drivers/infiniband/hw/mlx5/mr.c is collapsed in this view.]
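The collapsed diff above is the mr.c half of the reorganization. As a rough, hedged sketch of the resulting shape (the helper names create_user_odp_mr() and create_real_mr() and the bodies shown here are illustrative assumptions, not the verbatim patch), mlx5_ib_reg_user_mr() becomes a thin dispatcher:

	/*
	 * Sketch only, not the collapsed mr.c diff itself: the ODP path and
	 * the regular pinned-MR path each get a dedicated helper instead of
	 * one interleaved flow.
	 */
	struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
					  u64 virt_addr, int access_flags,
					  struct ib_udata *udata)
	{
		if (access_flags & IB_ACCESS_ON_DEMAND)
			return create_user_odp_mr(pd, start, length, virt_addr,
						  access_flags, udata);
		return create_real_mr(pd, start, length, virt_addr, access_flags,
				      udata);
	}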
drivers/infiniband/hw/mlx5/odp.c
@@ -536,6 +536,10 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
 	struct mlx5_ib_mr *imr;
 	int err;
 
+	if (!mlx5_ib_can_load_pas_with_umr(dev,
+					   MLX5_IMR_MTT_ENTRIES * PAGE_SIZE))
+		return ERR_PTR(-EOPNOTSUPP);
+
 	umem_odp = ib_umem_odp_alloc_implicit(&dev->ib_dev, access_flags);
 	if (IS_ERR(umem_odp))
 		return ERR_CAST(umem_odp);
@@ -831,17 +835,13 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
 				     flags);
 }
 
-int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr, bool enable)
+int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
 {
-	u32 flags = MLX5_PF_FLAGS_SNAPSHOT;
 	int ret;
 
-	if (enable)
-		flags |= MLX5_PF_FLAGS_ENABLE;
-
-	ret = pagefault_real_mr(mr, to_ib_umem_odp(mr->umem),
-				mr->umem->address, mr->umem->length, NULL,
-				flags);
+	ret = pagefault_real_mr(mr, to_ib_umem_odp(mr->umem), mr->umem->address,
+				mr->umem->length, NULL,
+				MLX5_PF_FLAGS_SNAPSHOT | MLX5_PF_FLAGS_ENABLE);
 
 	return ret >= 0 ? 0 : ret;
 }
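With the bool enable argument removed, a regular (non-implicit) ODP registration always snapshots the page tables and enables pagefaults in a single call. A caller-side sketch (the error label is an illustrative assumption, not taken from this patch):

	err = mlx5_ib_init_odp_mr(mr);	/* 0 on success, negative errno on failure */
	if (err)
		goto err_dereg_mr;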