Commit ad50294d authored by Shay Drory, committed by Jason Gunthorpe

RDMA/mlx5: Create ODP EQ only when ODP MR is created

There is no need to create the ODP EQ if the user doesn't use ODP MRs.
Hence, create it only when the first ODP MR is created. This EQ will be
destroyed only when the device is unloaded.
This decreases the number of EQs created per device. For example, if we
create 1K devices (SFs/VFs/etc.), we reduce the number of EQs by 1K.

Link: https://lore.kernel.org/r/20210314125418.179716-1-leon@kernel.org
Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 783cf673
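
For context, here is a minimal user-space sketch of the pattern this patch
adopts: mutex-guarded, create-once initialization triggered by the first
consumer, paired with a destroy path that tolerates the object never having
been created. The names and bodies below (struct ib_dev, odp_create_eq(),
the calloc() stand-in for EQ setup) are simplified hypothetical stand-ins,
not the driver's code.

/*
 * Sketch only: hypothetical stand-ins for struct mlx5_ib_dev,
 * struct mlx5_ib_pf_eq, and mlx5r_odp_create_eq(). Build with -pthread.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pf_eq {
	void *core;                     /* non-NULL once the EQ exists */
};

struct ib_dev {
	pthread_mutex_t odp_eq_mutex;   /* serializes first-time creation */
	struct pf_eq odp_pf_eq;
};

/* Called on every ODP MR registration; only the first call creates. */
static int odp_create_eq(struct ib_dev *dev, struct pf_eq *eq)
{
	int err = 0;

	pthread_mutex_lock(&dev->odp_eq_mutex);
	if (eq->core)                   /* an earlier MR already made it */
		goto unlock;
	eq->core = calloc(1, 64);       /* stands in for the real EQ setup */
	if (!eq->core)
		err = -ENOMEM;
unlock:
	pthread_mutex_unlock(&dev->odp_eq_mutex);
	return err;
}

/* Called once at device unload; the EQ may never have been created. */
static void odp_destroy_eq(struct ib_dev *dev, struct pf_eq *eq)
{
	(void)dev;                      /* mirrors the driver's signature */
	if (!eq->core)
		return;
	free(eq->core);
	eq->core = NULL;
}

int main(void)
{
	struct ib_dev dev = { .odp_eq_mutex = PTHREAD_MUTEX_INITIALIZER };

	/* Two ODP MR registrations: the EQ is created exactly once. */
	printf("first MR:  %d\n", odp_create_eq(&dev, &dev.odp_pf_eq));
	printf("second MR: %d\n", odp_create_eq(&dev, &dev.odp_pf_eq));
	odp_destroy_eq(&dev, &dev.odp_pf_eq);
	return 0;
}

Note how eq->core doubles as the "already created" flag, which is why the
patch's error path resets it to NULL and why the destroy path can simply
return when it is still NULL.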
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1080,6 +1080,7 @@ struct mlx5_ib_dev {
 	struct mutex slow_path_mutex;
 	struct ib_odp_caps odp_caps;
 	u64 odp_max_size;
+	struct mutex odp_eq_mutex;
 	struct mlx5_ib_pf_eq odp_pf_eq;
 	struct xarray odp_mkeys;
@@ -1358,6 +1359,7 @@ struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
 int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
+int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq);
 void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
 int __init mlx5_ib_odp_init(void);
 void mlx5_ib_odp_cleanup(void);
@@ -1377,6 +1379,11 @@ static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
 }
 
 static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
+static inline int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev,
+				      struct mlx5_ib_pf_eq *eq)
+{
+	return 0;
+}
 static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
 static inline int mlx5_ib_odp_init(void) { return 0; }
 static inline void mlx5_ib_odp_cleanup(void) {}
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1500,6 +1500,9 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
 		return ERR_PTR(-EOPNOTSUPP);
 
+	err = mlx5r_odp_create_eq(dev, &dev->odp_pf_eq);
+	if (err)
+		return ERR_PTR(err);
 	if (!start && length == U64_MAX) {
 		if (iova != 0)
 			return ERR_PTR(-EINVAL);
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1531,20 +1531,24 @@ enum {
 	MLX5_IB_NUM_PF_DRAIN = 64,
 };
 
-static int
-mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
+int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
 {
 	struct mlx5_eq_param param = {};
-	int err;
+	int err = 0;
 
+	mutex_lock(&dev->odp_eq_mutex);
+	if (eq->core)
+		goto unlock;
 	INIT_WORK(&eq->work, mlx5_ib_eq_pf_action);
 	spin_lock_init(&eq->lock);
 	eq->dev = dev;
 
 	eq->pool = mempool_create_kmalloc_pool(MLX5_IB_NUM_PF_DRAIN,
 					       sizeof(struct mlx5_pagefault));
-	if (!eq->pool)
-		return -ENOMEM;
+	if (!eq->pool) {
+		err = -ENOMEM;
+		goto unlock;
+	}
 
 	eq->wq = alloc_workqueue("mlx5_ib_page_fault",
 				 WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM,
@@ -1555,7 +1559,7 @@ mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
 	}
 
 	eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
-	param = (struct mlx5_eq_param) {
+	param = (struct mlx5_eq_param){
 		.irq_index = 0,
 		.nent = MLX5_IB_NUM_PF_EQE,
 	};
@@ -1571,21 +1575,27 @@ mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
 		goto err_eq;
 	}
 
+	mutex_unlock(&dev->odp_eq_mutex);
 	return 0;
 err_eq:
 	mlx5_eq_destroy_generic(dev->mdev, eq->core);
 err_wq:
+	eq->core = NULL;
 	destroy_workqueue(eq->wq);
 err_mempool:
 	mempool_destroy(eq->pool);
+unlock:
+	mutex_unlock(&dev->odp_eq_mutex);
 	return err;
 }
 
 static int
-mlx5_ib_destroy_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
+mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
 {
 	int err;
 
+	if (!eq->core)
+		return 0;
 	mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb);
 	err = mlx5_eq_destroy_generic(dev->mdev, eq->core);
 	cancel_work_sync(&eq->work);
@@ -1642,8 +1652,7 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
 		}
 	}
 
-	ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq);
-
+	mutex_init(&dev->odp_eq_mutex);
 	return ret;
 }
@@ -1652,7 +1661,7 @@ void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
 	if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
 		return;
 
-	mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq);
+	mlx5_ib_odp_destroy_eq(dev, &dev->odp_pf_eq);
 }
 
 int mlx5_ib_odp_init(void)