Commit d8b7515e authored by Leon Romanovsky, committed by Jason Gunthorpe

RDMA/mlx5: Cleanup DEVX initialization flow

Move the DEVX initialization and cleanup flows into devx.c instead of
having almost empty functions in main.c.

Link: https://lore.kernel.org/r/20200702081809.423482-6-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent f7c4ffda
@@ -2362,17 +2362,24 @@ static int devx_event_notifier(struct notifier_block *nb,
 	return NOTIFY_OK;
 }
 
-void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev)
+int mlx5_ib_devx_init(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_devx_event_table *table = &dev->devx_event_table;
+	int uid;
+
+	uid = mlx5_ib_devx_create(dev, false);
+	if (uid > 0) {
+		dev->devx_whitelist_uid = uid;
+		xa_init(&table->event_xa);
+		mutex_init(&table->event_xa_lock);
+		MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
+		mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
+	}
 
-	xa_init(&table->event_xa);
-	mutex_init(&table->event_xa_lock);
-	MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
-	mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
+	return 0;
 }
 
-void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev)
+void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_devx_event_table *table = &dev->devx_event_table;
 	struct devx_event_subscription *sub, *tmp;
@@ -2380,17 +2387,21 @@ void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev)
 	void *entry;
 	unsigned long id;
 
-	mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
-	mutex_lock(&dev->devx_event_table.event_xa_lock);
-	xa_for_each(&table->event_xa, id, entry) {
-		event = entry;
-		list_for_each_entry_safe(sub, tmp, &event->unaffiliated_list,
-					 xa_list)
-			devx_cleanup_subscription(dev, sub);
-		kfree(entry);
+	if (dev->devx_whitelist_uid) {
+		mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
+		mutex_lock(&dev->devx_event_table.event_xa_lock);
+		xa_for_each(&table->event_xa, id, entry) {
+			event = entry;
+			list_for_each_entry_safe(
+				sub, tmp, &event->unaffiliated_list, xa_list)
+				devx_cleanup_subscription(dev, sub);
+			kfree(entry);
+		}
+		mutex_unlock(&dev->devx_event_table.event_xa_lock);
+		xa_destroy(&table->event_xa);
+		mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
 	}
-	mutex_unlock(&dev->devx_event_table.event_xa_lock);
-	xa_destroy(&table->event_xa);
 }
 
 static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
@@ -23,4 +23,23 @@ struct devx_obj {
 	};
 	struct list_head event_sub; /* holds devx_event_subscription entries */
 };
+#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user);
+void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid);
+int mlx5_ib_devx_init(struct mlx5_ib_dev *dev);
+void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev);
+#else
+static inline int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
+{
+	return -EOPNOTSUPP;
+}
+static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid) {}
+static inline int mlx5_ib_devx_init(struct mlx5_ib_dev *dev)
+{
+	return 0;
+}
+static inline void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev)
+{
+}
+#endif
 #endif /* _MLX5_IB_DEVX_H */
@@ -32,6 +32,7 @@
 #include "mlx5_ib.h"
 #include "ib_rep.h"
 #include "cmd.h"
+#include "devx.h"
 #include "fs.h"
 #include "srq.h"
 #include "qp.h"
@@ -4661,26 +4662,6 @@ static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
 	mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
 }
 
-static int mlx5_ib_stage_devx_init(struct mlx5_ib_dev *dev)
-{
-	int uid;
-
-	uid = mlx5_ib_devx_create(dev, false);
-	if (uid > 0) {
-		dev->devx_whitelist_uid = uid;
-		mlx5_ib_devx_init_event_table(dev);
-	}
-	return 0;
-}
-
-static void mlx5_ib_stage_devx_cleanup(struct mlx5_ib_dev *dev)
-{
-	if (dev->devx_whitelist_uid) {
-		mlx5_ib_devx_cleanup_event_table(dev);
-		mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
-	}
-}
-
 void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
 		      const struct mlx5_ib_profile *profile,
 		      int stage)
@@ -4771,8 +4752,8 @@ static const struct mlx5_ib_profile pf_profile = {
 		     NULL,
 		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
 	STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
-		     mlx5_ib_stage_devx_init,
-		     mlx5_ib_stage_devx_cleanup),
+		     mlx5_ib_devx_init,
+		     mlx5_ib_devx_cleanup),
 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
 		     mlx5_ib_stage_ib_reg_init,
 		     mlx5_ib_stage_ib_reg_cleanup),
@@ -4831,8 +4812,8 @@ const struct mlx5_ib_profile raw_eth_profile = {
 		     NULL,
 		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
 	STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
-		     mlx5_ib_stage_devx_init,
-		     mlx5_ib_stage_devx_cleanup),
+		     mlx5_ib_devx_init,
+		     mlx5_ib_devx_cleanup),
 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
 		     mlx5_ib_stage_ib_reg_init,
 		     mlx5_ib_stage_ib_reg_cleanup),
@@ -1353,20 +1353,6 @@ extern const struct uapi_definition mlx5_ib_flow_defs[];
 extern const struct uapi_definition mlx5_ib_qos_defs[];
 extern const struct uapi_definition mlx5_ib_std_types_defs[];
 
-#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
-int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user);
-void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid);
-void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev);
-void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev);
-#else
-static inline int
-mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
-		    bool is_user) { return -EOPNOTSUPP; }
-static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid) {}
-static inline void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev) {}
-static inline void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev) {}
-#endif
-
 static inline void init_query_mad(struct ib_smp *mad)
 {
 	mad->base_version = 1;