Commit b02a29eb authored by Matthew Wilcox, committed by Jason Gunthorpe

mlx5: Convert mlx5_srq_table to XArray

Remove the custom spinlock as the XArray handles its own locking.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Acked-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 270a9833
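
The conversion follows the usual XArray pattern: the external spinlock plus radix tree pair becomes a single struct xarray, and lookups that must bump a refcount atomically take the XArray's built-in lock with xa_lock()/xa_unlock() around xa_load(). Below is a minimal sketch of that lookup side, not the mlx5 code itself; example_table, example_entry and example_get are hypothetical stand-ins for mlx5_srq_table, mlx5_core_srq and mlx5_cmd_get_srq.

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/xarray.h>

/* Hypothetical stand-ins for the mlx5 types, for illustration only. */
struct example_entry {
	atomic_t refcount;
};

struct example_table {
	struct xarray array;	/* replaces spinlock_t lock + struct radix_tree_root tree */
};

static struct example_entry *example_get(struct example_table *table, u32 id)
{
	struct example_entry *entry;

	/* xa_lock() takes the XArray's own spinlock, so no external lock
	 * is needed to make the lookup and the refcount bump atomic.
	 */
	xa_lock(&table->array);
	entry = xa_load(&table->array, id);
	if (entry)
		atomic_inc(&entry->refcount);
	xa_unlock(&table->array);

	return entry;
}
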
@@ -51,10 +51,7 @@ struct mlx5_core_srq {
 
 struct mlx5_srq_table {
 	struct notifier_block nb;
-	/* protect radix tree
-	 */
-	spinlock_t lock;
-	struct radix_tree_root tree;
+	struct xarray array;
 };
 
 int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
@@ -83,13 +83,11 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
 	struct mlx5_srq_table *table = &dev->srq_table;
 	struct mlx5_core_srq *srq;
 
-	spin_lock(&table->lock);
-
-	srq = radix_tree_lookup(&table->tree, srqn);
+	xa_lock(&table->array);
+	srq = xa_load(&table->array, srqn);
 	if (srq)
 		atomic_inc(&srq->common.refcount);
-
-	spin_unlock(&table->lock);
+	xa_unlock(&table->array);
 
 	return srq;
 }
@@ -597,9 +595,7 @@ int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
 	atomic_set(&srq->common.refcount, 1);
 	init_completion(&srq->common.free);
 
-	spin_lock_irq(&table->lock);
-	err = radix_tree_insert(&table->tree, srq->srqn, srq);
-	spin_unlock_irq(&table->lock);
+	err = xa_err(xa_store_irq(&table->array, srq->srqn, srq, GFP_KERNEL));
 	if (err)
 		goto err_destroy_srq_split;
 
@@ -617,9 +613,7 @@ int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
 	struct mlx5_core_srq *tmp;
 	int err;
 
-	spin_lock_irq(&table->lock);
-	tmp = radix_tree_delete(&table->tree, srq->srqn);
-	spin_unlock_irq(&table->lock);
+	tmp = xa_erase_irq(&table->array, srq->srqn);
 	if (!tmp || tmp != srq)
 		return -EINVAL;
 
@@ -680,13 +674,11 @@ static int srq_event_notifier(struct notifier_block *nb,
 	eqe = data;
 	srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
 
-	spin_lock(&table->lock);
-
-	srq = radix_tree_lookup(&table->tree, srqn);
+	xa_lock(&table->array);
+	srq = xa_load(&table->array, srqn);
 	if (srq)
 		atomic_inc(&srq->common.refcount);
-
-	spin_unlock(&table->lock);
+	xa_unlock(&table->array);
 
 	if (!srq)
 		return NOTIFY_OK;
@@ -703,8 +695,7 @@ int mlx5_init_srq_table(struct mlx5_ib_dev *dev)
 	struct mlx5_srq_table *table = &dev->srq_table;
 
 	memset(table, 0, sizeof(*table));
-	spin_lock_init(&table->lock);
-	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
+	xa_init_flags(&table->array, XA_FLAGS_LOCK_IRQ);
 
 	table->nb.notifier_call = srq_event_notifier;
 	mlx5_notifier_register(dev->mdev, &table->nb);
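
The update side of the same pattern is sketched below, reusing the hypothetical example_table/example_entry types from the earlier sketch and introducing the hypothetical helpers example_insert, example_remove and example_table_init; it illustrates the xa_store_irq()/xa_erase_irq()/xa_init_flags() calls used in the hunks above, not the mlx5 functions themselves.

static int example_insert(struct example_table *table, u32 id,
			  struct example_entry *entry)
{
	/* xa_store_irq() takes the XArray lock with interrupts disabled and
	 * returns either the previously stored entry or an error pointer;
	 * xa_err() turns that into 0 or a negative errno, mirroring the old
	 * radix_tree_insert() return value.
	 */
	return xa_err(xa_store_irq(&table->array, id, entry, GFP_KERNEL));
}

static int example_remove(struct example_table *table, u32 id,
			  struct example_entry *entry)
{
	/* xa_erase_irq() removes the entry and hands back whatever was stored,
	 * so the caller can sanity-check it against the pointer it expected,
	 * as mlx5_cmd_destroy_srq() does above.
	 */
	struct example_entry *old = xa_erase_irq(&table->array, id);

	if (!old || old != entry)
		return -EINVAL;
	return 0;
}

static void example_table_init(struct example_table *table)
{
	/* XA_FLAGS_LOCK_IRQ annotates that the XArray lock is taken in
	 * IRQ-disabling context, replacing spin_lock_init() + INIT_RADIX_TREE().
	 */
	xa_init_flags(&table->array, XA_FLAGS_LOCK_IRQ);
}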