Commit 10f56242 authored by Moni Shoua, committed by Jason Gunthorpe

IB/mlx5: Fix the locking of SRQ objects in ODP events

QP and SRQ objects are stored in different containers, so the code that gets
and locks a common resource during an ODP event needs to take that into account.

While here, get rid of the 'refcount' and 'free' fields in struct
mlx5_core_srq and use the fields with the same semantics in the common structure.

Fixes: 032080ab ("IB/mlx5: Lock QP during page fault handling")
Signed-off-by: Moni Shoua <monis@mellanox.com>
Reviewed-by: Majd Dibbiny <majd@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent e431a80a
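
For context: the fix reroutes SRQ reference counting through the refcount and
completion embedded in struct mlx5_core_rsc_common, which QP and SRQ objects
share. A minimal sketch of the put helper, assuming it pairs an atomic
refcount with a completion exactly as the hunks below use it (the mlx5 core
provides mlx5_core_res_hold()/mlx5_core_res_put() for this purpose):

void mlx5_core_res_put(struct mlx5_core_rsc_common *res)
{
	/* Drop one reference; the final put wakes whoever is blocked in
	 * wait_for_completion(&res->free) on the destroy path.
	 */
	if (atomic_dec_and_test(&res->refcount))
		complete(&res->free);
}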
@@ -187,8 +187,8 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
 		wqe_ctr = be16_to_cpu(cqe->wqe_counter);
 		wc->wr_id = srq->wrid[wqe_ctr];
 		mlx5_ib_free_srq_wqe(srq, wqe_ctr);
-		if (msrq && atomic_dec_and_test(&msrq->refcount))
-			complete(&msrq->free);
+		if (msrq)
+			mlx5_core_res_put(&msrq->common);
 		}
 	} else {
 		wq = &qp->rq;
...
@@ -1115,22 +1115,25 @@ static int mlx5_ib_mr_responder_pfault_handler(
 static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev,
 						       u32 wq_num, int pf_type)
 {
-	enum mlx5_res_type res_type;
+	struct mlx5_core_rsc_common *common = NULL;
+	struct mlx5_core_srq *srq;
 
 	switch (pf_type) {
 	case MLX5_WQE_PF_TYPE_RMP:
-		res_type = MLX5_RES_SRQ;
+		srq = mlx5_cmd_get_srq(dev, wq_num);
+		if (srq)
+			common = &srq->common;
 		break;
 	case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE:
 	case MLX5_WQE_PF_TYPE_RESP:
 	case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC:
-		res_type = MLX5_RES_QP;
+		common = mlx5_core_res_hold(dev->mdev, wq_num, MLX5_RES_QP);
 		break;
 	default:
-		return NULL;
+		break;
 	}
 
-	return mlx5_core_res_hold(dev->mdev, wq_num, res_type);
+	return common;
 }
 
 static inline struct mlx5_ib_qp *res_to_qp(struct mlx5_core_rsc_common *res)
...
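
A hypothetical caller makes the new contract of odp_get_rsc() concrete: the
fault handler receives the common structure with a reference already taken,
regardless of which container (SRQ radix tree vs. QP resource table) the
object came from, and releases it with one uniform put. handle_pfault_wqe()
below is illustrative only, not a function in this patch:

static void handle_pfault_wqe(struct mlx5_ib_dev *dev, u32 wq_num, int pf_type)
{
	struct mlx5_core_rsc_common *res;

	res = odp_get_rsc(dev, wq_num, pf_type);	/* reference taken */
	if (!res)
		return;					/* object already gone */

	/* ... resolve the page fault against the QP or SRQ ... */

	mlx5_core_res_put(res);				/* reference dropped */
}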
@@ -46,8 +46,6 @@ struct mlx5_core_srq {
 	int wqe_shift;
 	void (*event)(struct mlx5_core_srq *srq, enum mlx5_event e);
 
-	atomic_t refcount;
-	struct completion free;
 	u16 uid;
 };
...
@@ -87,7 +87,7 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
 	srq = radix_tree_lookup(&table->tree, srqn);
 	if (srq)
-		atomic_inc(&srq->refcount);
+		atomic_inc(&srq->common.refcount);
 	spin_unlock(&table->lock);
@@ -594,8 +594,8 @@ int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
 	if (err)
 		return err;
 
-	atomic_set(&srq->refcount, 1);
-	init_completion(&srq->free);
+	atomic_set(&srq->common.refcount, 1);
+	init_completion(&srq->common.free);
 	spin_lock_irq(&table->lock);
 	err = radix_tree_insert(&table->tree, srq->srqn, srq);
@@ -627,9 +627,8 @@ int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
 	if (err)
 		return err;
 
-	if (atomic_dec_and_test(&srq->refcount))
-		complete(&srq->free);
-	wait_for_completion(&srq->free);
+	mlx5_core_res_put(&srq->common);
+	wait_for_completion(&srq->common.free);
 
 	return 0;
 }
@@ -685,7 +684,7 @@ static int srq_event_notifier(struct notifier_block *nb,
 	srq = radix_tree_lookup(&table->tree, srqn);
 	if (srq)
-		atomic_inc(&srq->refcount);
+		atomic_inc(&srq->common.refcount);
 	spin_unlock(&table->lock);
@@ -694,8 +693,7 @@ static int srq_event_notifier(struct notifier_block *nb,
 	srq->event(srq, eqe->type);
 
-	if (atomic_dec_and_test(&srq->refcount))
-		complete(&srq->free);
+	mlx5_core_res_put(&srq->common);
 
 	return NOTIFY_OK;
 }
...
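
Taken together, the SRQ lifetime under the common refcount follows the
familiar refcount-plus-completion drain pattern; a sketch of the flow (not
verbatim kernel code):

/* create: one initial reference, completion armed */
atomic_set(&srq->common.refcount, 1);
init_completion(&srq->common.free);

/* any transient user (CQ poll, event notifier, ODP fault) */
srq = mlx5_cmd_get_srq(dev, srqn);	/* lookup + refcount inc under lock */
/* ... use srq ... */
mlx5_core_res_put(&srq->common);

/* destroy: drop the initial reference, then wait until the last
 * transient user has dropped theirs before freeing the object
 */
mlx5_core_res_put(&srq->common);
wait_for_completion(&srq->common.free);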