Commit 2638a323 authored by Dakshaja Uppalapati, committed by Jason Gunthorpe

RDMA/iw_cxgb4: Fix refcount underflow while destroying cqs.

The previous atomic increment/decrement logic expected the atomic count to be
'0' after the final decrement.

Replacing the atomic count with a refcount does not allow that, as
refcount_dec() treats a decrement from '1' to '0' as an underflow and triggers
a kernel splat.

Fix the refcount logic by using the usual pattern of decrementing the refcount
and testing whether it hit '0' on the final deref in c4iw_destroy_cq(), and
wait with wait_for_completion() instead of wait_event().

Fixes: 7183451f ("RDMA/cxgb4: Use refcount_t instead of atomic_t for reference counting")
Link: https://lore.kernel.org/r/1628167412-12114-1-git-send-email-dakshaja@chelsio.com
Signed-off-by: Dakshaja Uppalapati <dakshaja@chelsio.com>
Reviewed-by: Potnuri Bharat Teja <bharat@chelsio.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 8b436a99
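
Before the diff, a minimal sketch of the pattern this fix adopts. It uses a
hypothetical 'struct foo' rather than the driver's own types; it is an
illustration of the refcount-plus-completion teardown idiom, not part of the
commit itself:

#include <linux/refcount.h>
#include <linux/completion.h>

struct foo {
	refcount_t refcnt;		/* one base reference held by the creator */
	struct completion rel_comp;	/* signalled when the last reference drops */
};

static void foo_init(struct foo *f)
{
	refcount_set(&f->refcnt, 1);
	init_completion(&f->rel_comp);
}

static void foo_put(struct foo *f)
{
	/* Only the final put signals the waiter. */
	if (refcount_dec_and_test(&f->refcnt))
		complete(&f->rel_comp);
}

static void foo_destroy(struct foo *f)
{
	/*
	 * The old pattern called refcount_dec() here and then waited for the
	 * count to read 0; refcount_dec() splats when it takes the count from
	 * 1 to 0, which is exactly what happens when nobody else holds a
	 * reference.  Instead, drop the base reference like any other holder
	 * and wait for the last one to complete().
	 */
	foo_put(f);
	wait_for_completion(&f->rel_comp);
}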
drivers/infiniband/hw/cxgb4/cq.c

@@ -967,6 +967,12 @@ int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 	return !err || err == -ENODATA ? npolled : err;
 }
 
+void c4iw_cq_rem_ref(struct c4iw_cq *chp)
+{
+	if (refcount_dec_and_test(&chp->refcnt))
+		complete(&chp->cq_rel_comp);
+}
+
 int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
 	struct c4iw_cq *chp;
@@ -976,8 +982,8 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 	chp = to_c4iw_cq(ib_cq);
 
 	xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
-	refcount_dec(&chp->refcnt);
-	wait_event(chp->wait, !refcount_read(&chp->refcnt));
+	c4iw_cq_rem_ref(chp);
+	wait_for_completion(&chp->cq_rel_comp);
 
 	ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
 					     ibucontext);
@@ -1081,7 +1087,7 @@ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	spin_lock_init(&chp->lock);
 	spin_lock_init(&chp->comp_handler_lock);
 	refcount_set(&chp->refcnt, 1);
-	init_waitqueue_head(&chp->wait);
+	init_completion(&chp->cq_rel_comp);
 	ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
 	if (ret)
 		goto err_destroy_cq;
drivers/infiniband/hw/cxgb4/ev.c

@@ -213,8 +213,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
 		break;
 	}
 done:
-	if (refcount_dec_and_test(&chp->refcnt))
-		wake_up(&chp->wait);
+	c4iw_cq_rem_ref(chp);
 	c4iw_qp_rem_ref(&qhp->ibqp);
 out:
 	return;
@@ -234,8 +233,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
 		spin_lock_irqsave(&chp->comp_handler_lock, flag);
 		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
 		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
-		if (refcount_dec_and_test(&chp->refcnt))
-			wake_up(&chp->wait);
+		c4iw_cq_rem_ref(chp);
 	} else {
 		pr_debug("unknown cqid 0x%x\n", qid);
 		xa_unlock_irqrestore(&dev->cqs, flag);
drivers/infiniband/hw/cxgb4/iw_cxgb4.h

@@ -428,7 +428,7 @@ struct c4iw_cq {
 	spinlock_t lock;
 	spinlock_t comp_handler_lock;
 	refcount_t refcnt;
-	wait_queue_head_t wait;
+	struct completion cq_rel_comp;
 	struct c4iw_wr_wait *wr_waitp;
 };
@@ -979,6 +979,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
 struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
 int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
 int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+void c4iw_cq_rem_ref(struct c4iw_cq *chp);
 int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		   struct ib_udata *udata);
 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
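
Read together, the hunks pair up as follows. This condensed view is assembled
from the diff lines above; the event path takes its reference in pre-existing
driver code that is not part of these hunks:

/* Event path (c4iw_ev_handler): the reference held across the
 * completion-handler callback is now dropped through the shared helper. */
	spin_lock_irqsave(&chp->comp_handler_lock, flag);
	(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
	spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
	c4iw_cq_rem_ref(chp);

/* Destroy path (c4iw_destroy_cq): remove the CQ from the XArray so no new
 * references can be taken, drop the base reference, then wait until any
 * in-flight holders have called complete(). */
	xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
	c4iw_cq_rem_ref(chp);
	wait_for_completion(&chp->cq_rel_comp);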