Commit 2df19e19 authored by Bharat Potnuri, committed by Doug Ledford

iw_cxgb4: Atomically flush per QP HW CQEs

When a CQ is shared by multiple QPs, c4iw_flush_hw_cq() needs to acquire
corresponding QP lock before moving the CQEs into its corresponding SW
queue and accessing the SQ contents for completing a WR.
Ignore CQEs if corresponding QP is already flushed.

Cc: stable@vger.kernel.org
Signed-off-by: Potnuri Bharat Teja <bharat@chelsio.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 54e7e48b
@@ -315,7 +315,7 @@ static void advance_oldest_read(struct t4_wq *wq)
  * Deal with out-of-order and/or completions that complete
  * prior unsignalled WRs.
  */
-void c4iw_flush_hw_cq(struct c4iw_cq *chp)
+void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp)
 {
 	struct t4_cqe *hw_cqe, *swcqe, read_cqe;
 	struct c4iw_qp *qhp;
@@ -339,6 +339,13 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
 		if (qhp == NULL)
 			goto next_cqe;
+		if (flush_qhp != qhp) {
+			spin_lock(&qhp->lock);
+			if (qhp->wq.flushed == 1)
+				goto next_cqe;
+		}
 		if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
 			goto next_cqe;
@@ -390,6 +397,8 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
 next_cqe:
 		t4_hwcq_consume(&chp->cq);
 		ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
+		if (qhp && flush_qhp != qhp)
+			spin_unlock(&qhp->lock);
 	}
 }
......
@@ -1053,7 +1053,7 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
 void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
 u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
 void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
-void c4iw_flush_hw_cq(struct c4iw_cq *chp);
+void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp);
 void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
......
@@ -1343,12 +1343,12 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 	qhp->wq.flushed = 1;
 	t4_set_wq_in_error(&qhp->wq);
-	c4iw_flush_hw_cq(rchp);
+	c4iw_flush_hw_cq(rchp, qhp);
 	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
 	rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
 	if (schp != rchp)
-		c4iw_flush_hw_cq(schp);
+		c4iw_flush_hw_cq(schp, qhp);
 	sq_flushed = c4iw_flush_sq(qhp);
 	spin_unlock(&qhp->lock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment