Commit 1cf24dce authored by Steve Wise, committed by Roland Dreier

RDMA/cxgb4: Fix QP flush logic

This patch makes the following fixes in the QP flush logic:

- correctly flushes unsignaled WRs followed by a signaled WR
- supports flushing a CQ bound to multiple QPs
- resets sq.flush_cidx if an active queue starts getting HW CQEs again
- marks the WQ in error whenever we leave RTS. This was only being done
  for user queues, but we need it for kernel queues too so that
  post_send/post_recv will start returning the appropriate error
  synchronously
- silently discards unsignaled read response CQEs. The HW always
  inserts CQEs, so we must discard them if the read work request was
  unsignaled.
- handles QP flushes with pending SW CQEs. The flush and out-of-order
  completion logic has a bug: if out-of-order completions are flushed
  but not yet polled by the consumer, and the QP is then flushed, we
  end up inserting duplicate completions.
- makes c4iw_flush_sq() flush only WRs that have not already been
  flushed. Since we already track how far into the SQ we have flushed
  via sq.flush_cidx, just start at that point and flush any remaining
  WRs (see the sketch after this list). This bug only caused a problem
  in the presence of unsignaled work requests.
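
As an illustration of the last point, here is a minimal, self-contained
sketch of the sq.flush_cidx bookkeeping (plain userspace C, not the
driver code; the ring layout and all names are simplified for
illustration). A flush resumes where the previous flush stopped, so no
WR is ever completed twice. In the driver, flushing a WR means
inserting a flush CQE into the software CQ; here we just print it.

	/* Sketch only: models the SQ as a ring of "flushed" flags. */
	#include <stdio.h>

	#define SQ_SIZE 8

	struct sq {
		int flushed[SQ_SIZE];	/* per-WR "already flushed" flag */
		int cidx;		/* consumer index */
		int pidx;		/* producer index */
		int flush_cidx;		/* -1 until the first flush */
	};

	static void flush_sq(struct sq *sq)
	{
		/* Resume where the previous flush stopped, if anywhere. */
		int idx = (sq->flush_cidx == -1) ? sq->cidx : sq->flush_cidx;

		while (idx != sq->pidx) {
			if (!sq->flushed[idx]) {
				sq->flushed[idx] = 1;
				printf("flush WR at idx %d\n", idx);
			}
			idx = (idx + 1) % SQ_SIZE;
		}
		sq->flush_cidx = idx;	/* remember how far we got */
	}

	int main(void)
	{
		struct sq sq = { .flush_cidx = -1 };

		sq.pidx = 3;	/* three WRs posted */
		flush_sq(&sq);	/* flushes WRs 0..2 */

		sq.pidx = 5;	/* two more posted before a second flush */
		flush_sq(&sq);	/* flushes only WRs 3..4 -- no duplicates */
		return 0;
	}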
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Vipul Pandya <vipul@chelsio.com>

[ Fixed sparse warning due to htonl/ntohl confusion.  - Roland ]
Signed-off-by: Roland Dreier <roland@purestorage.com>
parent 97d7ec0c
@@ -917,12 +917,11 @@ void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
 u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
 void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
 int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
-void c4iw_flush_hw_cq(struct t4_cq *cq);
+void c4iw_flush_hw_cq(struct c4iw_cq *chp);
 void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
-void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
-int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
+int c4iw_flush_sq(struct c4iw_qp *qhp);
 int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
 u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
 int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
...
@@ -737,6 +737,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		swsqe->idx = qhp->wq.sq.pidx;
 		swsqe->complete = 0;
 		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
+		swsqe->flushed = 0;
 		swsqe->wr_id = wr->wr_id;

 		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
@@ -1006,7 +1007,15 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 	/* locking hierarchy: cq lock first, then qp lock. */
 	spin_lock_irqsave(&rchp->lock, flag);
 	spin_lock(&qhp->lock);
-	c4iw_flush_hw_cq(&rchp->cq);
+
+	if (qhp->wq.flushed) {
+		spin_unlock(&qhp->lock);
+		spin_unlock_irqrestore(&rchp->lock, flag);
+		return;
+	}
+	qhp->wq.flushed = 1;
+
+	c4iw_flush_hw_cq(rchp);
 	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
 	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
 	spin_unlock(&qhp->lock);
@@ -1020,9 +1029,9 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 	/* locking hierarchy: cq lock first, then qp lock. */
 	spin_lock_irqsave(&schp->lock, flag);
 	spin_lock(&qhp->lock);
-	c4iw_flush_hw_cq(&schp->cq);
-	c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
-	flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
+	if (schp != rchp)
+		c4iw_flush_hw_cq(schp);
+	flushed = c4iw_flush_sq(qhp);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&schp->lock, flag);
 	if (flushed) {
@@ -1037,11 +1046,11 @@ static void flush_qp(struct c4iw_qp *qhp)
 	struct c4iw_cq *rchp, *schp;
 	unsigned long flag;

-	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
-	schp = get_chp(qhp->rhp, qhp->attr.scq);
+	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
+	schp = to_c4iw_cq(qhp->ibqp.send_cq);

+	t4_set_wq_in_error(&qhp->wq);
 	if (qhp->ibqp.uobject) {
-		t4_set_wq_in_error(&qhp->wq);
 		t4_set_cq_in_error(&rchp->cq);
 		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
@@ -1330,8 +1339,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 				disconnect = 1;
 				c4iw_get_ep(&qhp->ep->com);
 			}
-			if (qhp->ibqp.uobject)
-				t4_set_wq_in_error(&qhp->wq);
+			t4_set_wq_in_error(&qhp->wq);
 			ret = rdma_fini(rhp, qhp, ep);
 			if (ret)
 				goto err;
@@ -1340,8 +1348,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 			set_state(qhp, C4IW_QP_STATE_TERMINATE);
 			qhp->attr.layer_etype = attrs->layer_etype;
 			qhp->attr.ecode = attrs->ecode;
-			if (qhp->ibqp.uobject)
-				t4_set_wq_in_error(&qhp->wq);
+			t4_set_wq_in_error(&qhp->wq);
 			ep = qhp->ep;
 			if (!internal)
 				terminate = 1;
@@ -1350,8 +1357,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 			break;
 		case C4IW_QP_STATE_ERROR:
 			set_state(qhp, C4IW_QP_STATE_ERROR);
-			if (qhp->ibqp.uobject)
-				t4_set_wq_in_error(&qhp->wq);
+			t4_set_wq_in_error(&qhp->wq);
 			if (!internal) {
 				abort = 1;
 				disconnect = 1;
@@ -1552,12 +1558,12 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;

 	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
 	if (!qhp)
 		return ERR_PTR(-ENOMEM);

 	qhp->wq.sq.size = sqsize;
 	qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
+	qhp->wq.sq.flush_cidx = -1;
 	qhp->wq.rq.size = rqsize;
 	qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;
...
@@ -36,9 +36,9 @@
 #include "t4_msg.h"
 #include "t4fw_ri_api.h"

-#define T4_MAX_NUM_QP (1<<16)
-#define T4_MAX_NUM_CQ (1<<15)
-#define T4_MAX_NUM_PD (1<<15)
+#define T4_MAX_NUM_QP 65536
+#define T4_MAX_NUM_CQ 65536
+#define T4_MAX_NUM_PD 65536
 #define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
 #define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES)
 #define T4_MAX_IQ_SIZE (65520 - 1)
@@ -269,6 +269,7 @@ struct t4_swsqe {
 	int			complete;
 	int			signaled;
 	u16			idx;
+	int			flushed;
 };

 static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
@@ -300,6 +301,7 @@ struct t4_sq {
 	u16			pidx;
 	u16			wq_pidx;
 	u16			flags;
+	short			flush_cidx;
 };

 struct t4_swrqe {
@@ -330,6 +332,7 @@ struct t4_wq {
 	void __iomem		*db;
 	void __iomem		*gts;
 	struct c4iw_rdev	*rdev;
+	int			flushed;
 };

 static inline int t4_rqes_posted(struct t4_wq *wq)
@@ -412,6 +415,9 @@ static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)

 static inline void t4_sq_consume(struct t4_wq *wq)
 {
+	BUG_ON(wq->sq.in_use < 1);
+	if (wq->sq.cidx == wq->sq.flush_cidx)
+		wq->sq.flush_cidx = -1;
 	wq->sq.in_use--;
 	if (++wq->sq.cidx == wq->sq.size)
 		wq->sq.cidx = 0;
@@ -505,12 +511,18 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se)
 static inline void t4_swcq_produce(struct t4_cq *cq)
 {
 	cq->sw_in_use++;
+	if (cq->sw_in_use == cq->size) {
+		PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
+		cq->error = 1;
+		BUG_ON(1);
+	}
 	if (++cq->sw_pidx == cq->size)
 		cq->sw_pidx = 0;
 }

 static inline void t4_swcq_consume(struct t4_cq *cq)
 {
+	BUG_ON(cq->sw_in_use < 1);
 	cq->sw_in_use--;
 	if (++cq->sw_cidx == cq->size)
 		cq->sw_cidx = 0;
@@ -552,6 +564,7 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 		ret = -EOVERFLOW;
 		cq->error = 1;
 		printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
+		BUG_ON(1);
 	} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
 		*cqe = &cq->queue[cq->cidx];
 		ret = 0;
@@ -562,6 +575,12 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)

 static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
 {
+	if (cq->sw_in_use == cq->size) {
+		PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
+		cq->error = 1;
+		BUG_ON(1);
+		return NULL;
+	}
 	if (cq->sw_in_use)
 		return &cq->sw_queue[cq->sw_cidx];
 	return NULL;
...