Commit 9294000d authored by Bart Van Assche, committed by Doug Ledford

IB/srp: Drain the send queue before destroying a QP

A quote from the IB spec:

However, if the Consumer does not wait for the Affiliated Asynchronous
Last WQE Reached Event, then WQE and Data Segment leakage may occur.
Therefore, it is good programming practice to tear down a QP that is
associated with an SRQ by using the following process:
* Put the QP in the Error State;
* wait for the Affiliated Asynchronous Last WQE Reached Event;
* either:
  * drain the CQ by invoking the Poll CQ verb and either wait for CQ
    to be empty or the number of Poll CQ operations has exceeded CQ
    capacity size; or
  * post another WR that completes on the same CQ and wait for this WR to return as a WC;
* and then invoke a Destroy QP or Reset QP.
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Israel Rukshin <israelr@mellanox.com>
Cc: Max Gurtovoy <maxg@mellanox.com>
Cc: Laurence Oberman <loberman@redhat.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent f039f44f
@@ -466,9 +466,13 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
  * completion handler can access the queue pair while it is
  * being destroyed.
  */
-static void srp_destroy_qp(struct ib_qp *qp)
+static void srp_destroy_qp(struct srp_rdma_ch *ch, struct ib_qp *qp)
 {
-	ib_drain_rq(qp);
+	spin_lock_irq(&ch->lock);
+	ib_process_cq_direct(ch->send_cq, -1);
+	spin_unlock_irq(&ch->lock);
+	ib_drain_qp(qp);
 	ib_destroy_qp(qp);
 }
@@ -542,7 +546,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
 	}
 	if (ch->qp)
-		srp_destroy_qp(ch->qp);
+		srp_destroy_qp(ch, ch->qp);
 	if (ch->recv_cq)
 		ib_free_cq(ch->recv_cq);
 	if (ch->send_cq)
@@ -566,7 +570,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
 	return 0;
 
 err_qp:
-	srp_destroy_qp(qp);
+	srp_destroy_qp(ch, qp);
 
 err_send_cq:
 	ib_free_cq(send_cq);
@@ -609,7 +613,7 @@ static void srp_free_ch_ib(struct srp_target_port *target,
 		ib_destroy_fmr_pool(ch->fmr_pool);
 	}
 
-	srp_destroy_qp(ch->qp);
+	srp_destroy_qp(ch, ch->qp);
 	ib_free_cq(ch->send_cq);
 	ib_free_cq(ch->recv_cq);
@@ -1822,6 +1826,11 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
 	return iu;
 }
 
+/*
+ * Note: if this function is called from inside ib_drain_sq() then it will
+ * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
+ * with status IB_WC_SUCCESS then that's a bug.
+ */
 static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment