Commit c2b75ef7 authored by Ismail, Mustafa's avatar Ismail, Mustafa Committed by Doug Ledford

RDMA/i40iw: Adding queue drain functions

Add SQ and RQ drain functions, which block until all
previously posted work requests (WRs) in the specified
queue have completed. A completion object is signaled to
unblock the waiting thread when the last CQE for the
corresponding queue is processed.
Signed-off-by: default avatarMustafa Ismail <mustafa.ismail@intel.com>
Signed-off-by: default avatarFaisal Latif <faisal.latif@intel.com>
Signed-off-by: default avatarDoug Ledford <dledford@redhat.com>
parent fa415379
...@@ -789,6 +789,8 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd, ...@@ -789,6 +789,8 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
return ERR_PTR(err_code); return ERR_PTR(err_code);
} }
} }
init_completion(&iwqp->sq_drained);
init_completion(&iwqp->rq_drained);
return &iwqp->ibqp; return &iwqp->ibqp;
error: error:
...@@ -1581,6 +1583,32 @@ static int i40iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_ne ...@@ -1581,6 +1583,32 @@ static int i40iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_ne
return ib_sg_to_pages(ibmr, sg, sg_nents, i40iw_set_page); return ib_sg_to_pages(ibmr, sg, sg_nents, i40iw_set_page);
} }
/**
 * i40iw_drain_sq - block until all posted send WRs have completed
 * @ibqp: ib qp pointer
 *
 * Only waits when the SQ ring still has outstanding work; the
 * completion is signaled from the CQ poll path once the last SQ
 * CQE for this QP has been processed.
 */
static void i40iw_drain_sq(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);

	if (I40IW_RING_MORE_WORK(iwqp->sc_qp.qp_uk.sq_ring))
		wait_for_completion(&iwqp->sq_drained);
}
/**
 * i40iw_drain_rq - block until all posted receive WRs have completed
 * @ibqp: ib qp pointer
 *
 * Only waits when the RQ ring still has outstanding work; the
 * completion is signaled from the CQ poll path once the last RQ
 * CQE for this QP has been processed.
 */
static void i40iw_drain_rq(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);

	if (I40IW_RING_MORE_WORK(iwqp->sc_qp.qp_uk.rq_ring))
		wait_for_completion(&iwqp->rq_drained);
}
/** /**
* i40iw_hwreg_mr - send cqp command for memory registration * i40iw_hwreg_mr - send cqp command for memory registration
* @iwdev: iwarp device * @iwdev: iwarp device
...@@ -2218,6 +2246,7 @@ static int i40iw_poll_cq(struct ib_cq *ibcq, ...@@ -2218,6 +2246,7 @@ static int i40iw_poll_cq(struct ib_cq *ibcq,
enum i40iw_status_code ret; enum i40iw_status_code ret;
struct i40iw_cq_uk *ukcq; struct i40iw_cq_uk *ukcq;
struct i40iw_sc_qp *qp; struct i40iw_sc_qp *qp;
struct i40iw_qp *iwqp;
unsigned long flags; unsigned long flags;
iwcq = (struct i40iw_cq *)ibcq; iwcq = (struct i40iw_cq *)ibcq;
...@@ -2268,6 +2297,13 @@ static int i40iw_poll_cq(struct ib_cq *ibcq, ...@@ -2268,6 +2297,13 @@ static int i40iw_poll_cq(struct ib_cq *ibcq,
qp = (struct i40iw_sc_qp *)cq_poll_info.qp_handle; qp = (struct i40iw_sc_qp *)cq_poll_info.qp_handle;
entry->qp = (struct ib_qp *)qp->back_qp; entry->qp = (struct ib_qp *)qp->back_qp;
entry->src_qp = cq_poll_info.qp_id; entry->src_qp = cq_poll_info.qp_id;
iwqp = (struct i40iw_qp *)qp->back_qp;
if (iwqp->iwarp_state > I40IW_QP_STATE_RTS) {
if (!I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
complete(&iwqp->sq_drained);
if (!I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
complete(&iwqp->rq_drained);
}
entry->byte_len = cq_poll_info.bytes_xfered; entry->byte_len = cq_poll_info.bytes_xfered;
entry++; entry++;
cqe_count++; cqe_count++;
...@@ -2514,6 +2550,8 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev ...@@ -2514,6 +2550,8 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
iwibdev->ibdev.query_device = i40iw_query_device; iwibdev->ibdev.query_device = i40iw_query_device;
iwibdev->ibdev.create_ah = i40iw_create_ah; iwibdev->ibdev.create_ah = i40iw_create_ah;
iwibdev->ibdev.destroy_ah = i40iw_destroy_ah; iwibdev->ibdev.destroy_ah = i40iw_destroy_ah;
iwibdev->ibdev.drain_sq = i40iw_drain_sq;
iwibdev->ibdev.drain_rq = i40iw_drain_rq;
iwibdev->ibdev.alloc_mr = i40iw_alloc_mr; iwibdev->ibdev.alloc_mr = i40iw_alloc_mr;
iwibdev->ibdev.map_mr_sg = i40iw_map_mr_sg; iwibdev->ibdev.map_mr_sg = i40iw_map_mr_sg;
iwibdev->ibdev.iwcm = kzalloc(sizeof(*iwibdev->ibdev.iwcm), GFP_KERNEL); iwibdev->ibdev.iwcm = kzalloc(sizeof(*iwibdev->ibdev.iwcm), GFP_KERNEL);
......
...@@ -170,5 +170,7 @@ struct i40iw_qp { ...@@ -170,5 +170,7 @@ struct i40iw_qp {
struct i40iw_pbl *iwpbl; struct i40iw_pbl *iwpbl;
struct i40iw_dma_mem q2_ctx_mem; struct i40iw_dma_mem q2_ctx_mem;
struct i40iw_dma_mem ietf_mem; struct i40iw_dma_mem ietf_mem;
struct completion sq_drained;
struct completion rq_drained;
}; };
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment