Commit d6cff021 authored by Roland Dreier

[PATCH] IB/mthca: fix posting of first work request

Fix posting of the first WQE for mem-free HCAs: we need to link to the
previous WQE even in that case.  While we're at it, simplify the code for
Tavor-mode HCAs.  We don't really need the conditional test there
either; we can similarly always link to the previous WQE.

Based on Michael S. Tsirkin's analogous fix for userspace libmthca.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent bb4a7f0d
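
Background for the diff below: every descriptor (WQE) posted to a work queue has to be chained from the previous one by writing its location and size into the previous WQE's "next" segment.  The following is a minimal, hypothetical sketch in plain C of that pattern and of why initializing the queue's "last" pointer to the final slot of the ring (instead of NULL) lets even the very first post link unconditionally.  The structure names, field encodings, and bit values here are simplified stand-ins for illustration, not the driver's real layouts.

#include <stdint.h>
#include <stdio.h>

#define WQE_COUNT 4

struct next_seg {
	uint32_t nda_op;	/* location of the next WQE, plus opcode bits (simplified) */
	uint32_t ee_nds;	/* doorbell (DBD) bit, plus size of the next WQE (simplified) */
};

struct wq {
	struct next_seg wqe[WQE_COUNT];	/* stand-in for the WQE ring */
	struct next_seg *last;		/* previous WQE, patched on every post */
	unsigned int head;
};

static void wq_init(struct wq *wq)
{
	/* Point "last" at the final slot rather than NULL so that posting
	 * the very first WQE still has a previous entry to link from. */
	wq->last = &wq->wqe[WQE_COUNT - 1];
	wq->head = 0;
}

static void post_wqe(struct wq *wq, uint32_t opcode, uint32_t size)
{
	unsigned int ind = wq->head++ % WQE_COUNT;

	/* Unconditionally link the previous WQE to the one being posted;
	 * no "if (prev_wqe)" test is needed because "last" is always valid. */
	wq->last->nda_op = (ind << 6) | opcode;	/* hypothetical encoding */
	wq->last->ee_nds = (1u << 31) | size;	/* hypothetical DBD bit */

	wq->last = &wq->wqe[ind];
}

int main(void)
{
	struct wq wq = { 0 };

	wq_init(&wq);
	post_wqe(&wq, 0xa, 2);	/* first post: links from the last ring slot */
	post_wqe(&wq, 0xa, 2);

	printf("slot %d now links to slot %u\n",
	       WQE_COUNT - 1,
	       (unsigned int)(wq.wqe[WQE_COUNT - 1].nda_op >> 6));
	return 0;
}

With "last" always pointing at a valid WQE, the posting paths no longer need the "if (prev_wqe)" test, which is exactly what the patch removes.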
@@ -227,7 +227,6 @@ static void mthca_wq_init(struct mthca_wq *wq)
 	wq->last_comp = wq->max - 1;
 	wq->head = 0;
 	wq->tail = 0;
-	wq->last = NULL;
 }
 
 void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
@@ -1103,6 +1102,9 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 		}
 	}
 
+	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
+	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
+
 	return 0;
 }
@@ -1583,15 +1585,13 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			goto out;
 		}
 
-		if (prev_wqe) {
-			((struct mthca_next_seg *) prev_wqe)->nda_op =
-				cpu_to_be32(((ind << qp->sq.wqe_shift) +
-					     qp->send_wqe_offset) |
-					    mthca_opcode[wr->opcode]);
-			wmb();
-			((struct mthca_next_seg *) prev_wqe)->ee_nds =
-				cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
-		}
+		((struct mthca_next_seg *) prev_wqe)->nda_op =
+			cpu_to_be32(((ind << qp->sq.wqe_shift) +
+				     qp->send_wqe_offset) |
+				    mthca_opcode[wr->opcode]);
+		wmb();
+		((struct mthca_next_seg *) prev_wqe)->ee_nds =
+			cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
 
 		if (!size0) {
 			size0 = size;
@@ -1688,13 +1688,11 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
 		qp->wrid[ind] = wr->wr_id;
 
-		if (likely(prev_wqe)) {
-			((struct mthca_next_seg *) prev_wqe)->nda_op =
-				cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
-			wmb();
-			((struct mthca_next_seg *) prev_wqe)->ee_nds =
-				cpu_to_be32(MTHCA_NEXT_DBD | size);
-		}
+		((struct mthca_next_seg *) prev_wqe)->nda_op =
+			cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
+		wmb();
+		((struct mthca_next_seg *) prev_wqe)->ee_nds =
+			cpu_to_be32(MTHCA_NEXT_DBD | size);
 
 		if (!size0)
 			size0 = size;
@@ -1905,15 +1903,13 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			goto out;
 		}
 
-		if (likely(prev_wqe)) {
-			((struct mthca_next_seg *) prev_wqe)->nda_op =
-				cpu_to_be32(((ind << qp->sq.wqe_shift) +
-					     qp->send_wqe_offset) |
-					    mthca_opcode[wr->opcode]);
-			wmb();
-			((struct mthca_next_seg *) prev_wqe)->ee_nds =
-				cpu_to_be32(MTHCA_NEXT_DBD | size);
-		}
+		((struct mthca_next_seg *) prev_wqe)->nda_op =
+			cpu_to_be32(((ind << qp->sq.wqe_shift) +
+				     qp->send_wqe_offset) |
+				    mthca_opcode[wr->opcode]);
+		wmb();
+		((struct mthca_next_seg *) prev_wqe)->ee_nds =
+			cpu_to_be32(MTHCA_NEXT_DBD | size);
 
 		if (!size0) {
 			size0 = size;
@@ -189,7 +189,6 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 
 	srq->max = attr->max_wr;
 	srq->max_gs = attr->max_sge;
-	srq->last = NULL;
 	srq->counter = 0;
 
 	if (mthca_is_memfree(dev))
@@ -264,6 +263,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 
 	srq->first_free = 0;
 	srq->last_free = srq->max - 1;
+	srq->last = get_wqe(srq, srq->max - 1);
 
 	return 0;
@@ -446,13 +446,11 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 			((struct mthca_data_seg *) wqe)->addr = 0;
 		}
 
-		if (likely(prev_wqe)) {
-			((struct mthca_next_seg *) prev_wqe)->nda_op =
-				cpu_to_be32((ind << srq->wqe_shift) | 1);
-			wmb();
-			((struct mthca_next_seg *) prev_wqe)->ee_nds =
-				cpu_to_be32(MTHCA_NEXT_DBD);
-		}
+		((struct mthca_next_seg *) prev_wqe)->nda_op =
+			cpu_to_be32((ind << srq->wqe_shift) | 1);
+		wmb();
+		((struct mthca_next_seg *) prev_wqe)->ee_nds =
+			cpu_to_be32(MTHCA_NEXT_DBD);
 
 		srq->wrid[ind] = wr->wr_id;
 		srq->first_free = next_ind;