Commit b6bbee0d authored by Parav Pandit, committed by Doug Ledford

IB/rxe: Properly honor max IRD value for rd/atomic.

This patch honors the max incoming read request count instead of the
outgoing read request count
(a) during modify QP, when allocating the responder resource queue metadata, and
(b) during incoming read request processing.
Signed-off-by: Parav Pandit <pandit.parav@gmail.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent d9703650
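
For reference, a minimal, hypothetical sketch of how a kernel consumer sets the two limits this patch distinguishes: max_dest_rd_atomic (IRD, set at the INIT->RTR transition) bounds incoming RDMA read/atomic requests and sizes the responder resources that rxe allocates, while max_rd_atomic (ORD, set at the RTR->RTS transition) bounds reads/atomics this QP itself issues. The helper name and numeric values below are illustrative only, not part of the patch, and the caller is assumed to have already filled the address vector, path MTU, destination QPN, RQ PSN and min RNR timer in rtr_attr.

/* Illustrative sketch only, not part of this patch. */
#include <linux/string.h>
#include <rdma/ib_verbs.h>

static int example_rtr_rts(struct ib_qp *qp, struct ib_qp_attr *rtr_attr)
{
	struct ib_qp_attr rts_attr;
	int err;

	/* INIT -> RTR: responder side; IRD sizes the rd/atomic resource queue */
	rtr_attr->qp_state = IB_QPS_RTR;
	rtr_attr->max_dest_rd_atomic = 8;
	err = ib_modify_qp(qp, rtr_attr,
			   IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
			   IB_QP_DEST_QPN | IB_QP_RQ_PSN |
			   IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	if (err)
		return err;

	/* RTR -> RTS: requester side; ORD caps outstanding outgoing reads/atomics */
	memset(&rts_attr, 0, sizeof(rts_attr));
	rts_attr.qp_state = IB_QPS_RTS;
	rts_attr.max_rd_atomic = 4;
	rts_attr.timeout = 14;
	rts_attr.retry_cnt = 7;
	rts_attr.rnr_retry = 7;
	rts_attr.sq_psn = 0;
	return ib_modify_qp(qp, &rts_attr,
			    IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
			    IB_QP_RNR_RETRY | IB_QP_SQ_PSN |
			    IB_QP_MAX_QP_RD_ATOMIC);
}

Before this patch rxe allocated and indexed its responder resources using the requester-side max_rd_atomic; the hunks below move every responder-side use of the limit over to max_dest_rd_atomic.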
@@ -198,7 +198,7 @@ void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res);
 static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
 {
 	qp->resp.res_head++;
-	if (unlikely(qp->resp.res_head == qp->attr.max_rd_atomic))
+	if (unlikely(qp->resp.res_head == qp->attr.max_dest_rd_atomic))
 		qp->resp.res_head = 0;
 }
@@ -146,7 +146,7 @@ static void free_rd_atomic_resources(struct rxe_qp *qp)
 	if (qp->resp.resources) {
 		int i;

-		for (i = 0; i < qp->attr.max_rd_atomic; i++) {
+		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
 			struct resp_res *res = &qp->resp.resources[i];

 			free_rd_atomic_resource(qp, res);
@@ -174,7 +174,7 @@ static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
 	struct resp_res *res;

 	if (qp->resp.resources) {
-		for (i = 0; i < qp->attr.max_rd_atomic; i++) {
+		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
 			res = &qp->resp.resources[i];
 			free_rd_atomic_resource(qp, res);
 		}
@@ -596,14 +596,21 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
 		int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);

+		qp->attr.max_rd_atomic = max_rd_atomic;
+		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
+	}
+
+	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+		int max_dest_rd_atomic =
+			__roundup_pow_of_two(attr->max_dest_rd_atomic);
+
+		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;
+
 		free_rd_atomic_resources(qp);

-		err = alloc_rd_atomic_resources(qp, max_rd_atomic);
+		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
 		if (err)
 			return err;
-
-		qp->attr.max_rd_atomic = max_rd_atomic;
-		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
 	}

 	if (mask & IB_QP_CUR_STATE)
@@ -701,11 +708,6 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 		pr_debug("set req psn = 0x%x\n", qp->req.psn);
 	}

-	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
-		qp->attr.max_dest_rd_atomic =
-			__roundup_pow_of_two(attr->max_dest_rd_atomic);
-	}
-
 	if (mask & IB_QP_PATH_MIG_STATE)
 		qp->attr.path_mig_state = attr->path_mig_state;
@@ -383,7 +383,7 @@ static enum resp_states check_resource(struct rxe_qp *qp,
 		 * too many read/atomic ops, we just
 		 * recycle the responder resource queue
 		 */
-		if (likely(qp->attr.max_rd_atomic > 0))
+		if (likely(qp->attr.max_dest_rd_atomic > 0))
 			return RESPST_CHK_LENGTH;
 		else
 			return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;