Commit 44dcfa4b authored by Mike Marciniszyn, committed by Doug Ledford

IB/rdmavt: Avoid resetting wqe send_flags in unreserve

The wqe should be read-only, and in fact the superfluous reset of the
RVT_SEND_RESERVE_USED flag causes an issue where reserved operations
elicit a bad completion to the ULP.

The maintenance of the flag is now entirely within rvt_post_one_wr():
a reserved operation sets the flag, and a non-reserved operation ensures
the flag is reset on the operation about to be posted (see the
illustrative sketch below).

Fixes: 856cc4c2 ("IB/hfi1: Add the capability for reserved operations")
Reviewed-by: Don Hiatt <don.hiatt@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 5f14e4e6
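To make the failure mode concrete: the sketch below is a plain-C model, not the kernel code. swqe, post_one_wr, buggy_unreserve, and maybe_complete are hypothetical stand-ins; only RVT_SEND_RESERVE_USED names something from the patch. It shows how a completion path that keys off the reserve flag delivers a spurious completion once the unreserve path clears the flag behind the post path's back.

/*
 * Illustrative sketch only -- not the kernel code. It models the
 * failure mode the commit message describes, using simplified
 * stand-in types and functions.
 */
#include <stdio.h>
#include <stdbool.h>

#define RVT_SEND_RESERVE_USED 0x1	/* stand-in for the kernel flag */

struct swqe {
	unsigned int send_flags;
};

/* After the fix: the post path alone sets or clears the flag. */
static void post_one_wr(struct swqe *wqe, bool reserved_op)
{
	if (reserved_op)
		wqe->send_flags |= RVT_SEND_RESERVE_USED;
	else
		wqe->send_flags &= ~RVT_SEND_RESERVE_USED;
}

/* Before the fix, unreserve also cleared the flag on the wqe... */
static void buggy_unreserve(struct swqe *wqe)
{
	wqe->send_flags &= ~RVT_SEND_RESERVE_USED;
}

/* ...so a completion path keying off the flag then wrongly surfaces
 * a completion for a reserved operation to the ULP. */
static void maybe_complete(const struct swqe *wqe)
{
	if (wqe->send_flags & RVT_SEND_RESERVE_USED)
		printf("reserved op: completion suppressed\n");
	else
		printf("completion delivered to ULP\n");
}

int main(void)
{
	struct swqe wqe = { 0 };

	post_one_wr(&wqe, true);	/* reserved operation */
	buggy_unreserve(&wqe);		/* old behavior: flag reset too early */
	maybe_complete(&wqe);		/* prints the wrong outcome */
	return 0;
}

After this patch, only rvt_post_one_wr() writes the flag, so the wqe stays read-only on the completion side and reserved operations keep their completions suppressed.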
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1772,10 +1772,13 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
 			0);
 		qp->s_next_psn = wqe->lpsn + 1;
 	}
-	if (unlikely(reserved_op))
+	if (unlikely(reserved_op)) {
+		wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
 		rvt_qp_wqe_reserve(qp, wqe);
-	else
+	} else {
+		wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
 		qp->s_avail--;
+	}
 	trace_rvt_post_one_wr(qp, wqe);
 	smp_wmb(); /* see request builders */
 	qp->s_head = next;
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -2,7 +2,7 @@
 #define DEF_RDMAVT_INCQP_H
 
 /*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016, 2017 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -526,7 +526,6 @@ static inline void rvt_qp_wqe_reserve(
 	struct rvt_qp *qp,
 	struct rvt_swqe *wqe)
 {
-	wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
 	atomic_inc(&qp->s_reserved_used);
 }
 
@@ -550,7 +549,6 @@ static inline void rvt_qp_wqe_unreserve(
 	struct rvt_swqe *wqe)
 {
 	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
-		wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
 		atomic_dec(&qp->s_reserved_used);
 		/* insure no compiler re-order up to s_last change */
 		smp_mb__after_atomic();
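A side note on the accounting visible in the first hunk: only a non-reserved post consumes a regular slot (qp->s_avail--), while a reserved post is counted in qp->s_reserved_used via rvt_qp_wqe_reserve(). Below is a minimal model of that invariant, assuming a hypothetical qp_model harness; only the two counter names mirror the patch.

/* Minimal model of the queue accounting in the hunks above.
 * Hypothetical harness; only s_avail / s_reserved_used mirror the patch. */
#include <assert.h>
#include <stdbool.h>

struct qp_model {
	int s_avail;		/* normal send slots remaining */
	int s_reserved_used;	/* reserved operations in flight */
};

static void post(struct qp_model *qp, bool reserved_op)
{
	if (reserved_op)
		qp->s_reserved_used++;	/* rvt_qp_wqe_reserve() */
	else
		qp->s_avail--;		/* only normal ops consume a slot */
}

static void unreserve(struct qp_model *qp)
{
	qp->s_reserved_used--;		/* rvt_qp_wqe_unreserve() */
}

int main(void)
{
	struct qp_model qp = { .s_avail = 4, .s_reserved_used = 0 };

	post(&qp, true);	/* reserved op leaves s_avail untouched */
	post(&qp, false);
	unreserve(&qp);
	assert(qp.s_avail == 3 && qp.s_reserved_used == 0);
	return 0;
}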