Commit 057ae62f authored by Mike Marciniszyn's avatar Mike Marciniszyn Committed by Roland Dreier

IB/qib: Add fix missing from earlier patch

The upstream code was missing part of a receive/error race fix from
the internal tree.  Add the missing part (consolidating the packet-drop
paths behind a single `drop:` label), which makes future merges possible.
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 2528ea60
...@@ -445,13 +445,14 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, ...@@ -445,13 +445,14 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
qkey = be32_to_cpu(ohdr->u.ud.deth[0]); qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK; src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;
/* Get the number of bytes the message was padded by. */ /*
* Get the number of bytes the message was padded by
* and drop incomplete packets.
*/
pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
if (unlikely(tlen < (hdrsize + pad + 4))) { if (unlikely(tlen < (hdrsize + pad + 4)))
/* Drop incomplete packets. */ goto drop;
ibp->n_pkt_drops++;
goto bail;
}
tlen -= hdrsize + pad + 4; tlen -= hdrsize + pad + 4;
/* /*
...@@ -460,10 +461,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, ...@@ -460,10 +461,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
*/ */
if (qp->ibqp.qp_num) { if (qp->ibqp.qp_num) {
if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE || if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
hdr->lrh[3] == IB_LID_PERMISSIVE)) { hdr->lrh[3] == IB_LID_PERMISSIVE))
ibp->n_pkt_drops++; goto drop;
goto bail;
}
if (qp->ibqp.qp_num > 1) { if (qp->ibqp.qp_num > 1) {
u16 pkey1, pkey2; u16 pkey1, pkey2;
...@@ -476,7 +475,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, ...@@ -476,7 +475,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
0xF, 0xF,
src_qp, qp->ibqp.qp_num, src_qp, qp->ibqp.qp_num,
hdr->lrh[3], hdr->lrh[1]); hdr->lrh[3], hdr->lrh[1]);
goto bail; return;
} }
} }
if (unlikely(qkey != qp->qkey)) { if (unlikely(qkey != qp->qkey)) {
...@@ -484,30 +483,24 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, ...@@ -484,30 +483,24 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF, (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
src_qp, qp->ibqp.qp_num, src_qp, qp->ibqp.qp_num,
hdr->lrh[3], hdr->lrh[1]); hdr->lrh[3], hdr->lrh[1]);
goto bail; return;
} }
/* Drop invalid MAD packets (see 13.5.3.1). */ /* Drop invalid MAD packets (see 13.5.3.1). */
if (unlikely(qp->ibqp.qp_num == 1 && if (unlikely(qp->ibqp.qp_num == 1 &&
(tlen != 256 || (tlen != 256 ||
(be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) { (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
ibp->n_pkt_drops++; goto drop;
goto bail;
}
} else { } else {
struct ib_smp *smp; struct ib_smp *smp;
/* Drop invalid MAD packets (see 13.5.3.1). */ /* Drop invalid MAD packets (see 13.5.3.1). */
if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) { if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
ibp->n_pkt_drops++; goto drop;
goto bail;
}
smp = (struct ib_smp *) data; smp = (struct ib_smp *) data;
if ((hdr->lrh[1] == IB_LID_PERMISSIVE || if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
hdr->lrh[3] == IB_LID_PERMISSIVE) && hdr->lrh[3] == IB_LID_PERMISSIVE) &&
smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
ibp->n_pkt_drops++; goto drop;
goto bail;
}
} }
/* /*
...@@ -523,10 +516,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, ...@@ -523,10 +516,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
} else if (opcode == IB_OPCODE_UD_SEND_ONLY) { } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
wc.ex.imm_data = 0; wc.ex.imm_data = 0;
wc.wc_flags = 0; wc.wc_flags = 0;
} else { } else
ibp->n_pkt_drops++; goto drop;
goto bail;
}
/* /*
* A GRH is expected to preceed the data even if not * A GRH is expected to preceed the data even if not
...@@ -556,8 +547,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, ...@@ -556,8 +547,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
/* Silently drop packets which are too big. */ /* Silently drop packets which are too big. */
if (unlikely(wc.byte_len > qp->r_len)) { if (unlikely(wc.byte_len > qp->r_len)) {
qp->r_flags |= QIB_R_REUSE_SGE; qp->r_flags |= QIB_R_REUSE_SGE;
ibp->n_pkt_drops++; goto drop;
return;
} }
if (has_grh) { if (has_grh) {
qib_copy_sge(&qp->r_sge, &hdr->u.l.grh, qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
...@@ -594,5 +584,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, ...@@ -594,5 +584,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
(ohdr->bth[0] & (ohdr->bth[0] &
cpu_to_be32(IB_BTH_SOLICITED)) != 0); cpu_to_be32(IB_BTH_SOLICITED)) != 0);
bail:; return;
drop:
ibp->n_pkt_drops++;
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment