Commit 696513e8 authored by Brian Welty, committed by Doug Ledford

IB/hfi1, qib, rdmavt: Move AETH credit functions into rdmavt

Add rvt_compute_aeth() and rvt_get_credit() as shared functions in
rdmavt, moved from hfi1/qib logic.
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Brian Welty <brian.welty@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent beb5a042
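To make the consolidation concrete, the sketch below shows how a driver's RC paths are expected to use the shared rdmavt helpers after this patch. This is an illustration only, not part of the commit; the example_* function names are placeholders rather than hfi1 or qib symbols.

/*
 * Illustration only (not part of this commit).  Assumes the rdmavt
 * definitions added below (RVT_MSN_MASK, RVT_AETH_*, rvt_compute_aeth(),
 * rvt_get_credit()).
 */
#include <rdma/rdma_vt.h>

/* Responder side: build the AETH for an ACK or NAK response. */
static __be32 example_build_resp_aeth(struct rvt_qp *qp)
{
	if (qp->r_nak_state)
		/* NAK: syndrome in the credit field, MSN in the low 24 bits */
		return cpu_to_be32((qp->r_msn & RVT_MSN_MASK) |
				   (qp->r_nak_state << RVT_AETH_CREDIT_SHIFT));
	/* ACK: let rdmavt encode the credit code and MSN */
	return rvt_compute_aeth(qp);
}

/* Requester side: consume the credit field of a received ACK. */
static void example_consume_ack(struct rvt_qp *qp, u32 aeth)
{
	/* only plain ACKs (NAK field == 0) carry flow-control credits */
	if ((aeth >> RVT_AETH_NAK_SHIFT) == 0)
		rvt_get_credit(qp, aeth);	/* caller holds qp->s_lock */
}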
@@ -331,10 +331,6 @@ struct diag_pkt {
#define FULL_MGMT_P_KEY 0xFFFF
#define DEFAULT_P_KEY LIM_MGMT_P_KEY
#define HFI1_AETH_CREDIT_SHIFT 24
#define HFI1_AETH_CREDIT_MASK 0x1F
#define HFI1_AETH_CREDIT_INVAL 0x1F
#define HFI1_MSN_MASK 0xFFFFFF
#define HFI1_FECN_SHIFT 31
#define HFI1_FECN_MASK 1
#define HFI1_FECN_SMASK BIT(HFI1_FECN_SHIFT)
...
@@ -79,43 +79,6 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
/*
* Convert the AETH credit code into the number of credits.
*/
static const u16 credit_table[31] = {
0, /* 0 */
1, /* 1 */
2, /* 2 */
3, /* 3 */
4, /* 4 */
6, /* 5 */
8, /* 6 */
12, /* 7 */
16, /* 8 */
24, /* 9 */
32, /* A */
48, /* B */
64, /* C */
96, /* D */
128, /* E */
192, /* F */
256, /* 10 */
384, /* 11 */
512, /* 12 */
768, /* 13 */
1024, /* 14 */
1536, /* 15 */
2048, /* 16 */
3072, /* 17 */
4096, /* 18 */
6144, /* 19 */
8192, /* 1A */
12288, /* 1B */
16384, /* 1C */
24576, /* 1D */
32768 /* 1E */
};
const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
.length = sizeof(struct ib_rdma_wr),
@@ -339,68 +302,6 @@ int hfi1_check_send_wqe(struct rvt_qp *qp,
return wqe->length <= piothreshold;
}
/**
* hfi1_compute_aeth - compute the AETH (syndrome + MSN)
* @qp: the queue pair to compute the AETH for
*
* Returns the AETH.
*/
__be32 hfi1_compute_aeth(struct rvt_qp *qp)
{
u32 aeth = qp->r_msn & HFI1_MSN_MASK;
if (qp->ibqp.srq) {
/*
* Shared receive queues don't generate credits.
* Set the credit field to the invalid value.
*/
aeth |= HFI1_AETH_CREDIT_INVAL << HFI1_AETH_CREDIT_SHIFT;
} else {
u32 min, max, x;
u32 credits;
struct rvt_rwq *wq = qp->r_rq.wq;
u32 head;
u32 tail;
/* sanity check pointers before trusting them */
head = wq->head;
if (head >= qp->r_rq.size)
head = 0;
tail = wq->tail;
if (tail >= qp->r_rq.size)
tail = 0;
/*
* Compute the number of credits available (RWQEs).
* There is a small chance that the pair of reads are
* not atomic, which is OK, since the fuzziness is
* resolved as further ACKs go out.
*/
credits = head - tail;
if ((int)credits < 0)
credits += qp->r_rq.size;
/*
* Binary search the credit table to find the code to
* use.
*/
min = 0;
max = 31;
for (;;) {
x = (min + max) / 2;
if (credit_table[x] == credits)
break;
if (credit_table[x] > credits) {
max = x;
} else {
if (min == x)
break;
min = x;
}
}
aeth |= x << HFI1_AETH_CREDIT_SHIFT;
}
return cpu_to_be32(aeth);
}
/**
* _hfi1_schedule_send - schedule progress
* @qp: the QP
@@ -457,44 +358,6 @@ void hfi1_schedule_send(struct rvt_qp *qp)
_hfi1_schedule_send(qp);
}
/**
* hfi1_get_credit - handle credit in aeth
* @qp: the qp
* @aeth: the Acknowledge Extended Transport Header
*
* The QP s_lock should be held.
*/
void hfi1_get_credit(struct rvt_qp *qp, u32 aeth)
{
u32 credit = (aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK;
lockdep_assert_held(&qp->s_lock);
/*
* If the credit is invalid, we can send
* as many packets as we like. Otherwise, we have to
* honor the credit field.
*/
if (credit == HFI1_AETH_CREDIT_INVAL) {
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
hfi1_schedule_send(qp);
}
}
} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
/* Compute new LSN (i.e., MSN + credit) */
credit = (aeth + credit_table[credit]) & HFI1_MSN_MASK;
if (cmp_msn(credit, qp->s_lsn) > 0) {
qp->s_lsn = credit;
if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
hfi1_schedule_send(qp);
}
}
}
}
void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
{
unsigned long flags;
...
@@ -70,14 +70,6 @@ static inline void clear_ahg(struct rvt_qp *qp)
qp->s_ahgidx = -1;
}
/**
* hfi1_compute_aeth - compute the AETH (syndrome + MSN)
* @qp: the queue pair to compute the AETH for
*
* Returns the AETH.
*/
__be32 hfi1_compute_aeth(struct rvt_qp *qp);
/**
* hfi1_create_qp - create a queue pair for a device
* @ibpd: the protection domain who's device we create the queue pair for
@@ -91,14 +83,6 @@ __be32 hfi1_compute_aeth(struct rvt_qp *qp);
struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
/**
* hfi1_get_credit - flush the send work queue of a QP
* @qp: the qp who's send work queue to flush
* @aeth: the Acknowledge Extended Transport Header
*
* The QP s_lock should be held.
*/
void hfi1_get_credit(struct rvt_qp *qp, u32 aeth);
/**
* hfi1_qp_wakeup - wake up on the indicated event
...
@@ -284,7 +284,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
e->sent = 1;
}
-ohdr->u.aeth = hfi1_compute_aeth(qp);
+ohdr->u.aeth = rvt_compute_aeth(qp);
hwords++;
qp->s_ack_rdma_psn = e->psn;
bth2 = mask_psn(qp->s_ack_rdma_psn++);
@@ -293,7 +293,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
ps->s_txreq->ss = NULL;
len = 0;
qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
-ohdr->u.at.aeth = hfi1_compute_aeth(qp);
+ohdr->u.at.aeth = rvt_compute_aeth(qp);
ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
hwords += sizeof(ohdr->u.at) / sizeof(u32);
bth2 = mask_psn(e->psn);
@@ -315,7 +315,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
len = pmtu;
middle = HFI1_CAP_IS_KSET(SDMA_AHG);
} else {
-ohdr->u.aeth = hfi1_compute_aeth(qp);
+ohdr->u.aeth = rvt_compute_aeth(qp);
hwords++;
qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
e = &qp->s_ack_queue[qp->s_tail_ack_queue];
@@ -338,11 +338,11 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
ps->s_txreq->ss = NULL;
if (qp->s_nak_state)
ohdr->u.aeth =
-cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
+cpu_to_be32((qp->r_msn & RVT_MSN_MASK) |
(qp->s_nak_state <<
-HFI1_AETH_CREDIT_SHIFT));
+RVT_AETH_CREDIT_SHIFT));
else
-ohdr->u.aeth = hfi1_compute_aeth(qp);
+ohdr->u.aeth = rvt_compute_aeth(qp);
hwords++;
len = 0;
bth0 = OP(ACKNOWLEDGE) << 24;
@@ -519,7 +519,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
case IB_WR_SEND_WITH_INV:
/* If no credit, return. */
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
-cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
+rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
goto bail;
}
@@ -556,7 +556,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
case IB_WR_RDMA_WRITE_WITH_IMM:
/* If no credit, return. */
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
-cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
+rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
goto bail;
}
@@ -885,11 +885,11 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
if (qp->s_mig_state == IB_MIG_MIGRATED)
bth0 |= IB_BTH_MIG_REQ;
if (qp->r_nak_state)
-ohdr->u.aeth = cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
+ohdr->u.aeth = cpu_to_be32((qp->r_msn & RVT_MSN_MASK) |
(qp->r_nak_state <<
-HFI1_AETH_CREDIT_SHIFT));
+RVT_AETH_CREDIT_SHIFT));
else
-ohdr->u.aeth = hfi1_compute_aeth(qp);
+ohdr->u.aeth = rvt_compute_aeth(qp);
sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
/* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
@@ -1323,7 +1323,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
* request but will include an ACK'ed request(s).
*/
ack_psn = psn;
-if (aeth >> 29)
+if (aeth >> RVT_AETH_NAK_SHIFT)
ack_psn--;
wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
ibp = rcd_to_iport(rcd);
@@ -1403,7 +1403,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
break;
}
-switch (aeth >> 29) {
+switch (aeth >> RVT_AETH_NAK_SHIFT) {
case 0: /* ACK */
this_cpu_inc(*ibp->rvp.rc_acks);
if (qp->s_acked != qp->s_tail) {
@@ -1430,7 +1430,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
qp->s_flags &= ~RVT_S_WAIT_ACK;
hfi1_schedule_send(qp);
}
-hfi1_get_credit(qp, aeth);
+rvt_get_credit(qp, aeth);
qp->s_rnr_retry = qp->s_rnr_retry_cnt;
qp->s_retry = qp->s_retry_cnt;
update_last_psn(qp, psn);
@@ -1459,8 +1459,8 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
hfi1_stop_rc_timers(qp);
to =
-ib_hfi1_rnr_table[(aeth >> HFI1_AETH_CREDIT_SHIFT) &
-HFI1_AETH_CREDIT_MASK];
+ib_hfi1_rnr_table[(aeth >> RVT_AETH_CREDIT_SHIFT) &
+RVT_AETH_CREDIT_MASK];
hfi1_add_rnr_timer(qp, to);
return 0;
@@ -1469,8 +1469,8 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
goto bail_stop;
/* The last valid PSN is the previous PSN. */
update_last_psn(qp, psn - 1);
-switch ((aeth >> HFI1_AETH_CREDIT_SHIFT) &
-HFI1_AETH_CREDIT_MASK) {
+switch ((aeth >> RVT_AETH_CREDIT_SHIFT) &
+RVT_AETH_CREDIT_MASK) {
case 0: /* PSN sequence error */
ibp->rvp.n_seq_naks++;
/*
@@ -1600,8 +1600,8 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp,
/* Update credits for "ghost" ACKs */
if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
aeth = be32_to_cpu(ohdr->u.aeth);
-if ((aeth >> 29) == 0)
-hfi1_get_credit(qp, aeth);
+if ((aeth >> RVT_AETH_NAK_SHIFT) == 0)
+rvt_get_credit(qp, aeth);
}
goto ack_done;
}
...
@@ -130,14 +130,14 @@ const char *parse_everbs_hdrs(
case OP(RC, ACKNOWLEDGE):
trace_seq_printf(p, AETH_PRN, be32_to_cpu(eh->aeth) >> 24,
parse_syndrome(be32_to_cpu(eh->aeth) >> 24),
-be32_to_cpu(eh->aeth) & HFI1_MSN_MASK);
+be32_to_cpu(eh->aeth) & RVT_MSN_MASK);
break;
/* aeth + atomicacketh */
case OP(RC, ATOMIC_ACKNOWLEDGE):
trace_seq_printf(p, AETH_PRN " " ATOMICACKETH_PRN,
be32_to_cpu(eh->at.aeth) >> 24,
parse_syndrome(be32_to_cpu(eh->at.aeth) >> 24),
-be32_to_cpu(eh->at.aeth) & HFI1_MSN_MASK,
+be32_to_cpu(eh->at.aeth) & RVT_MSN_MASK,
ib_u64_get(&eh->at.atomic_ack_eth));
break;
/* atomiceth */
...
@@ -259,15 +259,6 @@ int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
#endif
#define PSN_MODIFY_MASK 0xFFFFFF
/*
* Compare the lower 24 bits of the msn values.
* Returns an integer <, ==, or > than zero.
*/
static inline int cmp_msn(u32 a, u32 b)
{
return (((int)a) - ((int)b)) << 8;
}
/*
* Compare two PSNs
* Returns an integer <, ==, or > than zero.
...
@@ -742,11 +742,7 @@ struct qib_tid_session_member {
#define SIZE_OF_CRC 1
#define QIB_DEFAULT_P_KEY 0xFFFF
#define QIB_AETH_CREDIT_SHIFT 24
#define QIB_AETH_CREDIT_MASK 0x1F
#define QIB_AETH_CREDIT_INVAL 0x1F
#define QIB_PSN_MASK 0xFFFFFF
#define QIB_MSN_MASK 0xFFFFFF
#define QIB_EAGER_TID_ID QLOGIC_IB_I_TID_MASK
#define QIB_MULTICAST_QPN 0xFFFFFF
...
@@ -61,43 +61,6 @@ static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
return off;
}
/*
* Convert the AETH credit code into the number of credits.
*/
static u32 credit_table[31] = {
0, /* 0 */
1, /* 1 */
2, /* 2 */
3, /* 3 */
4, /* 4 */
6, /* 5 */
8, /* 6 */
12, /* 7 */
16, /* 8 */
24, /* 9 */
32, /* A */
48, /* B */
64, /* C */
96, /* D */
128, /* E */
192, /* F */
256, /* 10 */
384, /* 11 */
512, /* 12 */
768, /* 13 */
1024, /* 14 */
1536, /* 15 */
2048, /* 16 */
3072, /* 17 */
4096, /* 18 */
6144, /* 19 */
8192, /* 1A */
12288, /* 1B */
16384, /* 1C */
24576, /* 1D */
32768 /* 1E */
};
const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
.length = sizeof(struct ib_rdma_wr),
@@ -354,66 +317,6 @@ u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
return ib_mtu_enum_to_int(pmtu);
}
/**
* qib_compute_aeth - compute the AETH (syndrome + MSN)
* @qp: the queue pair to compute the AETH for
*
* Returns the AETH.
*/
__be32 qib_compute_aeth(struct rvt_qp *qp)
{
u32 aeth = qp->r_msn & QIB_MSN_MASK;
if (qp->ibqp.srq) {
/*
* Shared receive queues don't generate credits.
* Set the credit field to the invalid value.
*/
aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
} else {
u32 min, max, x;
u32 credits;
struct rvt_rwq *wq = qp->r_rq.wq;
u32 head;
u32 tail;
/* sanity check pointers before trusting them */
head = wq->head;
if (head >= qp->r_rq.size)
head = 0;
tail = wq->tail;
if (tail >= qp->r_rq.size)
tail = 0;
/*
* Compute the number of credits available (RWQEs).
* XXX Not holding the r_rq.lock here so there is a small
* chance that the pair of reads are not atomic.
*/
credits = head - tail;
if ((int)credits < 0)
credits += qp->r_rq.size;
/*
* Binary search the credit table to find the code to
* use.
*/
min = 0;
max = 31;
for (;;) {
x = (min + max) / 2;
if (credit_table[x] == credits)
break;
if (credit_table[x] > credits)
max = x;
else if (min == x)
break;
else
min = x;
}
aeth |= x << QIB_AETH_CREDIT_SHIFT;
}
return cpu_to_be32(aeth);
}
void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
{
struct qib_qp_priv *priv;
@@ -473,43 +376,6 @@ void qib_flush_qp_waiters(struct rvt_qp *qp)
spin_unlock(&dev->rdi.pending_lock);
}
/**
* qib_get_credit - flush the send work queue of a QP
* @qp: the qp who's send work queue to flush
* @aeth: the Acknowledge Extended Transport Header
*
* The QP s_lock should be held.
*/
void qib_get_credit(struct rvt_qp *qp, u32 aeth)
{
u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;
/*
* If the credit is invalid, we can send
* as many packets as we like. Otherwise, we have to
* honor the credit field.
*/
if (credit == QIB_AETH_CREDIT_INVAL) {
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
qib_schedule_send(qp);
}
}
} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
/* Compute new LSN (i.e., MSN + credit) */
credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
if (qib_cmp24(credit, qp->s_lsn) > 0) {
qp->s_lsn = credit;
if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
qib_schedule_send(qp);
}
}
}
}
/**
* qib_check_send_wqe - validate wr/wqe
* @qp - The qp
...
@@ -144,7 +144,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
e->sent = 1;
}
-ohdr->u.aeth = qib_compute_aeth(qp);
+ohdr->u.aeth = rvt_compute_aeth(qp);
hwords++;
qp->s_ack_rdma_psn = e->psn;
bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
@@ -153,7 +153,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
qp->s_cur_sge = NULL;
len = 0;
qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
-ohdr->u.at.aeth = qib_compute_aeth(qp);
+ohdr->u.at.aeth = rvt_compute_aeth(qp);
ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
hwords += sizeof(ohdr->u.at) / sizeof(u32);
bth2 = e->psn & QIB_PSN_MASK;
@@ -174,7 +174,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
if (len > pmtu)
len = pmtu;
else {
-ohdr->u.aeth = qib_compute_aeth(qp);
+ohdr->u.aeth = rvt_compute_aeth(qp);
hwords++;
qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
e = &qp->s_ack_queue[qp->s_tail_ack_queue];
@@ -197,11 +197,11 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
qp->s_cur_sge = NULL;
if (qp->s_nak_state)
ohdr->u.aeth =
-cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
+cpu_to_be32((qp->r_msn & RVT_MSN_MASK) |
(qp->s_nak_state <<
-QIB_AETH_CREDIT_SHIFT));
+RVT_AETH_CREDIT_SHIFT));
else
-ohdr->u.aeth = qib_compute_aeth(qp);
+ohdr->u.aeth = rvt_compute_aeth(qp);
hwords++;
len = 0;
bth0 = OP(ACKNOWLEDGE) << 24;
@@ -331,7 +331,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
case IB_WR_SEND_WITH_IMM:
/* If no credit, return. */
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
-qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
+rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
goto bail;
}
@@ -362,7 +362,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
case IB_WR_RDMA_WRITE_WITH_IMM:
/* If no credit, return. */
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
-qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
+rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
goto bail;
}
@@ -658,11 +658,11 @@ void qib_send_rc_ack(struct rvt_qp *qp)
if (qp->s_mig_state == IB_MIG_MIGRATED)
bth0 |= IB_BTH_MIG_REQ;
if (qp->r_nak_state)
-ohdr->u.aeth = cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
+ohdr->u.aeth = cpu_to_be32((qp->r_msn & RVT_MSN_MASK) |
(qp->r_nak_state <<
-QIB_AETH_CREDIT_SHIFT));
+RVT_AETH_CREDIT_SHIFT));
else
-ohdr->u.aeth = qib_compute_aeth(qp);
+ohdr->u.aeth = rvt_compute_aeth(qp);
lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
qp->remote_ah_attr.sl << 4;
hdr.lrh[0] = cpu_to_be16(lrh0);
@@ -1098,7 +1098,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
* request but will include an ACK'ed request(s).
*/
ack_psn = psn;
-if (aeth >> 29)
+if (aeth >> RVT_AETH_NAK_SHIFT)
ack_psn--;
wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
ibp = to_iport(qp->ibqp.device, qp->port_num);
@@ -1178,7 +1178,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
break;
}
-switch (aeth >> 29) {
+switch (aeth >> RVT_AETH_NAK_SHIFT) {
case 0: /* ACK */
this_cpu_inc(*ibp->rvp.rc_acks);
if (qp->s_acked != qp->s_tail) {
@@ -1201,7 +1201,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
qp->s_flags &= ~RVT_S_WAIT_ACK;
qib_schedule_send(qp);
}
-qib_get_credit(qp, aeth);
+rvt_get_credit(qp, aeth);
qp->s_rnr_retry = qp->s_rnr_retry_cnt;
qp->s_retry = qp->s_retry_cnt;
update_last_psn(qp, psn);
@@ -1232,8 +1232,8 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
qp->s_flags |= RVT_S_WAIT_RNR;
qp->s_timer.function = qib_rc_rnr_retry;
qp->s_timer.expires = jiffies + usecs_to_jiffies(
-ib_qib_rnr_table[(aeth >> QIB_AETH_CREDIT_SHIFT) &
-QIB_AETH_CREDIT_MASK]);
+ib_qib_rnr_table[(aeth >> RVT_AETH_CREDIT_SHIFT) &
+RVT_AETH_CREDIT_MASK]);
add_timer(&qp->s_timer);
goto bail;
@@ -1242,8 +1242,8 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
goto bail;
/* The last valid PSN is the previous PSN. */
update_last_psn(qp, psn - 1);
-switch ((aeth >> QIB_AETH_CREDIT_SHIFT) &
-QIB_AETH_CREDIT_MASK) {
+switch ((aeth >> RVT_AETH_CREDIT_SHIFT) &
+RVT_AETH_CREDIT_MASK) {
case 0: /* PSN sequence error */
ibp->rvp.n_seq_naks++;
/*
@@ -1400,8 +1400,8 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
/* Update credits for "ghost" ACKs */
if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
aeth = be32_to_cpu(ohdr->u.aeth);
-if ((aeth >> 29) == 0)
-qib_get_credit(qp, aeth);
+if ((aeth >> RVT_AETH_NAK_SHIFT) == 0)
+rvt_get_credit(qp, aeth);
}
goto ack_done;
}
...
@@ -270,8 +270,6 @@ int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
int qib_get_counters(struct qib_pportdata *ppd,
struct qib_verbs_counters *cntrs);
__be32 qib_compute_aeth(struct rvt_qp *qp);
/*
* Functions provided by qib driver for rdmavt to use
*/
@@ -294,8 +292,6 @@ void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter);
#endif
void qib_get_credit(struct rvt_qp *qp, u32 aeth);
unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);
void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);
...
@@ -7,7 +7,7 @@
#
obj-$(CONFIG_INFINIBAND_RDMAVT) += rdmavt.o
-rdmavt-y := vt.o ah.o cq.o dma.o mad.o mcast.o mmap.o mr.o pd.o qp.o srq.o \
-trace.o
+rdmavt-y := vt.o ah.o cq.o dma.o mad.o mcast.o mmap.o mr.o pd.o qp.o \
+rc.o srq.o trace.o
CFLAGS_trace.o = -I$(src)
/*
* Copyright(c) 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <rdma/rdma_vt.h>
#define RVT_AETH_CREDIT_INVAL RVT_AETH_CREDIT_MASK
/*
* Convert the AETH credit code into the number of credits.
*/
static const u16 credit_table[31] = {
0, /* 0 */
1, /* 1 */
2, /* 2 */
3, /* 3 */
4, /* 4 */
6, /* 5 */
8, /* 6 */
12, /* 7 */
16, /* 8 */
24, /* 9 */
32, /* A */
48, /* B */
64, /* C */
96, /* D */
128, /* E */
192, /* F */
256, /* 10 */
384, /* 11 */
512, /* 12 */
768, /* 13 */
1024, /* 14 */
1536, /* 15 */
2048, /* 16 */
3072, /* 17 */
4096, /* 18 */
6144, /* 19 */
8192, /* 1A */
12288, /* 1B */
16384, /* 1C */
24576, /* 1D */
32768 /* 1E */
};
/**
* rvt_compute_aeth - compute the AETH (syndrome + MSN)
* @qp: the queue pair to compute the AETH for
*
* Returns the AETH.
*/
__be32 rvt_compute_aeth(struct rvt_qp *qp)
{
u32 aeth = qp->r_msn & RVT_MSN_MASK;
if (qp->ibqp.srq) {
/*
* Shared receive queues don't generate credits.
* Set the credit field to the invalid value.
*/
aeth |= RVT_AETH_CREDIT_INVAL << RVT_AETH_CREDIT_SHIFT;
} else {
u32 min, max, x;
u32 credits;
struct rvt_rwq *wq = qp->r_rq.wq;
u32 head;
u32 tail;
/* sanity check pointers before trusting them */
head = wq->head;
if (head >= qp->r_rq.size)
head = 0;
tail = wq->tail;
if (tail >= qp->r_rq.size)
tail = 0;
/*
* Compute the number of credits available (RWQEs).
* There is a small chance that the pair of reads are
* not atomic, which is OK, since the fuzziness is
* resolved as further ACKs go out.
*/
credits = head - tail;
if ((int)credits < 0)
credits += qp->r_rq.size;
/*
* Binary search the credit table to find the code to
* use.
*/
min = 0;
max = 31;
for (;;) {
x = (min + max) / 2;
if (credit_table[x] == credits)
break;
if (credit_table[x] > credits) {
max = x;
} else {
if (min == x)
break;
min = x;
}
}
aeth |= x << RVT_AETH_CREDIT_SHIFT;
}
return cpu_to_be32(aeth);
}
EXPORT_SYMBOL(rvt_compute_aeth);
/**
* rvt_get_credit - flush the send work queue of a QP
* @qp: the qp who's send work queue to flush
* @aeth: the Acknowledge Extended Transport Header
*
* The QP s_lock should be held.
*/
void rvt_get_credit(struct rvt_qp *qp, u32 aeth)
{
struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
u32 credit = (aeth >> RVT_AETH_CREDIT_SHIFT) & RVT_AETH_CREDIT_MASK;
lockdep_assert_held(&qp->s_lock);
/*
* If the credit is invalid, we can send
* as many packets as we like. Otherwise, we have to
* honor the credit field.
*/
if (credit == RVT_AETH_CREDIT_INVAL) {
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
rdi->driver_f.schedule_send(qp);
}
}
} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
/* Compute new LSN (i.e., MSN + credit) */
credit = (aeth + credit_table[credit]) & RVT_MSN_MASK;
if (rvt_cmp_msn(credit, qp->s_lsn) > 0) {
qp->s_lsn = credit;
if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
rdi->driver_f.schedule_send(qp);
}
}
}
}
EXPORT_SYMBOL(rvt_get_credit);
@@ -582,6 +582,37 @@ static inline void rvt_qp_swqe_complete(
}
}
#define RVT_AETH_CREDIT_SHIFT 24
#define RVT_AETH_CREDIT_MASK 0x1F
#define RVT_AETH_NAK_SHIFT 29
#define RVT_MSN_MASK 0xFFFFFF
/*
* Compare the lower 24 bits of the msn values.
* Returns an integer <, ==, or > than zero.
*/
static inline int rvt_cmp_msn(u32 a, u32 b)
{
return (((int)a) - ((int)b)) << 8;
}
/**
* rvt_compute_aeth - compute the AETH (syndrome + MSN)
* @qp: the queue pair to compute the AETH for
*
* Returns the AETH.
*/
__be32 rvt_compute_aeth(struct rvt_qp *qp);
/**
* rvt_get_credit - flush the send work queue of a QP
* @qp: the qp who's send work queue to flush
* @aeth: the Acknowledge Extended Transport Header
*
* The QP s_lock should be held.
*/
void rvt_get_credit(struct rvt_qp *qp, u32 aeth);
/**
* @qp - the qp pair
* @len - the length
...
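For reference, the table-plus-binary-search encoding used by rvt_compute_aeth() can be exercised in isolation. The following user-space sketch is an illustration only, not kernel code; it mirrors the search above and shows that, for example, 100 free RWQEs are advertised as credit code 0xD, i.e. 96 credits.

#include <stdio.h>

/* Same 31 values as the credit_table in rdmavt's rc.c. */
static const unsigned short credit_table[31] = {
	0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144,
	8192, 12288, 16384, 24576, 32768
};

/*
 * Mirror of the binary search in rvt_compute_aeth(): returns the 5-bit
 * credit code whose table entry is the largest value not above 'credits'.
 */
static unsigned int credit_code(unsigned int credits)
{
	unsigned int min = 0, max = 31, x;

	for (;;) {
		x = (min + max) / 2;
		if (credit_table[x] == credits)
			break;
		if (credit_table[x] > credits) {
			max = x;
		} else {
			if (min == x)
				break;
			min = x;
		}
	}
	return x;
}

int main(void)
{
	unsigned int code = credit_code(100);

	/* prints: code 0xD grants 96 credits */
	printf("code 0x%X grants %u credits\n", code, credit_table[code]);
	return 0;
}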