Commit f24a6d48 authored by Harish Chegondi, committed by Doug Ledford

IB/qib: Remove ibport and use rdmavt version

Remove several ibport members from qib and use the rdmavt versions instead.
rc_acks, rc_qacks, and rc_delayed_comp are defined as per-CPU variables in
rdmavt, so add support for them; they were plain (non-per-CPU) counters in
the qib ibport structure.
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Harish Chegondi <harish.chegondi@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent d2b8d4da
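
For context on the diff below: the hot-path RC counters this patch converts (rc_acks, rc_qacks, rc_delayed_comp) move from plain integer fields to per-CPU u64s, so senders increment a CPU-local slot without locking and readers sum all slots; "zeroing" records a snapshot in a z_ shadow value instead of clearing the per-CPU data. A minimal sketch of that pattern, using an illustrative demo_port structure rather than the driver's actual qib_ibport/rvt_ibport layout:

#include <linux/types.h>
#include <linux/percpu.h>

struct demo_port {
	u64 __percpu *acks;	/* hot-path counter, one slot per CPU */
	u64 z_acks;		/* snapshot taken when the user "zeroes" it */
};

static int demo_port_init(struct demo_port *p)
{
	p->acks = alloc_percpu(u64);
	return p->acks ? 0 : -ENOMEM;
}

static void demo_port_fini(struct demo_port *p)
{
	free_percpu(p->acks);
}

static void demo_port_ack(struct demo_port *p)
{
	this_cpu_inc(*p->acks);	/* lockless, no cross-CPU cacheline traffic */
}

static u64 demo_port_read(struct demo_port *p)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(p->acks, cpu);
	return sum - p->z_acks;	/* report the delta since the last "zero" */
}

The trade-off is the usual one for statistics counters: increments become cheap and scalable, while reads become O(nr_cpus) and only approximately instantaneous, which is fine for diagnostics.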
@@ -379,7 +379,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
 		/* Check for valid receive state. */
 		if (!(ib_qib_state_ops[qp->state] &
 		      QIB_PROCESS_RECV_OK)) {
-			ibp->n_pkt_drops++;
+			ibp->rvp.n_pkt_drops++;
 			goto unlock;
 		}
@@ -399,7 +399,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
 				IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
 				diff = qib_cmp24(psn, qp->r_psn);
 				if (!qp->r_nak_state && diff >= 0) {
-					ibp->n_rc_seqnak++;
+					ibp->rvp.n_rc_seqnak++;
 					qp->r_nak_state =
 							IB_NAK_PSN_ERROR;
 					/* Use the expected PSN. */
...
@@ -2956,13 +2956,13 @@ static void pma_6120_timer(unsigned long data)
 	struct qib_ibport *ibp = &ppd->ibport_data;
 	unsigned long flags;
-	spin_lock_irqsave(&ibp->lock, flags);
+	spin_lock_irqsave(&ibp->rvp.lock, flags);
 	if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED) {
 		cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
 		qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
 				      &cs->spkts, &cs->rpkts, &cs->xmit_wait);
 		mod_timer(&cs->pma_timer,
-			  jiffies + usecs_to_jiffies(ibp->pma_sample_interval));
+		      jiffies + usecs_to_jiffies(ibp->rvp.pma_sample_interval));
 	} else if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
 		u64 ta, tb, tc, td, te;
@@ -2975,11 +2975,11 @@ static void pma_6120_timer(unsigned long data)
 		cs->rpkts = td - cs->rpkts;
 		cs->xmit_wait = te - cs->xmit_wait;
 	}
-	spin_unlock_irqrestore(&ibp->lock, flags);
+	spin_unlock_irqrestore(&ibp->rvp.lock, flags);
 }
 
 /*
- * Note that the caller has the ibp->lock held.
+ * Note that the caller has the ibp->rvp.lock held.
  */
 static void qib_set_cntr_6120_sample(struct qib_pportdata *ppd, u32 intv,
 				     u32 start)
...
@@ -5497,7 +5497,7 @@ static void try_7322_ipg(struct qib_pportdata *ppd)
 	unsigned delay;
 	int ret;
-	agent = ibp->send_agent;
+	agent = ibp->rvp.send_agent;
 	if (!agent)
 		goto retry;
...
@@ -245,6 +245,13 @@ int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
 		alloc_percpu(struct qib_pma_counters);
 	if (!ppd->ibport_data.pmastats)
 		return -ENOMEM;
+	ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
+	ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
+	ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
+	if (!(ppd->ibport_data.rvp.rc_acks) ||
+	    !(ppd->ibport_data.rvp.rc_qacks) ||
+	    !(ppd->ibport_data.rvp.rc_delayed_comp))
+		return -ENOMEM;
 
 	if (qib_cc_table_size < IB_CCT_MIN_ENTRIES)
 		goto bail;
@@ -632,6 +639,9 @@ static int qib_create_workqueues(struct qib_devdata *dd)
 static void qib_free_pportdata(struct qib_pportdata *ppd)
 {
 	free_percpu(ppd->ibport_data.pmastats);
+	free_percpu(ppd->ibport_data.rvp.rc_acks);
+	free_percpu(ppd->ibport_data.rvp.rc_qacks);
+	free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
 	ppd->ibport_data.pmastats = NULL;
 }
...
@@ -70,7 +70,7 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
 	unsigned long flags;
 	unsigned long timeout;
-	agent = ibp->send_agent;
+	agent = ibp->rvp.send_agent;
 	if (!agent)
 		return;
@@ -79,7 +79,8 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
 		return;
 	/* o14-2 */
-	if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout))
+	if (ibp->rvp.trap_timeout &&
+	    time_before(jiffies, ibp->rvp.trap_timeout))
 		return;
 	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
@@ -93,18 +94,18 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
 	smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
 	smp->class_version = 1;
 	smp->method = IB_MGMT_METHOD_TRAP;
-	ibp->tid++;
-	smp->tid = cpu_to_be64(ibp->tid);
+	ibp->rvp.tid++;
+	smp->tid = cpu_to_be64(ibp->rvp.tid);
 	smp->attr_id = IB_SMP_ATTR_NOTICE;
 	/* o14-1: smp->mkey = 0; */
 	memcpy(smp->data, data, len);
-	spin_lock_irqsave(&ibp->lock, flags);
+	spin_lock_irqsave(&ibp->rvp.lock, flags);
 	if (!ibp->sm_ah) {
-		if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
+		if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
 			struct ib_ah *ah;
-			ah = qib_create_qp0_ah(ibp, ibp->sm_lid);
+			ah = qib_create_qp0_ah(ibp, ibp->rvp.sm_lid);
 			if (IS_ERR(ah))
 				ret = PTR_ERR(ah);
 			else {
@@ -118,17 +119,17 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
 		send_buf->ah = &ibp->sm_ah->ibah;
 		ret = 0;
 	}
-	spin_unlock_irqrestore(&ibp->lock, flags);
+	spin_unlock_irqrestore(&ibp->rvp.lock, flags);
 	if (!ret)
 		ret = ib_post_send_mad(send_buf, NULL);
 	if (!ret) {
 		/* 4.096 usec. */
-		timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000;
-		ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout);
+		timeout = (4096 * (1UL << ibp->rvp.subnet_timeout)) / 1000;
+		ibp->rvp.trap_timeout = jiffies + usecs_to_jiffies(timeout);
 	} else {
 		ib_free_send_mad(send_buf);
-		ibp->trap_timeout = 0;
+		ibp->rvp.trap_timeout = 0;
 	}
 }
@@ -141,10 +142,10 @@ void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
 	struct ib_mad_notice_attr data;
 	if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
-		ibp->pkey_violations++;
+		ibp->rvp.pkey_violations++;
 	else
-		ibp->qkey_violations++;
-	ibp->n_pkt_drops++;
+		ibp->rvp.qkey_violations++;
+	ibp->rvp.n_pkt_drops++;
 	/* Send violation trap */
 	data.generic_type = IB_NOTICE_TYPE_SECURITY;
@@ -217,8 +218,8 @@ void qib_cap_mask_chg(struct qib_ibport *ibp)
 	data.toggle_count = 0;
 	memset(&data.details, 0, sizeof(data.details));
 	data.details.ntc_144.lid = data.issuer_lid;
-	data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);
+	data.details.ntc_144.new_cap_mask =
+					cpu_to_be32(ibp->rvp.port_cap_flags);
 
 	qib_send_trap(ibp, &data, sizeof(data));
 }
@@ -409,37 +410,38 @@ static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
 	int ret = 0;
 	/* Is the mkey in the process of expiring? */
-	if (ibp->mkey_lease_timeout &&
-	    time_after_eq(jiffies, ibp->mkey_lease_timeout)) {
+	if (ibp->rvp.mkey_lease_timeout &&
+	    time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) {
 		/* Clear timeout and mkey protection field. */
-		ibp->mkey_lease_timeout = 0;
-		ibp->mkeyprot = 0;
+		ibp->rvp.mkey_lease_timeout = 0;
+		ibp->rvp.mkeyprot = 0;
 	}
-	if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->mkey == 0 ||
-	    ibp->mkey == smp->mkey)
+	if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 ||
+	    ibp->rvp.mkey == smp->mkey)
 		valid_mkey = 1;
 	/* Unset lease timeout on any valid Get/Set/TrapRepress */
-	if (valid_mkey && ibp->mkey_lease_timeout &&
+	if (valid_mkey && ibp->rvp.mkey_lease_timeout &&
 	    (smp->method == IB_MGMT_METHOD_GET ||
 	     smp->method == IB_MGMT_METHOD_SET ||
 	     smp->method == IB_MGMT_METHOD_TRAP_REPRESS))
-		ibp->mkey_lease_timeout = 0;
+		ibp->rvp.mkey_lease_timeout = 0;
 	if (!valid_mkey) {
 		switch (smp->method) {
 		case IB_MGMT_METHOD_GET:
 			/* Bad mkey not a violation below level 2 */
-			if (ibp->mkeyprot < 2)
+			if (ibp->rvp.mkeyprot < 2)
 				break;
 		case IB_MGMT_METHOD_SET:
 		case IB_MGMT_METHOD_TRAP_REPRESS:
-			if (ibp->mkey_violations != 0xFFFF)
-				++ibp->mkey_violations;
-			if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
-				ibp->mkey_lease_timeout = jiffies +
-					ibp->mkey_lease_period * HZ;
+			if (ibp->rvp.mkey_violations != 0xFFFF)
+				++ibp->rvp.mkey_violations;
+			if (!ibp->rvp.mkey_lease_timeout &&
+			    ibp->rvp.mkey_lease_period)
+				ibp->rvp.mkey_lease_timeout = jiffies +
+					ibp->rvp.mkey_lease_period * HZ;
 			/* Generate a trap notice. */
 			qib_bad_mkey(ibp, smp);
 			ret = 1;
@@ -489,15 +491,15 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	/* Only return the mkey if the protection field allows it. */
 	if (!(smp->method == IB_MGMT_METHOD_GET &&
-	      ibp->mkey != smp->mkey &&
-	      ibp->mkeyprot == 1))
-		pip->mkey = ibp->mkey;
-	pip->gid_prefix = ibp->gid_prefix;
+	      ibp->rvp.mkey != smp->mkey &&
+	      ibp->rvp.mkeyprot == 1))
+		pip->mkey = ibp->rvp.mkey;
+	pip->gid_prefix = ibp->rvp.gid_prefix;
 	pip->lid = cpu_to_be16(ppd->lid);
-	pip->sm_lid = cpu_to_be16(ibp->sm_lid);
-	pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);
+	pip->sm_lid = cpu_to_be16(ibp->rvp.sm_lid);
+	pip->cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
 	/* pip->diag_code; */
-	pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period);
+	pip->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period);
 	pip->local_port_num = port;
 	pip->link_width_enabled = ppd->link_width_enabled;
 	pip->link_width_supported = ppd->link_width_supported;
@@ -508,7 +510,7 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	pip->portphysstate_linkdown =
 		(dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
 		(get_linkdowndefaultstate(ppd) ? 1 : 2);
-	pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc;
+	pip->mkeyprot_resv_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc;
 	pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
 		ppd->link_speed_enabled;
 	switch (ppd->ibmtu) {
@@ -529,9 +531,9 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 		mtu = IB_MTU_256;
 		break;
 	}
-	pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl;
+	pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->rvp.sm_sl;
 	pip->vlcap_inittype = ppd->vls_supported << 4;  /* InitType = 0 */
-	pip->vl_high_limit = ibp->vl_high_limit;
+	pip->vl_high_limit = ibp->rvp.vl_high_limit;
 	pip->vl_arb_high_cap =
 		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
 	pip->vl_arb_low_cap =
@@ -542,20 +544,20 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	/* pip->vlstallcnt_hoqlife; */
 	pip->operationalvl_pei_peo_fpi_fpo =
 		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
-	pip->mkey_violations = cpu_to_be16(ibp->mkey_violations);
+	pip->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations);
 	/* P_KeyViolations are counted by hardware. */
-	pip->pkey_violations = cpu_to_be16(ibp->pkey_violations);
-	pip->qkey_violations = cpu_to_be16(ibp->qkey_violations);
+	pip->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations);
+	pip->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations);
 	/* Only the hardware GUID is supported for now */
 	pip->guid_cap = QIB_GUIDS_PER_PORT;
-	pip->clientrereg_resv_subnetto = ibp->subnet_timeout;
+	pip->clientrereg_resv_subnetto = ibp->rvp.subnet_timeout;
 	/* 32.768 usec. response time (guessing) */
 	pip->resv_resptimevalue = 3;
 	pip->localphyerrors_overrunerrors =
 		(get_phyerrthreshold(ppd) << 4) |
 		get_overrunthreshold(ppd);
 	/* pip->max_credit_hint; */
-	if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
+	if (ibp->rvp.port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
 		u32 v;
 		v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
@@ -685,9 +687,9 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	event.device = ibdev;
 	event.element.port_num = port;
-	ibp->mkey = pip->mkey;
-	ibp->gid_prefix = pip->gid_prefix;
-	ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
+	ibp->rvp.mkey = pip->mkey;
+	ibp->rvp.gid_prefix = pip->gid_prefix;
+	ibp->rvp.mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
 	lid = be16_to_cpu(pip->lid);
 	/* Must be a valid unicast LID address. */
@@ -708,19 +710,19 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	/* Must be a valid unicast LID address. */
 	if (smlid == 0 || smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
 		smp->status |= IB_SMP_INVALID_FIELD;
-	else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
-		spin_lock_irqsave(&ibp->lock, flags);
+	else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) {
+		spin_lock_irqsave(&ibp->rvp.lock, flags);
 		if (ibp->sm_ah) {
-			if (smlid != ibp->sm_lid)
+			if (smlid != ibp->rvp.sm_lid)
 				ibp->sm_ah->attr.dlid = smlid;
-			if (msl != ibp->sm_sl)
+			if (msl != ibp->rvp.sm_sl)
 				ibp->sm_ah->attr.sl = msl;
 		}
-		spin_unlock_irqrestore(&ibp->lock, flags);
-		if (smlid != ibp->sm_lid)
-			ibp->sm_lid = smlid;
-		if (msl != ibp->sm_sl)
-			ibp->sm_sl = msl;
+		spin_unlock_irqrestore(&ibp->rvp.lock, flags);
+		if (smlid != ibp->rvp.sm_lid)
+			ibp->rvp.sm_lid = smlid;
+		if (msl != ibp->rvp.sm_sl)
+			ibp->rvp.sm_sl = msl;
 		event.event = IB_EVENT_SM_CHANGE;
 		ib_dispatch_event(&event);
 	}
@@ -768,10 +770,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 			smp->status |= IB_SMP_INVALID_FIELD;
 	}
-	ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
-	ibp->vl_high_limit = pip->vl_high_limit;
+	ibp->rvp.mkeyprot = pip->mkeyprot_resv_lmc >> 6;
+	ibp->rvp.vl_high_limit = pip->vl_high_limit;
 	(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
-				    ibp->vl_high_limit);
+				    ibp->rvp.vl_high_limit);
 	mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
 	if (mtu == -1)
@@ -789,13 +791,13 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	}
 	if (pip->mkey_violations == 0)
-		ibp->mkey_violations = 0;
+		ibp->rvp.mkey_violations = 0;
 	if (pip->pkey_violations == 0)
-		ibp->pkey_violations = 0;
+		ibp->rvp.pkey_violations = 0;
 	if (pip->qkey_violations == 0)
-		ibp->qkey_violations = 0;
+		ibp->rvp.qkey_violations = 0;
 	ore = pip->localphyerrors_overrunerrors;
 	if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
@@ -804,7 +806,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	if (set_overrunthreshold(ppd, (ore & 0xF)))
 		smp->status |= IB_SMP_INVALID_FIELD;
-	ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
+	ibp->rvp.subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
 	/*
 	 * Do the port state change now that the other link parameters
@@ -1062,7 +1064,7 @@ static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
 	memset(smp->data, 0, sizeof(smp->data));
-	if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP))
+	if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP))
 		smp->status |= IB_SMP_UNSUP_METHOD;
 	else
 		for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
@@ -1078,7 +1080,7 @@ static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
 	u8 *p = (u8 *) smp->data;
 	unsigned i;
-	if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) {
+	if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP)) {
 		smp->status |= IB_SMP_UNSUP_METHOD;
 		return reply(smp);
 	}
@@ -1195,20 +1197,20 @@ static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 		goto bail;
 	}
-	spin_lock_irqsave(&ibp->lock, flags);
+	spin_lock_irqsave(&ibp->rvp.lock, flags);
 	p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
 	p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
 	p->counter_width = 4;   /* 32 bit counters */
 	p->counter_mask0_9 = COUNTER_MASK0_9;
-	p->sample_start = cpu_to_be32(ibp->pma_sample_start);
-	p->sample_interval = cpu_to_be32(ibp->pma_sample_interval);
-	p->tag = cpu_to_be16(ibp->pma_tag);
-	p->counter_select[0] = ibp->pma_counter_select[0];
-	p->counter_select[1] = ibp->pma_counter_select[1];
-	p->counter_select[2] = ibp->pma_counter_select[2];
-	p->counter_select[3] = ibp->pma_counter_select[3];
-	p->counter_select[4] = ibp->pma_counter_select[4];
-	spin_unlock_irqrestore(&ibp->lock, flags);
+	p->sample_start = cpu_to_be32(ibp->rvp.pma_sample_start);
+	p->sample_interval = cpu_to_be32(ibp->rvp.pma_sample_interval);
+	p->tag = cpu_to_be16(ibp->rvp.pma_tag);
+	p->counter_select[0] = ibp->rvp.pma_counter_select[0];
+	p->counter_select[1] = ibp->rvp.pma_counter_select[1];
+	p->counter_select[2] = ibp->rvp.pma_counter_select[2];
+	p->counter_select[3] = ibp->rvp.pma_counter_select[3];
+	p->counter_select[4] = ibp->rvp.pma_counter_select[4];
+	spin_unlock_irqrestore(&ibp->rvp.lock, flags);
 bail:
 	return reply((struct ib_smp *) pmp);
@@ -1233,7 +1235,7 @@ static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
 		goto bail;
 	}
-	spin_lock_irqsave(&ibp->lock, flags);
+	spin_lock_irqsave(&ibp->rvp.lock, flags);
 	/* Port Sampling code owns the PS* HW counters */
 	xmit_flags = ppd->cong_stats.flags;
@@ -1242,18 +1244,18 @@ static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
 	if (status == IB_PMA_SAMPLE_STATUS_DONE ||
 	    (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
 	     xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
-		ibp->pma_sample_start = be32_to_cpu(p->sample_start);
-		ibp->pma_sample_interval = be32_to_cpu(p->sample_interval);
-		ibp->pma_tag = be16_to_cpu(p->tag);
-		ibp->pma_counter_select[0] = p->counter_select[0];
-		ibp->pma_counter_select[1] = p->counter_select[1];
-		ibp->pma_counter_select[2] = p->counter_select[2];
-		ibp->pma_counter_select[3] = p->counter_select[3];
-		ibp->pma_counter_select[4] = p->counter_select[4];
-		dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval,
-				      ibp->pma_sample_start);
+		ibp->rvp.pma_sample_start = be32_to_cpu(p->sample_start);
+		ibp->rvp.pma_sample_interval = be32_to_cpu(p->sample_interval);
+		ibp->rvp.pma_tag = be16_to_cpu(p->tag);
+		ibp->rvp.pma_counter_select[0] = p->counter_select[0];
+		ibp->rvp.pma_counter_select[1] = p->counter_select[1];
+		ibp->rvp.pma_counter_select[2] = p->counter_select[2];
+		ibp->rvp.pma_counter_select[3] = p->counter_select[3];
+		ibp->rvp.pma_counter_select[4] = p->counter_select[4];
+		dd->f_set_cntr_sample(ppd, ibp->rvp.pma_sample_interval,
+				      ibp->rvp.pma_sample_start);
 	}
-	spin_unlock_irqrestore(&ibp->lock, flags);
+	spin_unlock_irqrestore(&ibp->rvp.lock, flags);
 	ret = pma_get_portsamplescontrol(pmp, ibdev, port);
@@ -1357,8 +1359,8 @@ static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
 	int i;
 	memset(pmp->data, 0, sizeof(pmp->data));
-	spin_lock_irqsave(&ibp->lock, flags);
-	p->tag = cpu_to_be16(ibp->pma_tag);
+	spin_lock_irqsave(&ibp->rvp.lock, flags);
+	p->tag = cpu_to_be16(ibp->rvp.pma_tag);
 	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
 		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
 	else {
@@ -1373,11 +1375,11 @@ static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
 			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
 		}
 	}
-	for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
+	for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++)
 		p->counter[i] = cpu_to_be32(
 			get_cache_hw_sample_counters(
-				ppd, ibp->pma_counter_select[i]));
-	spin_unlock_irqrestore(&ibp->lock, flags);
+				ppd, ibp->rvp.pma_counter_select[i]));
+	spin_unlock_irqrestore(&ibp->rvp.lock, flags);
 	return reply((struct ib_smp *) pmp);
 }
@@ -1397,8 +1399,8 @@ static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
 	/* Port Sampling code owns the PS* HW counters */
 	memset(pmp->data, 0, sizeof(pmp->data));
-	spin_lock_irqsave(&ibp->lock, flags);
-	p->tag = cpu_to_be16(ibp->pma_tag);
+	spin_lock_irqsave(&ibp->rvp.lock, flags);
+	p->tag = cpu_to_be16(ibp->rvp.pma_tag);
 	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
 		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
 	else {
@@ -1415,11 +1417,11 @@ static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
 			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
 		}
 	}
-	for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
+	for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++)
 		p->counter[i] = cpu_to_be64(
 			get_cache_hw_sample_counters(
-				ppd, ibp->pma_counter_select[i]));
-	spin_unlock_irqrestore(&ibp->lock, flags);
+				ppd, ibp->rvp.pma_counter_select[i]));
+	spin_unlock_irqrestore(&ibp->rvp.lock, flags);
 	return reply((struct ib_smp *) pmp);
 }
@@ -1453,7 +1455,7 @@ static int pma_get_portcounters(struct ib_pma_mad *pmp,
 	cntrs.excessive_buffer_overrun_errors -=
 		ibp->z_excessive_buffer_overrun_errors;
 	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
-	cntrs.vl15_dropped += ibp->n_vl15_dropped;
+	cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped;
 	memset(pmp->data, 0, sizeof(pmp->data));
@@ -1546,9 +1548,9 @@ static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 	qib_get_counters(ppd, &cntrs);
-	spin_lock_irqsave(&ppd->ibport_data.lock, flags);
+	spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
 	xmit_wait_counter = xmit_wait_get_value_delta(ppd);
-	spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
+	spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
 	/* Adjust counters for any resets done. */
 	cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
@@ -1564,7 +1566,7 @@ static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
 	cntrs.excessive_buffer_overrun_errors -=
 		ibp->z_excessive_buffer_overrun_errors;
 	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
-	cntrs.vl15_dropped += ibp->n_vl15_dropped;
+	cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped;
 	cntrs.port_xmit_data -= ibp->z_port_xmit_data;
 	cntrs.port_rcv_data -= ibp->z_port_rcv_data;
 	cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
@@ -1743,7 +1745,7 @@ static int pma_set_portcounters(struct ib_pma_mad *pmp,
 			cntrs.excessive_buffer_overrun_errors;
 	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
-		ibp->n_vl15_dropped = 0;
+		ibp->rvp.n_vl15_dropped = 0;
 		ibp->z_vl15_dropped = cntrs.vl15_dropped;
 	}
@@ -1778,11 +1780,11 @@ static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
 	ret = pma_get_portcounters_cong(pmp, ibdev, port);
 	if (counter_select & IB_PMA_SEL_CONG_XMIT) {
-		spin_lock_irqsave(&ppd->ibport_data.lock, flags);
+		spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
 		ppd->cong_stats.counter = 0;
 		dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
 				      0x0);
-		spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
+		spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
 	}
 	if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
 		ibp->z_port_xmit_data = cntrs.port_xmit_data;
@@ -1806,7 +1808,7 @@ static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
 			cntrs.local_link_integrity_errors;
 		ibp->z_excessive_buffer_overrun_errors =
 			cntrs.excessive_buffer_overrun_errors;
-		ibp->n_vl15_dropped = 0;
+		ibp->rvp.n_vl15_dropped = 0;
 		ibp->z_vl15_dropped = cntrs.vl15_dropped;
 	}
@@ -1916,12 +1918,12 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
 			ret = subn_get_vl_arb(smp, ibdev, port);
 			goto bail;
 		case IB_SMP_ATTR_SM_INFO:
-			if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
+			if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) {
 				ret = IB_MAD_RESULT_SUCCESS |
 					IB_MAD_RESULT_CONSUMED;
 				goto bail;
 			}
-			if (ibp->port_cap_flags & IB_PORT_SM) {
+			if (ibp->rvp.port_cap_flags & IB_PORT_SM) {
 				ret = IB_MAD_RESULT_SUCCESS;
 				goto bail;
 			}
@@ -1950,12 +1952,12 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
 			ret = subn_set_vl_arb(smp, ibdev, port);
 			goto bail;
 		case IB_SMP_ATTR_SM_INFO:
-			if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
+			if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) {
 				ret = IB_MAD_RESULT_SUCCESS |
 					IB_MAD_RESULT_CONSUMED;
 				goto bail;
 			}
-			if (ibp->port_cap_flags & IB_PORT_SM) {
+			if (ibp->rvp.port_cap_flags & IB_PORT_SM) {
 				ret = IB_MAD_RESULT_SUCCESS;
 				goto bail;
 			}
@@ -2456,7 +2458,7 @@ static void xmit_wait_timer_func(unsigned long opaque)
 	unsigned long flags;
 	u8 status;
-	spin_lock_irqsave(&ppd->ibport_data.lock, flags);
+	spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
 	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
 		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
 		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
@@ -2469,7 +2471,7 @@ static void xmit_wait_timer_func(unsigned long opaque)
 	ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
 	dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
 done:
-	spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
+	spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
 	mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
 }
@@ -2501,7 +2503,7 @@ int qib_create_agents(struct qib_ibdev *dev)
 		dd->pport[p].cong_stats.timer.expires = 0;
 		add_timer(&dd->pport[p].cong_stats.timer);
-		ibp->send_agent = agent;
+		ibp->rvp.send_agent = agent;
 	}
 	return 0;
@@ -2509,9 +2511,9 @@ int qib_create_agents(struct qib_ibdev *dev)
err:
 	for (p = 0; p < dd->num_pports; p++) {
 		ibp = &dd->pport[p].ibport_data;
-		if (ibp->send_agent) {
-			agent = ibp->send_agent;
-			ibp->send_agent = NULL;
+		if (ibp->rvp.send_agent) {
+			agent = ibp->rvp.send_agent;
+			ibp->rvp.send_agent = NULL;
 			ib_unregister_mad_agent(agent);
 		}
 	}
@@ -2528,9 +2530,9 @@ void qib_free_agents(struct qib_ibdev *dev)
 	for (p = 0; p < dd->num_pports; p++) {
 		ibp = &dd->pport[p].ibport_data;
-		if (ibp->send_agent) {
-			agent = ibp->send_agent;
-			ibp->send_agent = NULL;
+		if (ibp->rvp.send_agent) {
+			agent = ibp->rvp.send_agent;
+			ibp->rvp.send_agent = NULL;
 			ib_unregister_mad_agent(agent);
 		}
 		if (ibp->sm_ah) {
...
@@ -231,9 +231,9 @@ static void insert_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
 	spin_lock_irqsave(&dev->qpt_lock, flags);
 	if (qp->ibqp.qp_num == 0)
-		rcu_assign_pointer(ibp->qp0, qp);
+		rcu_assign_pointer(ibp->rvp.qp[0], qp);
 	else if (qp->ibqp.qp_num == 1)
-		rcu_assign_pointer(ibp->qp1, qp);
+		rcu_assign_pointer(ibp->rvp.qp[1], qp);
 	else {
 		qp->next = dev->qp_table[n];
 		rcu_assign_pointer(dev->qp_table[n], qp);
@@ -255,12 +255,12 @@ static void remove_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
 	spin_lock_irqsave(&dev->qpt_lock, flags);
-	if (rcu_dereference_protected(ibp->qp0,
+	if (rcu_dereference_protected(ibp->rvp.qp[0],
 			lockdep_is_held(&dev->qpt_lock)) == qp) {
-		RCU_INIT_POINTER(ibp->qp0, NULL);
-	} else if (rcu_dereference_protected(ibp->qp1,
+		RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
+	} else if (rcu_dereference_protected(ibp->rvp.qp[1],
 			lockdep_is_held(&dev->qpt_lock)) == qp) {
-		RCU_INIT_POINTER(ibp->qp1, NULL);
+		RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
 	} else {
 		struct rvt_qp *q;
 		struct rvt_qp __rcu **qpp;
@@ -306,9 +306,9 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
 		if (!qib_mcast_tree_empty(ibp))
 			qp_inuse++;
 		rcu_read_lock();
-		if (rcu_dereference(ibp->qp0))
+		if (rcu_dereference(ibp->rvp.qp[0]))
 			qp_inuse++;
-		if (rcu_dereference(ibp->qp1))
+		if (rcu_dereference(ibp->rvp.qp[1]))
 			qp_inuse++;
 		rcu_read_unlock();
 	}
@@ -344,9 +344,9 @@ struct rvt_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
 	rcu_read_lock();
 	if (unlikely(qpn <= 1)) {
 		if (qpn == 0)
-			qp = rcu_dereference(ibp->qp0);
+			qp = rcu_dereference(ibp->rvp.qp[0]);
 		else
-			qp = rcu_dereference(ibp->qp1);
+			qp = rcu_dereference(ibp->rvp.qp[1]);
 		if (qp)
 			atomic_inc(&qp->refcount);
 	} else {
...
@@ -760,7 +760,7 @@ void qib_send_rc_ack(struct rvt_qp *qp)
queue_ack:
 	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
-		ibp->n_rc_qacks++;
+		this_cpu_inc(*ibp->rvp.rc_qacks);
 		qp->s_flags |= QIB_S_ACK_PENDING | QIB_S_RESP_PENDING;
 		qp->s_nak_state = qp->r_nak_state;
 		qp->s_ack_psn = qp->r_ack_psn;
@@ -888,9 +888,9 @@ static void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
 	ibp = to_iport(qp->ibqp.device, qp->port_num);
 	if (wqe->wr.opcode == IB_WR_RDMA_READ)
-		ibp->n_rc_resends++;
+		ibp->rvp.n_rc_resends++;
 	else
-		ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
+		ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
 	qp->s_flags &= ~(QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR |
 			 QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_PSN |
@@ -913,7 +913,7 @@ static void rc_timeout(unsigned long arg)
 	spin_lock(&qp->s_lock);
 	if (qp->s_flags & QIB_S_TIMER) {
 		ibp = to_iport(qp->ibqp.device, qp->port_num);
-		ibp->n_rc_timeouts++;
+		ibp->rvp.n_rc_timeouts++;
 		qp->s_flags &= ~QIB_S_TIMER;
 		del_timer(&qp->s_timer);
 		qib_restart_rc(qp, qp->s_last_psn + 1, 1);
@@ -1087,7 +1087,7 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 		if (++qp->s_last >= qp->s_size)
 			qp->s_last = 0;
 	} else
-		ibp->n_rc_delayed_comp++;
+		this_cpu_inc(*ibp->rvp.rc_delayed_comp);
 	qp->s_retry = qp->s_retry_cnt;
 	update_last_psn(qp, wqe->lpsn);
@@ -1232,7 +1232,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
 	switch (aeth >> 29) {
 	case 0:         /* ACK */
-		ibp->n_rc_acks++;
+		this_cpu_inc(*ibp->rvp.rc_acks);
 		if (qp->s_acked != qp->s_tail) {
 			/*
 			 * We are expecting more ACKs so
@@ -1261,7 +1261,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
 		goto bail;
 	case 1:         /* RNR NAK */
-		ibp->n_rnr_naks++;
+		ibp->rvp.n_rnr_naks++;
 		if (qp->s_acked == qp->s_tail)
 			goto bail;
 		if (qp->s_flags & QIB_S_WAIT_RNR)
@@ -1276,7 +1276,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
 		/* The last valid PSN is the previous PSN. */
 		update_last_psn(qp, psn - 1);
-		ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
+		ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
 		reset_psn(qp, psn);
@@ -1297,7 +1297,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
 		switch ((aeth >> QIB_AETH_CREDIT_SHIFT) &
 			QIB_AETH_CREDIT_MASK) {
 		case 0: /* PSN sequence error */
-			ibp->n_seq_naks++;
+			ibp->rvp.n_seq_naks++;
 			/*
 			 * Back up to the responder's expected PSN.
 			 * Note that we might get a NAK in the middle of an
@@ -1310,17 +1310,17 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
 		case 1: /* Invalid Request */
 			status = IB_WC_REM_INV_REQ_ERR;
-			ibp->n_other_naks++;
+			ibp->rvp.n_other_naks++;
 			goto class_b;
 		case 2: /* Remote Access Error */
 			status = IB_WC_REM_ACCESS_ERR;
-			ibp->n_other_naks++;
+			ibp->rvp.n_other_naks++;
 			goto class_b;
 		case 3: /* Remote Operation Error */
 			status = IB_WC_REM_OP_ERR;
-			ibp->n_other_naks++;
+			ibp->rvp.n_other_naks++;
class_b:
 			if (qp->s_last == qp->s_acked) {
 				qib_send_complete(qp, wqe, status);
@@ -1371,7 +1371,7 @@ static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
 		wqe = do_rc_completion(qp, wqe, ibp);
 	}
-	ibp->n_rdma_seq++;
+	ibp->rvp.n_rdma_seq++;
 	qp->r_flags |= QIB_R_RDMAR_SEQ;
 	qib_restart_rc(qp, qp->s_last_psn + 1, 0);
 	if (list_empty(&qp->rspwait)) {
@@ -1643,7 +1643,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
 		 * Don't queue the NAK if we already sent one.
 		 */
 		if (!qp->r_nak_state) {
-			ibp->n_rc_seqnak++;
+			ibp->rvp.n_rc_seqnak++;
 			qp->r_nak_state = IB_NAK_PSN_ERROR;
 			/* Use the expected PSN. */
 			qp->r_ack_psn = qp->r_psn;
@@ -1679,7 +1679,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
 	 */
 	e = NULL;
 	old_req = 1;
-	ibp->n_rc_dupreq++;
+	ibp->rvp.n_rc_dupreq++;
 	spin_lock_irqsave(&qp->s_lock, flags);
...
@@ -279,7 +279,8 @@ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 			if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
 				goto err;
 			guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
-			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
+			if (!gid_ok(&hdr->u.l.grh.dgid,
+				    ibp->rvp.gid_prefix, guid))
 				goto err;
 			if (!gid_ok(&hdr->u.l.grh.sgid,
 			    qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
@@ -311,7 +312,8 @@ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 				goto err;
 			guid = get_sguid(ibp,
 					 qp->remote_ah_attr.grh.sgid_index);
-			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
+			if (!gid_ok(&hdr->u.l.grh.dgid,
+				    ibp->rvp.gid_prefix, guid))
 				goto err;
 			if (!gid_ok(&hdr->u.l.grh.sgid,
 			    qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
@@ -409,7 +411,7 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
 	if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) ||
 	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
-		ibp->n_pkt_drops++;
+		ibp->rvp.n_pkt_drops++;
 		/*
 		 * For RC, the requester would timeout and retry so
 		 * shortcut the timeouts and just signal too many retries.
@@ -566,7 +568,7 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
send_comp:
 	spin_lock_irqsave(&sqp->s_lock, flags);
-	ibp->n_loop_pkts++;
+	ibp->rvp.n_loop_pkts++;
flush_send:
 	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
 	qib_send_complete(sqp, wqe, send_status);
@@ -576,7 +578,7 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
 	/* Handle RNR NAK */
 	if (qp->ibqp.qp_type == IB_QPT_UC)
 		goto send_comp;
-	ibp->n_rnr_naks++;
+	ibp->rvp.n_rnr_naks++;
 	/*
 	 * Note: we don't need the s_lock held since the BUSY flag
 	 * makes this single threaded.
@@ -663,7 +665,7 @@ u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
 	hdr->next_hdr = IB_GRH_NEXT_HDR;
 	hdr->hop_limit = grh->hop_limit;
 	/* The SGID is 32-bit aligned. */
-	hdr->sgid.global.subnet_prefix = ibp->gid_prefix;
+	hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
 	hdr->sgid.global.interface_id = grh->sgid_index ?
 		ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
 	hdr->dgid = grh->dgid;
...
@@ -702,7 +702,7 @@ int qib_sdma_verbs_send(struct qib_pportdata *ppd,
 			struct qib_ibport *ibp;
 			ibp = &ppd->ibport_data;
-			ibp->n_dmawait++;
+			ibp->rvp.n_dmawait++;
 			qp->s_flags |= QIB_S_WAIT_DMA_DESC;
 			list_add_tail(&priv->iowait, &dev->dmawait);
 		}
...
@@ -406,7 +406,13 @@ static struct kobj_type qib_sl2vl_ktype = {
 #define QIB_DIAGC_ATTR(N) \
 	static struct qib_diagc_attr qib_diagc_attr_##N = { \
 		.attr = { .name = __stringify(N), .mode = 0664 }, \
-		.counter = offsetof(struct qib_ibport, n_##N) \
+		.counter = offsetof(struct qib_ibport, rvp.n_##N) \
+	}
+
+#define QIB_DIAGC_ATTR_PER_CPU(N) \
+	static struct qib_diagc_attr qib_diagc_attr_##N = { \
+		.attr = { .name = __stringify(N), .mode = 0664 }, \
+		.counter = offsetof(struct qib_ibport, rvp.z_##N) \
 	}
 
 struct qib_diagc_attr {
@@ -414,10 +420,11 @@ struct qib_diagc_attr {
 	size_t counter;
 };
 
+QIB_DIAGC_ATTR_PER_CPU(rc_acks);
+QIB_DIAGC_ATTR_PER_CPU(rc_qacks);
+QIB_DIAGC_ATTR_PER_CPU(rc_delayed_comp);
+
 QIB_DIAGC_ATTR(rc_resends);
-QIB_DIAGC_ATTR(rc_acks);
-QIB_DIAGC_ATTR(rc_qacks);
-QIB_DIAGC_ATTR(rc_delayed_comp);
 QIB_DIAGC_ATTR(seq_naks);
 QIB_DIAGC_ATTR(rdma_seq);
 QIB_DIAGC_ATTR(rnr_naks);
@@ -449,6 +456,35 @@ static struct attribute *diagc_default_attributes[] = {
 	NULL
 };
 
+static u64 get_all_cpu_total(u64 __percpu *cntr)
+{
+	int cpu;
+	u64 counter = 0;
+
+	for_each_possible_cpu(cpu)
+		counter += *per_cpu_ptr(cntr, cpu);
+	return counter;
+}
+
+#define def_write_per_cpu(cntr) \
+static void write_per_cpu_##cntr(struct qib_pportdata *ppd, u32 data)	\
+{									\
+	struct qib_devdata *dd = ppd->dd;				\
+	struct qib_ibport *qibp = &ppd->ibport_data;			\
+	/* A write can only zero the counter */				\
+	if (data == 0)							\
+		qibp->rvp.z_##cntr = get_all_cpu_total(qibp->rvp.cntr); \
+	else								\
+		qib_dev_err(dd, "Per CPU cntrs can only be zeroed");	\
+}
+
+def_write_per_cpu(rc_acks)
+def_write_per_cpu(rc_qacks)
+def_write_per_cpu(rc_delayed_comp)
+
+#define READ_PER_CPU_CNTR(cntr) (get_all_cpu_total(qibp->rvp.cntr) - \
+				 qibp->rvp.z_##cntr)
+
 static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
 			       char *buf)
 {
@@ -458,7 +494,16 @@ static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
 		container_of(kobj, struct qib_pportdata, diagc_kobj);
 	struct qib_ibport *qibp = &ppd->ibport_data;
 
-	return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter));
+	if (!strncmp(dattr->attr.name, "rc_acks", 7))
+		return sprintf(buf, "%llu\n", READ_PER_CPU_CNTR(rc_acks));
+	else if (!strncmp(dattr->attr.name, "rc_qacks", 8))
+		return sprintf(buf, "%llu\n", READ_PER_CPU_CNTR(rc_qacks));
+	else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15))
+		return sprintf(buf, "%llu\n",
+			       READ_PER_CPU_CNTR(rc_delayed_comp));
+	else
+		return sprintf(buf, "%u\n",
+			       *(u32 *)((char *)qibp + dattr->counter));
 }
 
 static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
@@ -475,7 +520,15 @@ static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
 	ret = kstrtou32(buf, 0, &val);
 	if (ret)
 		return ret;
-	*(u32 *)((char *) qibp + dattr->counter) = val;
+
+	if (!strncmp(dattr->attr.name, "rc_acks", 7))
+		write_per_cpu_rc_acks(ppd, val);
+	else if (!strncmp(dattr->attr.name, "rc_qacks", 8))
+		write_per_cpu_rc_qacks(ppd, val);
+	else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15))
+		write_per_cpu_rc_delayed_comp(ppd, val);
+	else
+		*(u32 *)((char *)qibp + dattr->counter) = val;
 	return size;
 }
...
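A note on the sysfs behavior introduced above: reads of the three per-CPU counters return get_all_cpu_total() minus the z_ snapshot, and only a write of 0 takes effect, refreshing that snapshot. A hypothetical user-space sketch of the read/zero cycle (the path is illustrative only; check the actual qib diag-counters layout under /sys/class/infiniband):

#include <stdio.h>

/* Illustrative path only, not confirmed by this patch. */
#define CNTR "/sys/class/infiniband/qib0/ports/1/diag_counters/rc_acks"

int main(void)
{
	unsigned long long v;
	FILE *f = fopen(CNTR, "r+");

	if (!f)
		return 1;
	if (fscanf(f, "%llu", &v) == 1)
		printf("rc_acks since last zero: %llu\n", v);
	rewind(f);
	fprintf(f, "0\n");	/* only 0 has an effect; non-zero writes just log a device error */
	fclose(f);
	return 0;
}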
@@ -527,7 +527,7 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 	set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
 	qp->r_sge.num_sge = 0;
drop:
-	ibp->n_pkt_drops++;
+	ibp->rvp.n_pkt_drops++;
 	return;
op_err:
...
@@ -62,7 +62,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
 	qp = qib_lookup_qpn(ibp, swqe->ud_wr.remote_qpn);
 	if (!qp) {
-		ibp->n_pkt_drops++;
+		ibp->rvp.n_pkt_drops++;
 		return;
 	}
@@ -73,7 +73,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
 	if (dqptype != sqptype ||
 	    !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
-		ibp->n_pkt_drops++;
+		ibp->rvp.n_pkt_drops++;
 		goto drop;
 	}
@@ -153,14 +153,14 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
 		}
 		if (!ret) {
 			if (qp->ibqp.qp_num == 0)
-				ibp->n_vl15_dropped++;
+				ibp->rvp.n_vl15_dropped++;
 			goto bail_unlock;
 		}
 	}
 	/* Silently drop packets which are too big. */
 	if (unlikely(wc.byte_len > qp->r_len)) {
 		qp->r_flags |= QIB_R_REUSE_SGE;
-		ibp->n_pkt_drops++;
+		ibp->rvp.n_pkt_drops++;
 		goto bail_unlock;
 	}
@@ -219,7 +219,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
 	/* Signal completion event if the solicited bit is set. */
 	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 		     swqe->wr.send_flags & IB_SEND_SOLICITED);
-	ibp->n_loop_pkts++;
+	ibp->rvp.n_loop_pkts++;
bail_unlock:
 	spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
@@ -546,7 +546,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 		}
 		if (!ret) {
 			if (qp->ibqp.qp_num == 0)
-				ibp->n_vl15_dropped++;
+				ibp->rvp.n_vl15_dropped++;
 			return;
 		}
 	}
@@ -589,5 +589,5 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 	return;
drop:
-	ibp->n_pkt_drops++;
+	ibp->rvp.n_pkt_drops++;
 }
...
@@ -586,7 +586,7 @@ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
 	/* Check for valid receive state. */
 	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
-		ibp->n_pkt_drops++;
+		ibp->rvp.n_pkt_drops++;
 		goto unlock;
 	}
@@ -716,7 +716,7 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
 	return;
 drop:
-	ibp->n_pkt_drops++;
+	ibp->rvp.n_pkt_drops++;
 }

 /*
@@ -1256,7 +1256,7 @@ static int qib_verbs_send_dma(struct rvt_qp *qp, struct qib_ib_header *hdr,
 	qib_put_txreq(tx);
 	ret = wait_kmem(dev, qp);
 unaligned:
-	ibp->n_unaligned++;
+	ibp->rvp.n_unaligned++;
 bail:
 	return ret;
 bail_tx:
@@ -1647,16 +1647,16 @@ static int qib_query_port(struct ib_device *ibdev, u8 port,
 	memset(props, 0, sizeof(*props));
 	props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
 	props->lmc = ppd->lmc;
-	props->sm_lid = ibp->sm_lid;
-	props->sm_sl = ibp->sm_sl;
+	props->sm_lid = ibp->rvp.sm_lid;
+	props->sm_sl = ibp->rvp.sm_sl;
 	props->state = dd->f_iblink_state(ppd->lastibcstat);
 	props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
-	props->port_cap_flags = ibp->port_cap_flags;
+	props->port_cap_flags = ibp->rvp.port_cap_flags;
 	props->gid_tbl_len = QIB_GUIDS_PER_PORT;
 	props->max_msg_sz = 0x80000000;
 	props->pkey_tbl_len = qib_get_npkeys(dd);
-	props->bad_pkey_cntr = ibp->pkey_violations;
-	props->qkey_viol_cntr = ibp->qkey_violations;
+	props->bad_pkey_cntr = ibp->rvp.pkey_violations;
+	props->qkey_viol_cntr = ibp->rvp.qkey_violations;
 	props->active_width = ppd->link_width_active;
 	/* See rate_show() */
 	props->active_speed = ppd->link_speed_active;
@@ -1684,7 +1684,7 @@ static int qib_query_port(struct ib_device *ibdev, u8 port,
 		mtu = IB_MTU_2048;
 	}
 	props->active_mtu = mtu;
-	props->subnet_timeout = ibp->subnet_timeout;
+	props->subnet_timeout = ibp->rvp.subnet_timeout;

 	return 0;
 }
@@ -1734,14 +1734,14 @@ static int qib_modify_port(struct ib_device *ibdev, u8 port,
 	struct qib_ibport *ibp = to_iport(ibdev, port);
 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);

-	ibp->port_cap_flags |= props->set_port_cap_mask;
-	ibp->port_cap_flags &= ~props->clr_port_cap_mask;
+	ibp->rvp.port_cap_flags |= props->set_port_cap_mask;
+	ibp->rvp.port_cap_flags &= ~props->clr_port_cap_mask;
 	if (props->set_port_cap_mask || props->clr_port_cap_mask)
 		qib_cap_mask_chg(ibp);
 	if (port_modify_mask & IB_PORT_SHUTDOWN)
 		qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
 	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
-		ibp->qkey_violations = 0;
+		ibp->rvp.qkey_violations = 0;
 	return 0;
 }
@@ -1757,7 +1757,7 @@ static int qib_query_gid(struct ib_device *ibdev, u8 port,
 	struct qib_ibport *ibp = to_iport(ibdev, port);
 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);

-	gid->global.subnet_prefix = ibp->gid_prefix;
+	gid->global.subnet_prefix = ibp->rvp.gid_prefix;
 	if (index == 0)
 		gid->global.interface_id = ppd->guid;
 	else if (index < QIB_GUIDS_PER_PORT)
@@ -1787,7 +1787,7 @@ struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
 	attr.dlid = dlid;
 	attr.port_num = ppd_from_ibp(ibp)->port;
 	rcu_read_lock();
-	qp0 = rcu_dereference(ibp->qp0);
+	qp0 = rcu_dereference(ibp->rvp.qp[0]);
 	if (qp0)
 		ah = ib_create_ah(qp0->ibqp.pd, &attr);
 	rcu_read_unlock();
@@ -1876,22 +1876,22 @@ static void init_ibport(struct qib_pportdata *ppd)
 	struct qib_verbs_counters cntrs;
 	struct qib_ibport *ibp = &ppd->ibport_data;

-	spin_lock_init(&ibp->lock);
+	spin_lock_init(&ibp->rvp.lock);
 	/* Set the prefix to the default value (see ch. 4.1.1) */
-	ibp->gid_prefix = IB_DEFAULT_GID_PREFIX;
-	ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
-	ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
+	ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
+	ibp->rvp.sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
+	ibp->rvp.port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
 		IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
 		IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
 		IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
 		IB_PORT_OTHER_LOCAL_CHANGES_SUP;
 	if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
-		ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
-	ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
-	ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
-	ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
-	ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
-	ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
+		ibp->rvp.port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
+	ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
+	ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
+	ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
+	ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
+	ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;

 	/* Snapshot current HW counters to "clear" them. */
 	qib_get_counters(ppd, &cntrs);
@@ -1911,8 +1911,8 @@ static void init_ibport(struct qib_pportdata *ppd)
 	ibp->z_excessive_buffer_overrun_errors =
 		cntrs.excessive_buffer_overrun_errors;
 	ibp->z_vl15_dropped = cntrs.vl15_dropped;
-	RCU_INIT_POINTER(ibp->qp0, NULL);
-	RCU_INIT_POINTER(ibp->qp1, NULL);
+	RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
+	RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
 }

 static int qib_port_immutable(struct ib_device *ibdev, u8 port_num,
...
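The payoff of moving rc_acks, rc_qacks, and rc_delayed_comp into rdmavt as per-CPU variables is on the ack-processing hot path: each CPU bumps its own slot with no shared cache line, lock, or atomic. A sketch of the expected update pattern (the wrapper name is illustrative; the u64 __percpu * members in struct rvt_ibport are per the commit message):

    /* Illustrative hot-path increment of a per-CPU counter. */
    static inline void qib_count_rc_ack(struct qib_ibport *ibp)
    {
    	/* No atomics needed: each CPU owns its own slot. */
    	this_cpu_inc(*ibp->rvp.rc_acks);
    }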
@@ -401,21 +401,10 @@ struct qib_pma_counters {
 };

 struct qib_ibport {
-	struct rvt_qp __rcu *qp0;
-	struct rvt_qp __rcu *qp1;
-	struct ib_mad_agent *send_agent;	/* agent for SMI (traps) */
+	struct rvt_ibport rvp;
 	struct rvt_ah *sm_ah;
 	struct rvt_ah *smi_ah;
-	struct rb_root mcast_tree;
-	spinlock_t lock;	/* protect changes in this struct */
-	/* non-zero when timer is set */
-	unsigned long mkey_lease_timeout;
-	unsigned long trap_timeout;
-	__be64 gid_prefix;	/* in network order */
-	__be64 mkey;
 	__be64 guids[QIB_GUIDS_PER_PORT - 1];	/* writable GUIDs */
-	u64 tid;	/* TID for traps */
 	struct qib_pma_counters __percpu *pmastats;
 	u64 z_unicast_xmit;	/* starting count for PMA */
 	u64 z_unicast_rcv;	/* starting count for PMA */
@@ -434,42 +423,9 @@ struct qib_ibport {
 	u32 z_local_link_integrity_errors;	/* starting count for PMA */
 	u32 z_excessive_buffer_overrun_errors;	/* starting count for PMA */
 	u32 z_vl15_dropped;	/* starting count for PMA */
-	u32 n_rc_resends;
-	u32 n_rc_acks;
-	u32 n_rc_qacks;
-	u32 n_rc_delayed_comp;
-	u32 n_seq_naks;
-	u32 n_rdma_seq;
-	u32 n_rnr_naks;
-	u32 n_other_naks;
-	u32 n_loop_pkts;
-	u32 n_pkt_drops;
-	u32 n_vl15_dropped;
-	u32 n_rc_timeouts;
-	u32 n_dmawait;
-	u32 n_unaligned;
-	u32 n_rc_dupreq;
-	u32 n_rc_seqnak;
-	u32 port_cap_flags;
-	u32 pma_sample_start;
-	u32 pma_sample_interval;
-	__be16 pma_counter_select[5];
-	u16 pma_tag;
-	u16 pkey_violations;
-	u16 qkey_violations;
-	u16 mkey_violations;
-	u16 mkey_lease_period;
-	u16 sm_lid;
-	u16 repress_traps;
-	u8 sm_sl;
-	u8 mkeyprot;
-	u8 subnet_timeout;
-	u8 vl_high_limit;
 	u8 sl_to_vl[16];
 };

 struct qib_ibdev {
 	struct rvt_dev_info rdi;
 	struct list_head pending_mmaps;
...
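For orientation, the members deleted from struct qib_ibport above do not disappear; they move into the embedded struct rvt_ibport. An illustrative, trimmed view of that structure (field order and the exact member set are assumptions here; the authoritative definition lives in rdmavt's headers):

    /* Illustrative excerpt only, not the full rdmavt definition. */
    struct rvt_ibport {
    	struct rvt_qp __rcu *qp[2];	/* replaces qp0/qp1 */
    	struct rb_root mcast_tree;
    	spinlock_t lock;	/* protect changes in this struct */
    	__be64 gid_prefix;	/* in network order */
    	u32 port_cap_flags;
    	u64 n_pkt_drops;
    	u64 n_vl15_dropped;
    	/* hot RC counters, now per CPU */
    	u64 __percpu *rc_acks;
    	u64 __percpu *rc_qacks;
    	u64 __percpu *rc_delayed_comp;
    	/* ... */
    };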
@@ -114,8 +114,8 @@ struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid)
 	unsigned long flags;
 	struct qib_mcast *mcast;

-	spin_lock_irqsave(&ibp->lock, flags);
-	n = ibp->mcast_tree.rb_node;
+	spin_lock_irqsave(&ibp->rvp.lock, flags);
+	n = ibp->rvp.mcast_tree.rb_node;
 	while (n) {
 		int ret;
@@ -129,11 +129,11 @@ struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid)
 			n = n->rb_right;
 		else {
 			atomic_inc(&mcast->refcount);
-			spin_unlock_irqrestore(&ibp->lock, flags);
+			spin_unlock_irqrestore(&ibp->rvp.lock, flags);
 			goto bail;
 		}
 	}
-	spin_unlock_irqrestore(&ibp->lock, flags);
+	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

 	mcast = NULL;
@@ -153,11 +153,11 @@ struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid)
 static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp,
 			 struct qib_mcast *mcast, struct qib_mcast_qp *mqp)
 {
-	struct rb_node **n = &ibp->mcast_tree.rb_node;
+	struct rb_node **n = &ibp->rvp.mcast_tree.rb_node;
 	struct rb_node *pn = NULL;
 	int ret;

-	spin_lock_irq(&ibp->lock);
+	spin_lock_irq(&ibp->rvp.lock);

 	while (*n) {
 		struct qib_mcast *tmcast;
@@ -212,12 +212,12 @@ static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp,
 	atomic_inc(&mcast->refcount);
 	rb_link_node(&mcast->rb_node, pn, n);
-	rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);
+	rb_insert_color(&mcast->rb_node, &ibp->rvp.mcast_tree);

 	ret = 0;

 bail:
-	spin_unlock_irq(&ibp->lock);
+	spin_unlock_irq(&ibp->rvp.lock);

 	return ret;
 }
@@ -294,13 +294,13 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
 		return -EINVAL;

-	spin_lock_irq(&ibp->lock);
+	spin_lock_irq(&ibp->rvp.lock);

 	/* Find the GID in the mcast table. */
-	n = ibp->mcast_tree.rb_node;
+	n = ibp->rvp.mcast_tree.rb_node;
 	while (1) {
 		if (n == NULL) {
-			spin_unlock_irq(&ibp->lock);
+			spin_unlock_irq(&ibp->rvp.lock);
 			return -EINVAL;
 		}
@@ -329,13 +329,13 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 		/* If this was the last attached QP, remove the GID too. */
 		if (list_empty(&mcast->qp_list)) {
-			rb_erase(&mcast->rb_node, &ibp->mcast_tree);
+			rb_erase(&mcast->rb_node, &ibp->rvp.mcast_tree);
 			last = 1;
 		}
 		break;
 	}

-	spin_unlock_irq(&ibp->lock);
+	spin_unlock_irq(&ibp->rvp.lock);

 	/* QP not attached */
 	if (!delp)
 		return -EINVAL;
@@ -359,5 +359,5 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 int qib_mcast_tree_empty(struct qib_ibport *ibp)
 {
-	return ibp->mcast_tree.rb_node == NULL;
+	return !(ibp->rvp.mcast_tree.rb_node);
 }