Commit 3edac25f authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IPoIB: Fix crash in path_rec_completion()
  IPoIB: Fix hang in ipoib_flush_paths()
  IPoIB: Don't enable NAPI when it's already enabled
  RDMA/cxgb3: Fix deadlock in iw_cxgb3 (hang when configuring interface)
  IB/ehca: Remove reference to special QP in case of port activation failure
  IB/mlx4: Set umem field to NULL in mlx4_ib_alloc_fast_reg_mr()
  mlx4_core: Fix unused variable warning
  RDMA/nes: Mitigate compatibility issue regarding PCIe write credits
  RDMA/nes: Fix CQ allocation scheme for multicast receive queue apps
  RDMA/nes: Correct handling of PBL resources
  RDMA/nes: Reindent mis-indented spinlocks
  RDMA/cxgb3: Fix too-big reserved field zeroing in iwch_post_zb_read()
  IB/ipath: Fix RDMA write with immediate copy of last packet
parents 504765f3 c35a2549
@@ -1102,9 +1102,7 @@ static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
        char *cp, *next;
        unsigned fw_maj, fw_min, fw_mic;

-       rtnl_lock();
        lldev->ethtool_ops->get_drvinfo(lldev, &info);
-       rtnl_unlock();

        next = info.fw_version + 1;
        cp = strsep(&next, ".");
@@ -1192,9 +1190,7 @@ static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, ch
        struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

        PDBG("%s dev 0x%p\n", __func__, dev);
-       rtnl_lock();
        lldev->ethtool_ops->get_drvinfo(lldev, &info);
-       rtnl_unlock();
        return sprintf(buf, "%s\n", info.fw_version);
 }
@@ -1207,9 +1203,7 @@ static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
        struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

        PDBG("%s dev 0x%p\n", __func__, dev);
-       rtnl_lock();
        lldev->ethtool_ops->get_drvinfo(lldev, &info);
-       rtnl_unlock();
        return sprintf(buf, "%s\n", info.driver);
 }

@@ -745,7 +745,6 @@ int iwch_post_zb_read(struct iwch_qp *qhp)
        wqe->read.rdmaop = T3_READ_REQ;
        wqe->read.reserved[0] = 0;
        wqe->read.reserved[1] = 0;
-       wqe->read.reserved[2] = 0;
        wqe->read.rem_stag = cpu_to_be32(1);
        wqe->read.rem_to = cpu_to_be64(1);
        wqe->read.local_stag = cpu_to_be32(1);

@@ -359,36 +359,48 @@ static void notify_port_conf_change(struct ehca_shca *shca, int port_num)
                *old_attr = new_attr;
 }

+/* replay modify_qp for sqps -- return 0 if all is well, 1 if AQP1 destroyed */
+static int replay_modify_qp(struct ehca_sport *sport)
+{
+       int aqp1_destroyed;
+       unsigned long flags;
+
+       spin_lock_irqsave(&sport->mod_sqp_lock, flags);
+
+       aqp1_destroyed = !sport->ibqp_sqp[IB_QPT_GSI];
+
+       if (sport->ibqp_sqp[IB_QPT_SMI])
+               ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
+       if (!aqp1_destroyed)
+               ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
+
+       spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
+
+       return aqp1_destroyed;
+}
+
 static void parse_ec(struct ehca_shca *shca, u64 eqe)
 {
        u8 ec = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
        u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
        u8 spec_event;
        struct ehca_sport *sport = &shca->sport[port - 1];
-       unsigned long flags;

        switch (ec) {
        case 0x30: /* port availability change */
                if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
-                       int suppress_event;
-                       /* replay modify_qp for sqps */
-                       spin_lock_irqsave(&sport->mod_sqp_lock, flags);
-                       suppress_event = !sport->ibqp_sqp[IB_QPT_GSI];
-                       if (sport->ibqp_sqp[IB_QPT_SMI])
-                               ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
-                       if (!suppress_event)
-                               ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
-                       spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-
-                       /* AQP1 was destroyed, ignore this event */
-                       if (suppress_event)
-                               break;
+                       /* only replay modify_qp calls in autodetect mode;
+                        * if AQP1 was destroyed, the port is already down
+                        * again and we can drop the event.
+                        */
+                       if (ehca_nr_ports < 0)
+                               if (replay_modify_qp(sport))
+                                       break;

                        sport->port_state = IB_PORT_ACTIVE;
                        dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
                                            "is active");
-                       ehca_query_sma_attr(shca, port,
-                                           &sport->saved_attr);
+                       ehca_query_sma_attr(shca, port, &sport->saved_attr);
                } else {
                        sport->port_state = IB_PORT_DOWN;
                        dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,

@@ -860,6 +860,11 @@ static struct ehca_qp *internal_create_qp(
        if (qp_type == IB_QPT_GSI) {
                h_ret = ehca_define_sqp(shca, my_qp, init_attr);
                if (h_ret != H_SUCCESS) {
+                       kfree(my_qp->mod_qp_parm);
+                       my_qp->mod_qp_parm = NULL;
+                       /* the QP pointer is no longer valid */
+                       shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
+                               NULL;
                        ret = ehca2ib_return_code(h_ret);
                        goto create_qp_exit6;
                }

@@ -156,7 +156,7 @@ int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
 /**
  * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
  * @qp: the QP
- * @wr_id_only: update wr_id only, not SGEs
+ * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
  *
  * Return 0 if no RWQE is available, otherwise return 1.
  *
@@ -173,8 +173,6 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
        u32 tail;
        int ret;

-       qp->r_sge.sg_list = qp->r_sg_list;
-
        if (qp->ibqp.srq) {
                srq = to_isrq(qp->ibqp.srq);
                handler = srq->ibsrq.event_handler;
@@ -206,8 +204,10 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
                wqe = get_rwqe_ptr(rq, tail);
                if (++tail >= rq->size)
                        tail = 0;
-       } while (!wr_id_only && !ipath_init_sge(qp, wqe, &qp->r_len,
-                                               &qp->r_sge));
+               if (wr_id_only)
+                       break;
+               qp->r_sge.sg_list = qp->r_sg_list;
+       } while (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge));
        qp->r_wr_id = wqe->wr_id;
        wq->tail = tail;

@@ -205,6 +205,7 @@ struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
+       mr->umem = NULL;

        return &mr->ibmr;

@@ -95,6 +95,10 @@ unsigned int wqm_quanta = 0x10000;
 module_param(wqm_quanta, int, 0644);
 MODULE_PARM_DESC(wqm_quanta, "WQM quanta");

+static unsigned int limit_maxrdreqsz;
+module_param(limit_maxrdreqsz, bool, 0644);
+MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
+
 LIST_HEAD(nes_adapter_list);
 static LIST_HEAD(nes_dev_list);
@@ -588,6 +592,18 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
                                nesdev->nesadapter->port_count;
        }

+       if ((limit_maxrdreqsz ||
+            ((nesdev->nesadapter->phy_type[0] == NES_PHY_TYPE_GLADIUS) &&
+             (hw_rev == NE020_REV1))) &&
+           (pcie_get_readrq(pcidev) > 256)) {
+               if (pcie_set_readrq(pcidev, 256))
+                       printk(KERN_ERR PFX "Unable to set max read request"
+                               " to 256 bytes\n");
+               else
+                       nes_debug(NES_DBG_INIT, "Max read request size set"
+                               " to 256 bytes\n");
+       }
+
        tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);

        /* bring up the Control QP */

@@ -40,6 +40,7 @@
 #define NES_PHY_TYPE_ARGUS 4
 #define NES_PHY_TYPE_PUMA_1G 5
 #define NES_PHY_TYPE_PUMA_10G 6
+#define NES_PHY_TYPE_GLADIUS 7

 #define NES_MULTICAST_PF_MAX 8

@@ -349,7 +349,7 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
                if (nesfmr->nesmr.pbls_used > nesadapter->free_4kpbl) {
                        spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
                        ret = -ENOMEM;
-                       goto failed_vpbl_alloc;
+                       goto failed_vpbl_avail;
                } else {
                        nesadapter->free_4kpbl -= nesfmr->nesmr.pbls_used;
                }
@@ -357,7 +357,7 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
                if (nesfmr->nesmr.pbls_used > nesadapter->free_256pbl) {
                        spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
                        ret = -ENOMEM;
-                       goto failed_vpbl_alloc;
+                       goto failed_vpbl_avail;
                } else {
                        nesadapter->free_256pbl -= nesfmr->nesmr.pbls_used;
                }
@@ -391,14 +391,14 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
                        goto failed_vpbl_alloc;
                }

-               nesfmr->root_vpbl.leaf_vpbl = kzalloc(sizeof(*nesfmr->root_vpbl.leaf_vpbl)*1024, GFP_KERNEL);
+               nesfmr->leaf_pbl_cnt = nesfmr->nesmr.pbls_used-1;
+               nesfmr->root_vpbl.leaf_vpbl = kzalloc(sizeof(*nesfmr->root_vpbl.leaf_vpbl)*1024, GFP_ATOMIC);
                if (!nesfmr->root_vpbl.leaf_vpbl) {
                        spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
                        ret = -ENOMEM;
                        goto failed_leaf_vpbl_alloc;
                }

-               nesfmr->leaf_pbl_cnt = nesfmr->nesmr.pbls_used-1;
                nes_debug(NES_DBG_MR, "two level pbl, root_vpbl.pbl_vbase=%p"
                                " leaf_pbl_cnt=%d root_vpbl.leaf_vpbl=%p\n",
                                nesfmr->root_vpbl.pbl_vbase, nesfmr->leaf_pbl_cnt, nesfmr->root_vpbl.leaf_vpbl);
@@ -519,6 +519,16 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
                        nesfmr->root_vpbl.pbl_pbase);

        failed_vpbl_alloc:
+       if (nesfmr->nesmr.pbls_used != 0) {
+               spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+               if (nesfmr->nesmr.pbl_4k)
+                       nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used;
+               else
+                       nesadapter->free_256pbl += nesfmr->nesmr.pbls_used;
+               spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+       }
+
+       failed_vpbl_avail:
        kfree(nesfmr);

        failed_fmr_alloc:
@@ -534,18 +544,14 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
  */
 static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
 {
+       unsigned long flags;
        struct nes_mr *nesmr = to_nesmr_from_ibfmr(ibfmr);
        struct nes_fmr *nesfmr = to_nesfmr(nesmr);
        struct nes_vnic *nesvnic = to_nesvnic(ibfmr->device);
        struct nes_device *nesdev = nesvnic->nesdev;
-       struct nes_mr temp_nesmr = *nesmr;
+       struct nes_adapter *nesadapter = nesdev->nesadapter;
        int i = 0;

-       temp_nesmr.ibmw.device = ibfmr->device;
-       temp_nesmr.ibmw.pd = ibfmr->pd;
-       temp_nesmr.ibmw.rkey = ibfmr->rkey;
-       temp_nesmr.ibmw.uobject = NULL;
-
        /* free the resources */
        if (nesfmr->leaf_pbl_cnt == 0) {
                /* single PBL case */
@@ -561,8 +567,24 @@ static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
                pci_free_consistent(nesdev->pcidev, 8192, nesfmr->root_vpbl.pbl_vbase,
                                nesfmr->root_vpbl.pbl_pbase);
        }

-       return nes_dealloc_mw(&temp_nesmr.ibmw);
+       nesmr->ibmw.device = ibfmr->device;
+       nesmr->ibmw.pd = ibfmr->pd;
+       nesmr->ibmw.rkey = ibfmr->rkey;
+       nesmr->ibmw.uobject = NULL;
+
+       if (nesfmr->nesmr.pbls_used != 0) {
+               spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+               if (nesfmr->nesmr.pbl_4k) {
+                       nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used;
+                       WARN_ON(nesadapter->free_4kpbl > nesadapter->max_4kpbl);
+               } else {
+                       nesadapter->free_256pbl += nesfmr->nesmr.pbls_used;
+                       WARN_ON(nesadapter->free_256pbl > nesadapter->max_256pbl);
+               }
+               spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+       }
+
+       return nes_dealloc_mw(&nesmr->ibmw);
 }
@@ -1595,7 +1617,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
                        nes_ucontext->mcrqf = req.mcrqf;
                        if (nes_ucontext->mcrqf) {
                                if (nes_ucontext->mcrqf & 0x80000000)
-                                       nescq->hw_cq.cq_number = nesvnic->nic.qp_id + 12 + (nes_ucontext->mcrqf & 0xf) - 1;
+                                       nescq->hw_cq.cq_number = nesvnic->nic.qp_id + 28 + 2 * ((nes_ucontext->mcrqf & 0xf) - 1);
                                else if (nes_ucontext->mcrqf & 0x40000000)
                                        nescq->hw_cq.cq_number = nes_ucontext->mcrqf & 0xffff;
                                else

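The nes_verbs.c hunks above make the FMR paths hand PBL credits back to the adapter when things go wrong: the new failed_vpbl_avail label is taken before any credits were charged (so nothing is refunded), failed_vpbl_alloc now returns the charged credits under pbl_lock, and nes_dealloc_fmr() does the same on teardown instead of leaking them. Below is a minimal userspace sketch of that goto-unwind accounting pattern; the names (pool_credits, take_credits, create_object) are invented for illustration and this is not the driver code.

#include <stdio.h>
#include <stdlib.h>

static int pool_credits = 4;            /* stand-in for a shared free-PBL counter */

/* Charge 'n' credits from the shared pool; fail if not enough are left. */
static int take_credits(int n)
{
        if (n > pool_credits)
                return -1;
        pool_credits -= n;
        return 0;
}

static int create_object(int credits_needed, int fail_alloc)
{
        char *buf = NULL;
        int credits_taken = 0;

        if (take_credits(credits_needed))
                goto failed_avail;              /* nothing charged yet: no refund */
        credits_taken = credits_needed;

        buf = fail_alloc ? NULL : malloc(4096);
        if (!buf)
                goto failed_alloc;              /* charged, so refund below */

        /* demo only: tear the object down again so the example stays leak-free */
        free(buf);
        pool_credits += credits_taken;
        return 0;

failed_alloc:
        pool_credits += credits_taken;          /* give the credits back */
failed_avail:
        return -1;
}

int main(void)
{
        create_object(2, 1);    /* simulated allocation failure */
        create_object(8, 0);    /* simulated credit exhaustion */
        printf("credits after failures: %d (expected 4)\n", pool_credits);
        return 0;
}

Both failure paths leave pool_credits at its original value, which is exactly the invariant the failed_vpbl_avail/failed_vpbl_alloc split restores for the adapter's 256-byte and 4 KB PBL pools.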
@@ -106,12 +106,13 @@ int ipoib_open(struct net_device *dev)
        ipoib_dbg(priv, "bringing up interface\n");

-       napi_enable(&priv->napi);
        set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

        if (ipoib_pkey_dev_delay_open(dev))
                return 0;

+       napi_enable(&priv->napi);
+
        if (ipoib_ib_dev_open(dev)) {
                napi_disable(&priv->napi);
                return -EINVAL;
@@ -546,6 +547,7 @@ static int path_rec_start(struct net_device *dev,
        if (path->query_id < 0) {
                ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
                path->query = NULL;
+               complete(&path->done);
                return path->query_id;
        }
@@ -662,7 +664,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
                skb_push(skb, sizeof *phdr);
                __skb_queue_tail(&path->queue, skb);

-               if (path_rec_start(dev, path)) {
+               if (!path->query && path_rec_start(dev, path)) {
                        spin_unlock_irqrestore(&priv->lock, flags);
                        path_free(dev, path);
                        return;

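The path_rec_start() change above completes path->done even when ib_sa_path_rec_get() fails synchronously; ipoib_flush_paths() waits on that completion, so without the extra complete() a failed query left the flusher blocked forever, and the unicast_arp_send() change avoids starting a second query while one is still outstanding. A small pthread-based analogue of the "wake the waiter on the failure path too" idea follows; the names (start_request, flusher, request_done) are made up for illustration and this is not the IPoIB code.

#include <pthread.h>
#include <stdio.h>

/* Userspace stand-in for the completion the flush path waits on. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t request_done = PTHREAD_COND_INITIALIZER;
static int done;

static void complete_request(void)
{
        pthread_mutex_lock(&lock);
        done = 1;
        pthread_cond_signal(&request_done);
        pthread_mutex_unlock(&lock);
}

/* Starting the request can fail synchronously (like ib_sa_path_rec_get()
 * returning a negative query id).  The fix is to signal the completion on
 * that path too, otherwise the flusher below waits forever. */
static int start_request(int simulate_failure)
{
        if (simulate_failure) {
                complete_request();     /* analogue of the added complete(&path->done) */
                return -1;
        }
        complete_request();             /* normally signalled from the async callback */
        return 0;
}

static void *flusher(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!done)
                pthread_cond_wait(&request_done, &lock);
        pthread_mutex_unlock(&lock);
        printf("flush finished\n");
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, flusher, NULL);
        start_request(1);       /* fails, but still wakes the flusher */
        pthread_join(t, NULL);
        return 0;
}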
@@ -1307,8 +1307,10 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
        u32 fw_vers = 0;
        u32 tp_vers = 0;

+       spin_lock(&adapter->stats_lock);
        t3_get_fw_version(adapter, &fw_vers);
        t3_get_tp_version(adapter, &tp_vers);
+       spin_unlock(&adapter->stats_lock);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);

@@ -87,6 +87,9 @@ enum {
 #ifdef CONFIG_MLX4_DEBUG
 extern int mlx4_debug_level;
+#else /* CONFIG_MLX4_DEBUG */
+#define mlx4_debug_level	(0)
+#endif /* CONFIG_MLX4_DEBUG */

 #define mlx4_dbg(mdev, format, arg...) \
        do { \
@@ -94,12 +97,6 @@ extern int mlx4_debug_level;
                dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ## arg); \
        } while (0)

-#else /* CONFIG_MLX4_DEBUG */
-#define mlx4_dbg(mdev, format, arg...) do { (void) mdev; } while (0)
-#endif /* CONFIG_MLX4_DEBUG */
-
 #define mlx4_err(mdev, format, arg...) \
        dev_err(&mdev->pdev->dev, format, ## arg)
 #define mlx4_info(mdev, format, arg...) \

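The mlx4.h hunks above keep mlx4_dbg() defined whether or not CONFIG_MLX4_DEBUG is set and instead define mlx4_debug_level as a constant 0 in the non-debug build, so variables that are only referenced in debug prints still count as "used" while the print itself is compiled out as dead code. A stand-alone sketch of the same pattern, with invented names (MYDRV_DEBUG, mydrv_dbg, mydrv_debug_level), not the kernel header:

#include <stdio.h>

/* Keep the debug macro itself always defined and only vary the level, so the
 * macro's arguments remain visible to the compiler in both configurations. */
#ifdef MYDRV_DEBUG
static int mydrv_debug_level = 1;
#else
#define mydrv_debug_level (0)   /* constant 0: the branch below is dead code */
#endif

#define mydrv_dbg(fmt, ...)                                     \
        do {                                                    \
                if (mydrv_debug_level)                          \
                        printf("dbg: " fmt, ##__VA_ARGS__);     \
        } while (0)

int main(void)
{
        int copied = 42;        /* referenced only by the debug macro */

        /* With the old "empty macro" approach this would trigger an unused
         * variable warning when debugging is compiled out; here it does not. */
        mydrv_dbg("copied=%d\n", copied);
        return 0;
}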