Commit 1e60508c authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull rdma fixes from Doug Ledford:
 "Three late 4.4-rc fixes.

  The first two were very small in terms of number of lines, the third
  is more lines of change than I like this late in the cycle, but there
  are positive test results from Avagotech and from my own test setup
  with the target hardware, and given the problem was a 100% failure
  case, I sent it through.

   - A previous patch updated the mlx4 driver to use vmalloc when there
     was not enough memory to get a contiguous region large enough for
     our needs, so we need kvfree() whenever we free that item.  We
     missed one place, so fix that now.

   - A previous patch added code to match incoming packets against a
     specific device, but failed to compensate for devices that have
     both InfiniBand and Ethernet ports.  Fix that.

   - Under certain vlan conditions, the ocrdma driver would fail to
     bring up any vlan interfaces and would print out a circular locking
     failure.  Fix that"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
  RDMA/be2net: Remove open and close entry points
  RDMA/ocrdma: Depend on async link events from CNA
  RDMA/ocrdma: Dispatch only port event when port state changes
  RDMA/ocrdma: Fix vlan-id assignment in qp parameters
  IB/mlx4: Replace kfree with kvfree in mlx4_ib_destroy_srq
  IB/cma: cma_match_net_dev needs to take into account port_num
parents 85133421 f41647ef
...@@ -1265,15 +1265,17 @@ static bool cma_protocol_roce(const struct rdma_cm_id *id) ...@@ -1265,15 +1265,17 @@ static bool cma_protocol_roce(const struct rdma_cm_id *id)
return cma_protocol_roce_dev_port(device, port_num); return cma_protocol_roce_dev_port(device, port_num);
} }
static bool cma_match_net_dev(const struct rdma_id_private *id_priv, static bool cma_match_net_dev(const struct rdma_cm_id *id,
const struct net_device *net_dev) const struct net_device *net_dev,
u8 port_num)
{ {
const struct rdma_addr *addr = &id_priv->id.route.addr; const struct rdma_addr *addr = &id->route.addr;
if (!net_dev) if (!net_dev)
/* This request is an AF_IB request or a RoCE request */ /* This request is an AF_IB request or a RoCE request */
return addr->src_addr.ss_family == AF_IB || return (!id->port_num || id->port_num == port_num) &&
cma_protocol_roce(&id_priv->id); (addr->src_addr.ss_family == AF_IB ||
cma_protocol_roce_dev_port(id->device, port_num));
return !addr->dev_addr.bound_dev_if || return !addr->dev_addr.bound_dev_if ||
(net_eq(dev_net(net_dev), addr->dev_addr.net) && (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
...@@ -1295,13 +1297,13 @@ static struct rdma_id_private *cma_find_listener( ...@@ -1295,13 +1297,13 @@ static struct rdma_id_private *cma_find_listener(
hlist_for_each_entry(id_priv, &bind_list->owners, node) { hlist_for_each_entry(id_priv, &bind_list->owners, node) {
if (cma_match_private_data(id_priv, ib_event->private_data)) { if (cma_match_private_data(id_priv, ib_event->private_data)) {
if (id_priv->id.device == cm_id->device && if (id_priv->id.device == cm_id->device &&
cma_match_net_dev(id_priv, net_dev)) cma_match_net_dev(&id_priv->id, net_dev, req->port))
return id_priv; return id_priv;
list_for_each_entry(id_priv_dev, list_for_each_entry(id_priv_dev,
&id_priv->listen_list, &id_priv->listen_list,
listen_list) { listen_list) {
if (id_priv_dev->id.device == cm_id->device && if (id_priv_dev->id.device == cm_id->device &&
cma_match_net_dev(id_priv_dev, net_dev)) cma_match_net_dev(&id_priv_dev->id, net_dev, req->port))
return id_priv_dev; return id_priv_dev;
} }
} }
......
...@@ -286,7 +286,7 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq) ...@@ -286,7 +286,7 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq)
mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db); mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
ib_umem_release(msrq->umem); ib_umem_release(msrq->umem);
} else { } else {
kfree(msrq->wrid); kvfree(msrq->wrid);
mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift, mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
&msrq->buf); &msrq->buf);
mlx4_db_free(dev->dev, &msrq->db); mlx4_db_free(dev->dev, &msrq->db);
......
...@@ -232,6 +232,10 @@ struct phy_info { ...@@ -232,6 +232,10 @@ struct phy_info {
u16 interface_type; u16 interface_type;
}; };
/* Driver-private state bits kept in ocrdma_dev.flags. */
enum ocrdma_flags {
	OCRDMA_FLAGS_LINK_STATUS_INIT = 0x01	/* initial link state has been queried */
};
struct ocrdma_dev { struct ocrdma_dev {
struct ib_device ibdev; struct ib_device ibdev;
struct ocrdma_dev_attr attr; struct ocrdma_dev_attr attr;
...@@ -287,6 +291,7 @@ struct ocrdma_dev { ...@@ -287,6 +291,7 @@ struct ocrdma_dev {
atomic_t update_sl; atomic_t update_sl;
u16 pvid; u16 pvid;
u32 asic_id; u32 asic_id;
u32 flags;
ulong last_stats_time; ulong last_stats_time;
struct mutex stats_lock; /* provide synch for debugfs operations */ struct mutex stats_lock; /* provide synch for debugfs operations */
...@@ -591,4 +596,9 @@ static inline u8 ocrdma_is_enabled_and_synced(u32 state) ...@@ -591,4 +596,9 @@ static inline u8 ocrdma_is_enabled_and_synced(u32 state)
(state & OCRDMA_STATE_FLAG_SYNC); (state & OCRDMA_STATE_FLAG_SYNC);
} }
/*
 * Extract the link-state byte from the first word of an async
 * link-state-change CQE (masks defined by OCRDMA_AE_LSC_LS_*).
 */
static inline u8 ocrdma_get_ae_link_state(u32 ae_state)
{
	return ((ae_state & OCRDMA_AE_LSC_LS_MASK) >> OCRDMA_AE_LSC_LS_SHIFT);
}
#endif #endif
...@@ -579,6 +579,8 @@ static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev, ...@@ -579,6 +579,8 @@ static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
cmd->async_event_bitmap = BIT(OCRDMA_ASYNC_GRP5_EVE_CODE); cmd->async_event_bitmap = BIT(OCRDMA_ASYNC_GRP5_EVE_CODE);
cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_RDMA_EVE_CODE); cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_RDMA_EVE_CODE);
/* Request link events on this MQ. */
cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_LINK_EVE_CODE);
cmd->async_cqid_ringsize = cq->id; cmd->async_cqid_ringsize = cq->id;
cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) << cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
...@@ -819,20 +821,42 @@ static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev, ...@@ -819,20 +821,42 @@ static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev,
} }
} }
/*
 * Handle an async link-state-change event (OCRDMA_ASYNC_LINK_EVE_CODE).
 * Only logical-link transitions are acted upon, and only after the
 * initial link state has been recorded (OCRDMA_FLAGS_LINK_STATUS_INIT).
 */
static void ocrdma_process_link_state(struct ocrdma_dev *dev,
				      struct ocrdma_ae_mcqe *cqe)
{
	struct ocrdma_ae_lnkst_mcqe *evt;
	u8 lstate;

	evt = (struct ocrdma_ae_lnkst_mcqe *)cqe;
	lstate = ocrdma_get_ae_link_state(evt->speed_state_ptn);

	/* Ignore physical-link-only events; dispatch logical link changes. */
	if (!(lstate & OCRDMA_AE_LSC_LLINK_MASK))
		return;

	if (dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT)
		ocrdma_update_link_state(dev, (lstate & OCRDMA_LINK_ST_MASK));
}
static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe) static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
{ {
/* async CQE processing */ /* async CQE processing */
struct ocrdma_ae_mcqe *cqe = ae_cqe; struct ocrdma_ae_mcqe *cqe = ae_cqe;
u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >> u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
OCRDMA_AE_MCQE_EVENT_CODE_SHIFT; OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;
switch (evt_code) {
if (evt_code == OCRDMA_ASYNC_RDMA_EVE_CODE) case OCRDMA_ASYNC_LINK_EVE_CODE:
ocrdma_process_link_state(dev, cqe);
break;
case OCRDMA_ASYNC_RDMA_EVE_CODE:
ocrdma_dispatch_ibevent(dev, cqe); ocrdma_dispatch_ibevent(dev, cqe);
else if (evt_code == OCRDMA_ASYNC_GRP5_EVE_CODE) break;
case OCRDMA_ASYNC_GRP5_EVE_CODE:
ocrdma_process_grp5_aync(dev, cqe); ocrdma_process_grp5_aync(dev, cqe);
else break;
default:
pr_err("%s(%d) invalid evt code=0x%x\n", __func__, pr_err("%s(%d) invalid evt code=0x%x\n", __func__,
dev->id, evt_code); dev->id, evt_code);
}
} }
static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe) static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
...@@ -1363,7 +1387,8 @@ static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev) ...@@ -1363,7 +1387,8 @@ static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
return status; return status;
} }
int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed) int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed,
u8 *lnk_state)
{ {
int status = -ENOMEM; int status = -ENOMEM;
struct ocrdma_get_link_speed_rsp *rsp; struct ocrdma_get_link_speed_rsp *rsp;
...@@ -1384,8 +1409,11 @@ int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed) ...@@ -1384,8 +1409,11 @@ int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed)
goto mbx_err; goto mbx_err;
rsp = (struct ocrdma_get_link_speed_rsp *)cmd; rsp = (struct ocrdma_get_link_speed_rsp *)cmd;
*lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK) if (lnk_speed)
>> OCRDMA_PHY_PS_SHIFT; *lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK)
>> OCRDMA_PHY_PS_SHIFT;
if (lnk_state)
*lnk_state = (rsp->res_lnk_st & OCRDMA_LINK_ST_MASK);
mbx_err: mbx_err:
kfree(cmd); kfree(cmd);
...@@ -2515,9 +2543,10 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, ...@@ -2515,9 +2543,10 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid)); ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8); cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
if (vlan_id < 0x1000) { if (vlan_id == 0xFFFF)
if (dev->pfc_state) { vlan_id = 0;
vlan_id = 0; if (vlan_id || dev->pfc_state) {
if (!vlan_id) {
pr_err("ocrdma%d:Using VLAN with PFC is recommended\n", pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
dev->id); dev->id);
pr_err("ocrdma%d:Using VLAN 0 for this connection\n", pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
......
...@@ -106,7 +106,8 @@ void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed, ...@@ -106,7 +106,8 @@ void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed,
bool solicited, u16 cqe_popped); bool solicited, u16 cqe_popped);
/* verbs specific mailbox commands */ /* verbs specific mailbox commands */
int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed); int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed,
u8 *lnk_st);
int ocrdma_query_config(struct ocrdma_dev *, int ocrdma_query_config(struct ocrdma_dev *,
struct ocrdma_mbx_query_config *config); struct ocrdma_mbx_query_config *config);
...@@ -153,5 +154,6 @@ char *port_speed_string(struct ocrdma_dev *dev); ...@@ -153,5 +154,6 @@ char *port_speed_string(struct ocrdma_dev *dev);
void ocrdma_init_service_level(struct ocrdma_dev *); void ocrdma_init_service_level(struct ocrdma_dev *);
void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev); void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev);
void ocrdma_free_pd_range(struct ocrdma_dev *dev); void ocrdma_free_pd_range(struct ocrdma_dev *dev);
void ocrdma_update_link_state(struct ocrdma_dev *dev, u8 lstate);
#endif /* __OCRDMA_HW_H__ */ #endif /* __OCRDMA_HW_H__ */
...@@ -290,6 +290,7 @@ static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev) ...@@ -290,6 +290,7 @@ static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev)
static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info) static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
{ {
int status = 0, i; int status = 0, i;
u8 lstate = 0;
struct ocrdma_dev *dev; struct ocrdma_dev *dev;
dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev)); dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev));
...@@ -319,6 +320,11 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info) ...@@ -319,6 +320,11 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
if (status) if (status)
goto alloc_err; goto alloc_err;
/* Query Link state and update */
status = ocrdma_mbx_get_link_speed(dev, NULL, &lstate);
if (!status)
ocrdma_update_link_state(dev, lstate);
for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++) for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++)
if (device_create_file(&dev->ibdev.dev, ocrdma_attributes[i])) if (device_create_file(&dev->ibdev.dev, ocrdma_attributes[i]))
goto sysfs_err; goto sysfs_err;
...@@ -373,7 +379,7 @@ static void ocrdma_remove(struct ocrdma_dev *dev) ...@@ -373,7 +379,7 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
ocrdma_remove_free(dev); ocrdma_remove_free(dev);
} }
static int ocrdma_open(struct ocrdma_dev *dev) static int ocrdma_dispatch_port_active(struct ocrdma_dev *dev)
{ {
struct ib_event port_event; struct ib_event port_event;
...@@ -384,32 +390,9 @@ static int ocrdma_open(struct ocrdma_dev *dev) ...@@ -384,32 +390,9 @@ static int ocrdma_open(struct ocrdma_dev *dev)
return 0; return 0;
} }
static int ocrdma_close(struct ocrdma_dev *dev) static int ocrdma_dispatch_port_error(struct ocrdma_dev *dev)
{ {
int i;
struct ocrdma_qp *qp, **cur_qp;
struct ib_event err_event; struct ib_event err_event;
struct ib_qp_attr attrs;
int attr_mask = IB_QP_STATE;
attrs.qp_state = IB_QPS_ERR;
mutex_lock(&dev->dev_lock);
if (dev->qp_tbl) {
cur_qp = dev->qp_tbl;
for (i = 0; i < OCRDMA_MAX_QP; i++) {
qp = cur_qp[i];
if (qp && qp->ibqp.qp_type != IB_QPT_GSI) {
/* change the QP state to ERROR */
_ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask);
err_event.event = IB_EVENT_QP_FATAL;
err_event.element.qp = &qp->ibqp;
err_event.device = &dev->ibdev;
ib_dispatch_event(&err_event);
}
}
}
mutex_unlock(&dev->dev_lock);
err_event.event = IB_EVENT_PORT_ERR; err_event.event = IB_EVENT_PORT_ERR;
err_event.element.port_num = 1; err_event.element.port_num = 1;
...@@ -420,7 +403,7 @@ static int ocrdma_close(struct ocrdma_dev *dev) ...@@ -420,7 +403,7 @@ static int ocrdma_close(struct ocrdma_dev *dev)
static void ocrdma_shutdown(struct ocrdma_dev *dev) static void ocrdma_shutdown(struct ocrdma_dev *dev)
{ {
ocrdma_close(dev); ocrdma_dispatch_port_error(dev);
ocrdma_remove(dev); ocrdma_remove(dev);
} }
...@@ -431,18 +414,28 @@ static void ocrdma_shutdown(struct ocrdma_dev *dev) ...@@ -431,18 +414,28 @@ static void ocrdma_shutdown(struct ocrdma_dev *dev)
static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event) static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
{ {
switch (event) { switch (event) {
case BE_DEV_UP:
ocrdma_open(dev);
break;
case BE_DEV_DOWN:
ocrdma_close(dev);
break;
case BE_DEV_SHUTDOWN: case BE_DEV_SHUTDOWN:
ocrdma_shutdown(dev); ocrdma_shutdown(dev);
break; break;
default:
break;
} }
} }
/*
 * Dispatch an IB port event matching the new logical link state
 * (non-zero lstate => port active, zero => port error).  On the very
 * first call the INIT flag is recorded and an initial "down" state is
 * swallowed, so no spurious PORT_ERR is delivered before the port was
 * ever reported up.
 */
void ocrdma_update_link_state(struct ocrdma_dev *dev, u8 lstate)
{
	if (!(dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT)) {
		dev->flags |= OCRDMA_FLAGS_LINK_STATUS_INIT;
		if (!lstate)
			return;
	}

	if (!lstate)
		ocrdma_dispatch_port_error(dev);
	else
		ocrdma_dispatch_port_active(dev);
}
static struct ocrdma_driver ocrdma_drv = { static struct ocrdma_driver ocrdma_drv = {
.name = "ocrdma_driver", .name = "ocrdma_driver",
.add = ocrdma_add, .add = ocrdma_add,
......
...@@ -465,8 +465,11 @@ struct ocrdma_ae_qp_mcqe { ...@@ -465,8 +465,11 @@ struct ocrdma_ae_qp_mcqe {
u32 valid_ae_event; u32 valid_ae_event;
}; };
#define OCRDMA_ASYNC_RDMA_EVE_CODE 0x14 enum ocrdma_async_event_code {
#define OCRDMA_ASYNC_GRP5_EVE_CODE 0x5 OCRDMA_ASYNC_LINK_EVE_CODE = 0x01,
OCRDMA_ASYNC_GRP5_EVE_CODE = 0x05,
OCRDMA_ASYNC_RDMA_EVE_CODE = 0x14
};
enum ocrdma_async_grp5_events { enum ocrdma_async_grp5_events {
OCRDMA_ASYNC_EVENT_QOS_VALUE = 0x01, OCRDMA_ASYNC_EVENT_QOS_VALUE = 0x01,
...@@ -489,6 +492,44 @@ enum OCRDMA_ASYNC_EVENT_TYPE { ...@@ -489,6 +492,44 @@ enum OCRDMA_ASYNC_EVENT_TYPE {
OCRDMA_MAX_ASYNC_ERRORS OCRDMA_MAX_ASYNC_ERRORS
}; };
/*
 * Hardware layout of an async link-state-change MCQE.  Field bit
 * positions are described by the OCRDMA_AE_LSC_* mask/shift enum below.
 */
struct ocrdma_ae_lnkst_mcqe {
	u32 speed_state_ptn;	/* port speed, link state, port number */
	u32 qos_reason_falut;	/* QoS, event reason, fault code */
	u32 evt_tag;
	u32 valid_ae_event;
};
/* Bit masks/shifts for the ocrdma_ae_lnkst_mcqe words above. */
enum {
	OCRDMA_AE_LSC_PORT_NUM_MASK	= 0x3F,
	OCRDMA_AE_LSC_PT_SHIFT		= 0x06,
	OCRDMA_AE_LSC_PT_MASK		= (0x03 <<
			OCRDMA_AE_LSC_PT_SHIFT),
	OCRDMA_AE_LSC_LS_SHIFT		= 0x08,
	OCRDMA_AE_LSC_LS_MASK		= (0xFF <<
			OCRDMA_AE_LSC_LS_SHIFT),
	OCRDMA_AE_LSC_LD_SHIFT		= 0x10,
	OCRDMA_AE_LSC_LD_MASK		= (0xFF <<
			OCRDMA_AE_LSC_LD_SHIFT),
	OCRDMA_AE_LSC_PPS_SHIFT		= 0x18,
	OCRDMA_AE_LSC_PPS_MASK		= (0xFF <<
			OCRDMA_AE_LSC_PPS_SHIFT),
	OCRDMA_AE_LSC_PPF_MASK		= 0xFF,
	OCRDMA_AE_LSC_ER_SHIFT		= 0x08,
	OCRDMA_AE_LSC_ER_MASK		= (0xFF <<
			OCRDMA_AE_LSC_ER_SHIFT),
	OCRDMA_AE_LSC_QOS_SHIFT		= 0x10,
	OCRDMA_AE_LSC_QOS_MASK		= (0xFFFF <<
			OCRDMA_AE_LSC_QOS_SHIFT)
};
/* Link-state values carried in the AE CQE state byte. */
enum {
	OCRDMA_AE_LSC_PLINK_DOWN	= 0x00,	/* physical link down */
	OCRDMA_AE_LSC_PLINK_UP		= 0x01,	/* physical link up */
	OCRDMA_AE_LSC_LLINK_DOWN	= 0x02,	/* logical link down */
	OCRDMA_AE_LSC_LLINK_MASK	= 0x02,	/* event concerns logical link */
	OCRDMA_AE_LSC_LLINK_UP		= 0x03	/* logical link up */
};
/* mailbox command request and responses */ /* mailbox command request and responses */
enum { enum {
OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT = 2, OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT = 2,
...@@ -676,7 +717,7 @@ enum { ...@@ -676,7 +717,7 @@ enum {
OCRDMA_PHY_PFLT_SHIFT = 0x18, OCRDMA_PHY_PFLT_SHIFT = 0x18,
OCRDMA_QOS_LNKSP_MASK = 0xFFFF0000, OCRDMA_QOS_LNKSP_MASK = 0xFFFF0000,
OCRDMA_QOS_LNKSP_SHIFT = 0x10, OCRDMA_QOS_LNKSP_SHIFT = 0x10,
OCRDMA_LLST_MASK = 0xFF, OCRDMA_LINK_ST_MASK = 0x01,
OCRDMA_PLFC_MASK = 0x00000400, OCRDMA_PLFC_MASK = 0x00000400,
OCRDMA_PLFC_SHIFT = 0x8, OCRDMA_PLFC_SHIFT = 0x8,
OCRDMA_PLRFC_MASK = 0x00000200, OCRDMA_PLRFC_MASK = 0x00000200,
...@@ -691,7 +732,7 @@ struct ocrdma_get_link_speed_rsp { ...@@ -691,7 +732,7 @@ struct ocrdma_get_link_speed_rsp {
u32 pflt_pps_ld_pnum; u32 pflt_pps_ld_pnum;
u32 qos_lsp; u32 qos_lsp;
u32 res_lls; u32 res_lnk_st;
}; };
enum { enum {
......
...@@ -171,7 +171,7 @@ static inline void get_link_speed_and_width(struct ocrdma_dev *dev, ...@@ -171,7 +171,7 @@ static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
int status; int status;
u8 speed; u8 speed;
status = ocrdma_mbx_get_link_speed(dev, &speed); status = ocrdma_mbx_get_link_speed(dev, &speed, NULL);
if (status) if (status)
speed = OCRDMA_PHYS_LINK_SPEED_ZERO; speed = OCRDMA_PHYS_LINK_SPEED_ZERO;
......
...@@ -848,8 +848,6 @@ void be_roce_dev_remove(struct be_adapter *); ...@@ -848,8 +848,6 @@ void be_roce_dev_remove(struct be_adapter *);
/* /*
* internal function to open-close roce device during ifup-ifdown. * internal function to open-close roce device during ifup-ifdown.
*/ */
void be_roce_dev_open(struct be_adapter *);
void be_roce_dev_close(struct be_adapter *);
void be_roce_dev_shutdown(struct be_adapter *); void be_roce_dev_shutdown(struct be_adapter *);
#endif /* BE_H */ #endif /* BE_H */
...@@ -3432,8 +3432,6 @@ static int be_close(struct net_device *netdev) ...@@ -3432,8 +3432,6 @@ static int be_close(struct net_device *netdev)
be_disable_if_filters(adapter); be_disable_if_filters(adapter);
be_roce_dev_close(adapter);
if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
for_all_evt_queues(adapter, eqo, i) { for_all_evt_queues(adapter, eqo, i) {
napi_disable(&eqo->napi); napi_disable(&eqo->napi);
...@@ -3601,8 +3599,6 @@ static int be_open(struct net_device *netdev) ...@@ -3601,8 +3599,6 @@ static int be_open(struct net_device *netdev)
be_link_status_update(adapter, link_status); be_link_status_update(adapter, link_status);
netif_tx_start_all_queues(netdev); netif_tx_start_all_queues(netdev);
be_roce_dev_open(adapter);
#ifdef CONFIG_BE2NET_VXLAN #ifdef CONFIG_BE2NET_VXLAN
if (skyhawk_chip(adapter)) if (skyhawk_chip(adapter))
vxlan_get_rx_port(netdev); vxlan_get_rx_port(netdev);
......
...@@ -116,40 +116,6 @@ void be_roce_dev_remove(struct be_adapter *adapter) ...@@ -116,40 +116,6 @@ void be_roce_dev_remove(struct be_adapter *adapter)
} }
} }
static void _be_roce_dev_open(struct be_adapter *adapter)
{
if (ocrdma_drv && adapter->ocrdma_dev &&
ocrdma_drv->state_change_handler)
ocrdma_drv->state_change_handler(adapter->ocrdma_dev,
BE_DEV_UP);
}
void be_roce_dev_open(struct be_adapter *adapter)
{
if (be_roce_supported(adapter)) {
mutex_lock(&be_adapter_list_lock);
_be_roce_dev_open(adapter);
mutex_unlock(&be_adapter_list_lock);
}
}
static void _be_roce_dev_close(struct be_adapter *adapter)
{
if (ocrdma_drv && adapter->ocrdma_dev &&
ocrdma_drv->state_change_handler)
ocrdma_drv->state_change_handler(adapter->ocrdma_dev,
BE_DEV_DOWN);
}
void be_roce_dev_close(struct be_adapter *adapter)
{
if (be_roce_supported(adapter)) {
mutex_lock(&be_adapter_list_lock);
_be_roce_dev_close(adapter);
mutex_unlock(&be_adapter_list_lock);
}
}
void be_roce_dev_shutdown(struct be_adapter *adapter) void be_roce_dev_shutdown(struct be_adapter *adapter)
{ {
if (be_roce_supported(adapter)) { if (be_roce_supported(adapter)) {
...@@ -177,8 +143,6 @@ int be_roce_register_driver(struct ocrdma_driver *drv) ...@@ -177,8 +143,6 @@ int be_roce_register_driver(struct ocrdma_driver *drv)
_be_roce_dev_add(dev); _be_roce_dev_add(dev);
netdev = dev->netdev; netdev = dev->netdev;
if (netif_running(netdev) && netif_oper_up(netdev))
_be_roce_dev_open(dev);
} }
mutex_unlock(&be_adapter_list_lock); mutex_unlock(&be_adapter_list_lock);
return 0; return 0;
......
...@@ -60,9 +60,7 @@ struct ocrdma_driver { ...@@ -60,9 +60,7 @@ struct ocrdma_driver {
void (*state_change_handler) (struct ocrdma_dev *, u32 new_state); void (*state_change_handler) (struct ocrdma_dev *, u32 new_state);
}; };
enum { enum be_roce_event {
BE_DEV_UP = 0,
BE_DEV_DOWN = 1,
BE_DEV_SHUTDOWN = 2 BE_DEV_SHUTDOWN = 2
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment