Commit 1e60508c authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull rdma fixes from Doug Ledford:
 "Three late 4.4-rc fixes.

   The first two were very small in terms of the number of lines changed;
   the third is a larger change than I would like this late in the cycle,
   but there are positive test results from Avagotech and from my own
   test setup with the target hardware, and given that the problem was a
   100% failure case, I sent it through.

   - A previous patch updated the mlx4 driver to fall back to vmalloc()
     when there was not enough memory for a contiguous region large
     enough for our needs, so we need kvfree() whenever we free that
     item (a sketch of this allocation pattern follows the shortlog
     below).  We missed one place; fix that now.

   - A previous patch added code to match incoming packets against a
     specific device, but failed to compensate for devices that have
     both InfiniBand and Ethernet ports.  Fix that.

   - Under certain vlan conditions, the ocrdma driver would fail to
     bring up any vlan interfaces and would print out a circular locking
     failure.  Fix that"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
  RDMA/be2net: Remove open and close entry points
  RDMA/ocrdma: Depend on async link events from CNA
  RDMA/ocrdma: Dispatch only port event when port state changes
  RDMA/ocrdma: Fix vlan-id assignment in qp parameters
  IB/mlx4: Replace kfree with kvfree in mlx4_ib_destroy_srq
  IB/cma: cma_match_net_dev needs to take into account port_num
parents 85133421 f41647ef
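
As the first bullet above notes, mlx4 obtains the SRQ work-request ID array with kmalloc() and falls back to vmalloc() when a contiguous buffer of that size is unavailable, so every path that frees the array must use kvfree(). The sketch below illustrates that allocate-with-fallback pattern; the helper names are hypothetical and are not the mlx4 functions themselves.

#include <linux/slab.h>      /* kmalloc(), kfree() */
#include <linux/vmalloc.h>   /* vmalloc(), vfree() */
#include <linux/mm.h>        /* kvfree() */

/*
 * Hypothetical helpers (not mlx4 code): try a physically contiguous
 * allocation first and fall back to vmalloc() when memory is too
 * fragmented.  Anything allocated this way must be released with
 * kvfree(), which dispatches to kfree() or vfree() depending on how
 * the pointer was obtained; a plain kfree() on a vmalloc()ed pointer
 * is a memory-corruption bug.
 */
static void *wrid_array_alloc(size_t size)
{
        void *buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

        if (!buf)
                buf = vmalloc(size);
        return buf;
}

static void wrid_array_free(void *buf)
{
        kvfree(buf);    /* safe for both kmalloc()ed and vmalloc()ed memory */
}

The mlx4_ib_destroy_srq() hunk below applies exactly this rule to the one free path that was still calling kfree().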
@@ -1265,15 +1265,17 @@ static bool cma_protocol_roce(const struct rdma_cm_id *id)
 	return cma_protocol_roce_dev_port(device, port_num);
 }
 
-static bool cma_match_net_dev(const struct rdma_id_private *id_priv,
-			      const struct net_device *net_dev)
+static bool cma_match_net_dev(const struct rdma_cm_id *id,
+			      const struct net_device *net_dev,
+			      u8 port_num)
 {
-	const struct rdma_addr *addr = &id_priv->id.route.addr;
+	const struct rdma_addr *addr = &id->route.addr;
 
 	if (!net_dev)
 		/* This request is an AF_IB request or a RoCE request */
-		return addr->src_addr.ss_family == AF_IB ||
-		       cma_protocol_roce(&id_priv->id);
+		return (!id->port_num || id->port_num == port_num) &&
+		       (addr->src_addr.ss_family == AF_IB ||
+			cma_protocol_roce_dev_port(id->device, port_num));
 
 	return !addr->dev_addr.bound_dev_if ||
 	       (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
@@ -1295,13 +1297,13 @@ static struct rdma_id_private *cma_find_listener(
 	hlist_for_each_entry(id_priv, &bind_list->owners, node) {
 		if (cma_match_private_data(id_priv, ib_event->private_data)) {
 			if (id_priv->id.device == cm_id->device &&
-			    cma_match_net_dev(id_priv, net_dev))
+			    cma_match_net_dev(&id_priv->id, net_dev, req->port))
 				return id_priv;
 			list_for_each_entry(id_priv_dev,
 					    &id_priv->listen_list,
 					    listen_list) {
 				if (id_priv_dev->id.device == cm_id->device &&
-				    cma_match_net_dev(id_priv_dev, net_dev))
+				    cma_match_net_dev(&id_priv_dev->id, net_dev, req->port))
 					return id_priv_dev;
 			}
 		}
@@ -286,7 +286,7 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq)
 		mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
 		ib_umem_release(msrq->umem);
 	} else {
-		kfree(msrq->wrid);
+		kvfree(msrq->wrid);
 		mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
 			      &msrq->buf);
 		mlx4_db_free(dev->dev, &msrq->db);
@@ -232,6 +232,10 @@ struct phy_info {
 	u16 interface_type;
 };
 
+enum ocrdma_flags {
+	OCRDMA_FLAGS_LINK_STATUS_INIT = 0x01
+};
+
 struct ocrdma_dev {
 	struct ib_device ibdev;
 	struct ocrdma_dev_attr attr;
@@ -287,6 +291,7 @@ struct ocrdma_dev {
 	atomic_t update_sl;
 	u16 pvid;
 	u32 asic_id;
+	u32 flags;
 
 	ulong last_stats_time;
 	struct mutex stats_lock; /* provide synch for debugfs operations */
@@ -591,4 +596,9 @@ static inline u8 ocrdma_is_enabled_and_synced(u32 state)
 		(state & OCRDMA_STATE_FLAG_SYNC);
 }
 
+static inline u8 ocrdma_get_ae_link_state(u32 ae_state)
+{
+	return ((ae_state & OCRDMA_AE_LSC_LS_MASK) >> OCRDMA_AE_LSC_LS_SHIFT);
+}
+
 #endif
@@ -579,6 +579,8 @@ static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
 
 	cmd->async_event_bitmap = BIT(OCRDMA_ASYNC_GRP5_EVE_CODE);
 	cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_RDMA_EVE_CODE);
+	/* Request link events on this MQ. */
+	cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_LINK_EVE_CODE);
 	cmd->async_cqid_ringsize = cq->id;
 	cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
@@ -819,20 +821,42 @@ static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev,
 	}
 }
 
+static void ocrdma_process_link_state(struct ocrdma_dev *dev,
+				      struct ocrdma_ae_mcqe *cqe)
+{
+	struct ocrdma_ae_lnkst_mcqe *evt;
+	u8 lstate;
+
+	evt = (struct ocrdma_ae_lnkst_mcqe *)cqe;
+	lstate = ocrdma_get_ae_link_state(evt->speed_state_ptn);
+
+	if (!(lstate & OCRDMA_AE_LSC_LLINK_MASK))
+		return;
+
+	if (dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT)
+		ocrdma_update_link_state(dev, (lstate & OCRDMA_LINK_ST_MASK));
+}
+
 static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
 {
 	/* async CQE processing */
 	struct ocrdma_ae_mcqe *cqe = ae_cqe;
 	u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
 			OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;
 
-	if (evt_code == OCRDMA_ASYNC_RDMA_EVE_CODE)
+	switch (evt_code) {
+	case OCRDMA_ASYNC_LINK_EVE_CODE:
+		ocrdma_process_link_state(dev, cqe);
+		break;
+	case OCRDMA_ASYNC_RDMA_EVE_CODE:
 		ocrdma_dispatch_ibevent(dev, cqe);
-	else if (evt_code == OCRDMA_ASYNC_GRP5_EVE_CODE)
+		break;
+	case OCRDMA_ASYNC_GRP5_EVE_CODE:
 		ocrdma_process_grp5_aync(dev, cqe);
-	else
+		break;
+	default:
 		pr_err("%s(%d) invalid evt code=0x%x\n", __func__,
 		       dev->id, evt_code);
+	}
 }
 
 static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
@@ -1363,7 +1387,8 @@ static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
 	return status;
 }
 
-int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed)
+int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed,
+			      u8 *lnk_state)
 {
 	int status = -ENOMEM;
 	struct ocrdma_get_link_speed_rsp *rsp;
@@ -1384,8 +1409,11 @@ int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed)
 		goto mbx_err;
 
 	rsp = (struct ocrdma_get_link_speed_rsp *)cmd;
-	*lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK)
-			>> OCRDMA_PHY_PS_SHIFT;
+	if (lnk_speed)
+		*lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK)
+			      >> OCRDMA_PHY_PS_SHIFT;
+	if (lnk_state)
+		*lnk_state = (rsp->res_lnk_st & OCRDMA_LINK_ST_MASK);
 
 mbx_err:
 	kfree(cmd);
@@ -2515,9 +2543,10 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
 	ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
 	cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
 
-	if (vlan_id < 0x1000) {
-		if (dev->pfc_state) {
-			vlan_id = 0;
+	if (vlan_id == 0xFFFF)
+		vlan_id = 0;
+	if (vlan_id || dev->pfc_state) {
+		if (!vlan_id) {
 			pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
 			       dev->id);
 			pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
@@ -106,7 +106,8 @@ void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed,
 		       bool solicited, u16 cqe_popped);
 
 /* verbs specific mailbox commands */
-int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed);
+int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed,
+			      u8 *lnk_st);
 int ocrdma_query_config(struct ocrdma_dev *,
 			struct ocrdma_mbx_query_config *config);
@@ -153,5 +154,6 @@ char *port_speed_string(struct ocrdma_dev *dev);
 void ocrdma_init_service_level(struct ocrdma_dev *);
 void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev);
 void ocrdma_free_pd_range(struct ocrdma_dev *dev);
+void ocrdma_update_link_state(struct ocrdma_dev *dev, u8 lstate);
 
 #endif /* __OCRDMA_HW_H__ */
@@ -290,6 +290,7 @@ static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev)
 static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
 {
 	int status = 0, i;
+	u8 lstate = 0;
 	struct ocrdma_dev *dev;
 
 	dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev));
@@ -319,6 +320,11 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
 	if (status)
 		goto alloc_err;
 
+	/* Query Link state and update */
+	status = ocrdma_mbx_get_link_speed(dev, NULL, &lstate);
+	if (!status)
+		ocrdma_update_link_state(dev, lstate);
+
 	for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++)
 		if (device_create_file(&dev->ibdev.dev, ocrdma_attributes[i]))
 			goto sysfs_err;
@@ -373,7 +379,7 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
 	ocrdma_remove_free(dev);
 }
 
-static int ocrdma_open(struct ocrdma_dev *dev)
+static int ocrdma_dispatch_port_active(struct ocrdma_dev *dev)
 {
 	struct ib_event port_event;
 
@@ -384,32 +390,9 @@ static int ocrdma_open(struct ocrdma_dev *dev)
 	return 0;
 }
 
-static int ocrdma_close(struct ocrdma_dev *dev)
+static int ocrdma_dispatch_port_error(struct ocrdma_dev *dev)
 {
-	int i;
-	struct ocrdma_qp *qp, **cur_qp;
 	struct ib_event err_event;
-	struct ib_qp_attr attrs;
-	int attr_mask = IB_QP_STATE;
-
-	attrs.qp_state = IB_QPS_ERR;
-	mutex_lock(&dev->dev_lock);
-	if (dev->qp_tbl) {
-		cur_qp = dev->qp_tbl;
-		for (i = 0; i < OCRDMA_MAX_QP; i++) {
-			qp = cur_qp[i];
-			if (qp && qp->ibqp.qp_type != IB_QPT_GSI) {
-				/* change the QP state to ERROR */
-				_ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask);
-
-				err_event.event = IB_EVENT_QP_FATAL;
-				err_event.element.qp = &qp->ibqp;
-				err_event.device = &dev->ibdev;
-				ib_dispatch_event(&err_event);
-			}
-		}
-	}
-	mutex_unlock(&dev->dev_lock);
 
 	err_event.event = IB_EVENT_PORT_ERR;
 	err_event.element.port_num = 1;
@@ -420,7 +403,7 @@ static int ocrdma_close(struct ocrdma_dev *dev)
 
 static void ocrdma_shutdown(struct ocrdma_dev *dev)
 {
-	ocrdma_close(dev);
+	ocrdma_dispatch_port_error(dev);
 	ocrdma_remove(dev);
 }
@@ -431,18 +414,28 @@ static void ocrdma_shutdown(struct ocrdma_dev *dev)
 static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
 {
 	switch (event) {
-	case BE_DEV_UP:
-		ocrdma_open(dev);
-		break;
-	case BE_DEV_DOWN:
-		ocrdma_close(dev);
-		break;
 	case BE_DEV_SHUTDOWN:
 		ocrdma_shutdown(dev);
 		break;
+	default:
+		break;
 	}
 }
 
+void ocrdma_update_link_state(struct ocrdma_dev *dev, u8 lstate)
+{
+	if (!(dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT)) {
+		dev->flags |= OCRDMA_FLAGS_LINK_STATUS_INIT;
+		if (!lstate)
+			return;
+	}
+
+	if (!lstate)
+		ocrdma_dispatch_port_error(dev);
+	else
+		ocrdma_dispatch_port_active(dev);
+}
+
 static struct ocrdma_driver ocrdma_drv = {
 	.name = "ocrdma_driver",
 	.add = ocrdma_add,
@@ -465,8 +465,11 @@ struct ocrdma_ae_qp_mcqe {
 	u32 valid_ae_event;
 };
 
-#define OCRDMA_ASYNC_RDMA_EVE_CODE 0x14
-#define OCRDMA_ASYNC_GRP5_EVE_CODE 0x5
+enum ocrdma_async_event_code {
+	OCRDMA_ASYNC_LINK_EVE_CODE = 0x01,
+	OCRDMA_ASYNC_GRP5_EVE_CODE = 0x05,
+	OCRDMA_ASYNC_RDMA_EVE_CODE = 0x14
+};
 
 enum ocrdma_async_grp5_events {
 	OCRDMA_ASYNC_EVENT_QOS_VALUE = 0x01,
@@ -489,6 +492,44 @@ enum OCRDMA_ASYNC_EVENT_TYPE {
 	OCRDMA_MAX_ASYNC_ERRORS
 };
 
+struct ocrdma_ae_lnkst_mcqe {
+	u32 speed_state_ptn;
+	u32 qos_reason_falut;
+	u32 evt_tag;
+	u32 valid_ae_event;
+};
+
+enum {
+	OCRDMA_AE_LSC_PORT_NUM_MASK = 0x3F,
+	OCRDMA_AE_LSC_PT_SHIFT = 0x06,
+	OCRDMA_AE_LSC_PT_MASK = (0x03 <<
+			OCRDMA_AE_LSC_PT_SHIFT),
+	OCRDMA_AE_LSC_LS_SHIFT = 0x08,
+	OCRDMA_AE_LSC_LS_MASK = (0xFF <<
+			OCRDMA_AE_LSC_LS_SHIFT),
+	OCRDMA_AE_LSC_LD_SHIFT = 0x10,
+	OCRDMA_AE_LSC_LD_MASK = (0xFF <<
+			OCRDMA_AE_LSC_LD_SHIFT),
+	OCRDMA_AE_LSC_PPS_SHIFT = 0x18,
+	OCRDMA_AE_LSC_PPS_MASK = (0xFF <<
+			OCRDMA_AE_LSC_PPS_SHIFT),
+	OCRDMA_AE_LSC_PPF_MASK = 0xFF,
+	OCRDMA_AE_LSC_ER_SHIFT = 0x08,
+	OCRDMA_AE_LSC_ER_MASK = (0xFF <<
+			OCRDMA_AE_LSC_ER_SHIFT),
+	OCRDMA_AE_LSC_QOS_SHIFT = 0x10,
+	OCRDMA_AE_LSC_QOS_MASK = (0xFFFF <<
+			OCRDMA_AE_LSC_QOS_SHIFT)
+};
+
+enum {
+	OCRDMA_AE_LSC_PLINK_DOWN = 0x00,
+	OCRDMA_AE_LSC_PLINK_UP = 0x01,
+	OCRDMA_AE_LSC_LLINK_DOWN = 0x02,
+	OCRDMA_AE_LSC_LLINK_MASK = 0x02,
+	OCRDMA_AE_LSC_LLINK_UP = 0x03
+};
+
 /* mailbox command request and responses */
 enum {
 	OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT = 2,
@@ -676,7 +717,7 @@ enum {
 	OCRDMA_PHY_PFLT_SHIFT = 0x18,
 	OCRDMA_QOS_LNKSP_MASK = 0xFFFF0000,
 	OCRDMA_QOS_LNKSP_SHIFT = 0x10,
-	OCRDMA_LLST_MASK = 0xFF,
+	OCRDMA_LINK_ST_MASK = 0x01,
 	OCRDMA_PLFC_MASK = 0x00000400,
 	OCRDMA_PLFC_SHIFT = 0x8,
 	OCRDMA_PLRFC_MASK = 0x00000200,
@@ -691,7 +732,7 @@ struct ocrdma_get_link_speed_rsp {
 	u32 pflt_pps_ld_pnum;
 	u32 qos_lsp;
-	u32 res_lls;
+	u32 res_lnk_st;
 };
 
 enum {
@@ -171,7 +171,7 @@ static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
 	int status;
 	u8 speed;
 
-	status = ocrdma_mbx_get_link_speed(dev, &speed);
+	status = ocrdma_mbx_get_link_speed(dev, &speed, NULL);
 	if (status)
 		speed = OCRDMA_PHYS_LINK_SPEED_ZERO;
@@ -848,8 +848,6 @@ void be_roce_dev_remove(struct be_adapter *);
 /*
  * internal function to open-close roce device during ifup-ifdown.
  */
-void be_roce_dev_open(struct be_adapter *);
-void be_roce_dev_close(struct be_adapter *);
 void be_roce_dev_shutdown(struct be_adapter *);
 
 #endif /* BE_H */
@@ -3432,8 +3432,6 @@ static int be_close(struct net_device *netdev)
 
 	be_disable_if_filters(adapter);
 
-	be_roce_dev_close(adapter);
-
 	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
 		for_all_evt_queues(adapter, eqo, i) {
 			napi_disable(&eqo->napi);
@@ -3601,8 +3599,6 @@ static int be_open(struct net_device *netdev)
 	be_link_status_update(adapter, link_status);
 	netif_tx_start_all_queues(netdev);
 
-	be_roce_dev_open(adapter);
-
 #ifdef CONFIG_BE2NET_VXLAN
 	if (skyhawk_chip(adapter))
 		vxlan_get_rx_port(netdev);
@@ -116,40 +116,6 @@ void be_roce_dev_remove(struct be_adapter *adapter)
 	}
 }
 
-static void _be_roce_dev_open(struct be_adapter *adapter)
-{
-	if (ocrdma_drv && adapter->ocrdma_dev &&
-	    ocrdma_drv->state_change_handler)
-		ocrdma_drv->state_change_handler(adapter->ocrdma_dev,
-						 BE_DEV_UP);
-}
-
-void be_roce_dev_open(struct be_adapter *adapter)
-{
-	if (be_roce_supported(adapter)) {
-		mutex_lock(&be_adapter_list_lock);
-		_be_roce_dev_open(adapter);
-		mutex_unlock(&be_adapter_list_lock);
-	}
-}
-
-static void _be_roce_dev_close(struct be_adapter *adapter)
-{
-	if (ocrdma_drv && adapter->ocrdma_dev &&
-	    ocrdma_drv->state_change_handler)
-		ocrdma_drv->state_change_handler(adapter->ocrdma_dev,
-						 BE_DEV_DOWN);
-}
-
-void be_roce_dev_close(struct be_adapter *adapter)
-{
-	if (be_roce_supported(adapter)) {
-		mutex_lock(&be_adapter_list_lock);
-		_be_roce_dev_close(adapter);
-		mutex_unlock(&be_adapter_list_lock);
-	}
-}
-
 void be_roce_dev_shutdown(struct be_adapter *adapter)
 {
 	if (be_roce_supported(adapter)) {
@@ -177,8 +143,6 @@ int be_roce_register_driver(struct ocrdma_driver *drv)
 		_be_roce_dev_add(dev);
 		netdev = dev->netdev;
-		if (netif_running(netdev) && netif_oper_up(netdev))
-			_be_roce_dev_open(dev);
 	}
 	mutex_unlock(&be_adapter_list_lock);
 	return 0;
@@ -60,9 +60,7 @@ struct ocrdma_driver {
 	void (*state_change_handler) (struct ocrdma_dev *, u32 new_state);
 };
 
-enum {
-	BE_DEV_UP = 0,
-	BE_DEV_DOWN = 1,
+enum be_roce_event {
 	BE_DEV_SHUTDOWN = 2
 };