Commit 5cce1cf7 authored by David S. Miller

Merge branch 'mlx4-next'

Amir Vadai says:

====================
Mellanox ethernet driver updates Jan-27-2015

This patchset introduces some bug fixes, code cleanups, and support for a new
firmware event called the recoverable error event.

Patches were applied and tested against commit b8665c6c ("net: dsa/mv88e6352:
make mv88e6352_wait generic")

Changes from V0:
- Patch 6/11 ("net/mlx4_core: Fix struct mlx4_vhcr_cmd to make implicit padding
  explicit"):
  - Removed __packed
  - Rephrased commit message

- Added a new patch by Majd ("net/mlx4_core: Update the HCA core clock frequency
  after INIT_PORT")
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4967082b 6d6e996c
@@ -901,7 +901,9 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
 		index = be32_to_cpu(smp->attr_mod);
 		if (port < 1 || port > dev->caps.num_ports)
 			return -EINVAL;
-		table = kcalloc(dev->caps.pkey_table_len[port], sizeof *table, GFP_KERNEL);
+		table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
+				sizeof(*table) * 32, GFP_KERNEL);
 		if (!table)
 			return -ENOMEM;
 		/* need to get the full pkey table because the paravirtualized
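The replacement kcalloc() above sizes the buffer in whole blocks of 32 pkeys rather than by the exact table length, presumably because the full table is fetched in 32-entry chunks (the hunk is cut off here). A standalone sketch of that sizing arithmetic, with illustrative lengths only:

#include <stdio.h>

/* Mirrors the sizing in the kcalloc() call above: round the pkey
 * table length up to whole blocks of 32 entries. Note that
 * (len / 32) + 1 allocates one extra block when len is already a
 * multiple of 32. */
static int pkey_buf_entries(int table_len)
{
	return ((table_len / 32) + 1) * 32;
}

int main(void)
{
	printf("len 76  -> %d entries\n", pkey_buf_entries(76));  /* 96 */
	printf("len 128 -> %d entries\n", pkey_buf_entries(128)); /* 160 */
	return 0;
}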
@@ -1221,7 +1223,7 @@ static struct mlx4_cmd_info cmd_info[] = {
 	{
 		.opcode = MLX4_CMD_HW2SW_EQ,
 		.has_inbox = false,
-		.has_outbox = true,
+		.has_outbox = false,
 		.out_is_imm = false,
 		.encode_slave_id = true,
 		.verify = NULL,
...
@@ -770,22 +770,20 @@ static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 		return 0;
 	}

-	proto_admin = cpu_to_be32(ptys_adv);
-	if (speed >= 0 && speed != priv->port_state.link_speed)
-		/* If speed was set then speed decides :-) */
-		proto_admin = speed_set_ptys_admin(priv, speed,
-						   ptys_reg.eth_proto_cap);
+	proto_admin = cmd->autoneg == AUTONEG_ENABLE ?
+		cpu_to_be32(ptys_adv) :
+		speed_set_ptys_admin(priv, speed,
+				     ptys_reg.eth_proto_cap);

 	proto_admin &= ptys_reg.eth_proto_cap;
-
-	if (proto_admin == ptys_reg.eth_proto_admin)
-		return 0; /* Nothing to change */
-
 	if (!proto_admin) {
 		en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n");
 		return -EINVAL; /* nothing to change due to bad input */
 	}

+	if (proto_admin == ptys_reg.eth_proto_admin)
+		return 0; /* Nothing to change */
+
 	en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
 	       be32_to_cpu(proto_admin));
@@ -798,9 +796,9 @@ static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 		return ret;
 	}

-	en_warn(priv, "Port link mode changed, restarting port...\n");
 	mutex_lock(&priv->mdev->state_lock);
 	if (priv->port_up) {
+		en_warn(priv, "Port link mode changed, restarting port...\n");
 		mlx4_en_stop_port(dev, 1);
 		if (mlx4_en_start_port(dev))
 			en_err(priv, "Failed restarting port %d\n", priv->port);
...
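The rework above makes autoneg the deciding input: with autoneg enabled the user's advertised mask (ptys_adv) is used directly, otherwise the mask is derived from the requested speed via speed_set_ptys_admin(), and either result is clamped to the port's capability mask before the bad-input and no-change checks run. A toy model of that selection logic; the speed-to-mask mapping and the mask values are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define AUTONEG_ENABLE 1	/* same value as in linux/ethtool.h */

/* Hypothetical stand-in for speed_set_ptys_admin(); real PTYS bit
 * meanings are not modeled here. */
static uint32_t speed_to_mask(int speed)
{
	return speed >= 10000 ? 0xf0 : 0x0f;
}

/* Autoneg on -> user's advertised mask; autoneg off -> mask derived
 * from the requested speed; either way, never exceed capability. */
static uint32_t choose_proto_admin(int autoneg, uint32_t adv_mask,
				   int speed, uint32_t proto_cap)
{
	uint32_t proto = autoneg == AUTONEG_ENABLE ? adv_mask
						   : speed_to_mask(speed);
	return proto & proto_cap;
}

int main(void)
{
	printf("0x%x\n", choose_proto_admin(AUTONEG_ENABLE, 0x3c, 0, 0xff));
	printf("0x%x\n", choose_proto_admin(0, 0x3c, 10000, 0xff));
	return 0;
}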
@@ -88,6 +88,8 @@ static u64 get_async_ev_mask(struct mlx4_dev *dev)
 	u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;
 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
 		async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);
+	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
+		async_ev_mask |= (1ull << MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT);

 	return async_ev_mask;
 }
@@ -736,6 +738,26 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 				    (unsigned long) eqe);
 			break;

+		case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT:
+			switch (eqe->subtype) {
+			case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE:
+				mlx4_warn(dev, "Bad cable detected on port %u\n",
+					  eqe->event.bad_cable.port);
+				break;
+			case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE:
+				mlx4_warn(dev, "Unsupported cable detected\n");
+				break;
+			default:
+				mlx4_dbg(dev,
+					 "Unhandled recoverable error event detected: %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, ownership=%s\n",
+					 eqe->type, eqe->subtype, eq->eqn,
+					 eq->cons_index, eqe->owner, eq->nent,
+					 !!(eqe->owner & 0x80) ^
+					 !!(eq->cons_index & eq->nent) ? "HW" : "SW");
+				break;
+			}
+			break;
+
 		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
 		case MLX4_EVENT_TYPE_ECC_DETECT:
 		default:
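In the unhandled-event debug print above, the ownership string reports whether the stale EQE still belongs to hardware: the entry's owner bit is XORed with the parity of the consumer index's pass over the ring (eq->nent is a power of two, so cons_index & nent flips on each wrap). A standalone sketch of that test, with invented values:

#include <stdio.h>

/* Same expression as in the mlx4_dbg() call above, lifted into a
 * helper for illustration. */
static const char *eqe_ownership(unsigned char owner,
				 unsigned int cons_index, unsigned int nent)
{
	return (!!(owner & 0x80) ^ !!(cons_index & nent)) ? "HW" : "SW";
}

int main(void)
{
	/* ring of 0x100 entries: bit 0x100 of cons_index flips each pass */
	printf("%s\n", eqe_ownership(0x80, 0x000, 0x100)); /* HW */
	printf("%s\n", eqe_ownership(0x80, 0x100, 0x100)); /* SW */
	return 0;
}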
@@ -846,12 +868,10 @@ static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
 			MLX4_CMD_WRAPPED);
 }

-static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
-			 int eq_num)
+static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, int eq_num)
 {
-	return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num,
-			    0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
-			    MLX4_CMD_WRAPPED);
+	return mlx4_cmd(dev, 0, eq_num, 1, MLX4_CMD_HW2SW_EQ,
+			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }

 static int mlx4_num_eq_uar(struct mlx4_dev *dev)
@@ -1024,7 +1044,6 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
 			 struct mlx4_eq *eq)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
-	struct mlx4_cmd_mailbox *mailbox;
 	int err;
 	int i;
 	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
@@ -1032,24 +1051,10 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
 	 */
 	int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE;

-	mailbox = mlx4_alloc_cmd_mailbox(dev);
-	if (IS_ERR(mailbox))
-		return;
-
-	err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
+	err = mlx4_HW2SW_EQ(dev, eq->eqn);
 	if (err)
 		mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

-	if (0) {
-		mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
-		for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
-			if (i % 4 == 0)
-				pr_cont("[%02x] ", i * 4);
-			pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
-			if ((i + 1) % 4 == 0)
-				pr_cont("\n");
-		}
-	}
-
 	synchronize_irq(eq->irq);
 	tasklet_disable(&eq->tasklet_ctx.task);
@@ -1061,7 +1066,6 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
 	kfree(eq->page_list);
 	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
-	mlx4_free_cmd_mailbox(dev, mailbox);
 }

 static void mlx4_free_irqs(struct mlx4_dev *dev)
...
@@ -84,13 +84,10 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
 		[ 1] = "UC transport",
 		[ 2] = "UD transport",
 		[ 3] = "XRC transport",
-		[ 4] = "reliable multicast",
-		[ 5] = "FCoIB support",
 		[ 6] = "SRQ support",
 		[ 7] = "IPoIB checksum offload",
 		[ 8] = "P_Key violation counter",
 		[ 9] = "Q_Key violation counter",
-		[10] = "VMM",
 		[12] = "Dual Port Different Protocol (DPDP) support",
 		[15] = "Big LSO headers",
 		[16] = "MW support",
@@ -99,12 +96,11 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
 		[19] = "Raw multicast support",
 		[20] = "Address vector port checking support",
 		[21] = "UD multicast support",
-		[24] = "Demand paging support",
-		[25] = "Router support",
 		[30] = "IBoE support",
 		[32] = "Unicast loopback support",
 		[34] = "FCS header control",
-		[38] = "Wake On LAN support",
+		[37] = "Wake On LAN (port1) support",
+		[38] = "Wake On LAN (port2) support",
 		[40] = "UDP RSS support",
 		[41] = "Unicast VEP steering support",
 		[42] = "Multicast VEP steering support",
@@ -145,7 +141,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
 		[16] = "CONFIG DEV support",
 		[17] = "Asymmetric EQs support",
 		[18] = "More than 80 VFs support",
-		[19] = "Performance optimized for limited rule configuration flow steering support"
+		[19] = "Performance optimized for limited rule configuration flow steering support",
+		[20] = "Recoverable error events support"
 	};
 	int i;
@@ -259,6 +256,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP	0x28
 #define QUERY_FUNC_CAP_MAX_EQ_OFFSET		0x2c
 #define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET	0x30
+#define QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET	0x48
 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET		0x50
 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET		0x54
@@ -273,6 +271,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 #define QUERY_FUNC_CAP_FLAG_RDMA		0x40
 #define QUERY_FUNC_CAP_FLAG_ETH			0x80
 #define QUERY_FUNC_CAP_FLAG_QUOTAS		0x10
+#define QUERY_FUNC_CAP_FLAG_RESD_LKEY		0x08
 #define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX	0x04

 #define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG	(1UL << 31)
@@ -344,9 +343,12 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 	} else if (vhcr->op_modifier == 0) {
 		struct mlx4_active_ports actv_ports =
 			mlx4_get_active_ports(dev, slave);
-		/* enable rdma and ethernet interfaces, and new quota locations */
+		/* enable rdma and ethernet interfaces, new quota locations,
+		 * and reserved lkey
+		 */
 		field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
-			 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX);
+			 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX |
+			 QUERY_FUNC_CAP_FLAG_RESD_LKEY);
 		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);

 		field = min(
@@ -411,6 +413,9 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 		size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG |
 			QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG;
 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
+
+		size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00);
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
 	} else
 		err = -EINVAL;
@@ -503,6 +508,13 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
 		MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
 		func_cap->reserved_eq = size & 0xFFFFFF;

+		if (func_cap->flags & QUERY_FUNC_CAP_FLAG_RESD_LKEY) {
+			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
+			func_cap->reserved_lkey = size;
+		} else {
+			func_cap->reserved_lkey = 0;
+		}
+
 		func_cap->extra_flags = 0;

 		/* Mailbox data from 0x6c and onward should only be treated if
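Tying the two QUERY_FUNC_CAP hunks together: the master publishes a per-slave reserved lkey by folding the slave number into bits 8..15 of the base value (the earlier hunk's size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00)), and the slave simply stores what it reads back, defaulting to 0 when the flag is absent. The packing as a standalone sketch; the base lkey value is invented:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the per-slave reserved lkey composition used by the
 * master above; base_lkey here is an illustrative value only. */
static uint32_t slave_reserved_lkey(uint32_t base_lkey, int slave)
{
	return base_lkey + ((slave << 8) & 0xFF00);
}

int main(void)
{
	printf("slave 0: 0x%04x\n", slave_reserved_lkey(0x100, 0)); /* 0x0100 */
	printf("slave 3: 0x%04x\n", slave_reserved_lkey(0x100, 3)); /* 0x0400 */
	return 0;
}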
@@ -859,6 +871,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
 	if (field32 & (1 << 0))
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
+	if (field32 & (1 << 7))
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
 	if (field & 1<<6)
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
@@ -1562,6 +1576,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 #define INIT_HCA_VXLAN_OFFSET		 0x0c
 #define INIT_HCA_CACHELINE_SZ_OFFSET	 0x0e
 #define INIT_HCA_FLAGS_OFFSET		 0x014
+#define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018
 #define INIT_HCA_QPC_OFFSET		 0x020
 #define INIT_HCA_QPC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x10)
 #define INIT_HCA_LOG_QP_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x17)
@@ -1668,6 +1683,9 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
 	}

+	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
+		*(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31);
+
 	/* QPC/EEC/CQC/EQC/RDMARC attributes */

 	MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
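The inbox write above treats the INIT_HCA mailbox as an array of big-endian 32-bit words and sets bit 31 of the word at byte offset 0x18 (hence the division by 4 for the index). A userspace stand-in, with htonl()/ntohl() playing the role of cpu_to_be32()/be32_to_cpu():

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018

int main(void)
{
	uint32_t inbox[16] = {0};	/* toy mailbox */

	/* set bit 31 of the big-endian word at byte offset 0x18 */
	inbox[INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4] |= htonl(1u << 31);

	printf("flag word = 0x%08x\n", ntohl(inbox[6]));	/* 0x80000000 */
	return 0;
}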
@@ -1752,8 +1770,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 		MLX4_PUT(inbox, parser_params,	INIT_HCA_VXLAN_OFFSET);
 	}

-	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
-		       MLX4_CMD_NATIVE);
+	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA,
+		       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
 	if (err)
 		mlx4_err(dev, "INIT_HCA returns %d\n", err);
@@ -1879,6 +1897,36 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
 	return err;
 }

+static int mlx4_hca_core_clock_update(struct mlx4_dev *dev)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	__be32 *outbox;
+	int err;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox)) {
+		mlx4_warn(dev, "hca_core_clock mailbox allocation failed\n");
+		return PTR_ERR(mailbox);
+	}
+	outbox = mailbox->buf;
+
+	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
+			   MLX4_CMD_QUERY_HCA,
+			   MLX4_CMD_TIME_CLASS_B,
+			   !mlx4_is_slave(dev));
+	if (err) {
+		mlx4_warn(dev, "hca_core_clock update failed\n");
+		goto out;
+	}
+
+	MLX4_GET(dev->caps.hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
+
+out:
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+
 /* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
  * and real QP0 are active, so that the paravirtualized QP0 is ready
  * to operate */
@@ -1983,6 +2031,9 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
 		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
 			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);

+	if (!err)
+		mlx4_hca_core_clock_update(dev);
+
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
@@ -2007,7 +2058,7 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
 	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
 		if (priv->mfunc.master.init_port_ref[port] == 1) {
 			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
-				       1000, MLX4_CMD_NATIVE);
+				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 			if (err)
 				return err;
 		}
@@ -2018,7 +2069,7 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
 		if (!priv->mfunc.master.qp0_state[port].qp0_active &&
 		    priv->mfunc.master.qp0_state[port].port_active) {
 			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
-				       1000, MLX4_CMD_NATIVE);
+				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 			if (err)
 				return err;
 			priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
@@ -2033,15 +2084,15 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
 {
-	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
-			MLX4_CMD_WRAPPED);
+	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
+			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);

 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
 {
-	return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
-			MLX4_CMD_NATIVE);
+	return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA,
+			MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
 }

 struct mlx4_config_dev {
@@ -2180,7 +2231,8 @@ int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
 int mlx4_NOP(struct mlx4_dev *dev)
 {
 	/* Input modifier of 0x1f means "finish as soon as possible." */
-	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
+	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A,
+			MLX4_CMD_NATIVE);
 }

 int mlx4_get_phys_port_id(struct mlx4_dev *dev)
...
@@ -147,6 +147,7 @@ struct mlx4_func_cap {
 	u32	qp0_proxy_qpn;
 	u32	qp1_tunnel_qpn;
 	u32	qp1_proxy_qpn;
+	u32	reserved_lkey;
 	u8	physical_port;
 	u8	port_flags;
 	u8	flags1;
...
@@ -797,6 +797,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 	dev->caps.num_mpts		= 1 << hca_param.log_mpt_sz;
 	dev->caps.num_eqs		= func_cap.max_eq;
 	dev->caps.reserved_eqs		= func_cap.reserved_eq;
+	dev->caps.reserved_lkey		= func_cap.reserved_lkey;
 	dev->caps.num_pds               = MLX4_NUM_PDS;
 	dev->caps.num_mgms              = 0;
 	dev->caps.num_amgms             = 0;
@@ -2978,8 +2979,10 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
 	mlx4_free_eq_table(dev);

 err_master_mfunc:
-	if (mlx4_is_master(dev))
+	if (mlx4_is_master(dev)) {
+		mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY);
 		mlx4_multi_func_cleanup(dev);
+	}

 	if (mlx4_is_slave(dev)) {
 		kfree(dev->caps.qp0_qkey);
...
@@ -196,6 +196,7 @@ struct mlx4_vhcr {
 struct mlx4_vhcr_cmd {
 	__be64 in_param;
 	__be32 in_modifier;
+	u32 reserved1;
 	__be64 out_param;
 	__be16 token;
 	u16 reserved;
...
@@ -1155,7 +1155,7 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_free);
 int mlx4_SYNC_TPT(struct mlx4_dev *dev)
 {
-	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
-			MLX4_CMD_NATIVE);
+	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT,
+			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 }
 EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
@@ -214,7 +214,6 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
 		list_add(&uar->bf_list, &priv->bf_list);
 	}

-	bf->uar = uar;
 	idx = ffz(uar->free_bf_bmap);
 	uar->free_bf_bmap |= 1 << idx;
 	bf->uar = uar;
...
@@ -4677,7 +4677,6 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
 	int state;
 	LIST_HEAD(tlist);
 	int eqn;
-	struct mlx4_cmd_mailbox *mailbox;

 	err = move_all_busy(dev, slave, RES_EQ);
 	if (err)
@@ -4703,20 +4702,13 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
 					break;

 				case RES_EQ_HW:
-					mailbox = mlx4_alloc_cmd_mailbox(dev);
-					if (IS_ERR(mailbox)) {
-						cond_resched();
-						continue;
-					}
-					err = mlx4_cmd_box(dev, slave, 0,
-							   eqn & 0xff, 0,
-							   MLX4_CMD_HW2SW_EQ,
-							   MLX4_CMD_TIME_CLASS_A,
-							   MLX4_CMD_NATIVE);
+					err = mlx4_cmd(dev, slave, eqn & 0xff,
+						       1, MLX4_CMD_HW2SW_EQ,
+						       MLX4_CMD_TIME_CLASS_A,
+						       MLX4_CMD_NATIVE);
 					if (err)
 						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
 							 slave, eqn);
-					mlx4_free_cmd_mailbox(dev, mailbox);
 					atomic_dec(&eq->mtt->ref_count);
 					state = RES_EQ_RESERVED;
 					break;
...
@@ -165,9 +165,9 @@ enum {
 };

 enum {
-	MLX4_CMD_TIME_CLASS_A	= 10000,
-	MLX4_CMD_TIME_CLASS_B	= 10000,
-	MLX4_CMD_TIME_CLASS_C	= 10000,
+	MLX4_CMD_TIME_CLASS_A	= 60000,
+	MLX4_CMD_TIME_CLASS_B	= 60000,
+	MLX4_CMD_TIME_CLASS_C	= 60000,
 };

 enum {
...
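Every call site touched above that dropped a raw millisecond literal (100, 1000, 10000) now names one of these classes instead, and the classes themselves were raised to 60 seconds, so any future timeout tuning becomes a one-line edit here. Mirrored as a trivial standalone snippet:

#include <stdio.h>

/* Same values as the updated enum above; callers name the class
 * rather than hard-coding a millisecond count. */
enum {
	MLX4_CMD_TIME_CLASS_A = 60000,
	MLX4_CMD_TIME_CLASS_B = 60000,
	MLX4_CMD_TIME_CLASS_C = 60000,
};

int main(void)
{
	printf("class A = %d ms\n", MLX4_CMD_TIME_CLASS_A);
	printf("class C = %d ms\n", MLX4_CMD_TIME_CLASS_C);
	return 0;
}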
@@ -200,7 +200,8 @@ enum {
 	MLX4_DEV_CAP_FLAG2_CONFIG_DEV		= 1LL << 16,
 	MLX4_DEV_CAP_FLAG2_SYS_EQS		= 1LL << 17,
 	MLX4_DEV_CAP_FLAG2_80_VFS		= 1LL << 18,
-	MLX4_DEV_CAP_FLAG2_FS_A0		= 1LL << 19
+	MLX4_DEV_CAP_FLAG2_FS_A0		= 1LL << 19,
+	MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20
 };

 enum {
@@ -280,6 +281,7 @@ enum mlx4_event {
 	MLX4_EVENT_TYPE_FATAL_WARNING	   = 0x1b,
 	MLX4_EVENT_TYPE_FLR_EVENT	   = 0x1c,
 	MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d,
+	MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT = 0x3e,
 	MLX4_EVENT_TYPE_NONE		   = 0xff,
 };
@@ -288,6 +290,11 @@ enum {
 	MLX4_PORT_CHANGE_SUBTYPE_ACTIVE	= 4
 };

+enum {
+	MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE		= 1,
+	MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE	= 2,
+};
+
 enum {
 	MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0,
 };
@@ -860,6 +867,11 @@ struct mlx4_eqe {
 			} __packed tbl_change_info;
 		} params;
 	} __packed port_mgmt_change;
+	struct {
+		u8 reserved[3];
+		u8 port;
+		u32 reserved1[5];
+	} __packed bad_cable;
 	} event;
 	u8 slave_id;
 	u8 reserved3[2];
...
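The new bad_cable member above defines the payload that the EQ handler reads eqe->event.bad_cable.port from: three reserved bytes, the port number, then five reserved dwords. A userspace sketch parsing such a payload; the raw bytes are invented:

#include <stdint.h>
#include <stdio.h>

/* Same layout as the bad_cable struct added above. */
struct bad_cable_event {
	uint8_t  reserved[3];
	uint8_t  port;
	uint32_t reserved1[5];
} __attribute__((packed));

int main(void)
{
	uint8_t raw[24] = { 0, 0, 0, 2 };	/* port 2, rest reserved */
	const struct bad_cable_event *ev = (const void *)raw;

	printf("bad cable on port %u\n", ev->port);
	return 0;
}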