Commit e59cc767 authored by David S. Miller

Merge branch 'mlx4-fixes'

Tariq Toukan says:

====================
mlx4 misc fixes for 4.9

This patchset contains several bug fixes from the team to the
mlx4 Eth and Core drivers.

Series generated against net commit:
ecc515d7 'sctp: fix the panic caused by route update'
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ff57087f eb4b6788
@@ -2469,6 +2469,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
 	kfree(priv->mfunc.master.slave_state);
 err_comm:
 	iounmap(priv->mfunc.comm);
+	priv->mfunc.comm = NULL;
 err_vhcr:
 	dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
 			  priv->mfunc.vhcr,
@@ -2537,6 +2538,13 @@ void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
 	int slave;
 	u32 slave_read;
 
+	/* If the comm channel has not yet been initialized,
+	 * skip reporting the internal error event to all
+	 * the communication channels.
+	 */
+	if (!priv->mfunc.comm)
+		return;
+
 	/* Report an internal error event to all
 	 * communication channels.
 	 */
@@ -2571,6 +2579,7 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
 	}
 
 	iounmap(priv->mfunc.comm);
+	priv->mfunc.comm = NULL;
 }
 
 void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
......
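
The pattern above, clearing the pointer immediately after iounmap() and testing it before use, is what keeps mlx4_report_internal_err_comm_event() from dereferencing an unmapped comm channel when a device error is reported mid-teardown. A minimal user-space sketch of the same idiom (hypothetical names, not driver code):

    #include <stddef.h>
    #include <stdio.h>

    struct func_ctx {
            int *comm;      /* stands in for the io-mapped comm channel */
    };

    /* Teardown mirrors the fix: release the mapping, then clear the
     * pointer so later error paths can see the channel is gone.
     */
    static void ctx_teardown(struct func_ctx *ctx)
    {
            /* iounmap(ctx->comm); in the real driver */
            ctx->comm = NULL;
    }

    static void report_internal_err(struct func_ctx *ctx)
    {
            if (!ctx->comm) /* not initialized, or already torn down */
                    return;
            printf("reporting error through the comm channel\n");
    }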
@@ -245,8 +245,11 @@ static u32 freq_to_shift(u16 freq)
 {
 	u32 freq_khz = freq * 1000;
 	u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
+	u64 tmp_rounded =
+		roundup_pow_of_two(max_val_cycles) > max_val_cycles ?
+		roundup_pow_of_two(max_val_cycles) - 1 : UINT_MAX;
 	u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
-		max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
+		max_val_cycles : tmp_rounded;
 	/* calculate max possible multiplier in order to fit in 64bit */
 	u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
......
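
The extra guard matters on 32-bit systems: roundup_pow_of_two() operates on an unsigned long, so a 64-bit cycle count is truncated and the rounding can wrap to zero; the old unconditional "roundup_pow_of_two(max_val_cycles) - 1" then produced a bogus bound and eventually a zero divisor. A standalone sketch of the failure mode, using a toy 32-bit rounding helper and an illustrative cycle count:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy model of roundup_pow_of_two() on a 32-bit unsigned long:
     * returns 0 when the next power of two does not fit in 32 bits.
     */
    static uint32_t toy_roundup_pow_of_two32(uint32_t x)
    {
            uint32_t p = 1;

            while (p && p < x)
                    p <<= 1;
            return p;
    }

    int main(void)
    {
            uint64_t max_val_cycles = 0x1B31D2900ULL; /* wider than 32 bits */
            uint32_t r = toy_roundup_pow_of_two32((uint32_t)max_val_cycles);

            /* Old logic: r - 1 unconditionally (bogus when r wraps to 0).
             * Fixed logic: trust r only if it really exceeds the input,
             * otherwise fall back to UINT_MAX as a safe upper bound.
             */
            uint64_t rounded = ((uint64_t)r > max_val_cycles) ? r - 1 : UINT32_MAX;

            printf("r=%u rounded=%llu\n", r, (unsigned long long)rounded);
            return 0;
    }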
@@ -1733,6 +1733,13 @@ int mlx4_en_start_port(struct net_device *dev)
 		udp_tunnel_get_rx_info(dev);
 
 	priv->port_up = true;
+
+	/* Process all completions if exist to prevent
+	 * the queues freezing if they are full
+	 */
+	for (i = 0; i < priv->rx_ring_num; i++)
+		napi_schedule(&priv->rx_cq[i]->napi);
+
 	netif_tx_start_all_queues(dev);
 	netif_device_attach(dev);
@@ -1910,8 +1917,9 @@ static void mlx4_en_clear_stats(struct net_device *dev)
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int i;
 
-	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
-		en_dbg(HW, priv, "Failed dumping statistics\n");
+	if (!mlx4_is_slave(mdev->dev))
+		if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
+			en_dbg(HW, priv, "Failed dumping statistics\n");
 
 	memset(&priv->pstats, 0, sizeof(priv->pstats));
 	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
@@ -2194,6 +2202,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 	if (!shutdown)
 		free_netdev(dev);
+	dev->ethtool_ops = NULL;
 }
 
 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
......
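
Two notes on the en_netdev.c hunks. First, the port-up change: if an RX ring filled while the port was down, no further completion will arrive for it, so no interrupt fires and the ring would stay frozen; scheduling each ring's NAPI once after priv->port_up is set forces one poll pass that drains whatever accumulated. A sketch of that one-shot kick as a helper (hypothetical function name, fields as in the hunk above):

    /* napi_schedule() is safe to call without a fresh interrupt; it just
     * queues the ring's poll routine, which reaps pending completions.
     */
    static void kick_rx_rings(struct mlx4_en_priv *priv)
    {
            int i;

            for (i = 0; i < priv->rx_ring_num; i++)
                    napi_schedule(&priv->rx_cq[i]->napi);
    }

Second, the clear-stats change pairs with the DUMP_ETH_STATS hunks below: once that command is issued as NATIVE, only the PF may run it, so slaves skip the firmware dump and simply zero their software counters.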
@@ -166,7 +166,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 		return PTR_ERR(mailbox);
 	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
 			   MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
-			   MLX4_CMD_WRAPPED);
+			   MLX4_CMD_NATIVE);
 	if (err)
 		goto out;
@@ -322,7 +322,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 		err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma,
 				   in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
 				   0, MLX4_CMD_DUMP_ETH_STATS,
-				   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+				   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 		if (err)
 			goto out;
 	}
......
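
For context on the flag change: MLX4_CMD_WRAPPED routes a command through the PF's command wrapper so it can be executed on a VF's behalf, while MLX4_CMD_NATIVE issues it directly from the calling function's own context. Moving the statistics dump to NATIVE keeps the PF wrapper path out of the statistics flow entirely. A toy model of the routing difference (simplified; the real behavior lives in mlx4_cmd() and the *_wrapper callbacks):

    enum cmd_mode { CMD_WRAPPED, CMD_NATIVE };

    /* Hypothetical illustration only: WRAPPED commands from a VF are
     * trapped and run by the PF's wrapper; NATIVE commands go straight
     * from the caller to firmware.
     */
    static const char *route_cmd(int is_vf, enum cmd_mode mode)
    {
            if (mode == CMD_WRAPPED && is_vf)
                    return "VF -> PF wrapper -> firmware";
            return "caller -> firmware";
    }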
@@ -118,6 +118,29 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
 	return !loopback_ok;
 }
 
+static int mlx4_en_test_interrupts(struct mlx4_en_priv *priv)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int err = 0;
+	int i = 0;
+
+	err = mlx4_test_async(mdev->dev);
+	/* When not in MSI_X or slave, test only async */
+	if (!(mdev->dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(mdev->dev))
+		return err;
+
+	/* A loop over all completion vectors of current port,
+	 * for each vector check whether it works by mapping command
+	 * completions to that vector and performing a NOP command
+	 */
+	for (i = 0; i < priv->rx_ring_num; i++) {
+		err = mlx4_test_interrupt(mdev->dev, priv->rx_cq[i]->vector);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
 static int mlx4_en_test_link(struct mlx4_en_priv *priv)
 {
@@ -151,7 +174,6 @@ static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
 void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_dev *mdev = priv->mdev;
 	int i, carrier_ok;
 
 	memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
@@ -177,7 +199,7 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
 			netif_carrier_on(dev);
 
 	}
-	buf[0] = mlx4_test_interrupts(mdev->dev);
+	buf[0] = mlx4_en_test_interrupts(priv);
 	buf[1] = mlx4_en_test_link(priv);
 	buf[2] = mlx4_en_test_speed(priv);
......
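
The refactor splits the old test into two exported helpers: mlx4_test_async() checks only the vector that carries asynchronous events, and mlx4_test_interrupt() checks a single completion vector, so the ethtool self-test can restrict itself to the vectors this port's RX rings actually use (a slave, or a device without MSI-X, runs only the async check). The test remains reachable from user space through the standard ethtool self-test, for example:

    ethtool -t eth1

(interface name illustrative); its result lands in buf[0].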
@@ -1361,53 +1361,49 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
 	kfree(priv->eq_table.uar_map);
 }
 
-/* A test that verifies that we can accept interrupts on all
- * the irq vectors of the device.
+/* A test that verifies that we can accept interrupts
+ * on the vector allocated for asynchronous events
+ */
+int mlx4_test_async(struct mlx4_dev *dev)
+{
+	return mlx4_NOP(dev);
+}
+EXPORT_SYMBOL(mlx4_test_async);
+
+/* A test that verifies that we can accept interrupts
+ * on the given irq vector of the tested port.
  * Interrupts are checked using the NOP command.
  */
-int mlx4_test_interrupts(struct mlx4_dev *dev)
+int mlx4_test_interrupt(struct mlx4_dev *dev, int vector)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
-	int i;
 	int err;
 
-	err = mlx4_NOP(dev);
-	/* When not in MSI_X, there is only one irq to check */
-	if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
-		return err;
-
-	/* A loop over all completion vectors, for each vector we will check
-	 * whether it works by mapping command completions to that vector
-	 * and performing a NOP command
-	 */
-	for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
-		/* Make sure request_irq was called */
-		if (!priv->eq_table.eq[i].have_irq)
-			continue;
-
-		/* Temporary use polling for command completions */
-		mlx4_cmd_use_polling(dev);
-
-		/* Map the new eq to handle all asynchronous events */
-		err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
-				  priv->eq_table.eq[i].eqn);
-		if (err) {
-			mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
-			mlx4_cmd_use_events(dev);
-			break;
-		}
-
-		/* Go back to using events */
-		mlx4_cmd_use_events(dev);
-		err = mlx4_NOP(dev);
-	}
+	/* Temporary use polling for command completions */
+	mlx4_cmd_use_polling(dev);
+
+	/* Map the new eq to handle all asynchronous events */
+	err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
+			  priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn);
+	if (err) {
+		mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
+		goto out;
+	}
+
+	/* Go back to using events */
+	mlx4_cmd_use_events(dev);
+	err = mlx4_NOP(dev);
 
 	/* Return to default */
+	mlx4_cmd_use_polling(dev);
+out:
 	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
 		    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
+	mlx4_cmd_use_events(dev);
 	return err;
 }
-EXPORT_SYMBOL(mlx4_test_interrupts);
+EXPORT_SYMBOL(mlx4_test_interrupt);
 
 bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector)
 {
......
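
Why mlx4_test_interrupt() toggles between polling and events: command completions normally arrive as events on the async EQ, but the test remaps the async event mask onto the EQ under test, so while a remap is in flight the driver must complete commands by polling. The save/restore sequence, stripped of error handling (test_eqn and async_eqn stand in for the eqn lookups in the hunk above):

    mlx4_cmd_use_polling(dev);      /* events unusable while remapping */
    mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, test_eqn);
    mlx4_cmd_use_events(dev);
    err = mlx4_NOP(dev);            /* completes only if the irq fires */
    mlx4_cmd_use_polling(dev);      /* back to polling for the restore */
    mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, async_eqn);
    mlx4_cmd_use_events(dev);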
@@ -49,9 +49,9 @@ enum {
 extern void __buggy_use_of_MLX4_GET(void);
 extern void __buggy_use_of_MLX4_PUT(void);
 
-static bool enable_qos = true;
+static bool enable_qos;
 module_param(enable_qos, bool, 0444);
-MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)");
+MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)");
 
 #define MLX4_GET(dest, source, offset) \
 	do { \
......
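
With the new default, "static bool enable_qos;" zero-initializes to false, so Enhanced QoS is now opt-in. Loading the module with the old behavior is still possible:

    modprobe mlx4_core enable_qos=1

Since the parameter is registered with mode 0444, the active value can be read back from /sys/module/mlx4_core/parameters/enable_qos but not changed at runtime.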
@@ -1102,6 +1102,14 @@ static int __set_port_type(struct mlx4_port_info *info,
 	int i;
 	int err = 0;
 
+	if ((port_type & mdev->caps.supported_type[info->port]) != port_type) {
+		mlx4_err(mdev,
+			 "Requested port type for port %d is not supported on this HCA\n",
+			 info->port);
+		err = -EINVAL;
+		goto err_sup;
+	}
+
 	mlx4_stop_sense(mdev);
 	mutex_lock(&priv->port_mutex);
 	info->tmp_type = port_type;
@@ -1147,7 +1155,7 @@ static int __set_port_type(struct mlx4_port_info *info,
 out:
 	mlx4_start_sense(mdev);
 	mutex_unlock(&priv->port_mutex);
-
+err_sup:
 	return err;
 }
......
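
The added check fails fast with -EINVAL, before port sensing is stopped or the port mutex is taken, whenever the requested type is not contained in the HCA's supported-type mask. It is the usual "requested is a subset of supported" bitmask idiom; a minimal sketch (hypothetical helper name):

    /* supported is a bitmask of MLX4_PORT_TYPE_* bits; the request passes
     * only if every requested bit is already present in the mask.
     */
    static int port_type_supported(unsigned int requested, unsigned int supported)
    {
            return (requested & supported) == requested;
    }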
@@ -145,9 +145,10 @@ enum mlx4_resource {
 	RES_MTT,
 	RES_MAC,
 	RES_VLAN,
-	RES_EQ,
 	RES_NPORT_ID,
 	RES_COUNTER,
 	RES_FS_RULE,
+	RES_EQ,
 	MLX4_NUM_OF_RESOURCE_TYPE
 };
@@ -1329,8 +1330,6 @@ int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
 			       struct mlx4_cmd_info *cmd);
 int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function,
 			      int port, void *buf);
-int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod,
-			       struct mlx4_cmd_mailbox *outbox);
 int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
 				struct mlx4_vhcr *vhcr,
 				struct mlx4_cmd_mailbox *inbox,
......
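
Moving RES_EQ is not cosmetic: value-less enumerators are numbered implicitly, and the resource-type value is used by the resource tracker in its exchanges with firmware, so RES_EQ has to sit at the position the firmware spec assigns it. A two-line illustration of the renumbering (toy names):

    enum old_order { R_MTT, R_MAC, R_VLAN, R_EQ, R_NPORT_ID }; /* R_EQ == 3 */
    enum new_order { S_MTT, S_MAC, S_VLAN, S_NPORT_ID, S_EQ }; /* S_EQ == 4 */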
@@ -1728,24 +1728,13 @@ int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
 	return err;
 }
 
-int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
-			       u32 in_mod, struct mlx4_cmd_mailbox *outbox)
-{
-	return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
-			    MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
-			    MLX4_CMD_NATIVE);
-}
-
 int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
 				struct mlx4_vhcr *vhcr,
 				struct mlx4_cmd_mailbox *inbox,
 				struct mlx4_cmd_mailbox *outbox,
 				struct mlx4_cmd_info *cmd)
 {
-	if (slave != dev->caps.function)
-		return 0;
-	return mlx4_common_dump_eth_stats(dev, slave,
-					  vhcr->in_modifier, outbox);
+	return 0;
 }
 
 int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
......
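
This hunk is the PF-side counterpart of the en_port.c change: with DUMP_ETH_STATS issued as MLX4_CMD_NATIVE on every function, a VF no longer forwards the command to the PF, so the wrapper has nothing left to execute and collapses to "return 0", and the now-unused mlx4_common_dump_eth_stats() helper and its prototype are removed.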
@@ -1399,7 +1399,8 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
 		    u32 *lkey, u32 *rkey);
 int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
 int mlx4_SYNC_TPT(struct mlx4_dev *dev);
-int mlx4_test_interrupts(struct mlx4_dev *dev);
+int mlx4_test_interrupt(struct mlx4_dev *dev, int vector);
+int mlx4_test_async(struct mlx4_dev *dev);
 int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
 			     const u32 offset[], u32 value[],
 			     size_t array_len, u8 port);
......