Commit 91e019de authored by David S. Miller

Merge branch 'mlx5-fixes'

Saeed Mahameed says:

====================
mlx5 driver updates and fixes

Changes from V0:
	- Dropped: ("net/mlx5e: Reset link modes upon setting speed to zero")
	- Fixed compilation issue introduced to mlx5_ib driver.
	- Rebased to df637193 ('Revert "Prevent NUll pointer dereference with two PHYs on cpsw"')

This series has a few bug fixes for the mlx5 core and Ethernet drivers.

Eli fixed a wrong static local variable declaration in the flow steering API.
Majd added support for the ConnectX-5 PF and VF devices, and added support
for the kernel shutdown PCI callback for more robust reboot procedures.
Maor fixed a soft lockup in flow steering.
Rana fixed a wrong speed define in the mlx5 EN driver.
I also had the chance to introduce some bug fixes in mlx5 EN MTU
reporting and handling.

For -stable:
	net/mlx5_core: Fix soft lockup in steering error flow
	net/mlx5e: Device's mtu field is u16 and not int
	net/mlx5e: Fix minimum MTU
	net/mlx5e: Use vport MTU rather than physical port MTU
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 45c78e02 5fc7197d
@@ -671,8 +671,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
 	struct mlx5_core_dev *mdev = dev->mdev;
 	struct mlx5_hca_vport_context *rep;
-	int max_mtu;
-	int oper_mtu;
+	u16 max_mtu;
+	u16 oper_mtu;
 	int err;
 	u8 ib_link_width_oper;
 	u8 vl_hw_cap;
...
@@ -609,7 +609,7 @@ enum mlx5e_link_mode {
 	MLX5E_100GBASE_KR4	= 22,
 	MLX5E_100GBASE_LR4	= 23,
 	MLX5E_100BASE_TX	= 24,
-	MLX5E_100BASE_T		= 25,
+	MLX5E_1000BASE_T	= 25,
 	MLX5E_10GBASE_T		= 26,
 	MLX5E_25GBASE_CR	= 27,
 	MLX5E_25GBASE_KR	= 28,
...
@@ -138,10 +138,10 @@ static const struct {
 	[MLX5E_100BASE_TX] = {
 		.speed      = 100,
 	},
-	[MLX5E_100BASE_T] = {
-		.supported  = SUPPORTED_100baseT_Full,
-		.advertised = ADVERTISED_100baseT_Full,
-		.speed      = 100,
+	[MLX5E_1000BASE_T] = {
+		.supported  = SUPPORTED_1000baseT_Full,
+		.advertised = ADVERTISED_1000baseT_Full,
+		.speed      = 1000,
 	},
 	[MLX5E_10GBASE_T] = {
 		.supported  = SUPPORTED_10000baseT_Full,
...
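The rename matters because the table index doubles as the link-mode bit reported by the device, so bit 25 was previously advertised as 100baseT at 100 Mb/s. Below is a small, self-contained illustration of how such a table translates a link-mode bitmap into ethtool SUPPORTED_* flags and a speed; the table layout, loop and bit numbers are assumptions for this sketch, mirroring only the entries shown in the hunk above, and are not the driver's code.

/* Illustrative sketch, not the driver's implementation. */
#include <linux/ethtool.h>
#include <stdint.h>
#include <stdio.h>

#define MLX5E_100BASE_TX	24
#define MLX5E_1000BASE_T	25	/* previously mislabeled MLX5E_100BASE_T */

static const struct {
	uint32_t supported;
	uint32_t speed;
} link_modes[32] = {
	[MLX5E_100BASE_TX]  = { .supported = SUPPORTED_100baseT_Full,  .speed = 100  },
	[MLX5E_1000BASE_T]  = { .supported = SUPPORTED_1000baseT_Full, .speed = 1000 },
};

int main(void)
{
	uint32_t eth_proto = 1u << MLX5E_1000BASE_T;	/* bitmap as reported by the device */
	uint32_t supported = 0;
	unsigned int i;

	for (i = 0; i < 32; i++)
		if (eth_proto & (1u << i))
			supported |= link_modes[i].supported;

	/* With the fix, bit 25 reports 1000baseT/1000 Mb/s instead of 100baseT/100 */
	printf("supported mask 0x%x, speed %u Mb/s\n",
	       supported, link_modes[MLX5E_1000BASE_T].speed);
	return 0;
}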
@@ -1404,24 +1404,50 @@ static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
 	return 0;
 }
 
-static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
+static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
 {
-	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
-	int hw_mtu;
+	u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
 	int err;
 
-	err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
+	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
 	if (err)
 		return err;
 
-	mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
+	/* Update vport context MTU */
+	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
+	return 0;
+}
+
+static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u16 hw_mtu = 0;
+	int err;
 
-	if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
-		netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
-			    __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
+	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
+	if (err || !hw_mtu) /* fallback to port oper mtu */
+		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
+
+	*mtu = MLX5E_HW2SW_MTU(hw_mtu);
+}
+
+static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	u16 mtu;
+	int err;
+
+	err = mlx5e_set_mtu(priv, netdev->mtu);
+	if (err)
+		return err;
 
-	netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
+	mlx5e_query_mtu(priv, &mtu);
+	if (mtu != netdev->mtu)
+		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
+			    __func__, mtu, netdev->mtu);
 
+	netdev->mtu = mtu;
 	return 0;
 }
@@ -1999,22 +2025,27 @@ static int mlx5e_set_features(struct net_device *netdev,
 	return err;
 }
 
+#define MXL5_HW_MIN_MTU 64
+#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN)
+
 static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
 	bool was_opened;
-	int max_mtu;
+	u16 max_mtu;
+	u16 min_mtu;
 	int err = 0;
 
 	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
 
 	max_mtu = MLX5E_HW2SW_MTU(max_mtu);
+	min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU);
 
-	if (new_mtu > max_mtu) {
+	if (new_mtu > max_mtu || new_mtu < min_mtu) {
 		netdev_err(netdev,
-			   "%s: Bad MTU (%d) > (%d) Max\n",
-			   __func__, new_mtu, max_mtu);
+			   "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
+			   __func__, new_mtu, min_mtu, max_mtu);
 		return -EINVAL;
 	}
@@ -2602,7 +2633,16 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
 	schedule_work(&priv->set_rx_mode_work);
 	mlx5e_disable_async_events(priv);
 	flush_scheduled_work();
-	unregister_netdev(netdev);
+	if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
+		netif_device_detach(netdev);
+		mutex_lock(&priv->state_lock);
+		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+			mlx5e_close_locked(netdev);
+		mutex_unlock(&priv->state_lock);
+	} else {
+		unregister_netdev(netdev);
+	}
+
 	mlx5e_tc_cleanup(priv);
 	mlx5e_vxlan_cleanup(priv);
 	mlx5e_destroy_flow_tables(priv);
@@ -2615,7 +2655,9 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
 	mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
 	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
 	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
-	free_netdev(netdev);
+
+	if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
+		free_netdev(netdev);
 }
 
 static void *mlx5e_get_netdev(void *vpriv)
...
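For reference, a worked example of the new bounds check in mlx5e_change_mtu(). The MLX5E_SW2HW_MTU()/MLX5E_HW2SW_MTU() bodies are not shown in this diff; the definitions below (software MTU plus Ethernet header, one VLAN tag and FCS) are an assumption for the sketch, as is the example port maximum.

/* Hedged sketch of the MTU range arithmetic, not driver code. */
#include <stdio.h>

#define ETH_HLEN	14
#define VLAN_HLEN	4
#define ETH_FCS_LEN	4

#define MLX5E_SW2HW_MTU(swmtu)	((swmtu) + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
#define MLX5E_HW2SW_MTU(hwmtu)	((hwmtu) - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN)

#define MXL5_HW_MIN_MTU	64
#define MXL5E_MIN_MTU	(MXL5_HW_MIN_MTU + ETH_FCS_LEN)

int main(void)
{
	int max_mtu = MLX5E_HW2SW_MTU(9978);		/* example hw maximum */
	int min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU);	/* 68 - 22 = 46 */
	int new_mtu = 20;				/* accepted before the fix */

	printf("netdev mtu %d maps to hw mtu %d\n", 1500, MLX5E_SW2HW_MTU(1500));

	if (new_mtu > max_mtu || new_mtu < min_mtu)
		printf("Bad MTU (%d), valid range is: [%d..%d]\n",
		       new_mtu, min_mtu, max_mtu);
	return 0;
}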
@@ -1065,33 +1065,6 @@ static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
 	return rule;
 }
 
-static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft,
-						  u8 match_criteria_enable,
-						  u32 *match_criteria,
-						  u32 *match_value,
-						  u8 action,
-						  u32 flow_tag,
-						  struct mlx5_flow_destination *dest)
-{
-	struct mlx5_flow_rule *rule;
-	struct mlx5_flow_group *g;
-
-	g = create_autogroup(ft, match_criteria_enable, match_criteria);
-	if (IS_ERR(g))
-		return (void *)g;
-
-	rule = add_rule_fg(g, match_value,
-			   action, flow_tag, dest);
-	if (IS_ERR(rule)) {
-		/* Remove assumes refcount > 0 and autogroup creates a group
-		 * with a refcount = 0.
-		 */
-		tree_get_node(&g->node);
-		tree_remove_node(&g->node);
-	}
-	return rule;
-}
-
 static struct mlx5_flow_rule *
 _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
 		    u8 match_criteria_enable,
@@ -1119,8 +1092,23 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
 		goto unlock;
 	}
 
-	rule = add_rule_to_auto_fg(ft, match_criteria_enable, match_criteria,
-				   match_value, action, flow_tag, dest);
+	g = create_autogroup(ft, match_criteria_enable, match_criteria);
+	if (IS_ERR(g)) {
+		rule = (void *)g;
+		goto unlock;
+	}
+
+	rule = add_rule_fg(g, match_value,
+			   action, flow_tag, dest);
+	if (IS_ERR(rule)) {
+		/* Remove assumes refcount > 0 and autogroup creates a group
+		 * with a refcount = 0.
+		 */
+		unlock_ref_node(&ft->node);
+		tree_get_node(&g->node);
+		tree_remove_node(&g->node);
+		return rule;
+	}
+
 unlock:
 	unlock_ref_node(&ft->node);
 	return rule;
@@ -1288,7 +1276,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 {
 	struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
 	int prio;
-	static struct fs_prio *fs_prio;
+	struct fs_prio *fs_prio;
 	struct mlx5_flow_namespace *ns;
 
 	if (!root_ns)
...
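The soft-lockup fix above folds add_rule_to_auto_fg() into its caller so that, in the error path, the flow table's reference node is unlocked before the autogroup is torn down; tree_remove_node() presumably has to re-take that lock to unlink the group from the table. A minimal userspace analogy of this unlock-before-cleanup pattern follows; the names, locking scheme and pthread usage are illustrative only, not the driver's code.

/* Analogy sketch: cleanup that re-acquires a lock the caller still
 * holds would deadlock, hence the error path drops the lock first. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t parent_lock = PTHREAD_MUTEX_INITIALIZER;

static void remove_child(void)
{
	/* like tree_remove_node(): must take the parent's lock to unlink */
	pthread_mutex_lock(&parent_lock);
	puts("child removed");
	pthread_mutex_unlock(&parent_lock);
}

static void add_rule(int fail)
{
	pthread_mutex_lock(&parent_lock);	/* like lock_ref_node(&ft->node) */
	if (fail) {
		pthread_mutex_unlock(&parent_lock);	/* the fix: unlock before cleanup */
		remove_child();
		return;
	}
	pthread_mutex_unlock(&parent_lock);
}

int main(void)
{
	add_rule(1);	/* exercise the (fixed) error path */
	return 0;
}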
@@ -966,7 +966,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 	int err;
 
 	mutex_lock(&dev->intf_state_mutex);
-	if (dev->interface_state == MLX5_INTERFACE_STATE_UP) {
+	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
 		dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
 			 __func__);
 		goto out;
@@ -1133,7 +1133,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 	if (err)
 		pr_info("failed request module on %s\n", MLX5_IB_MOD);
 
-	dev->interface_state = MLX5_INTERFACE_STATE_UP;
+	clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
+	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
 
 out:
 	mutex_unlock(&dev->intf_state_mutex);
@@ -1207,7 +1208,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 	}
 
 	mutex_lock(&dev->intf_state_mutex);
-	if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) {
+	if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
 		dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
 			 __func__);
 		goto out;
@@ -1241,7 +1242,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 	mlx5_cmd_cleanup(dev);
 
 out:
-	dev->interface_state = MLX5_INTERFACE_STATE_DOWN;
+	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+	set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
 	mutex_unlock(&dev->intf_state_mutex);
 
 	return err;
 }
@@ -1452,6 +1454,18 @@ static const struct pci_error_handlers mlx5_err_handler = {
 	.resume		= mlx5_pci_resume
 };
 
+static void shutdown(struct pci_dev *pdev)
+{
+	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+	struct mlx5_priv *priv = &dev->priv;
+
+	dev_info(&pdev->dev, "Shutdown was called\n");
+	/* Notify mlx5 clients that the kernel is being shut down */
+	set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
+	mlx5_unload_one(dev, priv);
+	mlx5_pci_disable_device(dev);
+}
+
 static const struct pci_device_id mlx5_core_pci_table[] = {
 	{ PCI_VDEVICE(MELLANOX, 0x1011) },			/* Connect-IB */
 	{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF},	/* Connect-IB VF */
@@ -1459,6 +1473,8 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
 	{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4 VF */
 	{ PCI_VDEVICE(MELLANOX, 0x1015) },			/* ConnectX-4LX */
 	{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4LX VF */
+	{ PCI_VDEVICE(MELLANOX, 0x1017) },			/* ConnectX-5 */
+	{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 VF */
 	{ 0, }
 };
@@ -1469,6 +1485,7 @@ static struct pci_driver mlx5_core_driver = {
 	.id_table	= mlx5_core_pci_table,
 	.probe		= init_one,
 	.remove		= remove_one,
+	.shutdown	= shutdown,
 	.err_handler	= &mlx5_err_handler,
 	.sriov_configure = mlx5_core_sriov_configure,
 };
...
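The interface state handling above switches from a single enum value to independent flag bits so that the shutdown mark stays visible while mlx5_unload_one() flips UP/DOWN, which is what lets mlx5e_destroy_netdev() skip unregister_netdev()/free_netdev() during a reboot. A small userspace sketch of that state flow follows; the helpers are illustrative stand-ins for the kernel's set_bit()/clear_bit()/test_bit(), not the kernel API itself.

/* Sketch of the intf_state bit-flag design, userspace analogy only. */
#include <stdio.h>

#define MLX5_INTERFACE_STATE_DOWN	(1UL << 0)
#define MLX5_INTERFACE_STATE_UP		(1UL << 1)
#define MLX5_INTERFACE_STATE_SHUTDOWN	(1UL << 2)

static void set_state_bit(unsigned long bit, unsigned long *state)   { *state |= bit; }
static void clear_state_bit(unsigned long bit, unsigned long *state) { *state &= ~bit; }
static int  test_state_bit(unsigned long bit, unsigned long state)   { return !!(state & bit); }

int main(void)
{
	unsigned long intf_state = MLX5_INTERFACE_STATE_UP;

	/* shutdown() marks the flag before unloading ... */
	set_state_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &intf_state);

	/* ... mlx5_unload_one() then flips UP/DOWN as before ... */
	clear_state_bit(MLX5_INTERFACE_STATE_UP, &intf_state);
	set_state_bit(MLX5_INTERFACE_STATE_DOWN, &intf_state);

	/* ... and the netdev teardown path can still see the shutdown mark */
	printf("shutdown in progress: %d\n",
	       test_state_bit(MLX5_INTERFACE_STATE_SHUTDOWN, intf_state));
	return 0;
}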
@@ -247,8 +247,8 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
 
-static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
-				int *max_mtu, int *oper_mtu, u8 port)
+static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
+				u16 *max_mtu, u16 *oper_mtu, u8 port)
 {
 	u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
 	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -268,7 +268,7 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
 		*admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
 }
 
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
 {
 	u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
 	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -283,14 +283,14 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
 }
 EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
 
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
 			     u8 port)
 {
 	mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
 
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
 			      u8 port)
 {
 	mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
...
@@ -196,6 +196,46 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
 }
 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
 
+int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
+{
+	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+	u32 *out;
+	int err;
+
+	out = mlx5_vzalloc(outlen);
+	if (!out)
+		return -ENOMEM;
+
+	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+	if (!err)
+		*mtu = MLX5_GET(query_nic_vport_context_out, out,
+				nic_vport_context.mtu);
+
+	kvfree(out);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
+
+int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
+{
+	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+	void *in;
+	int err;
+
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
+	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
+
+	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+	kvfree(in);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
+
 int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
 				  u32 vport,
 				  enum mlx5_list_type list_type,
...
@@ -519,8 +519,9 @@ enum mlx5_device_state {
 };
 
 enum mlx5_interface_state {
-	MLX5_INTERFACE_STATE_DOWN,
-	MLX5_INTERFACE_STATE_UP,
+	MLX5_INTERFACE_STATE_DOWN = BIT(0),
+	MLX5_INTERFACE_STATE_UP = BIT(1),
+	MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
 };
 
 enum mlx5_pci_status {
@@ -544,7 +545,7 @@ struct mlx5_core_dev {
 	enum mlx5_device_state	state;
 	/* sync interface state */
 	struct mutex		intf_state_mutex;
-	enum mlx5_interface_state interface_state;
+	unsigned long		intf_state;
 	void			(*event) (struct mlx5_core_dev *dev,
 					  enum mlx5_dev_event event,
 					  unsigned long param);
...
@@ -54,9 +54,9 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
 int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
 				 enum mlx5_port_status *status);
 
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
 			      u8 port);
 
 int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
...
@@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
 				     u16 vport, u8 *addr);
 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
 				      u16 vport, u8 *addr);
+int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
+int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
 					   u64 *system_image_guid);
 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
...