Commit 3df8f4c6 authored by David S. Miller

Merge branch 'mlx5-fixes'

Saeed Mahameed says:

====================
Mellanox 100G mlx5 fixes for 4.6-rc

This small series provides some bug fixes for the mlx5 driver.

A small bug fix for an iounmap() of a NULL pointer, which dumps a warning on some architectures.
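As a minimal illustration of the guard the fix applies (the helper name here is hypothetical; the real hunk is in uar.c below): only one of the two UAR mappings is ever created, so only the existing one is unmapped, since on some architectures iounmap() of a NULL pointer is not a no-op and dumps a WARN.

/* Illustrative sketch only; example_unmap_uar() is a made-up name,
 * but the guard mirrors the uar.c hunk in this merge.
 */
static void example_unmap_uar(struct mlx5_uar *uar)
{
	if (uar->map)			/* regular doorbell mapping exists */
		iounmap(uar->map);
	else				/* otherwise the BlueFlame mapping was used */
		iounmap(uar->bf_map);
}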

One patch to fix the VXLAN/MLX5_EN dependency issue reported by Arnd.

Two patches fix the scheduling-while-atomic issue in the ndo_add/del_vxlan_port
NDOs: the first adds an internal mlx5e workqueue and the second delegates VXLAN
port add/del requests to that workqueue (sketched below).
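Condensed from the vxlan.c hunk below, the deferral looks like this: the NDO side may run in atomic context, so it only allocates the request with GFP_ATOMIC and queues a work item, while the firmware command, which may sleep, runs later in process context on the driver's private workqueue.

/* Condensed from the patch: defer the sleeping port add/del command. */
void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
			    u16 port, int add)
{
	struct mlx5e_vxlan_work *vxlan_work;

	/* atomic context: no sleeping allocation, no firmware command */
	vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
	if (!vxlan_work)
		return;

	if (add)
		INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_port);
	else
		INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_port);

	vxlan_work->priv = priv;
	vxlan_work->port = port;
	vxlan_work->sa_family = sa_family;
	queue_work(priv->wq, &vxlan_work->work);	/* handler runs in process context */
}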

Note: ('net/mlx5: Kconfig: Fix MLX5_EN/VXLAN build issue') is needed only for net
and not for net-next, as the issue was fixed globally for all device drivers in
net-next by b7aade15 ('vxlan: break dependency with netdev drivers').

Applied on top of f27337e1 ('ip_tunnel: fix preempt warning in ip tunnel creation/updating')
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 6071bd1a d8cf2dda
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -14,6 +14,7 @@ config MLX5_CORE_EN
 	bool "Mellanox Technologies ConnectX-4 Ethernet support"
 	depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
 	select PTP_1588_CLOCK
+	select VXLAN if MLX5_CORE=y
 	default n
 	---help---
 	  Ethernet support in Mellanox Technologies ConnectX-4 NIC.
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -567,6 +567,7 @@ struct mlx5e_priv {
 	struct mlx5e_vxlan_db      vxlan;

 	struct mlx5e_params        params;
+	struct workqueue_struct    *wq;
 	struct work_struct         update_carrier_work;
 	struct work_struct         set_rx_mode_work;
 	struct delayed_work        update_stats_work;
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -262,9 +262,8 @@ static void mlx5e_update_stats_work(struct work_struct *work)
 	mutex_lock(&priv->state_lock);
 	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
 		mlx5e_update_stats(priv);
-		schedule_delayed_work(dwork,
-				      msecs_to_jiffies(
-						MLX5E_UPDATE_STATS_INTERVAL));
+		queue_delayed_work(priv->wq, dwork,
+				   msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
 	}
 	mutex_unlock(&priv->state_lock);
 }
@@ -280,7 +279,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
 	switch (event) {
 	case MLX5_DEV_EVENT_PORT_UP:
 	case MLX5_DEV_EVENT_PORT_DOWN:
-		schedule_work(&priv->update_carrier_work);
+		queue_work(priv->wq, &priv->update_carrier_work);
 		break;

 	default:
@@ -1505,7 +1504,7 @@ int mlx5e_open_locked(struct net_device *netdev)
 	mlx5e_update_carrier(priv);
 	mlx5e_timestamp_init(priv);
-	schedule_delayed_work(&priv->update_stats_work, 0);
+	queue_delayed_work(priv->wq, &priv->update_stats_work, 0);

 	return 0;
@@ -1961,7 +1960,7 @@ static void mlx5e_set_rx_mode(struct net_device *dev)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);

-	schedule_work(&priv->set_rx_mode_work);
+	queue_work(priv->wq, &priv->set_rx_mode_work);
 }

 static int mlx5e_set_mac(struct net_device *netdev, void *addr)
@@ -1976,7 +1975,7 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
 	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
 	netif_addr_unlock_bh(netdev);

-	schedule_work(&priv->set_rx_mode_work);
+	queue_work(priv->wq, &priv->set_rx_mode_work);

 	return 0;
 }
@@ -2158,7 +2157,7 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev,
 	if (!mlx5e_vxlan_allowed(priv->mdev))
 		return;

-	mlx5e_vxlan_add_port(priv, be16_to_cpu(port));
+	mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1);
 }

 static void mlx5e_del_vxlan_port(struct net_device *netdev,
@@ -2169,7 +2168,7 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev,
 	if (!mlx5e_vxlan_allowed(priv->mdev))
 		return;

-	mlx5e_vxlan_del_port(priv, be16_to_cpu(port));
+	mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0);
 }

 static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
@@ -2498,10 +2497,14 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 	priv = netdev_priv(netdev);

+	priv->wq = create_singlethread_workqueue("mlx5e");
+	if (!priv->wq)
+		goto err_free_netdev;
+
 	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false);
 	if (err) {
 		mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
-		goto err_free_netdev;
+		goto err_destroy_wq;
 	}

 	err = mlx5_core_alloc_pd(mdev, &priv->pdn);
@@ -2580,7 +2583,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 	vxlan_get_rx_port(netdev);

 	mlx5e_enable_async_events(priv);
-	schedule_work(&priv->set_rx_mode_work);
+	queue_work(priv->wq, &priv->set_rx_mode_work);

 	return priv;
@@ -2617,6 +2620,9 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 err_unmap_free_uar:
 	mlx5_unmap_free_uar(mdev, &priv->cq_uar);

+err_destroy_wq:
+	destroy_workqueue(priv->wq);
+
 err_free_netdev:
 	free_netdev(netdev);
@@ -2630,9 +2636,9 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
 	set_bit(MLX5E_STATE_DESTROYING, &priv->state);

-	schedule_work(&priv->set_rx_mode_work);
+	queue_work(priv->wq, &priv->set_rx_mode_work);
 	mlx5e_disable_async_events(priv);
-	flush_scheduled_work();
+	flush_workqueue(priv->wq);

 	if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
 		netif_device_detach(netdev);
 		mutex_lock(&priv->state_lock);
@@ -2655,6 +2661,8 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
 	mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
 	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
 	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
+	cancel_delayed_work_sync(&priv->update_stats_work);
+	destroy_workqueue(priv->wq);

 	if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
 		free_netdev(netdev);
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -269,7 +269,9 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar);

 void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
 {
-	iounmap(uar->map);
-	iounmap(uar->bf_map);
+	if (uar->map)
+		iounmap(uar->map);
+	else
+		iounmap(uar->bf_map);
 	mlx5_cmd_free_uar(mdev, uar->index);
 }
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -95,21 +95,22 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
 	return vxlan;
 }

-int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_add_port(struct work_struct *work)
 {
+	struct mlx5e_vxlan_work *vxlan_work =
+		container_of(work, struct mlx5e_vxlan_work, work);
+	struct mlx5e_priv *priv = vxlan_work->priv;
 	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+	u16 port = vxlan_work->port;
 	struct mlx5e_vxlan *vxlan;
 	int err;

-	err = mlx5e_vxlan_core_add_port_cmd(priv->mdev, port);
-	if (err)
-		return err;
+	if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
+		goto free_work;

 	vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
-	if (!vxlan) {
-		err = -ENOMEM;
+	if (!vxlan)
 		goto err_delete_port;
-	}

 	vxlan->udp_port = port;
@@ -119,13 +120,14 @@ int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
 	if (err)
 		goto err_free;

-	return 0;
+	goto free_work;

 err_free:
 	kfree(vxlan);
 err_delete_port:
 	mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
-	return err;
+free_work:
+	kfree(vxlan_work);
 }

 static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
@@ -145,12 +147,36 @@ static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
 	kfree(vxlan);
 }

-void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_del_port(struct work_struct *work)
 {
-	if (!mlx5e_vxlan_lookup_port(priv, port))
-		return;
+	struct mlx5e_vxlan_work *vxlan_work =
+		container_of(work, struct mlx5e_vxlan_work, work);
+	struct mlx5e_priv *priv = vxlan_work->priv;
+	u16 port = vxlan_work->port;

 	__mlx5e_vxlan_core_del_port(priv, port);
+
+	kfree(vxlan_work);
+}
+
+void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
+			    u16 port, int add)
+{
+	struct mlx5e_vxlan_work *vxlan_work;
+
+	vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
+	if (!vxlan_work)
+		return;
+
+	if (add)
+		INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_port);
+	else
+		INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_port);
+
+	vxlan_work->priv = priv;
+	vxlan_work->port = port;
+	vxlan_work->sa_family = sa_family;
+	queue_work(priv->wq, &vxlan_work->work);
+}

 void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
@@ -39,6 +39,13 @@ struct mlx5e_vxlan {
 	u16 udp_port;
 };

+struct mlx5e_vxlan_work {
+	struct work_struct	work;
+	struct mlx5e_priv	*priv;
+	sa_family_t		sa_family;
+	u16			port;
+};
+
 static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
 {
 	return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
@@ -46,8 +53,8 @@ static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
 }

 void mlx5e_vxlan_init(struct mlx5e_priv *priv);
-int  mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port);
-void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port);
+void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
+			    u16 port, int add);
 struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
 void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);