Commit 64363e61 authored by Maor Gottlieb, committed by Saeed Mahameed

net/mlx5: Change lag mutex lock to spin lock

The lag lock can be a spin lock: the critical sections are short and
there is no need for the holder to sleep.
Change the lock that protects the LAG structure from a mutex to a
spin lock. This is required by the next patch, which needs to access
this structure from a context that cannot sleep.
In addition, there is no need to hold this lock while querying the
congestion counters.
Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 33720aaf
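The pattern the patch relies on is to copy the shared LAG state while the spin lock is held and to do any work that may sleep (such as the firmware command that queries the congestion counters) only after the lock has been released. Below is a minimal kernel-style C sketch of that pattern; the names example_lag, example_lag_lock and example_query_one are simplified, hypothetical stand-ins for illustration, not the driver's actual definitions.

```c
#include <linux/spinlock.h>
#include <linux/types.h>

#define EXAMPLE_MAX_PORTS 2	/* stand-in for MLX5_MAX_PORTS */

/* Hypothetical shared state, modeled loosely on struct mlx5_lag. */
struct example_lag {
	int num_ports;
	void *port_dev[EXAMPLE_MAX_PORTS];
};

static DEFINE_SPINLOCK(example_lag_lock);	/* protects example_lag_state */
static struct example_lag example_lag_state;

/* Stand-in for a blocking firmware query; may sleep. */
static int example_query_one(void *dev, u64 *value)
{
	*value = 0;
	return 0;
}

static int example_query_counters(u64 *total)
{
	void *devs[EXAMPLE_MAX_PORTS];
	int num_ports, i, err;
	u64 value;

	/* Keep the critical section short: just snapshot the pointers. */
	spin_lock(&example_lag_lock);
	num_ports = example_lag_state.num_ports;
	for (i = 0; i < num_ports; i++)
		devs[i] = example_lag_state.port_dev[i];
	spin_unlock(&example_lag_lock);

	/* Issue the (potentially sleeping) queries with the lock released. */
	*total = 0;
	for (i = 0; i < num_ports; i++) {
		err = example_query_one(devs[i], &value);
		if (err)
			return err;
		*total += value;
	}
	return 0;
}
```

This mirrors what the last hunks below do in mlx5_lag_query_cong_counters(): the mdev[] array is filled under lag_lock, the lock is dropped, and only then are the query_cong_statistics commands issued.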
@@ -42,7 +42,7 @@
  * Beware of lock dependencies (preferably, no locks should be acquired
  * under it).
  */
-static DEFINE_MUTEX(lag_mutex);
+static DEFINE_SPINLOCK(lag_lock);
 
 static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
 			       u8 remap_port2)
@@ -274,9 +274,9 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 	if (!dev0 || !dev1)
 		return;
 
-	mutex_lock(&lag_mutex);
+	spin_lock(&lag_lock);
 	tracker = ldev->tracker;
-	mutex_unlock(&lag_mutex);
+	spin_unlock(&lag_lock);
 
 	do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
@@ -458,9 +458,9 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
 		break;
 	}
 
-	mutex_lock(&lag_mutex);
+	spin_lock(&lag_lock);
 	ldev->tracker = tracker;
-	mutex_unlock(&lag_mutex);
+	spin_unlock(&lag_lock);
 
 	if (changed)
 		mlx5_queue_bond_work(ldev, 0);
@@ -502,7 +502,7 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
 	if (fn >= MLX5_MAX_PORTS)
 		return;
 
-	mutex_lock(&lag_mutex);
+	spin_lock(&lag_lock);
 	ldev->pf[fn].dev = dev;
 	ldev->pf[fn].netdev = netdev;
 	ldev->tracker.netdev_state[fn].link_up = 0;
@@ -510,7 +510,7 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
 	dev->priv.lag = ldev;
-	mutex_unlock(&lag_mutex);
+	spin_unlock(&lag_lock);
 }
 
 static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
@@ -525,11 +525,11 @@ static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
 	if (i == MLX5_MAX_PORTS)
 		return;
 
-	mutex_lock(&lag_mutex);
+	spin_lock(&lag_lock);
 	memset(&ldev->pf[i], 0, sizeof(*ldev->pf));
 
 	dev->priv.lag = NULL;
-	mutex_unlock(&lag_mutex);
+	spin_unlock(&lag_lock);
 }
 
 /* Must be called with intf_mutex held */
@@ -607,10 +607,10 @@ bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
 	struct mlx5_lag *ldev;
 	bool res;
 
-	mutex_lock(&lag_mutex);
+	spin_lock(&lag_lock);
 	ldev = mlx5_lag_dev_get(dev);
 	res = ldev && __mlx5_lag_is_roce(ldev);
-	mutex_unlock(&lag_mutex);
+	spin_unlock(&lag_lock);
 
 	return res;
 }
@@ -621,10 +621,10 @@ bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
 	struct mlx5_lag *ldev;
 	bool res;
 
-	mutex_lock(&lag_mutex);
+	spin_lock(&lag_lock);
 	ldev = mlx5_lag_dev_get(dev);
 	res = ldev && __mlx5_lag_is_active(ldev);
-	mutex_unlock(&lag_mutex);
+	spin_unlock(&lag_lock);
 
 	return res;
 }
@@ -635,10 +635,10 @@ bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
 	struct mlx5_lag *ldev;
 	bool res;
 
-	mutex_lock(&lag_mutex);
+	spin_lock(&lag_lock);
 	ldev = mlx5_lag_dev_get(dev);
 	res = ldev && __mlx5_lag_is_sriov(ldev);
-	mutex_unlock(&lag_mutex);
+	spin_unlock(&lag_lock);
 
 	return res;
 }
@@ -664,7 +664,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
 	struct net_device *ndev = NULL;
 	struct mlx5_lag *ldev;
 
-	mutex_lock(&lag_mutex);
+	spin_lock(&lag_lock);
 	ldev = mlx5_lag_dev_get(dev);
 
 	if (!(ldev && __mlx5_lag_is_roce(ldev)))
@@ -681,7 +681,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
 	dev_hold(ndev);
 
 unlock:
-	mutex_unlock(&lag_mutex);
+	spin_unlock(&lag_lock);
 
 	return ndev;
 }
@@ -723,7 +723,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 	memset(values, 0, sizeof(*values) * num_counters);
 
-	mutex_lock(&lag_mutex);
+	spin_lock(&lag_lock);
 	ldev = mlx5_lag_dev_get(dev);
 	if (ldev && __mlx5_lag_is_roce(ldev)) {
 		num_ports = MLX5_MAX_PORTS;
@@ -733,6 +733,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 		num_ports = 1;
 		mdev[MLX5_LAG_P1] = dev;
 	}
+	spin_unlock(&lag_lock);
 
 	for (i = 0; i < num_ports; ++i) {
 		u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};
@@ -742,14 +743,13 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 		ret = mlx5_cmd_exec_inout(mdev[i], query_cong_statistics, in,
 					  out);
 		if (ret)
-			goto unlock;
+			goto free;
 
 		for (j = 0; j < num_counters; ++j)
 			values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
 	}
 
-unlock:
-	mutex_unlock(&lag_mutex);
+free:
 	kvfree(out);
 	return ret;
 }