Commit 292612d6 authored by Aviv Heller, committed by Saeed Mahameed

net/mlx5: Rename mlx5_lag_is_bonded() to __mlx5_lag_is_active()

The new name better represents the function's aim, and sets a precedent
for a '__' prefix for internal, non-locked versions of LAG APIs.
Signed-off-by: Aviv Heller <avivh@mellanox.com>
Reviewed-by: Roi Dayan <roid@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 8aaca197
@@ -161,7 +161,7 @@ static int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
         return -1;
 }
 
-static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
+static bool __mlx5_lag_is_active(struct mlx5_lag *ldev)
 {
         return !!(ldev->flags & MLX5_LAG_FLAG_BONDED);
 }
@@ -278,7 +278,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 
         do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
 
-        if (do_bond && !mlx5_lag_is_bonded(ldev)) {
+        if (do_bond && !__mlx5_lag_is_active(ldev)) {
                 if (!sriov_enabled)
                         for (i = 0; i < MLX5_MAX_PORTS; i++)
                                 mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
@@ -290,9 +290,9 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
                         mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
                         mlx5_nic_vport_enable_roce(dev1);
                 }
-        } else if (do_bond && mlx5_lag_is_bonded(ldev)) {
+        } else if (do_bond && __mlx5_lag_is_active(ldev)) {
                 mlx5_modify_lag(ldev, &tracker);
-        } else if (!do_bond && mlx5_lag_is_bonded(ldev)) {
+        } else if (!do_bond && __mlx5_lag_is_active(ldev)) {
                 if (!sriov_enabled) {
                         mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
                         mlx5_nic_vport_disable_roce(dev1);
@@ -555,7 +555,7 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev)
         if (!ldev)
                 return;
 
-        if (mlx5_lag_is_bonded(ldev))
+        if (__mlx5_lag_is_active(ldev))
                 mlx5_deactivate_lag(ldev);
 
         mlx5_lag_dev_remove_pf(ldev, dev);
@@ -579,7 +579,7 @@ bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
 
         mutex_lock(&lag_mutex);
         ldev = mlx5_lag_dev_get(dev);
-        res = ldev && mlx5_lag_is_bonded(ldev);
+        res = ldev && __mlx5_lag_is_active(ldev);
         mutex_unlock(&lag_mutex);
 
         return res;
@@ -609,7 +609,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
         mutex_lock(&lag_mutex);
         ldev = mlx5_lag_dev_get(dev);
 
-        if (!(ldev && mlx5_lag_is_bonded(ldev)))
+        if (!(ldev && __mlx5_lag_is_active(ldev)))
                 goto unlock;
 
         if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
@@ -638,7 +638,7 @@ bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
                 return true;
 
         ldev = mlx5_lag_dev_get(dev);
-        if (!ldev || !mlx5_lag_is_bonded(ldev) || ldev->pf[0].dev == dev)
+        if (!ldev || !__mlx5_lag_is_active(ldev) || ldev->pf[0].dev == dev)
                 return true;
 
         /* If bonded, we do not add an IB device for PF1. */
@@ -665,7 +665,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 
         mutex_lock(&lag_mutex);
         ldev = mlx5_lag_dev_get(dev);
-        if (ldev && mlx5_lag_is_bonded(ldev)) {
+        if (ldev && __mlx5_lag_is_active(ldev)) {
                 num_ports = MLX5_MAX_PORTS;
                 mdev[0] = ldev->pf[0].dev;
                 mdev[1] = ldev->pf[1].dev;
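
For context, the precedent the commit message describes is the usual kernel pairing of a locked public API with a '__'-prefixed lockless internal helper. The sketch below simply restates the two relevant functions from the hunks above to make that split explicit; struct mlx5_lag, lag_mutex and mlx5_lag_dev_get() are defined elsewhere in the LAG code, so this is an illustration rather than a standalone compilable unit.

/* Internal check: no locking, callers must already hold lag_mutex. */
static bool __mlx5_lag_is_active(struct mlx5_lag *ldev)
{
        return !!(ldev->flags & MLX5_LAG_FLAG_BONDED);
}

/* Public API: acquires lag_mutex, then delegates to the '__' helper. */
bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
{
        struct mlx5_lag *ldev;
        bool res;

        mutex_lock(&lag_mutex);
        ldev = mlx5_lag_dev_get(dev);
        res = ldev && __mlx5_lag_is_active(ldev);
        mutex_unlock(&lag_mutex);

        return res;
}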