Commit a9f7705f authored by Saeed Mahameed's avatar Saeed Mahameed

net/mlx5: Unify vport manager capability check

Expose MLX5_VPORT_MANAGER macro to check for strict vport manager
E-switch and MPFS (Multi Physical Function Switch) abilities.

A VPORT manager must be a PF with an Ethernet link and with the
FW-advertised vport group manager capability.

Replace older checks with the new macro and use it where needed in
eswitch.c and mlx5e netdev eswitch related flows.

The same macro will be reused in MPFS separation downstream patch.
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 07c9f1e5
...@@ -2581,12 +2581,6 @@ static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv) ...@@ -2581,12 +2581,6 @@ static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv)
} }
} }
static bool mlx5e_is_eswitch_vport_mngr(struct mlx5_core_dev *mdev)
{
return (MLX5_CAP_GEN(mdev, vport_group_manager) &&
MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH);
}
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{ {
int num_txqs = priv->channels.num * priv->channels.params.num_tc; int num_txqs = priv->channels.num * priv->channels.params.num_tc;
...@@ -2600,7 +2594,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) ...@@ -2600,7 +2594,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
mlx5e_activate_channels(&priv->channels); mlx5e_activate_channels(&priv->channels);
netif_tx_start_all_queues(priv->netdev); netif_tx_start_all_queues(priv->netdev);
if (mlx5e_is_eswitch_vport_mngr(priv->mdev)) if (MLX5_VPORT_MANAGER(priv->mdev))
mlx5e_add_sqs_fwd_rules(priv); mlx5e_add_sqs_fwd_rules(priv);
mlx5e_wait_channels_min_rx_wqes(&priv->channels); mlx5e_wait_channels_min_rx_wqes(&priv->channels);
...@@ -2611,7 +2605,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) ...@@ -2611,7 +2605,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{ {
mlx5e_redirect_rqts_to_drop(priv); mlx5e_redirect_rqts_to_drop(priv);
if (mlx5e_is_eswitch_vport_mngr(priv->mdev)) if (MLX5_VPORT_MANAGER(priv->mdev))
mlx5e_remove_sqs_fwd_rules(priv); mlx5e_remove_sqs_fwd_rules(priv);
/* FIXME: This is a W/A only for tx timeout watch dog false alarm when /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
...@@ -4079,7 +4073,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) ...@@ -4079,7 +4073,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
mlx5e_set_netdev_dev_addr(netdev); mlx5e_set_netdev_dev_addr(netdev);
#ifdef CONFIG_NET_SWITCHDEV #ifdef CONFIG_NET_SWITCHDEV
if (MLX5_CAP_GEN(mdev, vport_group_manager)) if (MLX5_VPORT_MANAGER(mdev))
netdev->switchdev_ops = &mlx5e_switchdev_ops; netdev->switchdev_ops = &mlx5e_switchdev_ops;
#endif #endif
...@@ -4221,7 +4215,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) ...@@ -4221,7 +4215,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5e_enable_async_events(priv); mlx5e_enable_async_events(priv);
if (MLX5_CAP_GEN(mdev, vport_group_manager)) if (MLX5_VPORT_MANAGER(priv->mdev))
mlx5e_register_vport_reps(priv); mlx5e_register_vport_reps(priv);
if (netdev->reg_state != NETREG_REGISTERED) if (netdev->reg_state != NETREG_REGISTERED)
...@@ -4255,7 +4249,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) ...@@ -4255,7 +4249,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
queue_work(priv->wq, &priv->set_rx_mode_work); queue_work(priv->wq, &priv->set_rx_mode_work);
if (MLX5_CAP_GEN(mdev, vport_group_manager)) if (MLX5_VPORT_MANAGER(priv->mdev))
mlx5e_unregister_vport_reps(priv); mlx5e_unregister_vport_reps(priv);
mlx5e_disable_async_events(priv); mlx5e_disable_async_events(priv);
...@@ -4437,7 +4431,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) ...@@ -4437,7 +4431,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
if (err) if (err)
return NULL; return NULL;
if (MLX5_CAP_GEN(mdev, vport_group_manager)) { if (MLX5_VPORT_MANAGER(mdev)) {
rpriv = mlx5e_alloc_nic_rep_priv(mdev); rpriv = mlx5e_alloc_nic_rep_priv(mdev);
if (!rpriv) { if (!rpriv) {
mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n"); mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
......
...@@ -688,9 +688,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) ...@@ -688,9 +688,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
u64 async_event_mask = MLX5_ASYNC_EVENT_MASK; u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
int err; int err;
if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH && if (MLX5_VPORT_MANAGER(dev))
MLX5_CAP_GEN(dev, vport_group_manager) &&
mlx5_core_is_pf(dev))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE); async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);
if (MLX5_CAP_GEN(dev, port_module_event)) if (MLX5_CAP_GEN(dev, port_module_event))
......
...@@ -1611,13 +1611,14 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) ...@@ -1611,13 +1611,14 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
} }
/* Public E-Switch API */ /* Public E-Switch API */
#define ESW_ALLOWED(esw) ((esw) && MLX5_VPORT_MANAGER((esw)->dev))
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
{ {
int err; int err;
int i, enabled_events; int i, enabled_events;
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || if (!ESW_ALLOWED(esw))
MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return 0; return 0;
if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) || if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
...@@ -1667,9 +1668,7 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) ...@@ -1667,9 +1668,7 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
int nvports; int nvports;
int i; int i;
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || if (!ESW_ALLOWED(esw) || esw->mode == SRIOV_NONE)
MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH ||
esw->mode == SRIOV_NONE)
return; return;
esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n", esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
...@@ -1698,8 +1697,7 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) ...@@ -1698,8 +1697,7 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
void mlx5_eswitch_attach(struct mlx5_eswitch *esw) void mlx5_eswitch_attach(struct mlx5_eswitch *esw)
{ {
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || if (!ESW_ALLOWED(esw))
MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return; return;
esw_enable_vport(esw, 0, UC_ADDR_CHANGE); esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
...@@ -1708,8 +1706,7 @@ void mlx5_eswitch_attach(struct mlx5_eswitch *esw) ...@@ -1708,8 +1706,7 @@ void mlx5_eswitch_attach(struct mlx5_eswitch *esw)
void mlx5_eswitch_detach(struct mlx5_eswitch *esw) void mlx5_eswitch_detach(struct mlx5_eswitch *esw)
{ {
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || if (!ESW_ALLOWED(esw))
MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return; return;
esw_disable_vport(esw, 0); esw_disable_vport(esw, 0);
...@@ -1723,8 +1720,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) ...@@ -1723,8 +1720,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
int vport_num; int vport_num;
int err; int err;
if (!MLX5_CAP_GEN(dev, vport_group_manager) || if (!MLX5_VPORT_MANAGER(dev))
MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return 0; return 0;
esw_info(dev, esw_info(dev,
...@@ -1806,8 +1802,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) ...@@ -1806,8 +1802,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{ {
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return; return;
esw_info(esw->dev, "cleanup\n"); esw_info(esw->dev, "cleanup\n");
...@@ -1838,8 +1833,6 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) ...@@ -1838,8 +1833,6 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
} }
/* Vport Administration */ /* Vport Administration */
/* Vport administration is permitted only when the eswitch object exists
 * and its device is a PF whose FW advertises the vport group manager
 * capability.
 *
 * Fix: parenthesize every use of the macro argument so that passing a
 * non-trivial expression (e.g. "a ? b : c") expands correctly.
 */
#define ESW_ALLOWED(esw) \
	((esw) && MLX5_CAP_GEN((esw)->dev, vport_group_manager) && \
	 mlx5_core_is_pf((esw)->dev))
#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports) #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
......
...@@ -43,6 +43,10 @@ ...@@ -43,6 +43,10 @@
#define DRIVER_VERSION "5.0-0" #define DRIVER_VERSION "5.0-0"
#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs(mdev->pdev)) #define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs(mdev->pdev))
/* A vport manager is a PF with an Ethernet link whose FW advertises the
 * vport group manager capability.
 *
 * Fix: parenthesize every use of the macro argument so that passing a
 * non-trivial expression as @mdev expands correctly.
 */
#define MLX5_VPORT_MANAGER(mdev) \
	(MLX5_CAP_GEN((mdev), vport_group_manager) && \
	 (MLX5_CAP_GEN((mdev), port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
	 mlx5_core_is_pf(mdev))
extern uint mlx5_core_debug_mask; extern uint mlx5_core_debug_mask;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment