Commit 5896b972 authored by Parav Pandit, committed by Saeed Mahameed

net/mlx5: E-switch, Tidy up eswitch config sequence

Currently, for PF and ECPF vports, representors are created before
their eswitch hardware ports are initialized, as in the flow below.

mlx5_eswitch_enable()
  esw_offloads_init()
    esw_offloads_load_all_reps()
[..]
esw_enable_vport()

For VFs, however, vports are initialized before their respective
netdev representors are created, in the event handling context.

Similarly, while disabling the eswitch, hardware vports are disabled
first and their representors are destroyed afterwards. As a result,
the underlying vport is already destroyed while its user-facing
netdevice still exists, and the user can continue to request offload
operations on it.

Instead, it is more accurate to do the following (a condensed sketch
of the resulting path follows the two lists):
enable_eswitch switchdev mode:
1. perform FDB tables initialization
2. initialize hw vport
3. create and publish representor for this vport

disable_eswitch switchdev mode:
1. destroy the user-facing representor for the vport
2. disable hw vport
3. perform FDB tables cleanup
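
For illustration, here is a condensed, non-verbatim sketch of the
switchdev mode path after this change, assembled from the functions
touched in the hunks below. Unrelated steps (RoCE, devcom, vport
metadata) are omitted, and esw_offloads_steering_init() is assumed as
the counterpart of the esw_offloads_steering_cleanup() seen below.

int esw_offloads_enable(struct mlx5_eswitch *esw)
{
        int err;

        /* 1. FDB/steering tables first */
        err = esw_offloads_steering_init(esw);
        if (err)
                return err;

        /* 2. then the hardware vports */
        mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);

        /* 3. representors last, once their vports exist */
        err = esw_offloads_load_all_reps(esw);
        if (err)
                goto err_reps;
        return 0;

err_reps:
        mlx5_eswitch_disable_pf_vf_vports(esw);
        esw_offloads_steering_cleanup(esw);
        return err;
}

void esw_offloads_disable(struct mlx5_eswitch *esw)
{
        /* 1. user-facing representors go away first */
        esw_offloads_unload_all_reps(esw);
        /* 2. then the hardware vports are disabled */
        mlx5_eswitch_disable_pf_vf_vports(esw);
        /* 3. FDB/steering tables are cleaned up last */
        esw_offloads_steering_cleanup(esw);
}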
Signed-off-by: Parav Pandit <parav@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 131ce701
@@ -452,6 +452,22 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
         return err;
 }
+#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
+                                        MLX5_VPORT_MC_ADDR_CHANGE | \
+                                        MLX5_VPORT_PROMISC_CHANGE)
+static int esw_legacy_enable(struct mlx5_eswitch *esw)
+{
+        int ret;
+        ret = esw_create_legacy_table(esw);
+        if (ret)
+                return ret;
+        mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
+        return 0;
+}
 static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
 {
         esw_cleanup_vepa_rules(esw);
@@ -459,6 +475,19 @@ static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
         esw_destroy_legacy_vepa_table(esw);
 }
+static void esw_legacy_disable(struct mlx5_eswitch *esw)
+{
+        struct esw_mc_addr *mc_promisc;
+        mlx5_eswitch_disable_pf_vf_vports(esw);
+        mc_promisc = &esw->mc_promisc;
+        if (mc_promisc->uplink_rule)
+                mlx5_del_flow_rules(mc_promisc->uplink_rule);
+        esw_destroy_legacy_table(esw);
+}
 /* E-Switch vport UC/MC lists management */
 typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
                                  struct vport_addr *vaddr);
@@ -1826,13 +1855,8 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
                 esw_disable_vport(esw, vport);
 }
-#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
-                                        MLX5_VPORT_MC_ADDR_CHANGE | \
-                                        MLX5_VPORT_PROMISC_CHANGE)
 int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
 {
-        int enabled_events;
         int err;
         if (!ESW_ALLOWED(esw) ||
@@ -1854,21 +1878,16 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
         mlx5_lag_update(esw->dev);
         if (mode == MLX5_ESWITCH_LEGACY) {
-                err = esw_create_legacy_table(esw);
+                err = esw_legacy_enable(esw);
         } else {
                 mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
                 mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-                err = esw_offloads_init(esw);
+                err = esw_offloads_enable(esw);
         }
         if (err)
                 goto abort;
-        enabled_events = (mode == MLX5_ESWITCH_LEGACY) ? MLX5_LEGACY_SRIOV_VPORT_EVENTS :
-                MLX5_VPORT_UC_ADDR_CHANGE;
-        mlx5_eswitch_enable_pf_vf_vports(esw, enabled_events);
         mlx5_eswitch_event_handlers_register(esw);
         esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
@@ -1890,7 +1909,6 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
 void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
 {
-        struct esw_mc_addr *mc_promisc;
         int old_mode;
         if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE)
@@ -1902,16 +1920,10 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
         mlx5_eswitch_event_handlers_unregister(esw);
-        mlx5_eswitch_disable_pf_vf_vports(esw);
-        mc_promisc = &esw->mc_promisc;
-        if (mc_promisc->uplink_rule)
-                mlx5_del_flow_rules(mc_promisc->uplink_rule);
         if (esw->mode == MLX5_ESWITCH_LEGACY)
-                esw_destroy_legacy_table(esw);
+                esw_legacy_disable(esw);
         else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
-                esw_offloads_cleanup(esw);
+                esw_offloads_disable(esw);
         esw_destroy_tsar(esw);
......
@@ -242,8 +242,8 @@ struct mlx5_eswitch {
         struct mlx5_esw_functions esw_funcs;
 };
-void esw_offloads_cleanup(struct mlx5_eswitch *esw);
-int esw_offloads_init(struct mlx5_eswitch *esw);
+void esw_offloads_disable(struct mlx5_eswitch *esw);
+int esw_offloads_enable(struct mlx5_eswitch *esw);
 void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
 int esw_offloads_init_reps(struct mlx5_eswitch *esw);
 void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
......
@@ -2104,7 +2104,7 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type
         return NOTIFY_OK;
 }
-int esw_offloads_init(struct mlx5_eswitch *esw)
+int esw_offloads_enable(struct mlx5_eswitch *esw)
 {
         int err;
@@ -2122,6 +2122,8 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
         if (err)
                 goto err_vport_metadata;
+        mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
         err = esw_offloads_load_all_reps(esw);
         if (err)
                 goto err_reps;
@@ -2134,6 +2136,7 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
         return 0;
 err_reps:
+        mlx5_eswitch_disable_pf_vf_vports(esw);
         esw_set_passing_vport_metadata(esw, false);
 err_vport_metadata:
         esw_offloads_steering_cleanup(esw);
@@ -2159,11 +2162,12 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
         return err;
 }
-void esw_offloads_cleanup(struct mlx5_eswitch *esw)
+void esw_offloads_disable(struct mlx5_eswitch *esw)
 {
         mlx5_rdma_disable_roce(esw->dev);
         esw_offloads_devcom_cleanup(esw);
         esw_offloads_unload_all_reps(esw);
+        mlx5_eswitch_disable_pf_vf_vports(esw);
         esw_set_passing_vport_metadata(esw, false);
         esw_offloads_steering_cleanup(esw);
         esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
......