Commit 34413460 authored by Bodong Wang, committed by David S. Miller

mlx5/core: E-Switch, Create ACL FT for eswitch manager in switchdev mode

An ACL flow table is required in switchdev mode when metadata is enabled;
the driver creates such a table when loading each vport. However, not every
vport is loaded in switchdev mode — for example, the ECPF is not loaded when
it is the eswitch manager. In that case, the ACL flow table is still needed.

To keep the code modular, create the ACL flow table for the eswitch manager
by default, and skip that operation when loading the manager vport.

Also, there is no need to load the eswitch manager vport in switchdev mode.
This means there is no need to load it on regular ConnectX HCAs, where
the PF is the eswitch manager. This avoids creating a duplicate ACL
flow table for the host PF vport.

Fixes: 29bcb6e4 ("net/mlx5e: E-Switch, Use metadata for vport matching in send-to-vport rules")
Fixes: eb8e9fae ("mlx5/core: E-Switch, Allocate ECPF vport if it's an eswitch manager")
Fixes: 5019833d ("net/mlx5: E-switch, Introduce helper function to enable/disable vports")
Signed-off-by: Bodong Wang <bodong@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b7558a77
......@@ -1276,12 +1276,19 @@ int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enum mlx5_eswitch_vport_event enabled_events)
{
bool pf_needed;
int ret;
pf_needed = mlx5_core_is_ecpf_esw_manager(esw->dev) ||
esw->mode == MLX5_ESWITCH_LEGACY;
/* Enable PF vport */
ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF, enabled_events);
if (ret)
return ret;
if (pf_needed) {
ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF,
enabled_events);
if (ret)
return ret;
}
/* Enable external host PF HCA */
ret = host_pf_enable_hca(esw->dev);
......@@ -1317,7 +1324,8 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
ecpf_err:
host_pf_disable_hca(esw->dev);
pf_hca_err:
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
if (pf_needed)
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
return ret;
}
......@@ -1335,7 +1343,10 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
}
host_pf_disable_hca(esw->dev);
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
esw->mode == MLX5_ESWITCH_LEGACY)
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
}
static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
......
......@@ -3216,26 +3216,47 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
esw_acl_ingress_ofld_cleanup(esw, vport);
}
static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
struct mlx5_vport *uplink, *manager;
int ret;
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
if (IS_ERR(vport))
return PTR_ERR(vport);
uplink = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
if (IS_ERR(uplink))
return PTR_ERR(uplink);
ret = esw_vport_create_offloads_acl_tables(esw, uplink);
if (ret)
return ret;
manager = mlx5_eswitch_get_vport(esw, esw->manager_vport);
if (IS_ERR(manager)) {
ret = PTR_ERR(manager);
goto err_manager;
}
return esw_vport_create_offloads_acl_tables(esw, vport);
ret = esw_vport_create_offloads_acl_tables(esw, manager);
if (ret)
goto err_manager;
return 0;
err_manager:
esw_vport_destroy_offloads_acl_tables(esw, uplink);
return ret;
}
static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
if (IS_ERR(vport))
return;
vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
if (!IS_ERR(vport))
esw_vport_destroy_offloads_acl_tables(esw, vport);
esw_vport_destroy_offloads_acl_tables(esw, vport);
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
if (!IS_ERR(vport))
esw_vport_destroy_offloads_acl_tables(esw, vport);
}
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
......@@ -3280,7 +3301,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
}
esw->fdb_table.offloads.indir = indir;
err = esw_create_uplink_offloads_acl_tables(esw);
err = esw_create_offloads_acl_tables(esw);
if (err)
goto create_acl_err;
......@@ -3321,7 +3342,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
create_restore_err:
esw_destroy_offloads_table(esw);
create_offloads_err:
esw_destroy_uplink_offloads_acl_tables(esw);
esw_destroy_offloads_acl_tables(esw);
create_acl_err:
mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
create_indir_err:
......@@ -3337,7 +3358,7 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
esw_destroy_offloads_fdb_tables(esw);
esw_destroy_restore_table(esw);
esw_destroy_offloads_table(esw);
esw_destroy_uplink_offloads_acl_tables(esw);
esw_destroy_offloads_acl_tables(esw);
mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment