Commit c2d7712c authored by Bodong Wang, committed by Saeed Mahameed

net/mlx5: E-Switch, Introduce per vport configuration for eswitch modes

Both legacy and offloads modes require vport setup; only offloads mode
requires rep setup. Before this patch, vport and rep operations were
applied separately to all relevant vports in different stages.

Change to a per-vport configuration, so that vport and rep operations
are modularized per vport.
Signed-off-by: Bodong Wang <bodong@mellanox.com>
Reviewed-by: Parav Pandit <parav@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent d7c92cb5
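
In short, the patch pairs vport enablement with rep loading behind one per-vport helper and tears both down in reverse order. A condensed sketch of that pairing, using only the helper names introduced in the hunks below (locking and the exact error labels elided):

/* Condensed sketch of the per-vport pairing this patch introduces;
 * see the full hunks below for the exact error paths.
 */
static int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
				   enum mlx5_eswitch_vport_event enabled_events)
{
	int err;

	/* Vport setup is needed in both legacy and offloads modes. */
	err = esw_enable_vport(esw, vport_num, enabled_events);
	if (err)
		return err;

	/* Rep setup is a no-op unless the eswitch is in offloads mode. */
	err = esw_offloads_load_rep(esw, vport_num);
	if (err)
		esw_disable_vport(esw, vport_num);
	return err;
}

static void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	/* Teardown in reverse order: rep first, then the vport itself. */
	esw_offloads_unload_rep(esw, vport_num);
	esw_disable_vport(esw, vport_num);
}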
@@ -1806,12 +1806,14 @@ static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
 	esw_vport_cleanup_acl(esw, vport);
 }
 
-static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+static int esw_enable_vport(struct mlx5_eswitch *esw, u16 vport_num,
 			    enum mlx5_eswitch_vport_event enabled_events)
 {
-	u16 vport_num = vport->vport;
+	struct mlx5_vport *vport;
 	int ret;
 
+	vport = mlx5_eswitch_get_vport(esw, vport_num);
+
 	mutex_lock(&esw->state_lock);
 	WARN_ON(vport->enabled);
@@ -1841,10 +1843,11 @@ static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
 	return ret;
 }
 
-static void esw_disable_vport(struct mlx5_eswitch *esw,
-			      struct mlx5_vport *vport)
+static void esw_disable_vport(struct mlx5_eswitch *esw, u16 vport_num)
 {
-	u16 vport_num = vport->vport;
+	struct mlx5_vport *vport;
+
+	vport = mlx5_eswitch_get_vport(esw, vport_num);
 
 	mutex_lock(&esw->state_lock);
 	if (!vport->enabled)
@@ -1950,6 +1953,32 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
 /* Public E-Switch API */
 #define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
 
+static int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
+				   enum mlx5_eswitch_vport_event enabled_events)
+{
+	int err;
+
+	err = esw_enable_vport(esw, vport_num, enabled_events);
+	if (err)
+		return err;
+
+	err = esw_offloads_load_rep(esw, vport_num);
+	if (err)
+		goto err_rep;
+
+	return err;
+
+err_rep:
+	esw_disable_vport(esw, vport_num);
+	return err;
+}
+
+static void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
+{
+	esw_offloads_unload_rep(esw, vport_num);
+	esw_disable_vport(esw, vport_num);
+}
+
 /* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs
  * whichever are present on the eswitch.
  */
@@ -1957,28 +1986,25 @@ int
 mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
 				 enum mlx5_eswitch_vport_event enabled_events)
 {
-	struct mlx5_vport *vport;
 	int num_vfs;
 	int ret;
 	int i;
 
 	/* Enable PF vport */
-	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
-	ret = esw_enable_vport(esw, vport, enabled_events);
+	ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events);
 	if (ret)
 		return ret;
 
 	/* Enable ECPF vport */
 	if (mlx5_ecpf_vport_exists(esw->dev)) {
-		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
-		ret = esw_enable_vport(esw, vport, enabled_events);
+		ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events);
 		if (ret)
 			goto ecpf_err;
 	}
 
 	/* Enable VF vports */
-	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
-		ret = esw_enable_vport(esw, vport, enabled_events);
+	mlx5_esw_for_each_vf_vport_num(esw, i, esw->esw_funcs.num_vfs) {
+		ret = mlx5_eswitch_load_vport(esw, i, enabled_events);
 		if (ret)
 			goto vf_err;
 	}
@@ -1986,17 +2012,14 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
 
 vf_err:
 	num_vfs = i - 1;
-	mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, num_vfs)
-		esw_disable_vport(esw, vport);
+	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, num_vfs)
+		mlx5_eswitch_unload_vport(esw, i);
 
-	if (mlx5_ecpf_vport_exists(esw->dev)) {
-		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
-		esw_disable_vport(esw, vport);
-	}
+	if (mlx5_ecpf_vport_exists(esw->dev))
+		mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
 
 ecpf_err:
-	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
-	esw_disable_vport(esw, vport);
+	mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
 	return ret;
 }
@@ -2005,11 +2028,15 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
  */
 void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
 {
-	struct mlx5_vport *vport;
 	int i;
 
-	mlx5_esw_for_all_vports_reverse(esw, i, vport)
-		esw_disable_vport(esw, vport);
+	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, esw->esw_funcs.num_vfs)
+		mlx5_eswitch_unload_vport(esw, i);
+
+	if (mlx5_ecpf_vport_exists(esw->dev))
+		mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
+
+	mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
 }
 
 static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
...
@@ -651,6 +651,9 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);
 u32
 esw_get_max_restore_tag(struct mlx5_eswitch *esw);
 
+int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
+void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);
+
 #else  /* CONFIG_MLX5_ESWITCH */
 /* eswitch API stubs */
 static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
...
@@ -1678,14 +1678,6 @@ static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
 	__unload_reps_special_vport(esw, rep_type);
 }
 
-static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw)
-{
-	u8 rep_type = NUM_REP_TYPES;
-
-	while (rep_type-- > 0)
-		__unload_reps_all_vport(esw, rep_type);
-}
-
 static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
 				   struct mlx5_eswitch_rep *rep, u8 rep_type)
 {
@@ -1702,44 +1694,6 @@ static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
 	return err;
 }
 
-static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
-{
-	struct mlx5_eswitch_rep *rep;
-	int err;
-
-	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
-	err = __esw_offloads_load_rep(esw, rep, rep_type);
-	if (err)
-		return err;
-
-	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
-		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
-		err = __esw_offloads_load_rep(esw, rep, rep_type);
-		if (err)
-			goto err_pf;
-	}
-
-	if (mlx5_ecpf_vport_exists(esw->dev)) {
-		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
-		err = __esw_offloads_load_rep(esw, rep, rep_type);
-		if (err)
-			goto err_ecpf;
-	}
-
-	return 0;
-
-err_ecpf:
-	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
-		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
-		__esw_offloads_unload_rep(esw, rep, rep_type);
-	}
-
-err_pf:
-	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
-	__esw_offloads_unload_rep(esw, rep, rep_type);
-	return err;
-}
-
 static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
 				u8 rep_type)
 {
@@ -1759,26 +1713,6 @@ static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
 	return err;
 }
 
-static int __load_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
-{
-	int err;
-
-	/* Special vports must be loaded first, uplink rep creates mdev resource. */
-	err = __load_reps_special_vport(esw, rep_type);
-	if (err)
-		return err;
-
-	err = __load_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
-	if (err)
-		goto err_vfs;
-
-	return 0;
-
-err_vfs:
-	__unload_reps_special_vport(esw, rep_type);
-	return err;
-}
-
 static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
 {
 	u8 rep_type = 0;
@@ -1798,25 +1732,46 @@ static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
 	return err;
 }
 
-static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw)
+int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
 {
-	u8 rep_type = 0;
+	struct mlx5_eswitch_rep *rep;
+	int rep_type;
 	int err;
 
-	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
-		err = __load_reps_all_vport(esw, rep_type);
+	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
+		return 0;
+
+	rep = mlx5_eswitch_get_rep(esw, vport_num);
+	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
+		if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
+				   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
+			err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
 			if (err)
 				goto err_reps;
 		}
 
-	return err;
+	return 0;
 
 err_reps:
-	while (rep_type-- > 0)
-		__unload_reps_all_vport(esw, rep_type);
+	atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
+	for (--rep_type; rep_type >= 0; rep_type--)
+		__esw_offloads_unload_rep(esw, rep, rep_type);
 	return err;
 }
 
+void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
+{
+	struct mlx5_eswitch_rep *rep;
+	int rep_type;
+
+	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
+		return;
+
+	rep = mlx5_eswitch_get_rep(esw, vport_num);
+	for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
+		__esw_offloads_unload_rep(esw, rep, rep_type);
+}
+
 #define ESW_OFFLOADS_DEVCOM_PAIR	(0)
 #define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)
@@ -2466,22 +2421,23 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
 	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
 		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
 
-	err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
+	/* Uplink vport rep must load first. */
+	err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
 	if (err)
-		goto err_vports;
+		goto err_uplink;
 
-	err = esw_offloads_load_all_reps(esw);
+	err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
 	if (err)
-		goto err_reps;
+		goto err_vports;
 
 	esw_offloads_devcom_init(esw);
 	mutex_init(&esw->offloads.termtbl_mutex);
 
 	return 0;
 
-err_reps:
-	mlx5_eswitch_disable_pf_vf_vports(esw);
-
 err_vports:
+	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
+err_uplink:
 	esw_set_passing_vport_metadata(esw, false);
 err_vport_metadata:
 	esw_offloads_steering_cleanup(esw);
@@ -2512,8 +2468,8 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
 void esw_offloads_disable(struct mlx5_eswitch *esw)
 {
 	esw_offloads_devcom_cleanup(esw);
-	esw_offloads_unload_all_reps(esw);
 	mlx5_eswitch_disable_pf_vf_vports(esw);
+	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
 	esw_set_passing_vport_metadata(esw, false);
 	esw_offloads_steering_cleanup(esw);
 	mlx5_rdma_disable_roce(esw->dev);
@@ -2786,6 +2742,21 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
 	return 0;
 }
 
+static bool
+mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
+{
+	/* Currently, only ECPF based device has representor for host PF. */
+	if (vport_num == MLX5_VPORT_PF &&
+	    !mlx5_core_is_ecpf_esw_manager(esw->dev))
+		return false;
+
+	if (vport_num == MLX5_VPORT_ECPF &&
+	    !mlx5_ecpf_vport_exists(esw->dev))
+		return false;
+
+	return true;
+}
+
 void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
 				      const struct mlx5_eswitch_rep_ops *ops,
 				      u8 rep_type)
@@ -2796,9 +2767,11 @@ void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
 
 	esw->offloads.rep_ops[rep_type] = ops;
 	mlx5_esw_for_all_reps(esw, i, rep) {
+		if (likely(mlx5_eswitch_vport_has_rep(esw, i))) {
 			rep_data = &rep->rep_data[rep_type];
 			atomic_set(&rep_data->state, REP_REGISTERED);
 		}
+	}
 }
 
 EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);