Commit d7df09f5 authored by Dmytro Linkin, committed by Saeed Mahameed

net/mlx5: E-switch, Enable vport QoS on demand

Vport QoS is not commonly used but consumes SW/HW resources, which
becomes an issue on BlueField SoC systems.
Don't enable QoS on vports by default on eswitch mode change; instead,
enable it on demand, when it is about to be used by one of the
top-level users:
- configuring a TC matchall filter with a police action;
- setting a rate with the legacy NDO API;
- calling the devlink ops->rate_leaf_*() callbacks.

Disable vport QoS on vport cleanup.
Signed-off-by: Dmytro Linkin <dlinkin@nvidia.com>
Reviewed-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent e9d491a6
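In essence, the change makes vport QoS enablement lazy and idempotent:
esw_qos_vport_enable() now returns 0 instead of -EEXIST when QoS is
already enabled, and every top-level user calls it before touching any
rate settings. A minimal sketch of that pattern, condensed from the
hunks below (firmware setup, tracing, and most error paths elided; the
bodies are illustrative, not the full driver code):

	/* Idempotent: safe for each top-level user to call unconditionally. */
	static int esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					u32 max_rate, u32 bw_share)
	{
		if (vport->qos.enabled)
			return 0;
		/* ... create the vport scheduling (TSAR) element in FW ... */
		vport->qos.enabled = true;
		return 0;
	}

	/* Top-level users enable QoS on demand, under esw->state_lock. */
	int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					u32 min_rate, u32 max_rate)
	{
		int err;

		lockdep_assert_held(&esw->state_lock);
		err = esw_qos_vport_enable(esw, vport, 0, 0);
		if (err)
			return err;
		err = esw_qos_set_vport_min_rate(esw, vport, min_rate, NULL);
		if (!err)
			err = esw_qos_set_vport_max_rate(esw, vport, max_rate, NULL);
		return err;
	}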
@@ -522,9 +522,7 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
 		return PTR_ERR(evport);
 
 	mutex_lock(&esw->state_lock);
-	err = mlx5_esw_qos_set_vport_min_rate(esw, evport, min_rate, NULL);
-	if (!err)
-		err = mlx5_esw_qos_set_vport_max_rate(esw, evport, max_rate, NULL);
+	err = mlx5_esw_qos_set_vport_rate(esw, evport, max_rate, min_rate);
 	mutex_unlock(&esw->state_lock);
 	return err;
 }
@@ -204,10 +204,8 @@ static int esw_qos_normalize_groups_min_rate(struct mlx5_eswitch *esw, u32 divid
 	return 0;
 }
 
-int mlx5_esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw,
-				    struct mlx5_vport *evport,
-				    u32 min_rate,
-				    struct netlink_ext_ack *extack)
+static int esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport,
+				      u32 min_rate, struct netlink_ext_ack *extack)
 {
 	u32 fw_max_bw_share, previous_min_rate;
 	bool min_rate_supported;
@@ -231,10 +229,8 @@ int mlx5_esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw,
 	return err;
 }
 
-int mlx5_esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw,
-				    struct mlx5_vport *evport,
-				    u32 max_rate,
-				    struct netlink_ext_ack *extack)
+static int esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport,
+				      u32 max_rate, struct netlink_ext_ack *extack)
 {
 	u32 act_max_rate = max_rate;
 	bool max_rate_supported;
@@ -605,8 +601,8 @@ void mlx5_esw_qos_destroy(struct mlx5_eswitch *esw)
 	mutex_unlock(&esw->state_lock);
 }
 
-int mlx5_esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
-			      u32 max_rate, u32 bw_share)
+static int esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+				u32 max_rate, u32 bw_share)
 {
 	int err;
@@ -615,7 +611,7 @@ int mlx5_esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport
 		return 0;
 
 	if (vport->qos.enabled)
-		return -EEXIST;
+		return 0;
 
 	vport->qos.group = esw->qos.group0;
@@ -645,31 +641,55 @@ void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vpo
 		esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
 			 vport->vport, err);
 
-	vport->qos.enabled = false;
+	memset(&vport->qos, 0, sizeof(vport->qos));
 	trace_mlx5_esw_vport_qos_destroy(vport);
 }
 
+int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+				u32 min_rate, u32 max_rate)
+{
+	int err;
+
+	lockdep_assert_held(&esw->state_lock);
+	err = esw_qos_vport_enable(esw, vport, 0, 0);
+	if (err)
+		return err;
+
+	err = esw_qos_set_vport_min_rate(esw, vport, min_rate, NULL);
+	if (!err)
+		err = esw_qos_set_vport_max_rate(esw, vport, max_rate, NULL);
+	return err;
+}
+
 int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps)
 {
 	u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
 	struct mlx5_vport *vport;
 	u32 bitmask;
+	int err;
 
 	vport = mlx5_eswitch_get_vport(esw, vport_num);
 	if (IS_ERR(vport))
 		return PTR_ERR(vport);
 
-	if (!vport->qos.enabled)
-		return -EOPNOTSUPP;
-
-	MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
-	bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
-
-	return mlx5_modify_scheduling_element_cmd(esw->dev,
-						  SCHEDULING_HIERARCHY_E_SWITCH,
-						  ctx,
-						  vport->qos.esw_tsar_ix,
-						  bitmask);
+	mutex_lock(&esw->state_lock);
+	if (!vport->qos.enabled) {
+		/* Eswitch QoS wasn't enabled yet. Enable it and vport QoS. */
+		err = esw_qos_vport_enable(esw, vport, rate_mbps, vport->qos.bw_share);
+	} else {
+		MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
+
+		bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
+		err = mlx5_modify_scheduling_element_cmd(esw->dev,
+							 SCHEDULING_HIERARCHY_E_SWITCH,
+							 ctx,
+							 vport->qos.esw_tsar_ix,
+							 bitmask);
+	}
+	mutex_unlock(&esw->state_lock);
+
+	return err;
 }
 
 #define MLX5_LINKSPEED_UNIT 125000 /* 1Mbps in Bps */
@@ -728,7 +748,12 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void
 		return err;
 
 	mutex_lock(&esw->state_lock);
-	err = mlx5_esw_qos_set_vport_min_rate(esw, vport, tx_share, extack);
+	err = esw_qos_vport_enable(esw, vport, 0, 0);
+	if (err)
+		goto unlock;
+
+	err = esw_qos_set_vport_min_rate(esw, vport, tx_share, extack);
+unlock:
 	mutex_unlock(&esw->state_lock);
 	return err;
 }
@@ -749,7 +774,12 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *
 		return err;
 
 	mutex_lock(&esw->state_lock);
-	err = mlx5_esw_qos_set_vport_max_rate(esw, vport, tx_max, extack);
+	err = esw_qos_vport_enable(esw, vport, 0, 0);
+	if (err)
+		goto unlock;
+
+	err = esw_qos_set_vport_max_rate(esw, vport, tx_max, extack);
+unlock:
 	mutex_unlock(&esw->state_lock);
 	return err;
 }
@@ -846,7 +876,9 @@ int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw,
 	int err;
 
 	mutex_lock(&esw->state_lock);
-	err = esw_qos_vport_update_group(esw, vport, group, extack);
+	err = esw_qos_vport_enable(esw, vport, 0, 0);
+	if (!err)
+		err = esw_qos_vport_update_group(esw, vport, group, extack);
 	mutex_unlock(&esw->state_lock);
 	return err;
 }
......
@@ -6,18 +6,10 @@
 
 #ifdef CONFIG_MLX5_ESWITCH
 
-int mlx5_esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw,
-				    struct mlx5_vport *evport,
-				    u32 min_rate,
-				    struct netlink_ext_ack *extack);
-int mlx5_esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw,
-				    struct mlx5_vport *evport,
-				    u32 max_rate,
-				    struct netlink_ext_ack *extack);
+int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport,
+				u32 max_rate, u32 min_rate);
 void mlx5_esw_qos_create(struct mlx5_eswitch *esw);
 void mlx5_esw_qos_destroy(struct mlx5_eswitch *esw);
-int mlx5_esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
-			      u32 max_rate, u32 bw_share);
 void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
 
 int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv,
......
@@ -781,9 +781,6 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
 	if (err)
 		return err;
 
-	/* Attach vport to the eswitch rate limiter */
-	mlx5_esw_qos_vport_enable(esw, vport, vport->qos.max_rate, vport->qos.bw_share);
-
 	if (mlx5_esw_is_manager_vport(esw, vport_num))
 		return 0;
@@ -1746,8 +1743,10 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 	ivi->qos = evport->info.qos;
 	ivi->spoofchk = evport->info.spoofchk;
 	ivi->trusted = evport->info.trusted;
-	ivi->min_tx_rate = evport->qos.min_rate;
-	ivi->max_tx_rate = evport->qos.max_rate;
+	if (evport->qos.enabled) {
+		ivi->min_tx_rate = evport->qos.min_rate;
+		ivi->max_tx_rate = evport->qos.max_rate;
+	}
 	mutex_unlock(&esw->state_lock);
 
 	return 0;
......