Commit acc6c595 authored by Saeed Mahameed

net/mlx5e: Split open/close channels to stages

As a foundation for safe config flow, introduce a simple, clear API
(Open then Activate): "Open" performs the heavy, failure-prone creation
operations, while "Activate" is fast and fail-safe, enabling the newly
created channels.

For this we split the RQs/TXQ SQs and channels open/close flows to
open => activate, deactivate => close.

This will simplify the ability to have fail safe configuration changes
in downstream patches as follows:

make_new_config(new_params)
{
     old_channels = current_active_channels;
     new_channels = create_channels(new_params);
     if (!new_channels)
              return "Failed, but current channels still active :)"
     deactivate_channels(old_channels); /* Can't fail */
     activate_channels(new_channels); /* Can't fail */
     close_channels(old_channels);
     current_active_channels = new_channels;

     return "SUCCESS";
}
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
parent b676f653
...@@ -358,6 +358,7 @@ struct mlx5e_txqsq { ...@@ -358,6 +358,7 @@ struct mlx5e_txqsq {
struct mlx5_wq_ctrl wq_ctrl; struct mlx5_wq_ctrl wq_ctrl;
struct mlx5e_channel *channel; struct mlx5e_channel *channel;
int tc; int tc;
int txq_ix;
u32 rate_limit; u32 rate_limit;
} ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp;
...@@ -732,8 +733,8 @@ struct mlx5e_profile { ...@@ -732,8 +733,8 @@ struct mlx5e_profile {
struct mlx5e_priv { struct mlx5e_priv {
/* priv data path fields - start */ /* priv data path fields - start */
struct mlx5e_txqsq **txq_to_sq_map; struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC]; int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
struct bpf_prog *xdp_prog; struct bpf_prog *xdp_prog;
/* priv data path fields - end */ /* priv data path fields - end */
......
...@@ -269,7 +269,7 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data) ...@@ -269,7 +269,7 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
for (j = 0; j < NUM_SQ_STATS; j++) for (j = 0; j < NUM_SQ_STATS; j++)
sprintf(data + (idx++) * ETH_GSTRING_LEN, sprintf(data + (idx++) * ETH_GSTRING_LEN,
sq_stats_desc[j].format, sq_stats_desc[j].format,
priv->channeltc_to_txq_map[i][tc]); priv->channel_tc2txq[i][tc]);
} }
static void mlx5e_get_strings(struct net_device *dev, static void mlx5e_get_strings(struct net_device *dev,
......
...@@ -103,7 +103,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, ...@@ -103,7 +103,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
channel_ix = reciprocal_scale(channel_ix, channel_ix = reciprocal_scale(channel_ix,
priv->params.num_channels); priv->params.num_channels);
return priv->channeltc_to_txq_map[channel_ix][up]; return priv->channel_tc2txq[channel_ix][up];
} }
static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb) static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
...@@ -339,7 +339,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb) ...@@ -339,7 +339,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb)
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{ {
struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_txqsq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)]; struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)];
return mlx5e_sq_xmit(sq, skb); return mlx5e_sq_xmit(sq, skb);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment