Commit 4cbeaff5 authored by Achiad Shochat, committed by David S. Miller

net/mlx5e: Unify the RX flow

Generally an RX packet flows through the following objects:
Flow table --> TIR --> RQT --> RQ

Where:
- TIR stands for "Transport Interface Receive", defining the RSS and
  LRO parameters.
- RQT stands for "RQ Table", implementing the RSS indirection table.
- RQ stands for "Receive Queue"

For flows that do not need LRO, nor RSS, the driver made a shortcut to
the above RX flow by pointing to the RQ directly from the TIR, yielding
this flow:
Flow table --> TIR --> RQ

In this commit we remove this shortcut by "inserting" a single-RQ RQT
between the TIR and the RQ, i.e. RX packets will reach the same RQ but
will go through an RQT of size 1, pointing to just a single RQ.

This way the RX traffic re-direction to/from the "Drop RQ" will be more
uniform (AKA "one flow"), as it will involve only RQTs re-direction and
no TIRs re-direction.
Signed-off-by: Achiad Shochat <achiad@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent adc4cc99
...@@ -344,10 +344,10 @@ enum mlx5e_traffic_types { ...@@ -344,10 +344,10 @@ enum mlx5e_traffic_types {
MLX5E_NUM_TT, MLX5E_NUM_TT,
}; };
/* Indices into priv->rqtn[]: one RQ table per RX steering role. */
enum mlx5e_rqt_ix {
	MLX5E_INDIRECTION_RQT,	/* RSS indirection table spread over all channels */
	MLX5E_SINGLE_RQ_RQT,	/* size-1 table pointing at channel 0's RQ */
	MLX5E_NUM_RQT,		/* number of RQTs the driver maintains */
};
struct mlx5e_eth_addr_info { struct mlx5e_eth_addr_info {
...@@ -402,7 +402,7 @@ struct mlx5e_priv { ...@@ -402,7 +402,7 @@ struct mlx5e_priv {
struct mlx5e_channel **channel; struct mlx5e_channel **channel;
u32 tisn[MLX5E_MAX_NUM_TC]; u32 tisn[MLX5E_MAX_NUM_TC];
u32 rqtn; u32 rqtn[MLX5E_NUM_RQT];
u32 tirn[MLX5E_NUM_TT]; u32 tirn[MLX5E_NUM_TT];
struct mlx5e_flow_table ft; struct mlx5e_flow_table ft;
......
...@@ -1184,16 +1184,49 @@ static int mlx5e_bits_invert(unsigned long a, int size) ...@@ -1184,16 +1184,49 @@ static int mlx5e_bits_invert(unsigned long a, int size)
return inv; return inv;
} }
static int mlx5e_open_rqt(struct mlx5e_priv *priv) static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
enum mlx5e_rqt_ix rqt_ix)
{
int i;
int log_sz;
switch (rqt_ix) {
case MLX5E_INDIRECTION_RQT:
log_sz = priv->params.rx_hash_log_tbl_sz;
for (i = 0; i < (1 << log_sz); i++) {
int ix = i;
if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
ix = mlx5e_bits_invert(i, log_sz);
ix = ix % priv->params.num_channels;
MLX5_SET(rqtc, rqtc, rq_num[i],
priv->channel[ix]->rq.rqn);
}
break;
default: /* MLX5E_SINGLE_RQ_RQT */
MLX5_SET(rqtc, rqtc, rq_num[0],
priv->channel[0]->rq.rqn);
break;
}
}
static int mlx5e_open_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{ {
struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_core_dev *mdev = priv->mdev;
u32 *in; u32 *in;
void *rqtc; void *rqtc;
int inlen; int inlen;
int log_sz;
int sz;
int err; int err;
int log_tbl_sz = priv->params.rx_hash_log_tbl_sz;
int sz = 1 << log_tbl_sz; log_sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 0 :
int i; priv->params.rx_hash_log_tbl_sz;
sz = 1 << log_sz;
inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
in = mlx5_vzalloc(inlen); in = mlx5_vzalloc(inlen);
...@@ -1205,26 +1238,18 @@ static int mlx5e_open_rqt(struct mlx5e_priv *priv) ...@@ -1205,26 +1238,18 @@ static int mlx5e_open_rqt(struct mlx5e_priv *priv)
MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
MLX5_SET(rqtc, rqtc, rqt_max_size, sz); MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
for (i = 0; i < sz; i++) { mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
int ix = i;
if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR) err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);
ix = mlx5e_bits_invert(i, log_tbl_sz);
ix = ix % priv->params.num_channels;
MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
}
err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn);
kvfree(in); kvfree(in);
return err; return err;
} }
static void mlx5e_close_rqt(struct mlx5e_priv *priv) static void mlx5e_close_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{ {
mlx5_core_destroy_rqt(priv->mdev, priv->rqtn); mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
} }
static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
...@@ -1259,18 +1284,17 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) ...@@ -1259,18 +1284,17 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
lro_timer_supported_periods[3])); lro_timer_supported_periods[3]));
} }
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
switch (tt) { switch (tt) {
case MLX5E_TT_ANY: case MLX5E_TT_ANY:
MLX5_SET(tirc, tirc, disp_type, MLX5_SET(tirc, tirc, indirect_table,
MLX5_TIRC_DISP_TYPE_DIRECT); priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
MLX5_SET(tirc, tirc, inline_rqn, MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
priv->channel[0]->rq.rqn);
break; break;
default: default:
MLX5_SET(tirc, tirc, disp_type,
MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, indirect_table, MLX5_SET(tirc, tirc, indirect_table,
priv->rqtn); priv->rqtn[MLX5E_INDIRECTION_RQT]);
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_SET(tirc, tirc, rx_hash_fn,
mlx5e_rx_hash_fn(priv->params.rss_hfunc)); mlx5e_rx_hash_fn(priv->params.rss_hfunc));
if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) { if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
...@@ -1472,18 +1496,25 @@ int mlx5e_open_locked(struct net_device *netdev) ...@@ -1472,18 +1496,25 @@ int mlx5e_open_locked(struct net_device *netdev)
goto err_close_tises; goto err_close_tises;
} }
err = mlx5e_open_rqt(priv); err = mlx5e_open_rqt(priv, MLX5E_INDIRECTION_RQT);
if (err) { if (err) {
netdev_err(netdev, "%s: mlx5e_open_rqt failed, %d\n", netdev_err(netdev, "%s: mlx5e_open_rqt(INDIR) failed, %d\n",
__func__, err); __func__, err);
goto err_close_channels; goto err_close_channels;
} }
err = mlx5e_open_rqt(priv, MLX5E_SINGLE_RQ_RQT);
if (err) {
netdev_err(netdev, "%s: mlx5e_open_rqt(SINGLE) failed, %d\n",
__func__, err);
goto err_close_rqt_indir;
}
err = mlx5e_open_tirs(priv); err = mlx5e_open_tirs(priv);
if (err) { if (err) {
netdev_err(netdev, "%s: mlx5e_open_tir failed, %d\n", netdev_err(netdev, "%s: mlx5e_open_tir failed, %d\n",
__func__, err); __func__, err);
goto err_close_rqls; goto err_close_rqt_single;
} }
err = mlx5e_open_flow_table(priv); err = mlx5e_open_flow_table(priv);
...@@ -1516,8 +1547,11 @@ int mlx5e_open_locked(struct net_device *netdev) ...@@ -1516,8 +1547,11 @@ int mlx5e_open_locked(struct net_device *netdev)
err_close_tirs: err_close_tirs:
mlx5e_close_tirs(priv); mlx5e_close_tirs(priv);
err_close_rqls: err_close_rqt_single:
mlx5e_close_rqt(priv); mlx5e_close_rqt(priv, MLX5E_SINGLE_RQ_RQT);
err_close_rqt_indir:
mlx5e_close_rqt(priv, MLX5E_INDIRECTION_RQT);
err_close_channels: err_close_channels:
mlx5e_close_channels(priv); mlx5e_close_channels(priv);
...@@ -1551,7 +1585,8 @@ int mlx5e_close_locked(struct net_device *netdev) ...@@ -1551,7 +1585,8 @@ int mlx5e_close_locked(struct net_device *netdev)
netif_carrier_off(priv->netdev); netif_carrier_off(priv->netdev);
mlx5e_close_flow_table(priv); mlx5e_close_flow_table(priv);
mlx5e_close_tirs(priv); mlx5e_close_tirs(priv);
mlx5e_close_rqt(priv); mlx5e_close_rqt(priv, MLX5E_SINGLE_RQ_RQT);
mlx5e_close_rqt(priv, MLX5E_INDIRECTION_RQT);
mlx5e_close_channels(priv); mlx5e_close_channels(priv);
mlx5e_close_tises(priv); mlx5e_close_tises(priv);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment