Commit 43ec0f41 authored by Maxim Mikityanskiy, committed by Saeed Mahameed

net/mlx5e: Hide all implementation details of mlx5e_rx_res

This commit moves all implementation details of struct mlx5e_rx_res
under en/rx_res.c. All access to RX resources is now done using methods.
Encapsulating RX resources into an object allows for better
manageability, because all the implementation details are now in a
single place, and external code can use only a limited set of API
methods to init/teardown the whole thing, reconfigure RSS and LRO
parameters, connect TIRs to flow steering and activate/deactivate TIRs.

mlx5e_rx_res is self-contained and doesn't depend on struct mlx5e_priv
or include en.h.
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent e6e01b5f
......@@ -921,8 +921,6 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);
int mlx5e_modify_tirs_hash(struct mlx5e_priv *priv);
struct mlx5e_xsk_param;
struct mlx5e_rq_param;
......@@ -1033,16 +1031,6 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node);
void mlx5e_free_di_list(struct mlx5e_rq *rq);
int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);
int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv);
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
......@@ -1130,8 +1118,6 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu);
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
u16 num_channels);
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);
......
......@@ -605,8 +605,8 @@ static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
{
u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res);
struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
u32 tirn = priv->rx_res->ptp.tir.tirn;
struct mlx5_flow_handle *rule;
int err;
......
......@@ -2,6 +2,8 @@
/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
#include "rx_res.h"
#include "channels.h"
#include "params.h"
static const struct mlx5e_rss_params_traffic_type rss_default_config[MLX5E_NUM_INDIR_TIRS] = {
[MLX5E_TT_IPV4_TCP] = {
......@@ -62,6 +64,539 @@ mlx5e_rss_get_default_tt_config(enum mlx5e_traffic_types tt)
return rss_default_config[tt];
}
/* All RX steering HW objects (RQTs and TIRs) owned by the netdev.
 * The definition lives here so the struct is opaque outside this file;
 * external code must go through the mlx5e_rx_res_* API.
 */
struct mlx5e_rx_res {
	struct mlx5_core_dev *mdev;
	enum mlx5e_rx_res_features features;	/* INNER_FT / XSK / PTP bits */
	unsigned int max_nch;			/* number of per-channel entries */
	u32 drop_rqn;				/* fallback RQ for inactive queues */

	/* Software copy of the RSS configuration pushed to HW. */
	struct {
		struct mlx5e_rss_params_hash hash;
		struct mlx5e_rss_params_indir indir;
		u32 rx_hash_fields[MLX5E_NUM_INDIR_TIRS];
	} rss_params;

	struct mlx5e_rqt indir_rqt;		/* RSS indirection RQT */
	struct {
		struct mlx5e_tir indir_tir;
		/* inner_indir_tir is created only with MLX5E_RX_RES_FEATURE_INNER_FT. */
		struct mlx5e_tir inner_indir_tir;
	} rss[MLX5E_NUM_INDIR_TIRS];

	bool rss_active;			/* indir_rqt points at channel RQs, not drop */
	u32 rss_rqns[MLX5E_INDIR_RQT_SIZE];	/* RQNs currently in the RSS set */
	unsigned int rss_nch;			/* number of valid entries in rss_rqns */

	struct {
		struct mlx5e_rqt direct_rqt;
		struct mlx5e_tir direct_tir;
		/* XSK pair is created only with MLX5E_RX_RES_FEATURE_XSK. */
		struct mlx5e_rqt xsk_rqt;
		struct mlx5e_tir xsk_tir;
	} channels[MLX5E_MAX_NUM_CHANNELS];

	/* Created only with MLX5E_RX_RES_FEATURE_PTP. */
	struct {
		struct mlx5e_rqt rqt;
		struct mlx5e_tir tir;
	} ptp;
};
/* Allocate a zero-initialized RX resources object; pair with mlx5e_rx_res_free(). */
struct mlx5e_rx_res *mlx5e_rx_res_alloc(void)
{
	struct mlx5e_rx_res *res;

	res = kvzalloc(sizeof(*res), GFP_KERNEL);
	return res;
}
static void mlx5e_rx_res_rss_params_init(struct mlx5e_rx_res *res, unsigned int init_nch)
{
enum mlx5e_traffic_types tt;
res->rss_params.hash.hfunc = ETH_RSS_HASH_TOP;
netdev_rss_key_fill(res->rss_params.hash.toeplitz_hash_key,
sizeof(res->rss_params.hash.toeplitz_hash_key));
mlx5e_rss_params_indir_init_uniform(&res->rss_params.indir, init_nch);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
res->rss_params.rx_hash_fields[tt] =
mlx5e_rss_get_default_tt_config(tt).rx_hash_fields;
}
/* Create the RSS objects: the indirection RQT (initially pointing at the
 * drop RQ) and one indirect TIR per traffic type, plus inner indirect TIRs
 * when inner flow-table support is enabled. On failure, everything created
 * so far is destroyed.
 */
static int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res,
				 const struct mlx5e_lro_param *init_lro_param)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	enum mlx5e_traffic_types tt, max_tt;
	struct mlx5e_tir_builder *builder;
	u32 indir_rqtn;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	/* err == 0 here; "goto out" below is also the success path. */
	err = mlx5e_rqt_init_direct(&res->indir_rqt, res->mdev, true, res->drop_rqn);
	if (err)
		goto out;

	indir_rqtn = mlx5e_rqt_get_rqtn(&res->indir_rqt);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_rss_params_traffic_type rss_tt;

		mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
					    indir_rqtn, inner_ft_support);
		mlx5e_tir_builder_build_lro(builder, init_lro_param);
		rss_tt = mlx5e_rx_res_rss_get_current_tt_config(res, tt);
		mlx5e_tir_builder_build_rss(builder, &res->rss_params.hash, &rss_tt, false);
		err = mlx5e_tir_init(&res->rss[tt].indir_tir, builder, res->mdev, true);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to create an indirect TIR: err = %d, tt = %d\n",
				       err, tt);
			goto err_destroy_tirs;
		}
		/* Reuse the builder for the next TIR. */
		mlx5e_tir_builder_clear(builder);
	}

	if (!inner_ft_support)
		goto out;

	/* Second pass: inner indirect TIRs (inner == true in build_rss). */
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_rss_params_traffic_type rss_tt;

		mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
					    indir_rqtn, inner_ft_support);
		mlx5e_tir_builder_build_lro(builder, init_lro_param);
		rss_tt = mlx5e_rx_res_rss_get_current_tt_config(res, tt);
		mlx5e_tir_builder_build_rss(builder, &res->rss_params.hash, &rss_tt, true);
		err = mlx5e_tir_init(&res->rss[tt].inner_indir_tir, builder, res->mdev, true);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to create an inner indirect TIR: err = %d, tt = %d\n",
				       err, tt);
			goto err_destroy_inner_tirs;
		}
		mlx5e_tir_builder_clear(builder);
	}

	goto out;

err_destroy_inner_tirs:
	/* Destroy the inner TIRs created before the failure... */
	max_tt = tt;
	for (tt = 0; tt < max_tt; tt++)
		mlx5e_tir_destroy(&res->rss[tt].inner_indir_tir);

	/* ...then fall through to destroy ALL outer TIRs (first loop completed). */
	tt = MLX5E_NUM_INDIR_TIRS;
err_destroy_tirs:
	max_tt = tt;
	for (tt = 0; tt < max_tt; tt++)
		mlx5e_tir_destroy(&res->rss[tt].indir_tir);

	mlx5e_rqt_destroy(&res->indir_rqt);

out:
	mlx5e_tir_builder_free(builder);

	return err;
}
/* Create the per-channel objects for all max_nch channels: a direct RQT and
 * TIR per channel, plus an XSK RQT/TIR pair when the XSK feature is enabled.
 * All RQTs initially point at the drop RQ. Unwinds fully on failure.
 */
static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
				      const struct mlx5e_lro_param *init_lro_param)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_tir_builder *builder;
	int err = 0;
	int ix;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	for (ix = 0; ix < res->max_nch; ix++) {
		err = mlx5e_rqt_init_direct(&res->channels[ix].direct_rqt,
					    res->mdev, false, res->drop_rqn);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to create a direct RQT: err = %d, ix = %u\n",
				       err, ix);
			goto err_destroy_direct_rqts;
		}
	}

	for (ix = 0; ix < res->max_nch; ix++) {
		mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
					    mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
					    inner_ft_support);
		mlx5e_tir_builder_build_lro(builder, init_lro_param);
		mlx5e_tir_builder_build_direct(builder);
		err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to create a direct TIR: err = %d, ix = %u\n",
				       err, ix);
			goto err_destroy_direct_tirs;
		}
		/* Reuse the builder for the next TIR. */
		mlx5e_tir_builder_clear(builder);
	}

	if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
		goto out;

	for (ix = 0; ix < res->max_nch; ix++) {
		err = mlx5e_rqt_init_direct(&res->channels[ix].xsk_rqt,
					    res->mdev, false, res->drop_rqn);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to create an XSK RQT: err = %d, ix = %u\n",
				       err, ix);
			goto err_destroy_xsk_rqts;
		}
	}

	for (ix = 0; ix < res->max_nch; ix++) {
		mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
					    mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
					    inner_ft_support);
		mlx5e_tir_builder_build_lro(builder, init_lro_param);
		mlx5e_tir_builder_build_direct(builder);
		err = mlx5e_tir_init(&res->channels[ix].xsk_tir, builder, res->mdev, true);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to create an XSK TIR: err = %d, ix = %u\n",
				       err, ix);
			goto err_destroy_xsk_tirs;
		}
		mlx5e_tir_builder_clear(builder);
	}

	goto out;

	/* Cascading unwind: each stage destroys its partial objects, then
	 * resets ix to max_nch so the previous (fully created) stage is
	 * destroyed completely by the next label.
	 */
err_destroy_xsk_tirs:
	while (--ix >= 0)
		mlx5e_tir_destroy(&res->channels[ix].xsk_tir);

	ix = res->max_nch;
err_destroy_xsk_rqts:
	while (--ix >= 0)
		mlx5e_rqt_destroy(&res->channels[ix].xsk_rqt);

	ix = res->max_nch;
err_destroy_direct_tirs:
	while (--ix >= 0)
		mlx5e_tir_destroy(&res->channels[ix].direct_tir);

	ix = res->max_nch;
err_destroy_direct_rqts:
	while (--ix >= 0)
		mlx5e_rqt_destroy(&res->channels[ix].direct_rqt);

out:
	mlx5e_tir_builder_free(builder);

	return err;
}
/* Create the PTP RQT (initially pointing at the drop RQ) and its direct TIR. */
static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_tir_builder *builder;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	err = mlx5e_rqt_init_direct(&res->ptp.rqt, res->mdev, false, res->drop_rqn);
	if (err)
		goto out;

	mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
				    mlx5e_rqt_get_rqtn(&res->ptp.rqt),
				    inner_ft_support);
	mlx5e_tir_builder_build_direct(builder);
	err = mlx5e_tir_init(&res->ptp.tir, builder, res->mdev, true);
	if (err)
		goto err_destroy_ptp_rqt;

	/* Success: err == 0, fall through to cleanup of the builder only. */
	goto out;

err_destroy_ptp_rqt:
	mlx5e_rqt_destroy(&res->ptp.rqt);

out:
	mlx5e_tir_builder_free(builder);
	return err;
}
static void mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res)
{
enum mlx5e_traffic_types tt;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
mlx5e_tir_destroy(&res->rss[tt].indir_tir);
if (res->features & MLX5E_RX_RES_FEATURE_INNER_FT)
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
mlx5e_tir_destroy(&res->rss[tt].inner_indir_tir);
mlx5e_rqt_destroy(&res->indir_rqt);
}
static void mlx5e_rx_res_channels_destroy(struct mlx5e_rx_res *res)
{
unsigned int ix;
for (ix = 0; ix < res->max_nch; ix++) {
mlx5e_tir_destroy(&res->channels[ix].direct_tir);
mlx5e_rqt_destroy(&res->channels[ix].direct_rqt);
if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
continue;
mlx5e_tir_destroy(&res->channels[ix].xsk_tir);
mlx5e_rqt_destroy(&res->channels[ix].xsk_rqt);
}
}
/* Tear down the PTP TIR and RQT created by mlx5e_rx_res_ptp_init(). */
static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res)
{
	mlx5e_tir_destroy(&res->ptp.tir);
	mlx5e_rqt_destroy(&res->ptp.rqt);
}
/* Initialize all RX resources in a pre-allocated object (see
 * mlx5e_rx_res_alloc()): RSS defaults, then the RSS, per-channel and PTP HW
 * objects. All previously created objects are destroyed on failure.
 */
int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
		      enum mlx5e_rx_res_features features, unsigned int max_nch,
		      u32 drop_rqn, const struct mlx5e_lro_param *init_lro_param,
		      unsigned int init_nch)
{
	int err;

	res->mdev = mdev;
	res->features = features;
	res->max_nch = max_nch;
	res->drop_rqn = drop_rqn;

	mlx5e_rx_res_rss_params_init(res, init_nch);

	err = mlx5e_rx_res_rss_init(res, init_lro_param);
	if (err)
		return err;

	err = mlx5e_rx_res_channels_init(res, init_lro_param);
	if (err)
		goto err_rss_destroy;

	err = mlx5e_rx_res_ptp_init(res);
	if (err)
		goto err_channels_destroy;

	return 0;

err_channels_destroy:
	mlx5e_rx_res_channels_destroy(res);
err_rss_destroy:
	mlx5e_rx_res_rss_destroy(res);
	return err;
}
/* Destroy all HW objects, in reverse order of mlx5e_rx_res_init(). */
void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res)
{
	mlx5e_rx_res_ptp_destroy(res);
	mlx5e_rx_res_channels_destroy(res);
	mlx5e_rx_res_rss_destroy(res);
}
/* Free the object allocated by mlx5e_rx_res_alloc(). Call after
 * mlx5e_rx_res_destroy().
 */
void mlx5e_rx_res_free(struct mlx5e_rx_res *res)
{
	kvfree(res);
}
/* TIRN of the direct TIR of channel ix, for flow steering destinations. */
u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
	return mlx5e_tir_get_tirn(&res->channels[ix].direct_tir);
}
/* TIRN of the XSK TIR of channel ix. Valid only when the XSK feature was
 * requested at init time, hence the WARN_ON.
 */
u32 mlx5e_rx_res_get_tirn_xsk(struct mlx5e_rx_res *res, unsigned int ix)
{
	WARN_ON(!(res->features & MLX5E_RX_RES_FEATURE_XSK));

	return mlx5e_tir_get_tirn(&res->channels[ix].xsk_tir);
}
/* TIRN of the indirect (RSS) TIR for the given traffic type. */
u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5e_traffic_types tt)
{
	return mlx5e_tir_get_tirn(&res->rss[tt].indir_tir);
}
/* TIRN of the inner indirect TIR for the given traffic type. Valid only
 * when inner flow-table support was requested at init time.
 */
u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5e_traffic_types tt)
{
	WARN_ON(!(res->features & MLX5E_RX_RES_FEATURE_INNER_FT));

	return mlx5e_tir_get_tirn(&res->rss[tt].inner_indir_tir);
}
/* TIRN of the PTP TIR. Valid only when the PTP feature was requested. */
u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res)
{
	WARN_ON(!(res->features & MLX5E_RX_RES_FEATURE_PTP));
	return mlx5e_tir_get_tirn(&res->ptp.tir);
}
/* RQTN of the direct RQT of channel ix, for modules creating their own TIRs. */
u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
	return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
}
/* Point the RSS RQT at the saved channel RQNs (rss_rqns/rss_nch) using the
 * current hash function and indirection table. Failure is only warned about,
 * not propagated.
 */
static void mlx5e_rx_res_rss_enable(struct mlx5e_rx_res *res)
{
	int err;

	res->rss_active = true;

	err = mlx5e_rqt_redirect_indir(&res->indir_rqt, res->rss_rqns, res->rss_nch,
				       res->rss_params.hash.hfunc,
				       &res->rss_params.indir);
	if (err)
		mlx5_core_warn(res->mdev, "Failed to redirect indirect RQT %#x to channels: err = %d\n",
			       mlx5e_rqt_get_rqtn(&res->indir_rqt), err);
}
/* Point the RSS RQT at the drop RQ so RSS traffic is discarded while the
 * channels are down. Failure is only warned about, not propagated.
 */
static void mlx5e_rx_res_rss_disable(struct mlx5e_rx_res *res)
{
	int err;

	res->rss_active = false;

	err = mlx5e_rqt_redirect_direct(&res->indir_rqt, res->drop_rqn);
	if (err)
		mlx5_core_warn(res->mdev, "Failed to redirect indirect RQT %#x to drop RQ %#x: err = %d\n",
			       mlx5e_rqt_get_rqtn(&res->indir_rqt), res->drop_rqn, err);
}
/* Connect the RX resources to an open channel set: spread the RSS RQT over
 * the active channels, point each per-channel direct (and XSK) RQT at its
 * channel's RQ, point RQTs of channels beyond the active count at the drop
 * RQ, and connect the PTP RQT when the feature is enabled. Redirect failures
 * are warned about but not propagated.
 */
void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs)
{
	unsigned int nch, ix;
	int err;

	nch = mlx5e_channels_get_num(chs);

	/* Consistently use the channels API (nch) instead of reaching into
	 * chs->num directly - this struct is meant to be decoupled from the
	 * mlx5e_channels internals.
	 */
	for (ix = 0; ix < nch; ix++)
		mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]);
	res->rss_nch = nch;

	mlx5e_rx_res_rss_enable(res);

	for (ix = 0; ix < nch; ix++) {
		u32 rqn;

		mlx5e_channels_get_regular_rqn(chs, ix, &rqn);
		err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
				       rqn, ix, err);

		if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
			continue;

		/* A channel without an active XSK queue drains into the drop RQ. */
		if (!mlx5e_channels_get_xsk_rqn(chs, ix, &rqn))
			rqn = res->drop_rqn;
		err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, rqn);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to RQ %#x (channel %u): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
				       rqn, ix, err);
	}

	/* Channels above the currently active count point at the drop RQ. */
	for (ix = nch; ix < res->max_nch; ix++) {
		err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
				       res->drop_rqn, ix, err);

		if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
			continue;

		err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
				       res->drop_rqn, ix, err);
	}

	if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
		u32 rqn;

		/* No PTP RQ available: fall back to the drop RQ. */
		if (mlx5e_channels_get_ptp_rqn(chs, &rqn))
			rqn = res->drop_rqn;

		err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (PTP): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->ptp.rqt),
				       rqn, err);
	}
}
/* Disconnect the RX resources from the channels: disable RSS and point every
 * per-channel (and PTP) RQT at the drop RQ. Redirect failures are warned
 * about but not propagated.
 */
void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
{
	unsigned int ix;
	int err;

	mlx5e_rx_res_rss_disable(res);

	for (ix = 0; ix < res->max_nch; ix++) {
		err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
				       res->drop_rqn, ix, err);

		if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
			continue;

		err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
				       res->drop_rqn, ix, err);
	}

	if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
		err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (PTP): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->ptp.rqt),
				       res->drop_rqn, err);
	}
}
/* Point channel ix's XSK RQT at its XSK RQ. Returns -EINVAL if the channel
 * has no XSK RQ; the redirect error (if any) is both warned and returned.
 */
int mlx5e_rx_res_xsk_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
			      unsigned int ix)
{
	u32 rqn;
	int err;

	if (!mlx5e_channels_get_xsk_rqn(chs, ix, &rqn))
		return -EINVAL;

	err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, rqn);
	if (err)
		mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to XSK RQ %#x (channel %u): err = %d\n",
			       mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
			       rqn, ix, err);
	return err;
}
/* Point channel ix's XSK RQT back at the drop RQ. The redirect error (if
 * any) is both warned and returned.
 */
int mlx5e_rx_res_xsk_deactivate(struct mlx5e_rx_res *res, unsigned int ix)
{
	int err;

	err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
	if (err)
		mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
			       mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
			       res->drop_rqn, ix, err);
	return err;
}
struct mlx5e_rss_params_traffic_type
mlx5e_rx_res_rss_get_current_tt_config(struct mlx5e_rx_res *res, enum mlx5e_traffic_types tt)
{
......@@ -71,3 +606,216 @@ mlx5e_rx_res_rss_get_current_tt_config(struct mlx5e_rx_res *res, enum mlx5e_traf
rss_tt.rx_hash_fields = res->rss_params.rx_hash_fields[tt];
return rss_tt;
}
/* Rebuild a uniform indirection table over nch channels and, if RSS is
 * currently active, re-apply it to the HW RQT.
 */
void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch)
{
	mlx5e_rss_params_indir_init_uniform(&res->rss_params.indir, nch);

	if (!res->rss_active)
		return;

	mlx5e_rx_res_rss_enable(res);
}
/* Fill the ethtool RXFH query outputs from the software RSS state. Each
 * output pointer may be NULL, in which case that item is skipped.
 */
void mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 *indir, u8 *key, u8 *hfunc)
{
	if (indir) {
		unsigned int idx;

		for (idx = 0; idx < MLX5E_INDIR_RQT_SIZE; idx++)
			indir[idx] = res->rss_params.indir.table[idx];
	}

	if (key)
		memcpy(key, res->rss_params.hash.toeplitz_hash_key,
		       sizeof(res->rss_params.hash.toeplitz_hash_key));

	if (hfunc)
		*hfunc = res->rss_params.hash.hfunc;
}
/* Push the current hash parameters of traffic type tt to HW by modifying the
 * corresponding (inner or outer) indirect TIR.
 */
static int mlx5e_rx_res_rss_update_tir(struct mlx5e_rx_res *res, enum mlx5e_traffic_types tt,
				       bool inner)
{
	struct mlx5e_rss_params_traffic_type rss_tt;
	struct mlx5e_tir_builder *builder;
	struct mlx5e_tir *tir;
	int err;

	/* modify == true: build a MODIFY_TIR command, not a CREATE_TIR one. */
	builder = mlx5e_tir_builder_alloc(true);
	if (!builder)
		return -ENOMEM;

	rss_tt = mlx5e_rx_res_rss_get_current_tt_config(res, tt);

	mlx5e_tir_builder_build_rss(builder, &res->rss_params.hash, &rss_tt, inner);
	tir = inner ? &res->rss[tt].inner_indir_tir : &res->rss[tt].indir_tir;
	err = mlx5e_tir_modify(tir, builder);

	mlx5e_tir_builder_free(builder);
	return err;
}
/* Apply an ethtool RXFH update. Any of indir/key/hfunc may be NULL, meaning
 * "no change". The software state is updated first; HW re-application
 * failures are only warned about, so the function returns 0 unless the
 * requested hfunc is invalid.
 */
int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, const u32 *indir,
			      const u8 *key, const u8 *hfunc)
{
	enum mlx5e_traffic_types tt;
	bool changed_indir = false;
	bool changed_hash = false;
	int err;

	if (hfunc && *hfunc != res->rss_params.hash.hfunc) {
		switch (*hfunc) {
		case ETH_RSS_HASH_XOR:
		case ETH_RSS_HASH_TOP:
			break;
		default:
			return -EINVAL;
		}
		/* A new hash function affects both the TIRs and the RQT. */
		changed_hash = true;
		changed_indir = true;
		res->rss_params.hash.hfunc = *hfunc;
	}

	if (key) {
		/* The key only matters to HW when Toeplitz hashing is used. */
		if (res->rss_params.hash.hfunc == ETH_RSS_HASH_TOP)
			changed_hash = true;
		memcpy(res->rss_params.hash.toeplitz_hash_key, key,
		       sizeof(res->rss_params.hash.toeplitz_hash_key));
	}

	if (indir) {
		unsigned int i;

		changed_indir = true;

		for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
			res->rss_params.indir.table[i] = indir[i];
	}

	/* Re-apply the RQT only while RSS is active; when inactive, the new
	 * table will be picked up on the next mlx5e_rx_res_rss_enable().
	 */
	if (changed_indir && res->rss_active) {
		err = mlx5e_rqt_redirect_indir(&res->indir_rqt, res->rss_rqns, res->rss_nch,
					       res->rss_params.hash.hfunc,
					       &res->rss_params.indir);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect indirect RQT %#x to channels: err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->indir_rqt), err);
	}

	if (changed_hash)
		for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
			err = mlx5e_rx_res_rss_update_tir(res, tt, false);
			if (err)
				mlx5_core_warn(res->mdev, "Failed to update RSS hash of indirect TIR for traffic type %d: err = %d\n",
					       tt, err);

			if (!(res->features & MLX5E_RX_RES_FEATURE_INNER_FT))
				continue;

			err = mlx5e_rx_res_rss_update_tir(res, tt, true);
			if (err)
				mlx5_core_warn(res->mdev, "Failed to update RSS hash of inner indirect TIR for traffic type %d: err = %d\n",
					       tt, err);
		}

	return 0;
}
/* Current RX hash fields selector for the given traffic type. */
u8 mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, enum mlx5e_traffic_types tt)
{
	return res->rss_params.rx_hash_fields[tt];
}
/* Change the RX hash fields of one traffic type and push the change to the
 * outer (and, when supported, inner) indirect TIR. On failure the software
 * state is rolled back; if only the inner update failed, a best-effort
 * revert of the already-updated outer TIR is attempted.
 */
int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, enum mlx5e_traffic_types tt,
				     u8 rx_hash_fields)
{
	u8 old_rx_hash_fields;
	int err;

	old_rx_hash_fields = res->rss_params.rx_hash_fields[tt];

	if (old_rx_hash_fields == rx_hash_fields)
		return 0;

	res->rss_params.rx_hash_fields[tt] = rx_hash_fields;

	err = mlx5e_rx_res_rss_update_tir(res, tt, false);
	if (err) {
		res->rss_params.rx_hash_fields[tt] = old_rx_hash_fields;
		mlx5_core_warn(res->mdev, "Failed to update RSS hash fields of indirect TIR for traffic type %d: err = %d\n",
			       tt, err);
		return err;
	}

	if (!(res->features & MLX5E_RX_RES_FEATURE_INNER_FT))
		return 0;

	err = mlx5e_rx_res_rss_update_tir(res, tt, true);
	if (err) {
		/* Partial update happened. Try to revert - it may fail too, but
		 * there is nothing more we can do.
		 */
		res->rss_params.rx_hash_fields[tt] = old_rx_hash_fields;
		mlx5_core_warn(res->mdev, "Failed to update RSS hash fields of inner indirect TIR for traffic type %d: err = %d\n",
			       tt, err);
		if (mlx5e_rx_res_rss_update_tir(res, tt, false))
			mlx5_core_warn(res->mdev, "Partial update of RSS hash fields happened: failed to revert indirect TIR for traffic type %d to the old values\n",
				       tt);
	}

	return err;
}
/* Apply a new LRO configuration to every TIR: all (inner) indirect TIRs and
 * all per-channel direct TIRs. Best-effort: all TIRs are attempted even if
 * some fail, and the first error encountered is returned.
 */
int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param *lro_param)
{
	struct mlx5e_tir_builder *builder;
	enum mlx5e_traffic_types tt;
	int err, final_err;
	unsigned int ix;

	/* modify == true: build a MODIFY_TIR command, reused for every TIR. */
	builder = mlx5e_tir_builder_alloc(true);
	if (!builder)
		return -ENOMEM;

	mlx5e_tir_builder_build_lro(builder, lro_param);

	final_err = 0;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		err = mlx5e_tir_modify(&res->rss[tt].indir_tir, builder);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to update LRO state of indirect TIR %#x for traffic type %d: err = %d\n",
				       mlx5e_tir_get_tirn(&res->rss[tt].indir_tir), tt, err);
			if (!final_err)
				final_err = err;
		}

		if (!(res->features & MLX5E_RX_RES_FEATURE_INNER_FT))
			continue;

		err = mlx5e_tir_modify(&res->rss[tt].inner_indir_tir, builder);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to update LRO state of inner indirect TIR %#x for traffic type %d: err = %d\n",
				       mlx5e_tir_get_tirn(&res->rss[tt].inner_indir_tir), tt, err);
			if (!final_err)
				final_err = err;
		}
	}

	for (ix = 0; ix < res->max_nch; ix++) {
		err = mlx5e_tir_modify(&res->channels[ix].direct_tir, builder);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to update LRO state of direct TIR %#x for channel %u: err = %d\n",
				       mlx5e_tir_get_tirn(&res->channels[ix].direct_tir), ix, err);
			if (!final_err)
				final_err = err;
		}
	}

	mlx5e_tir_builder_free(builder);
	return final_err;
}
/* Return a copy of the current RSS hash parameters (workaround for hairpin,
 * which builds its own TIRs with the same hash settings).
 */
struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res)
{
	return res->rss_params.hash;
}
......@@ -11,37 +11,59 @@
#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE / 2)
struct mlx5e_rss_params {
struct mlx5e_rss_params_hash hash;
struct mlx5e_rss_params_indir indir;
u32 rx_hash_fields[MLX5E_NUM_INDIR_TIRS];
};
struct mlx5e_rx_res;
struct mlx5e_channels;
struct mlx5e_rss_params_hash;
struct mlx5e_rx_res {
struct mlx5e_rss_params rss_params;
struct mlx5e_rqt indir_rqt;
struct {
struct mlx5e_tir indir_tir;
struct mlx5e_tir inner_indir_tir;
} rss[MLX5E_NUM_INDIR_TIRS];
struct {
struct mlx5e_rqt direct_rqt;
struct mlx5e_tir direct_tir;
struct mlx5e_rqt xsk_rqt;
struct mlx5e_tir xsk_tir;
} channels[MLX5E_MAX_NUM_CHANNELS];
struct {
struct mlx5e_rqt rqt;
struct mlx5e_tir tir;
} ptp;
/* Optional capabilities of mlx5e_rx_res, requested via mlx5e_rx_res_init(). */
enum mlx5e_rx_res_features {
	MLX5E_RX_RES_FEATURE_INNER_FT = BIT(0),	/* create inner indirect TIRs */
	MLX5E_RX_RES_FEATURE_XSK = BIT(1),	/* create per-channel XSK RQTs/TIRs */
	MLX5E_RX_RES_FEATURE_PTP = BIT(2),	/* create the PTP RQT/TIR */
};
struct mlx5e_rss_params_traffic_type
mlx5e_rss_get_default_tt_config(enum mlx5e_traffic_types tt);
/* Setup */
struct mlx5e_rx_res *mlx5e_rx_res_alloc(void);
int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
enum mlx5e_rx_res_features features, unsigned int max_nch,
u32 drop_rqn, const struct mlx5e_lro_param *init_lro_param,
unsigned int init_nch);
void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res);
void mlx5e_rx_res_free(struct mlx5e_rx_res *res);
/* TIRN getters for flow steering */
u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix);
u32 mlx5e_rx_res_get_tirn_xsk(struct mlx5e_rx_res *res, unsigned int ix);
u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5e_traffic_types tt);
u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5e_traffic_types tt);
u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res);
/* RQTN getters for modules that create their own TIRs */
u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix);
/* Activate/deactivate API */
void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs);
void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res);
int mlx5e_rx_res_xsk_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
unsigned int ix);
int mlx5e_rx_res_xsk_deactivate(struct mlx5e_rx_res *res, unsigned int ix);
/* Configuration API */
struct mlx5e_rss_params_traffic_type
mlx5e_rx_res_rss_get_current_tt_config(struct mlx5e_rx_res *res, enum mlx5e_traffic_types tt);
void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch);
void mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 *indir, u8 *key, u8 *hfunc);
int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, const u32 *indir,
const u8 *key, const u8 *hfunc);
u8 mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, enum mlx5e_traffic_types tt);
int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, enum mlx5e_traffic_types tt,
u8 rx_hash_fields);
int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param *lro_param);
/* Workaround for hairpin */
struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res);
#endif /* __MLX5_EN_RX_RES_H__ */
......@@ -122,7 +122,7 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
* any Fill Ring entries at the setup stage.
*/
err = mlx5e_xsk_redirect_rqt_to_channel(priv, priv->channels.c[ix]);
err = mlx5e_rx_res_xsk_activate(priv->rx_res, &priv->channels, ix);
if (unlikely(err))
goto err_deactivate;
......@@ -169,7 +169,7 @@ static int mlx5e_xsk_disable_locked(struct mlx5e_priv *priv, u16 ix)
goto remove_pool;
c = priv->channels.c[ix];
mlx5e_xsk_redirect_rqt_to_drop(priv, ix);
mlx5e_rx_res_xsk_deactivate(priv->rx_res, ix);
mlx5e_deactivate_xsk(c);
mlx5e_close_xsk(c);
......
......@@ -183,59 +183,3 @@ void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
mlx5e_deactivate_rq(&c->xskrq);
/* TX queue is disabled on close. */
}
int mlx5e_xsk_redirect_rqt_to_channel(struct mlx5e_priv *priv, struct mlx5e_channel *c)
{
return mlx5e_rqt_redirect_direct(&priv->rx_res->channels[c->ix].xsk_rqt, c->xskrq.rqn);
}
int mlx5e_xsk_redirect_rqt_to_drop(struct mlx5e_priv *priv, u16 ix)
{
return mlx5e_rqt_redirect_direct(&priv->rx_res->channels[ix].xsk_rqt, priv->drop_rq.rqn);
}
int mlx5e_xsk_redirect_rqts_to_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
int err, i;
if (!priv->xsk.refcnt)
return 0;
for (i = 0; i < chs->num; i++) {
struct mlx5e_channel *c = chs->c[i];
if (!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
continue;
err = mlx5e_xsk_redirect_rqt_to_channel(priv, c);
if (unlikely(err))
goto err_stop;
}
return 0;
err_stop:
for (i--; i >= 0; i--) {
if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state))
continue;
mlx5e_xsk_redirect_rqt_to_drop(priv, i);
}
return err;
}
void mlx5e_xsk_redirect_rqts_to_drop(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
int i;
if (!priv->xsk.refcnt)
return;
for (i = 0; i < chs->num; i++) {
if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state))
continue;
mlx5e_xsk_redirect_rqt_to_drop(priv, i);
}
}
......@@ -17,9 +17,5 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
void mlx5e_close_xsk(struct mlx5e_channel *c);
void mlx5e_activate_xsk(struct mlx5e_channel *c);
void mlx5e_deactivate_xsk(struct mlx5e_channel *c);
int mlx5e_xsk_redirect_rqt_to_channel(struct mlx5e_priv *priv, struct mlx5e_channel *c);
int mlx5e_xsk_redirect_rqt_to_drop(struct mlx5e_priv *priv, u16 ix);
int mlx5e_xsk_redirect_rqts_to_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
void mlx5e_xsk_redirect_rqts_to_drop(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
#endif /* __MLX5_EN_XSK_SETUP_H__ */
......@@ -628,7 +628,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
priv_rx->sw_stats = &priv->tls->sw_stats;
mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
rqtn = mlx5e_rqt_get_rqtn(&priv->rx_res->channels[rxq].direct_rqt);
rqtn = mlx5e_rx_res_get_rqtn_direct(priv->rx_res, rxq);
err = mlx5e_ktls_create_tir(mdev, &priv_rx->tir, rqtn);
if (err)
......
......@@ -208,7 +208,7 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
/* FIXME: Must use mlx5e_ttc_get_default_dest(),
* but can't since TTC default is not setup yet !
*/
dest.tir_num = priv->rx_res->rss[tt].indir_tir.tirn;
dest.tir_num = mlx5e_rx_res_get_tirn_rss(priv->rx_res, tt);
arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, NULL,
&flow_act,
&dest, 1);
......@@ -552,7 +552,7 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
16);
}
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dest.tir_num = priv->rx_res->channels[arfs_rule->rxq].direct_tir.tirn;
dest.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, arfs_rule->rxq);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
......@@ -575,7 +575,7 @@ static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
int err = 0;
dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dst.tir_num = priv->rx_res->channels[rxq].direct_tir.tirn;
dst.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, rxq);
err = mlx5_modify_rule_destination(rule, &dst, NULL);
if (err)
netdev_warn(priv->netdev,
......
......@@ -1172,7 +1172,7 @@ static int mlx5e_set_link_ksettings(struct net_device *netdev,
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv)
{
return sizeof(priv->rx_res->rss_params.hash.toeplitz_hash_key);
return sizeof_field(struct mlx5e_rss_params_hash, toeplitz_hash_key);
}
static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
......@@ -1198,18 +1198,10 @@ int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_rss_params *rss;
rss = &priv->rx_res->rss_params;
if (indir)
memcpy(indir, rss->indir.table, sizeof(rss->indir.table));
if (key)
memcpy(key, rss->hash.toeplitz_hash_key, sizeof(rss->hash.toeplitz_hash_key));
if (hfunc)
*hfunc = rss->hash.hfunc;
mutex_lock(&priv->state_lock);
mlx5e_rx_res_rss_get_rxfh(priv->rx_res, indir, key, hfunc);
mutex_unlock(&priv->state_lock);
return 0;
}
......@@ -1218,58 +1210,13 @@ int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_rss_params *rss;
bool refresh_tirs = false;
bool refresh_rqt = false;
if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
(hfunc != ETH_RSS_HASH_XOR) &&
(hfunc != ETH_RSS_HASH_TOP))
return -EINVAL;
int err;
mutex_lock(&priv->state_lock);
rss = &priv->rx_res->rss_params;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != rss->hash.hfunc) {
rss->hash.hfunc = hfunc;
refresh_rqt = true;
refresh_tirs = true;
}
if (indir) {
memcpy(rss->indir.table, indir, sizeof(rss->indir.table));
refresh_rqt = true;
}
if (key) {
memcpy(rss->hash.toeplitz_hash_key, key, sizeof(rss->hash.toeplitz_hash_key));
refresh_tirs = refresh_tirs || rss->hash.hfunc == ETH_RSS_HASH_TOP;
}
if (refresh_rqt && test_bit(MLX5E_STATE_OPENED, &priv->state)) {
u32 *rqns;
rqns = kvmalloc_array(priv->channels.num, sizeof(*rqns), GFP_KERNEL);
if (rqns) {
unsigned int ix;
for (ix = 0; ix < priv->channels.num; ix++)
rqns[ix] = priv->channels.c[ix]->rq.rqn;
mlx5e_rqt_redirect_indir(&priv->rx_res->indir_rqt, rqns,
priv->channels.num,
rss->hash.hfunc, &rss->indir);
kvfree(rqns);
}
}
if (refresh_tirs)
mlx5e_modify_tirs_hash(priv);
err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, indir, key,
hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc);
mutex_unlock(&priv->state_lock);
return 0;
return err;
}
#define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC 100
......
......@@ -1320,7 +1320,7 @@ static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv,
struct ttc_params *ttc_params)
{
ttc_params->any_tt_tirn = priv->rx_res->channels[0].direct_tir.tirn;
ttc_params->any_tt_tirn = mlx5e_rx_res_get_tirn_direct(priv->rx_res, 0);
ttc_params->inner_ttc = &priv->fs.inner_ttc;
}
......@@ -1786,7 +1786,8 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
if (mlx5e_tunnel_inner_ft_supported(priv->mdev)) {
mlx5e_set_inner_ttc_ft_params(&ttc_params);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
ttc_params.indir_tirn[tt] = priv->rx_res->rss[tt].inner_indir_tir.tirn;
ttc_params.indir_tirn[tt] =
mlx5e_rx_res_get_tirn_rss_inner(priv->rx_res, tt);
err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
if (err) {
......@@ -1798,7 +1799,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
mlx5e_set_ttc_ft_params(&ttc_params);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
ttc_params.indir_tirn[tt] = priv->rx_res->rss[tt].indir_tir.tirn;
ttc_params.indir_tirn[tt] = mlx5e_rx_res_get_tirn_rss(priv->rx_res, tt);
err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
if (err) {
......
......@@ -433,9 +433,9 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
if (group == MLX5E_RQ_GROUP_XSK)
dst->tir_num = priv->rx_res->channels[ix].xsk_tir.tirn;
dst->tir_num = mlx5e_rx_res_get_tirn_xsk(priv->rx_res, ix);
else
dst->tir_num = priv->rx_res->channels[ix].direct_tir.tirn;
dst->tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, ix);
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
}
......@@ -819,6 +819,7 @@ static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
{
enum mlx5e_traffic_types tt;
u8 rx_hash_field = 0;
int err;
tt = flow_type_to_traffic_type(nfc->flow_type);
if (tt == MLX5E_NUM_INDIR_TIRS)
......@@ -848,16 +849,10 @@ static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_DPORT;
mutex_lock(&priv->state_lock);
if (rx_hash_field == priv->rx_res->rss_params.rx_hash_fields[tt])
goto out;
priv->rx_res->rss_params.rx_hash_fields[tt] = rx_hash_field;
mlx5e_modify_tirs_hash(priv);
out:
err = mlx5e_rx_res_rss_set_hash_fields(priv->rx_res, tt, rx_hash_field);
mutex_unlock(&priv->state_lock);
return 0;
return err;
}
static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
......@@ -870,7 +865,7 @@ static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
if (tt == MLX5E_NUM_INDIR_TIRS)
return -EINVAL;
hash_field = priv->rx_res->rss_params.rx_hash_fields[tt];
hash_field = mlx5e_rx_res_rss_get_hash_fields(priv->rx_res, tt);
nfc->data = 0;
if (hash_field & MLX5_HASH_FIELD_SEL_SRC_IP)
......
......@@ -2194,202 +2194,14 @@ void mlx5e_close_channels(struct mlx5e_channels *chs)
chs->num = 0;
}
/* Create the indirect RQT backing the RSS indirection table.
 * It is initialized to point at the drop RQ; real RQs are redirected into it
 * later, when channels are activated.
 */
int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
{
	int err;

	/* Third argument selects the indirect variant of the RQT — presumably
	 * sized for the RSS indirection table; confirm in mlx5e_rqt_init_direct. */
	err = mlx5e_rqt_init_direct(&priv->rx_res->indir_rqt, priv->mdev, true,
				    priv->drop_rq.rqn);
	if (err)
		mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
	return err;
}
/* Create one direct (single-RQ) RQT per channel slot (priv->max_nch), each
 * initially targeting the drop RQ. On failure, RQTs created so far are
 * destroyed before returning the error.
 */
int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
{
	int err;
	int ix;

	for (ix = 0; ix < priv->max_nch; ix++) {
		err = mlx5e_rqt_init_direct(&priv->rx_res->channels[ix].direct_rqt,
					    priv->mdev, false, priv->drop_rq.rqn);
		if (unlikely(err))
			goto err_destroy_rqts;
	}

	return 0;

err_destroy_rqts:
	mlx5_core_warn(priv->mdev, "create direct rqts failed, %d\n", err);
	/* Unwind only the RQTs that were successfully created ([0, ix)). */
	while (--ix >= 0)
		mlx5e_rqt_destroy(&priv->rx_res->channels[ix].direct_rqt);

	return err;
}
/* Create one direct RQT per channel slot for XSK (AF_XDP) queues, each
 * initially targeting the drop RQ. Mirrors mlx5e_create_direct_rqts, but for
 * the xsk_rqt of each channel. Unwinds on failure.
 */
static int mlx5e_create_xsk_rqts(struct mlx5e_priv *priv)
{
	int err;
	int ix;

	for (ix = 0; ix < priv->max_nch; ix++) {
		err = mlx5e_rqt_init_direct(&priv->rx_res->channels[ix].xsk_rqt,
					    priv->mdev, false, priv->drop_rq.rqn);
		if (unlikely(err))
			goto err_destroy_rqts;
	}

	return 0;

err_destroy_rqts:
	mlx5_core_warn(priv->mdev, "create xsk rqts failed, %d\n", err);
	/* Destroy only the RQTs created before the failure. */
	while (--ix >= 0)
		mlx5e_rqt_destroy(&priv->rx_res->channels[ix].xsk_rqt);

	return err;
}
/* Tear down the per-channel direct RQTs created by mlx5e_create_direct_rqts. */
void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv)
{
	unsigned int i;

	for (i = 0; i < priv->max_nch; i++)
		mlx5e_rqt_destroy(&priv->rx_res->channels[i].direct_rqt);
}
/* Tear down the per-channel XSK RQTs created by mlx5e_create_xsk_rqts. */
static void mlx5e_destroy_xsk_rqts(struct mlx5e_priv *priv)
{
	unsigned int i;

	for (i = 0; i < priv->max_nch; i++)
		mlx5e_rqt_destroy(&priv->rx_res->channels[i].xsk_rqt);
}
/* Point all RX RQTs at the RQs of the freshly opened channels:
 *  - the indirect RQT is filled from the channels' RQNs using the current
 *    RSS hash function and indirection table;
 *  - each direct RQT targets its channel's RQ, or the drop RQ for channel
 *    slots beyond chs->num;
 *  - the PTP RQT targets the PTP RQ if available, else the drop RQ.
 */
static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
					    struct mlx5e_channels *chs)
{
	struct mlx5e_rx_res *res = priv->rx_res;
	unsigned int ix;
	u32 *rqns;

	rqns = kvmalloc_array(chs->num, sizeof(*rqns), GFP_KERNEL);
	if (rqns) {
		for (ix = 0; ix < chs->num; ix++)
			rqns[ix] = chs->c[ix]->rq.rqn;

		mlx5e_rqt_redirect_indir(&res->indir_rqt, rqns, chs->num,
					 res->rss_params.hash.hfunc,
					 &res->rss_params.indir);
		kvfree(rqns);
	}
	/* NOTE: on allocation failure the indirect redirect is silently
	 * skipped; direct and PTP RQTs are still updated below. */

	for (ix = 0; ix < priv->max_nch; ix++) {
		/* Default to the drop RQ for unused channel slots. */
		u32 rqn = priv->drop_rq.rqn;

		if (ix < chs->num)
			rqn = chs->c[ix]->rq.rqn;

		mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn);
	}

	if (priv->profile->rx_ptp_support) {
		u32 rqn;

		/* Non-zero return means no PTP RQ — fall back to drop. */
		if (mlx5e_ptp_get_rqn(priv->channels.ptp, &rqn))
			rqn = priv->drop_rq.rqn;

		mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn);
	}
}
/* Point every RX RQT (indirect, all per-channel direct, and PTP if supported)
 * at the drop RQ — used when channels are being deactivated so that in-flight
 * traffic has a valid, harmless destination.
 */
static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
{
	struct mlx5e_rx_res *res = priv->rx_res;
	unsigned int ix;

	mlx5e_rqt_redirect_direct(&res->indir_rqt, priv->drop_rq.rqn);

	for (ix = 0; ix < priv->max_nch; ix++)
		mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, priv->drop_rq.rqn);

	if (priv->profile->rx_ptp_support)
		mlx5e_rqt_redirect_direct(&res->ptp.rqt, priv->drop_rq.rqn);
}
/* Re-apply the current RSS hash configuration to all indirect TIRs.
 * First pass updates the outer TIRs; if inner TIRs were allocated (detected
 * via the rss[0].inner_indir_tir.tirn sentinel), a second pass updates them
 * with the inner-header variant of the RSS config.
 *
 * Returns 0 on success, -ENOMEM if the TIR builder cannot be allocated.
 * Individual mlx5e_tir_modify failures are not propagated (best-effort),
 * matching the existing callers' expectations.
 */
int mlx5e_modify_tirs_hash(struct mlx5e_priv *priv)
{
	struct mlx5e_rss_params_hash *rss_hash = &priv->rx_res->rss_params.hash;
	struct mlx5e_rss_params_traffic_type rss_tt;
	struct mlx5e_rx_res *res = priv->rx_res;
	struct mlx5e_tir_builder *builder;
	enum mlx5e_traffic_types tt;

	builder = mlx5e_tir_builder_alloc(true);
	if (!builder)
		return -ENOMEM;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		rss_tt = mlx5e_rx_res_rss_get_current_tt_config(res, tt);
		mlx5e_tir_builder_build_rss(builder, rss_hash, &rss_tt, false);
		mlx5e_tir_modify(&res->rss[tt].indir_tir, builder);
		mlx5e_tir_builder_clear(builder);
	}

	/* Verify inner tirs resources allocated */
	if (!res->rss[0].inner_indir_tir.tirn)
		goto out;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		rss_tt = mlx5e_rx_res_rss_get_current_tt_config(res, tt);
		mlx5e_tir_builder_build_rss(builder, rss_hash, &rss_tt, true);
		/* BUGFIX: the inner-header RSS config must be applied to the
		 * inner TIR, not the outer one — the previous code modified
		 * indir_tir here, leaving inner TIRs stale and overwriting the
		 * outer TIRs with the inner configuration. */
		mlx5e_tir_modify(&res->rss[tt].inner_indir_tir, builder);
		mlx5e_tir_builder_clear(builder);
	}

out:
	mlx5e_tir_builder_free(builder);
	return 0;
}
static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
struct mlx5e_rx_res *res = priv->rx_res;
struct mlx5e_tir_builder *builder;
struct mlx5e_lro_param lro_param;
enum mlx5e_traffic_types tt;
int err;
int ix;
builder = mlx5e_tir_builder_alloc(true);
if (!builder)
return -ENOMEM;
lro_param = mlx5e_get_lro_param(&priv->channels.params);
mlx5e_tir_builder_build_lro(builder, &lro_param);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
err = mlx5e_tir_modify(&res->rss[tt].indir_tir, builder);
if (err)
goto err_free_builder;
/* Verify inner tirs resources allocated */
if (!res->rss[0].inner_indir_tir.tirn)
continue;
err = mlx5e_tir_modify(&res->rss[tt].inner_indir_tir, builder);
if (err)
goto err_free_builder;
}
for (ix = 0; ix < priv->max_nch; ix++) {
err = mlx5e_tir_modify(&res->channels[ix].direct_tir, builder);
if (err)
goto err_free_builder;
}
err_free_builder:
mlx5e_tir_builder_free(builder);
return err;
return mlx5e_rx_res_lro_set_param(res, &lro_param);
}
static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_lro);
......@@ -2572,8 +2384,7 @@ int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
/* This function may be called on attach, before priv->rx_res is created. */
if (!netif_is_rxfh_configured(priv->netdev) && priv->rx_res)
mlx5e_rss_params_indir_init_uniform(&priv->rx_res->rss_params.indir,
count);
mlx5e_rx_res_rss_set_indir_uniform(priv->rx_res, count);
return 0;
}
......@@ -2633,18 +2444,14 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
mlx5e_wait_channels_min_rx_wqes(&priv->channels);
if (priv->rx_res) {
mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
mlx5e_xsk_redirect_rqts_to_channels(priv, &priv->channels);
}
if (priv->rx_res)
mlx5e_rx_res_channels_activate(priv->rx_res, &priv->channels);
}
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
if (priv->rx_res) {
mlx5e_xsk_redirect_rqts_to_drop(priv, &priv->channels);
mlx5e_redirect_rqts_to_drop(priv);
}
if (priv->rx_res)
mlx5e_rx_res_channels_deactivate(priv->rx_res);
if (mlx5e_is_vport_rep(priv))
mlx5e_remove_sqs_fwd_rules(priv);
......@@ -3019,194 +2826,6 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
mlx5e_destroy_tises(priv);
}
/* Create one indirect TIR per traffic type, all pointing at the shared
 * indirect RQT, configured with the current LRO and RSS parameters.
 * If @inner_ttc is requested and the HW supports inner flow tables, a second
 * set of TIRs hashing on inner headers is created as well.
 *
 * Error unwinding: a failure in the inner-TIR loop first destroys the inner
 * TIRs created so far, then falls through (with tt reset to
 * MLX5E_NUM_INDIR_TIRS) to destroy ALL outer TIRs.
 */
int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
{
	struct mlx5e_rss_params_hash *rss_hash = &priv->rx_res->rss_params.hash;
	bool inner_ft_support = priv->channels.params.tunneled_offload_en;
	struct mlx5e_rss_params_traffic_type rss_tt;
	struct mlx5e_rx_res *res = priv->rx_res;
	enum mlx5e_traffic_types tt, max_tt;
	struct mlx5e_tir_builder *builder;
	struct mlx5e_lro_param lro_param;
	u32 indir_rqtn;
	int err = 0;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	lro_param = mlx5e_get_lro_param(&priv->channels.params);
	indir_rqtn = mlx5e_rqt_get_rqtn(&res->indir_rqt);

	/* Outer TIRs: RSS over outer packet headers. */
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		mlx5e_tir_builder_build_rqt(builder, priv->mdev->mlx5e_res.hw_objs.td.tdn,
					    indir_rqtn, inner_ft_support);
		mlx5e_tir_builder_build_lro(builder, &lro_param);
		rss_tt = mlx5e_rx_res_rss_get_current_tt_config(res, tt);
		mlx5e_tir_builder_build_rss(builder, rss_hash, &rss_tt, false);

		err = mlx5e_tir_init(&res->rss[tt].indir_tir, builder, priv->mdev, true);
		if (err) {
			mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}

		mlx5e_tir_builder_clear(builder);
	}

	if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
		goto out;

	/* Inner TIRs: RSS over inner (tunneled) packet headers. */
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		mlx5e_tir_builder_build_rqt(builder, priv->mdev->mlx5e_res.hw_objs.td.tdn,
					    indir_rqtn, inner_ft_support);
		mlx5e_tir_builder_build_lro(builder, &lro_param);
		rss_tt = mlx5e_rx_res_rss_get_current_tt_config(res, tt);
		mlx5e_tir_builder_build_rss(builder, rss_hash, &rss_tt, true);

		err = mlx5e_tir_init(&res->rss[tt].inner_indir_tir, builder, priv->mdev, true);
		if (err) {
			mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
			goto err_destroy_inner_tirs;
		}

		mlx5e_tir_builder_clear(builder);
	}

	goto out;

err_destroy_inner_tirs:
	max_tt = tt;
	for (tt = 0; tt < max_tt; tt++)
		mlx5e_tir_destroy(&res->rss[tt].inner_indir_tir);

	/* Fall through to also destroy all outer TIRs. */
	tt = MLX5E_NUM_INDIR_TIRS;
err_destroy_tirs:
	max_tt = tt;
	for (tt = 0; tt < max_tt; tt++)
		mlx5e_tir_destroy(&res->rss[tt].indir_tir);

out:
	mlx5e_tir_builder_free(builder);

	return err;
}
/* Create a single direct (non-RSS) TIR over the given RQT, with the current
 * LRO parameters applied. @builder is cleared before returning so the caller
 * can reuse it for the next TIR.
 */
static int mlx5e_create_direct_tir(struct mlx5e_priv *priv, struct mlx5e_tir *tir,
				   struct mlx5e_tir_builder *builder, struct mlx5e_rqt *rqt)
{
	bool inner_ft_support = priv->channels.params.tunneled_offload_en;
	struct mlx5e_lro_param lro_param;
	int err = 0;

	lro_param = mlx5e_get_lro_param(&priv->channels.params);
	mlx5e_tir_builder_build_rqt(builder, priv->mdev->mlx5e_res.hw_objs.td.tdn,
				    mlx5e_rqt_get_rqtn(rqt), inner_ft_support);
	mlx5e_tir_builder_build_lro(builder, &lro_param);
	mlx5e_tir_builder_build_direct(builder);

	err = mlx5e_tir_init(tir, builder, priv->mdev, true);
	if (unlikely(err))
		mlx5_core_warn(priv->mdev, "create tirs failed, %d\n", err);

	mlx5e_tir_builder_clear(builder);

	return err;
}
/* Create one direct TIR per channel slot, each over that channel's direct
 * RQT. TIRs created before a failure are destroyed on the error path.
 */
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
{
	struct mlx5e_rx_res *res = priv->rx_res;
	struct mlx5e_tir_builder *builder;
	int err = 0;
	int ix;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	for (ix = 0; ix < priv->max_nch; ix++) {
		err = mlx5e_create_direct_tir(priv, &res->channels[ix].direct_tir,
					      builder, &res->channels[ix].direct_rqt);
		if (err)
			goto err_destroy_tirs;
	}

	goto out;

err_destroy_tirs:
	/* Unwind the TIRs created in [0, ix). */
	while (--ix >= 0)
		mlx5e_tir_destroy(&res->channels[ix].direct_tir);

out:
	mlx5e_tir_builder_free(builder);

	return err;
}
/* Create one direct TIR per channel slot for XSK (AF_XDP), each over that
 * channel's xsk_rqt. Mirrors mlx5e_create_direct_tirs; unwinds on failure.
 */
static int mlx5e_create_xsk_tirs(struct mlx5e_priv *priv)
{
	struct mlx5e_rx_res *res = priv->rx_res;
	struct mlx5e_tir_builder *builder;
	int err;
	int ix;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	for (ix = 0; ix < priv->max_nch; ix++) {
		err = mlx5e_create_direct_tir(priv, &res->channels[ix].xsk_tir,
					      builder, &res->channels[ix].xsk_rqt);
		if (err)
			goto err_destroy_tirs;
	}

	goto out;

err_destroy_tirs:
	/* Unwind the TIRs created in [0, ix). */
	while (--ix >= 0)
		mlx5e_tir_destroy(&res->channels[ix].xsk_tir);

out:
	mlx5e_tir_builder_free(builder);

	return err;
}
/* Destroy all outer indirect TIRs, and the inner ones too if they were
 * allocated. Allocation of inner TIRs is detected via the
 * rss[0].inner_indir_tir.tirn sentinel (zero means never created).
 */
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
{
	struct mlx5e_rx_res *res = priv->rx_res;
	enum mlx5e_traffic_types tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5e_tir_destroy(&res->rss[tt].indir_tir);

	/* Verify inner tirs resources allocated */
	if (!res->rss[0].inner_indir_tir.tirn)
		return;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5e_tir_destroy(&res->rss[tt].inner_indir_tir);
}
/* Tear down the per-channel direct TIRs created by mlx5e_create_direct_tirs. */
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
{
	unsigned int i;

	for (i = 0; i < priv->max_nch; i++)
		mlx5e_tir_destroy(&priv->rx_res->channels[i].direct_tir);
}
/* Tear down the per-channel XSK TIRs created by mlx5e_create_xsk_tirs. */
static void mlx5e_destroy_xsk_tirs(struct mlx5e_priv *priv)
{
	unsigned int i;

	for (i = 0; i < priv->max_nch; i++)
		mlx5e_tir_destroy(&priv->rx_res->channels[i].xsk_tir);
}
static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
{
int err = 0;
......@@ -4471,20 +4090,6 @@ static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeo
return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}
/* Initialize RSS parameters to their defaults: Toeplitz hash with a random
 * key, a uniform indirection table spread over @num_channels, and the default
 * per-traffic-type hash fields.
 */
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
			    u16 num_channels)
{
	enum mlx5e_traffic_types tt;

	rss_params->hash.hfunc = ETH_RSS_HASH_TOP;
	netdev_rss_key_fill(rss_params->hash.toeplitz_hash_key,
			    sizeof(rss_params->hash.toeplitz_hash_key));
	mlx5e_rss_params_indir_init_uniform(&rss_params->indir, num_channels);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		rss_params->rx_hash_fields[tt] =
			mlx5e_rss_get_default_tt_config(tt).rx_hash_fields;
}
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu)
{
struct mlx5e_params *params = &priv->channels.params;
......@@ -4809,15 +4414,14 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_tir_builder *tir_builder;
enum mlx5e_rx_res_features features;
struct mlx5e_lro_param lro_param;
int err;
priv->rx_res = kvzalloc(sizeof(*priv->rx_res), GFP_KERNEL);
priv->rx_res = mlx5e_rx_res_alloc();
if (!priv->rx_res)
return -ENOMEM;
mlx5e_build_rss_params(&priv->rx_res->rss_params, priv->channels.params.num_channels);
mlx5e_create_q_counters(priv);
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
......@@ -4826,50 +4430,20 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
goto err_destroy_q_counters;
}
err = mlx5e_create_indirect_rqt(priv);
features = MLX5E_RX_RES_FEATURE_XSK | MLX5E_RX_RES_FEATURE_PTP;
if (priv->channels.params.tunneled_offload_en)
features |= MLX5E_RX_RES_FEATURE_INNER_FT;
lro_param = mlx5e_get_lro_param(&priv->channels.params);
err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
priv->max_nch, priv->drop_rq.rqn, &lro_param,
priv->channels.params.num_channels);
if (err)
goto err_close_drop_rq;
err = mlx5e_create_direct_rqts(priv);
if (err)
goto err_destroy_indirect_rqts;
err = mlx5e_create_indirect_tirs(priv, true);
if (err)
goto err_destroy_direct_rqts;
err = mlx5e_create_direct_tirs(priv);
if (err)
goto err_destroy_indirect_tirs;
err = mlx5e_create_xsk_rqts(priv);
if (unlikely(err))
goto err_destroy_direct_tirs;
err = mlx5e_create_xsk_tirs(priv);
if (unlikely(err))
goto err_destroy_xsk_rqts;
err = mlx5e_rqt_init_direct(&priv->rx_res->ptp.rqt, priv->mdev, false,
priv->drop_rq.rqn);
if (err)
goto err_destroy_xsk_tirs;
tir_builder = mlx5e_tir_builder_alloc(false);
if (!tir_builder) {
err = -ENOMEM;
goto err_destroy_ptp_rqt;
}
err = mlx5e_create_direct_tir(priv, &priv->rx_res->ptp.tir, tir_builder,
&priv->rx_res->ptp.rqt);
mlx5e_tir_builder_free(tir_builder);
if (err)
goto err_destroy_ptp_rqt;
err = mlx5e_create_flow_steering(priv);
if (err) {
mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
goto err_destroy_ptp_direct_tir;
goto err_destroy_rx_res;
}
err = mlx5e_tc_nic_init(priv);
......@@ -4890,27 +4464,13 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
mlx5e_tc_nic_cleanup(priv);
err_destroy_flow_steering:
mlx5e_destroy_flow_steering(priv);
err_destroy_ptp_direct_tir:
mlx5e_tir_destroy(&priv->rx_res->ptp.tir);
err_destroy_ptp_rqt:
mlx5e_rqt_destroy(&priv->rx_res->ptp.rqt);
err_destroy_xsk_tirs:
mlx5e_destroy_xsk_tirs(priv);
err_destroy_xsk_rqts:
mlx5e_destroy_xsk_rqts(priv);
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
mlx5e_rqt_destroy(&priv->rx_res->indir_rqt);
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
mlx5e_destroy_q_counters(priv);
kvfree(priv->rx_res);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
return err;
}
......@@ -4920,17 +4480,10 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
mlx5e_accel_cleanup_rx(priv);
mlx5e_tc_nic_cleanup(priv);
mlx5e_destroy_flow_steering(priv);
mlx5e_tir_destroy(&priv->rx_res->ptp.tir);
mlx5e_rqt_destroy(&priv->rx_res->ptp.rqt);
mlx5e_destroy_xsk_tirs(priv);
mlx5e_destroy_xsk_rqts(priv);
mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_indirect_tirs(priv);
mlx5e_destroy_direct_rqts(priv);
mlx5e_rqt_destroy(&priv->rx_res->indir_rqt);
mlx5e_rx_res_destroy(priv->rx_res);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_destroy_q_counters(priv);
kvfree(priv->rx_res);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
}
......
......@@ -655,7 +655,7 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
MLX5_FLOW_NAMESPACE_KERNEL);
/* The inner_ttc in the ttc params is intentionally not set */
ttc_params.any_tt_tirn = res->channels[0].direct_tir.tirn;
ttc_params.any_tt_tirn = mlx5e_rx_res_get_tirn_direct(res, 0);
mlx5e_set_ttc_ft_params(&ttc_params);
if (rep->vport != MLX5_VPORT_UPLINK)
......@@ -663,7 +663,7 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
ttc_params.indir_tirn[tt] = res->rss[tt].indir_tir.tirn;
ttc_params.indir_tirn[tt] = mlx5e_rx_res_get_tirn_rss(res, tt);
err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
if (err) {
......@@ -758,14 +758,13 @@ int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup)
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_lro_param lro_param;
int err;
priv->rx_res = kvzalloc(sizeof(*priv->rx_res), GFP_KERNEL);
priv->rx_res = mlx5e_rx_res_alloc();
if (!priv->rx_res)
return -ENOMEM;
mlx5e_build_rss_params(&priv->rx_res->rss_params, priv->channels.params.num_channels);
mlx5e_init_l2_addr(priv);
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
......@@ -774,25 +773,16 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
return err;
}
err = mlx5e_create_indirect_rqt(priv);
lro_param = mlx5e_get_lro_param(&priv->channels.params);
err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
priv->max_nch, priv->drop_rq.rqn, &lro_param,
priv->channels.params.num_channels);
if (err)
goto err_close_drop_rq;
err = mlx5e_create_direct_rqts(priv);
if (err)
goto err_destroy_indirect_rqts;
err = mlx5e_create_indirect_tirs(priv, false);
if (err)
goto err_destroy_direct_rqts;
err = mlx5e_create_direct_tirs(priv);
if (err)
goto err_destroy_indirect_tirs;
err = mlx5e_create_rep_ttc_table(priv);
if (err)
goto err_destroy_direct_tirs;
goto err_destroy_rx_res;
err = mlx5e_create_rep_root_ft(priv);
if (err)
......@@ -810,17 +800,11 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
mlx5e_rqt_destroy(&priv->rx_res->indir_rqt);
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
kvfree(priv->rx_res);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
return err;
}
......@@ -831,12 +815,9 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
rep_vport_rx_rule_destroy(priv);
mlx5e_destroy_rep_root_ft(priv);
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_indirect_tirs(priv);
mlx5e_destroy_direct_rqts(priv);
mlx5e_rqt_destroy(&priv->rx_res->indir_rqt);
mlx5e_rx_res_destroy(priv->rx_res);
mlx5e_close_drop_rq(&priv->drop_rq);
kvfree(priv->rx_res);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
}
......
......@@ -527,7 +527,8 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
mlx5e_rss_params_indir_init_uniform(indir, hp->num_channels);
err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels,
priv->rx_res->rss_params.hash.hfunc, indir);
mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc,
indir);
kvfree(indir);
return err;
......@@ -536,7 +537,7 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
struct mlx5e_priv *priv = hp->func_priv;
struct mlx5e_rss_params_hash *rss_hash;
struct mlx5e_rss_params_hash rss_hash;
enum mlx5e_traffic_types tt, max_tt;
struct mlx5e_tir_builder *builder;
int err = 0;
......@@ -545,7 +546,7 @@ static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
if (!builder)
return -ENOMEM;
rss_hash = &priv->rx_res->rss_params.hash;
rss_hash = mlx5e_rx_res_get_current_hash(priv->rx_res);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
struct mlx5e_rss_params_traffic_type rss_tt;
......@@ -555,7 +556,7 @@ static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
mlx5e_tir_builder_build_rqt(builder, hp->tdn,
mlx5e_rqt_get_rqtn(&hp->indir_rqt),
false);
mlx5e_tir_builder_build_rss(builder, rss_hash, &rss_tt, false);
mlx5e_tir_builder_build_rss(builder, &rss_hash, &rss_tt, false);
err = mlx5e_tir_init(&hp->indir_tir[tt], builder, hp->func_mdev, false);
if (err) {
......
......@@ -333,7 +333,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
mlx5e_set_ttc_basic_params(priv, &ttc_params);
mlx5e_set_ttc_ft_params(&ttc_params);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
ttc_params.indir_tirn[tt] = priv->rx_res->rss[tt].indir_tir.tirn;
ttc_params.indir_tirn[tt] = mlx5e_rx_res_get_tirn_rss(priv->rx_res, tt);
err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
if (err) {
......@@ -359,14 +359,13 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_lro_param lro_param;
int err;
priv->rx_res = kvzalloc(sizeof(*priv->rx_res), GFP_KERNEL);
priv->rx_res = mlx5e_rx_res_alloc();
if (!priv->rx_res)
return -ENOMEM;
mlx5e_build_rss_params(&priv->rx_res->rss_params, priv->channels.params.num_channels);
mlx5e_create_q_counters(priv);
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
......@@ -375,41 +374,26 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
goto err_destroy_q_counters;
}
err = mlx5e_create_indirect_rqt(priv);
lro_param = mlx5e_get_lro_param(&priv->channels.params);
err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
priv->max_nch, priv->drop_rq.rqn, &lro_param,
priv->channels.params.num_channels);
if (err)
goto err_close_drop_rq;
err = mlx5e_create_direct_rqts(priv);
if (err)
goto err_destroy_indirect_rqts;
err = mlx5e_create_indirect_tirs(priv, false);
if (err)
goto err_destroy_direct_rqts;
err = mlx5e_create_direct_tirs(priv);
if (err)
goto err_destroy_indirect_tirs;
err = mlx5i_create_flow_steering(priv);
if (err)
goto err_destroy_direct_tirs;
goto err_destroy_rx_res;
return 0;
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
mlx5e_rqt_destroy(&priv->rx_res->indir_rqt);
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
mlx5e_destroy_q_counters(priv);
kvfree(priv->rx_res);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
return err;
}
......@@ -417,13 +401,10 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
mlx5i_destroy_flow_steering(priv);
mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_indirect_tirs(priv);
mlx5e_destroy_direct_rqts(priv);
mlx5e_rqt_destroy(&priv->rx_res->indir_rqt);
mlx5e_rx_res_destroy(priv->rx_res);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_destroy_q_counters(priv);
kvfree(priv->rx_res);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment