Commit eaee12f0 authored by Khalid Manaa, committed by Saeed Mahameed

net/mlx5e: Rename TIR lro functions to TIR packet merge functions

This series introduces a new packet merge type, therefore rename the lro
functions to packet merge functions to support the new merge type:
- Generalize + rename mlx5e_build_tir_ctx_lro to
  mlx5e_build_tir_ctx_packet_merge.
- Rename mlx5e_modify_tirs_lro to mlx5e_modify_tirs_packet_merge.
- Rename lro bit in mlx5_ifc_modify_tir_bitmask_bits to packet_merge.
- Rename lro_en in mlx5e_params to packet_merge_type type and combine
  packet_merge params into one struct mlx5e_packet_merge_param.
Signed-off-by: Khalid Manaa <khalidm@nvidia.com>
Signed-off-by: Ben Ben-Ishay <benishay@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 7025329d
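
Before the diff, a condensed sketch of the new configuration surface this patch introduces. The enum, struct, and the mlx5e_rx_res_init() argument list below are taken from the hunks that follow; the wrapper example_init_rx() is a hypothetical caller added here only to show how the RX-resource init paths now pass params->packet_merge directly instead of building a dedicated struct mlx5e_lro_param.

enum packet_merge {
	MLX5E_PACKET_MERGE_NONE,
	MLX5E_PACKET_MERGE_LRO,
	MLX5E_PACKET_MERGE_SHAMPO,
};

struct mlx5e_packet_merge_param {
	enum packet_merge type;
	u32 timeout;
};

/* Hypothetical caller (the name is illustrative): after this patch the
 * RX resource init paths hand the packet merge parameters to the TIR
 * builders straight from params->packet_merge, replacing the old
 * mlx5e_get_lro_param() / struct mlx5e_lro_param plumbing. */
static int example_init_rx(struct mlx5e_priv *priv, enum mlx5e_rx_res_features features)
{
	return mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
				 priv->max_nch, priv->drop_rq.rqn,
				 &priv->channels.params.packet_merge,
				 priv->channels.params.num_channels);
}
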
......@@ -242,6 +242,17 @@ enum mlx5e_priv_flag {
#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))
enum packet_merge {
MLX5E_PACKET_MERGE_NONE,
MLX5E_PACKET_MERGE_LRO,
MLX5E_PACKET_MERGE_SHAMPO,
};
struct mlx5e_packet_merge_param {
enum packet_merge type;
u32 timeout;
};
struct mlx5e_params {
u8 log_sq_size;
u8 rq_wq_type;
......@@ -259,13 +270,12 @@ struct mlx5e_params {
bool tunneled_offload_en;
struct dim_cq_moder rx_cq_moderation;
struct dim_cq_moder tx_cq_moderation;
bool lro_en;
struct mlx5e_packet_merge_param packet_merge;
u8 tx_min_inline_mode;
bool vlan_strip_disable;
bool scatter_fcs_en;
bool rx_dim_enabled;
bool tx_dim_enabled;
u32 packet_merge_timeout;
u32 pflags;
struct bpf_prog *xdp_prog;
struct mlx5e_xsk *xsk;
......
......@@ -87,7 +87,8 @@ bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
u32 linear_frag_sz = max(mlx5e_rx_get_linear_frag_sz(params, xsk),
mlx5e_rx_get_linear_frag_sz(params, NULL));
return !params->lro_en && linear_frag_sz <= PAGE_SIZE;
return params->packet_merge.type == MLX5E_PACKET_MERGE_NONE &&
linear_frag_sz <= PAGE_SIZE;
}
bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
......@@ -164,19 +165,8 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
mlx5e_rx_is_linear_skb(params, xsk) :
mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
return is_linear_skb ? mlx5e_get_linear_rq_headroom(params, xsk) : 0;
}
struct mlx5e_lro_param mlx5e_get_lro_param(struct mlx5e_params *params)
{
struct mlx5e_lro_param lro_param;
lro_param = (struct mlx5e_lro_param) {
.enabled = params->lro_en,
.timeout = params->packet_merge_timeout,
};
return lro_param;
return is_linear_skb || params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO ?
mlx5e_get_linear_rq_headroom(params, xsk) : 0;
}
u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
......@@ -485,10 +475,11 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO;
bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
MLX5_CAP_GEN(mdev, relaxed_ordering_write);
return ro && params->lro_en ?
return ro && lro_en ?
MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
}
......
......@@ -11,11 +11,6 @@ struct mlx5e_xsk_param {
u16 chunk_size;
};
struct mlx5e_lro_param {
bool enabled;
u32 timeout;
};
struct mlx5e_cq_param {
u32 cqc[MLX5_ST_SZ_DW(cqc)];
struct mlx5_wq_param wq;
......@@ -125,7 +120,6 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
struct mlx5e_lro_param mlx5e_get_lro_param(struct mlx5e_params *params);
/* Build queue parameters */
......
......@@ -127,7 +127,7 @@ mlx5e_rss_get_tt_config(struct mlx5e_rss *rss, enum mlx5_traffic_types tt)
static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
enum mlx5_traffic_types tt,
const struct mlx5e_lro_param *init_lro_param,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
bool inner)
{
struct mlx5e_rss_params_traffic_type rss_tt;
......@@ -161,7 +161,7 @@ static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
rqtn = mlx5e_rqt_get_rqtn(&rss->rqt);
mlx5e_tir_builder_build_rqt(builder, rss->mdev->mlx5e_res.hw_objs.td.tdn,
rqtn, rss->inner_ft_support);
mlx5e_tir_builder_build_lro(builder, init_lro_param);
mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
rss_tt = mlx5e_rss_get_tt_config(rss, tt);
mlx5e_tir_builder_build_rss(builder, &rss->hash, &rss_tt, inner);
......@@ -198,14 +198,14 @@ static void mlx5e_rss_destroy_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types
}
static int mlx5e_rss_create_tirs(struct mlx5e_rss *rss,
const struct mlx5e_lro_param *init_lro_param,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
bool inner)
{
enum mlx5_traffic_types tt, max_tt;
int err;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
err = mlx5e_rss_create_tir(rss, tt, init_lro_param, inner);
err = mlx5e_rss_create_tir(rss, tt, init_pkt_merge_param, inner);
if (err)
goto err_destroy_tirs;
}
......@@ -297,7 +297,7 @@ int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
bool inner_ft_support, u32 drop_rqn,
const struct mlx5e_lro_param *init_lro_param)
const struct mlx5e_packet_merge_param *init_pkt_merge_param)
{
int err;
......@@ -305,12 +305,12 @@ int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
if (err)
goto err_out;
err = mlx5e_rss_create_tirs(rss, init_lro_param, false);
err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, false);
if (err)
goto err_destroy_rqt;
if (inner_ft_support) {
err = mlx5e_rss_create_tirs(rss, init_lro_param, true);
err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, true);
if (err)
goto err_destroy_tirs;
}
......@@ -372,7 +372,7 @@ u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
*/
int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
enum mlx5_traffic_types tt,
const struct mlx5e_lro_param *init_lro_param,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
bool inner, u32 *tirn)
{
struct mlx5e_tir *tir;
......@@ -381,7 +381,7 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
if (!tir) { /* TIR doesn't exist, create one */
int err;
err = mlx5e_rss_create_tir(rss, tt, init_lro_param, inner);
err = mlx5e_rss_create_tir(rss, tt, init_pkt_merge_param, inner);
if (err)
return err;
tir = rss_get_tir(rss, tt, inner);
......@@ -419,7 +419,8 @@ void mlx5e_rss_disable(struct mlx5e_rss *rss)
mlx5e_rqt_get_rqtn(&rss->rqt), rss->drop_rqn, err);
}
int mlx5e_rss_lro_set_param(struct mlx5e_rss *rss, struct mlx5e_lro_param *lro_param)
int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
struct mlx5e_packet_merge_param *pkt_merge_param)
{
struct mlx5e_tir_builder *builder;
enum mlx5_traffic_types tt;
......@@ -429,7 +430,7 @@ int mlx5e_rss_lro_set_param(struct mlx5e_rss *rss, struct mlx5e_lro_param *lro_p
if (!builder)
return -ENOMEM;
mlx5e_tir_builder_build_lro(builder, lro_param);
mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);
final_err = 0;
......
......@@ -17,7 +17,7 @@ struct mlx5e_rss *mlx5e_rss_alloc(void);
void mlx5e_rss_free(struct mlx5e_rss *rss);
int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
bool inner_ft_support, u32 drop_rqn,
const struct mlx5e_lro_param *init_lro_param);
const struct mlx5e_packet_merge_param *init_pkt_merge_param);
int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
bool inner_ft_support, u32 drop_rqn);
int mlx5e_rss_cleanup(struct mlx5e_rss *rss);
......@@ -30,13 +30,14 @@ u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
bool inner);
int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
enum mlx5_traffic_types tt,
const struct mlx5e_lro_param *init_lro_param,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
bool inner, u32 *tirn);
void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns);
void mlx5e_rss_disable(struct mlx5e_rss *rss);
int mlx5e_rss_lro_set_param(struct mlx5e_rss *rss, struct mlx5e_lro_param *lro_param);
int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
struct mlx5e_packet_merge_param *pkt_merge_param);
int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc);
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
const u8 *key, const u8 *hfunc,
......
......@@ -34,7 +34,7 @@ struct mlx5e_rx_res {
/* API for rx_res_rss_* */
static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
const struct mlx5e_lro_param *init_lro_param,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
unsigned int init_nch)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
......@@ -49,7 +49,7 @@ static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
return -ENOMEM;
err = mlx5e_rss_init(rss, res->mdev, inner_ft_support, res->drop_rqn,
init_lro_param);
init_pkt_merge_param);
if (err)
goto err_rss_free;
......@@ -275,7 +275,7 @@ struct mlx5e_rx_res *mlx5e_rx_res_alloc(void)
}
static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
const struct mlx5e_lro_param *init_lro_param)
const struct mlx5e_packet_merge_param *init_pkt_merge_param)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
struct mlx5e_tir_builder *builder;
......@@ -306,7 +306,7 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
inner_ft_support);
mlx5e_tir_builder_build_lro(builder, init_lro_param);
mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
mlx5e_tir_builder_build_direct(builder);
err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true);
......@@ -336,7 +336,7 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
inner_ft_support);
mlx5e_tir_builder_build_lro(builder, init_lro_param);
mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
mlx5e_tir_builder_build_direct(builder);
err = mlx5e_tir_init(&res->channels[ix].xsk_tir, builder, res->mdev, true);
......@@ -437,7 +437,7 @@ static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res)
int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
enum mlx5e_rx_res_features features, unsigned int max_nch,
u32 drop_rqn, const struct mlx5e_lro_param *init_lro_param,
u32 drop_rqn, const struct mlx5e_packet_merge_param *init_pkt_merge_param,
unsigned int init_nch)
{
int err;
......@@ -447,11 +447,11 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
res->max_nch = max_nch;
res->drop_rqn = drop_rqn;
err = mlx5e_rx_res_rss_init_def(res, init_lro_param, init_nch);
err = mlx5e_rx_res_rss_init_def(res, init_pkt_merge_param, init_nch);
if (err)
goto err_out;
err = mlx5e_rx_res_channels_init(res, init_lro_param);
err = mlx5e_rx_res_channels_init(res, init_pkt_merge_param);
if (err)
goto err_rss_destroy;
......@@ -645,7 +645,8 @@ int mlx5e_rx_res_xsk_deactivate(struct mlx5e_rx_res *res, unsigned int ix)
return err;
}
int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param *lro_param)
int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
struct mlx5e_packet_merge_param *pkt_merge_param)
{
struct mlx5e_tir_builder *builder;
int err, final_err;
......@@ -655,7 +656,7 @@ int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param
if (!builder)
return -ENOMEM;
mlx5e_tir_builder_build_lro(builder, lro_param);
mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);
final_err = 0;
......@@ -665,7 +666,7 @@ int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param
if (!rss)
continue;
err = mlx5e_rss_lro_set_param(rss, lro_param);
err = mlx5e_rss_packet_merge_set_param(rss, pkt_merge_param);
if (err)
final_err = final_err ? : err;
}
......@@ -673,7 +674,7 @@ int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param
for (ix = 0; ix < res->max_nch; ix++) {
err = mlx5e_tir_modify(&res->channels[ix].direct_tir, builder);
if (err) {
mlx5_core_warn(res->mdev, "Failed to update LRO state of direct TIR %#x for channel %u: err = %d\n",
mlx5_core_warn(res->mdev, "Failed to update packet merge state of direct TIR %#x for channel %u: err = %d\n",
mlx5e_tir_get_tirn(&res->channels[ix].direct_tir), ix, err);
if (!final_err)
final_err = err;
......
......@@ -25,7 +25,7 @@ enum mlx5e_rx_res_features {
struct mlx5e_rx_res *mlx5e_rx_res_alloc(void);
int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
enum mlx5e_rx_res_features features, unsigned int max_nch,
u32 drop_rqn, const struct mlx5e_lro_param *init_lro_param,
u32 drop_rqn, const struct mlx5e_packet_merge_param *init_pkt_merge_param,
unsigned int init_nch);
void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res);
void mlx5e_rx_res_free(struct mlx5e_rx_res *res);
......@@ -57,7 +57,8 @@ int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
u8 mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt,
u8 rx_hash_fields);
int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param *lro_param);
int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
struct mlx5e_packet_merge_param *pkt_merge_param);
int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch);
int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx);
......
......@@ -70,16 +70,16 @@ void mlx5e_tir_builder_build_rqt(struct mlx5e_tir_builder *builder, u32 tdn,
MLX5_SET(tirc, tirc, tunneled_offload_en, inner_ft_support);
}
void mlx5e_tir_builder_build_lro(struct mlx5e_tir_builder *builder,
const struct mlx5e_lro_param *lro_param)
void mlx5e_tir_builder_build_packet_merge(struct mlx5e_tir_builder *builder,
const struct mlx5e_packet_merge_param *pkt_merge_param)
{
void *tirc = mlx5e_tir_builder_get_tirc(builder);
const unsigned int rough_max_l2_l3_hdr_sz = 256;
if (builder->modify)
MLX5_SET(modify_tir_in, builder->in, bitmask.lro, 1);
MLX5_SET(modify_tir_in, builder->in, bitmask.packet_merge, 1);
if (!lro_param->enabled)
if (pkt_merge_param->type == MLX5E_PACKET_MERGE_NONE)
return;
MLX5_SET(tirc, tirc, packet_merge_mask,
......@@ -87,7 +87,7 @@ void mlx5e_tir_builder_build_lro(struct mlx5e_tir_builder *builder,
MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO);
MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
(MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8);
MLX5_SET(tirc, tirc, lro_timeout_period_usecs, lro_param->timeout);
MLX5_SET(tirc, tirc, lro_timeout_period_usecs, pkt_merge_param->timeout);
}
static int mlx5e_hfunc_to_hw(u8 hfunc)
......
......@@ -18,7 +18,7 @@ struct mlx5e_rss_params_traffic_type {
};
struct mlx5e_tir_builder;
struct mlx5e_lro_param;
struct mlx5e_packet_merge_param;
struct mlx5e_tir_builder *mlx5e_tir_builder_alloc(bool modify);
void mlx5e_tir_builder_free(struct mlx5e_tir_builder *builder);
......@@ -27,8 +27,8 @@ void mlx5e_tir_builder_clear(struct mlx5e_tir_builder *builder);
void mlx5e_tir_builder_build_inline(struct mlx5e_tir_builder *builder, u32 tdn, u32 rqn);
void mlx5e_tir_builder_build_rqt(struct mlx5e_tir_builder *builder, u32 tdn,
u32 rqtn, bool inner_ft_support);
void mlx5e_tir_builder_build_lro(struct mlx5e_tir_builder *builder,
const struct mlx5e_lro_param *lro_param);
void mlx5e_tir_builder_build_packet_merge(struct mlx5e_tir_builder *builder,
const struct mlx5e_packet_merge_param *pkt_merge_param);
void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder,
const struct mlx5e_rss_params_hash *rss_hash,
const struct mlx5e_rss_params_traffic_type *rss_tt,
......
......@@ -1952,8 +1952,8 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
return -EOPNOTSUPP;
if (!mlx5e_striding_rq_possible(mdev, &priv->channels.params))
return -EINVAL;
} else if (priv->channels.params.lro_en) {
netdev_warn(netdev, "Can't set legacy RQ with LRO, disable LRO first\n");
} else if (priv->channels.params.packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
netdev_warn(netdev, "Can't set legacy RQ with HW-GRO/LRO, disable them first\n");
return -EINVAL;
}
......
......@@ -411,7 +411,7 @@ static int flow_get_tirn(struct mlx5e_priv *priv,
u32 rss_context, u32 *tirn)
{
if (fs->flow_type & FLOW_RSS) {
struct mlx5e_lro_param lro_param;
struct mlx5e_packet_merge_param pkt_merge_param;
struct mlx5e_rss *rss;
u32 flow_type;
int err;
......@@ -426,8 +426,8 @@ static int flow_get_tirn(struct mlx5e_priv *priv,
if (tt < 0)
return -EINVAL;
lro_param = mlx5e_get_lro_param(&priv->channels.params);
err = mlx5e_rss_obtain_tirn(rss, tt, &lro_param, false, tirn);
pkt_merge_param = priv->channels.params.packet_merge;
err = mlx5e_rss_obtain_tirn(rss, tt, &pkt_merge_param, false, tirn);
if (err)
return err;
eth_rule->rss = rss;
......
......@@ -2222,17 +2222,14 @@ void mlx5e_close_channels(struct mlx5e_channels *chs)
chs->num = 0;
}
static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
static int mlx5e_modify_tirs_packet_merge(struct mlx5e_priv *priv)
{
struct mlx5e_rx_res *res = priv->rx_res;
struct mlx5e_lro_param lro_param;
lro_param = mlx5e_get_lro_param(&priv->channels.params);
return mlx5e_rx_res_lro_set_param(res, &lro_param);
return mlx5e_rx_res_packet_merge_set_param(res, &priv->channels.params.packet_merge);
}
static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_lro);
static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_packet_merge);
static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
struct mlx5e_params *params, u16 mtu)
......@@ -3351,16 +3348,25 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
}
new_params = *cur_params;
new_params.lro_en = enable;
if (enable)
new_params.packet_merge.type = MLX5E_PACKET_MERGE_LRO;
else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_LRO)
new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE;
else
goto out;
if (!(cur_params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO &&
new_params.packet_merge.type == MLX5E_PACKET_MERGE_LRO)) {
if (cur_params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_params, NULL))
reset = false;
}
}
err = mlx5e_safe_switch_params(priv, &new_params,
mlx5e_modify_tirs_lro_ctx, NULL, reset);
mlx5e_modify_tirs_packet_merge_ctx, NULL, reset);
out:
mutex_unlock(&priv->state_lock);
return err;
......@@ -3687,7 +3693,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
goto out;
}
if (params->lro_en)
if (params->packet_merge.type == MLX5E_PACKET_MERGE_LRO)
reset = false;
if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
......@@ -4144,8 +4150,8 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
struct net_device *netdev = priv->netdev;
struct mlx5e_params new_params;
if (priv->channels.params.lro_en) {
netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
if (priv->channels.params.packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
netdev_warn(netdev, "can't set XDP while HW-GRO/LRO is on, disable them first\n");
return -EINVAL;
}
......@@ -4402,9 +4408,10 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
/* No XSK params: checking the availability of striding RQ in general. */
if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
params->lro_en = !slow_pci_heuristic(mdev);
params->packet_merge.type = slow_pci_heuristic(mdev) ?
MLX5E_PACKET_MERGE_NONE : MLX5E_PACKET_MERGE_LRO;
}
params->packet_merge_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
/* CQ moderation params */
rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
......@@ -4693,7 +4700,6 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
enum mlx5e_rx_res_features features;
struct mlx5e_lro_param lro_param;
int err;
priv->rx_res = mlx5e_rx_res_alloc();
......@@ -4711,9 +4717,9 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
features = MLX5E_RX_RES_FEATURE_XSK | MLX5E_RX_RES_FEATURE_PTP;
if (priv->channels.params.tunneled_offload_en)
features |= MLX5E_RX_RES_FEATURE_INNER_FT;
lro_param = mlx5e_get_lro_param(&priv->channels.params);
err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
priv->max_nch, priv->drop_rq.rqn, &lro_param,
priv->max_nch, priv->drop_rq.rqn,
&priv->channels.params.packet_merge,
priv->channels.params.num_channels);
if (err)
goto err_close_drop_rq;
......
......@@ -793,7 +793,6 @@ int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup)
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_lro_param lro_param;
int err;
priv->rx_res = mlx5e_rx_res_alloc();
......@@ -808,9 +807,9 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
return err;
}
lro_param = mlx5e_get_lro_param(&priv->channels.params);
err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
priv->max_nch, priv->drop_rq.rqn, &lro_param,
priv->max_nch, priv->drop_rq.rqn,
&priv->channels.params.packet_merge,
priv->channels.params.num_channels);
if (err)
goto err_close_drop_rq;
......
......@@ -67,7 +67,7 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE;
params->lro_en = false;
params->packet_merge.type = MLX5E_PACKET_MERGE_NONE;
params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
params->tunneled_offload_en = false;
}
......@@ -356,7 +356,6 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_lro_param lro_param;
int err;
priv->rx_res = mlx5e_rx_res_alloc();
......@@ -371,9 +370,9 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
goto err_destroy_q_counters;
}
lro_param = mlx5e_get_lro_param(&priv->channels.params);
err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
priv->max_nch, priv->drop_rq.rqn, &lro_param,
priv->max_nch, priv->drop_rq.rqn,
&priv->channels.params.packet_merge,
priv->channels.params.num_channels);
if (err)
goto err_close_drop_rq;
......
......@@ -6707,7 +6707,7 @@ struct mlx5_ifc_modify_tir_bitmask_bits {
u8 reserved_at_3c[0x1];
u8 hash[0x1];
u8 reserved_at_3e[0x1];
u8 lro[0x1];
u8 packet_merge[0x1];
};
struct mlx5_ifc_modify_tir_out_bits {
......
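
For orientation, the new LRO feature-toggle flow from the set_feature_lro() hunk above, restated as one contiguous snippet. The branch logic is exactly as in the patch; the surrounding declarations (cur_params, new_params, reset, mdev, priv) come from the unchanged function context and this is a sketch, not the full function.

	new_params = *cur_params;

	if (enable)
		new_params.packet_merge.type = MLX5E_PACKET_MERGE_LRO;
	else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_LRO)
		new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE;
	else
		goto out; /* disabling LRO while it is not the active merge type: nothing to change */

	/* A switch from SHAMPO to LRO always keeps reset = true; otherwise a
	 * striding RQ whose linear-SKB decision is unchanged may skip the
	 * channel reset. */
	if (!(cur_params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO &&
	      new_params.packet_merge.type == MLX5E_PACKET_MERGE_LRO)) {
		if (cur_params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
			if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
			    mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_params, NULL))
				reset = false;
		}
	}

	err = mlx5e_safe_switch_params(priv, &new_params,
				       mlx5e_modify_tirs_packet_merge_ctx, NULL, reset);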