Commit c4554fbc authored by Gal Pressman, committed by Saeed Mahameed

net/mlx5e: Remove unused max inline related code

Commit 58d52291 ("net/mlx5e: Support TX packet copy into WQE")
introduced the max inline WQE as an ethtool tunable. One commit later,
that functionality was made dependent on BlueFlame.

Commit 6982ab60 ("net/mlx5e: Xmit, no write combining") removed
BlueFlame support, and with it the max inline WQE.
This patch cleans up the leftovers from the removed feature.
Signed-off-by: Gal Pressman <galp@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 2ccb0a79
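
For context, the value being removed was derived from the device's BlueFlame register: half the register size, minus sizeof(struct mlx5e_tx_wqe), plus the two bytes of inline_hdr_start already counted inside the WQE struct. Below is a minimal standalone sketch of that (now dead) calculation; the helper name is illustrative, the driver's own version is mlx5e_get_max_inline_cap(), removed in the diff that follows, and MLX5_CAP_GEN() and struct mlx5e_tx_wqe come from the mlx5 driver headers.

    /* Sketch of the calculation this patch removes: the max inline size
     * was bounded by half the BlueFlame buffer, so once BlueFlame support
     * is gone the value has nothing left to describe.
     */
    static u16 max_inline_from_bf(struct mlx5_core_dev *mdev) /* illustrative name */
    {
            int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

            return bf_buf_size - sizeof(struct mlx5e_tx_wqe) + 2;
    }
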
@@ -240,7 +240,6 @@ struct mlx5e_params {
        struct net_dim_cq_moder tx_cq_moderation;
        bool lro_en;
        u32 lro_wqe_sz;
-       u16 tx_max_inline;
        u8 tx_min_inline_mode;
        u8 rss_hfunc;
        u8 toeplitz_hash_key[40];
@@ -366,7 +365,6 @@ struct mlx5e_txqsq {
        void __iomem *uar_map;
        struct netdev_queue *txq;
        u32 sqn;
-       u16 max_inline;
        u8 min_inline_mode;
        u16 edge;
        struct device *pdev;
@@ -1017,7 +1015,6 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
                        u16 rxq_index, u32 flow_id);
 #endif

-u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
 int mlx5e_create_tir(struct mlx5_core_dev *mdev,
                      struct mlx5e_tir *tir, u32 *in, int inlen);
 void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
...
@@ -1118,13 +1118,9 @@ static int mlx5e_get_tunable(struct net_device *dev,
                             const struct ethtool_tunable *tuna,
                             void *data)
 {
-       const struct mlx5e_priv *priv = netdev_priv(dev);
-       int err = 0;
+       int err;

        switch (tuna->id) {
-       case ETHTOOL_TX_COPYBREAK:
-               *(u32 *)data = priv->channels.params.tx_max_inline;
-               break;
        case ETHTOOL_PFC_PREVENTION_TOUT:
                err = mlx5e_get_pfc_prevention_tout(dev, data);
                break;
@@ -1141,35 +1137,11 @@ static int mlx5e_set_tunable(struct net_device *dev,
                             const void *data)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
-       struct mlx5_core_dev *mdev = priv->mdev;
-       struct mlx5e_channels new_channels = {};
-       int err = 0;
-       u32 val;
+       int err;

        mutex_lock(&priv->state_lock);

        switch (tuna->id) {
-       case ETHTOOL_TX_COPYBREAK:
-               val = *(u32 *)data;
-               if (val > mlx5e_get_max_inline_cap(mdev)) {
-                       err = -EINVAL;
-                       break;
-               }
-
-               new_channels.params = priv->channels.params;
-               new_channels.params.tx_max_inline = val;
-
-               if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
-                       priv->channels.params = new_channels.params;
-                       break;
-               }
-
-               err = mlx5e_open_channels(priv, &new_channels);
-               if (err)
-                       break;
-
-               mlx5e_switch_priv_channels(priv, &new_channels, NULL);
-               break;
        case ETHTOOL_PFC_PREVENTION_TOUT:
                err = mlx5e_set_pfc_prevention_tout(dev, *(u16 *)data);
                break;
...
@@ -993,7 +993,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
        sq->channel = c;
        sq->txq_ix = txq_ix;
        sq->uar_map = mdev->mlx5e_res.bfreg.map;
-       sq->max_inline = params->tx_max_inline;
        sq->min_inline_mode = params->tx_min_inline_mode;
        if (MLX5_IPSEC_DEV(c->priv->mdev))
                set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
@@ -3882,15 +3881,6 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
        return 0;
 }

-u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
-{
-       int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
-
-       return bf_buf_size -
-              sizeof(struct mlx5e_tx_wqe) +
-              2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
-}
-
 void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
                                    int num_channels)
 {
@@ -4052,7 +4042,6 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
        mlx5e_set_tx_cq_mode_params(params, cq_period_mode);

        /* TX inline */
-       params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
        params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);

        /* RSS */
...
@@ -884,7 +884,6 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
        params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
        mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

-       params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
        params->num_tc = 1;
        params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
...