Commit 5c50368f authored by Achiad Shochat, committed by David S. Miller

net/mlx5e: Light-weight netdev open/stop

Create/destroy TIRs, TISs and flow tables upon PCI probe/remove rather
than upon the netdev ndo_open/stop.

Upon ndo_stop(), redirect all RX traffic to the (recently introduced)
"Drop RQ" and then close only the RX/TX rings, leaving the TIRs,
TISs and flow tables alive.
Signed-off-by: Achiad Shochat <achiad@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d9eea403
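
Before the diff itself, a minimal userspace sketch of the mechanism described above may help: RQT entries fall back to the drop RQ whenever the OPENED state bit is clear, so stopping the netdev only has to re-fill the table and tear down the rings. This is a simplified model with made-up names and RQ numbers (model_priv, fill_rqt, STATE_OPENED); it is illustrative only and not part of the patch.

/*
 * Illustrative userspace model (not driver code): pick the live
 * per-channel RQ when the device is opened, otherwise the drop RQ,
 * mirroring the spirit of mlx5e_fill_rqt_rqns() in the diff below.
 */
#include <stdio.h>

#define NUM_CHANNELS 4
#define STATE_OPENED (1u << 0)

struct model_priv {
	unsigned int state;                      /* mimics priv->state        */
	unsigned int channel_rqn[NUM_CHANNELS];  /* live RQ numbers           */
	unsigned int drop_rqn;                   /* RQ that drops all traffic */
};

static void fill_rqt(const struct model_priv *priv, unsigned int *rqt, int sz)
{
	int i;

	for (i = 0; i < sz; i++) {
		int ix = i % NUM_CHANNELS;

		rqt[i] = (priv->state & STATE_OPENED) ?
			 priv->channel_rqn[ix] : priv->drop_rqn;
	}
}

int main(void)
{
	struct model_priv priv = {
		.channel_rqn = { 10, 11, 12, 13 },
		.drop_rqn    = 99,
	};
	unsigned int rqt[8];

	fill_rqt(&priv, rqt, 8);            /* closed: everything -> drop RQ */
	printf("closed: rqt[0]=%u\n", rqt[0]);

	priv.state |= STATE_OPENED;         /* ndo_open sets the state bit   */
	fill_rqt(&priv, rqt, 8);            /* redirect to the live RQs      */
	printf("opened: rqt[0]=%u rqt[5]=%u\n", rqt[0], rqt[5]);
	return 0;
}

Compiled with any C compiler, this prints the drop RQ number (99) while "closed" and the per-channel numbers once the state bit is set — the same switch the real mlx5e_redirect_rqt() performs in place via a MODIFY_RQT command.
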
@@ -1301,14 +1301,18 @@ static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
 			ix = ix % priv->params.num_channels;
 			MLX5_SET(rqtc, rqtc, rq_num[i],
-				 priv->channel[ix]->rq.rqn);
+				 test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+				 priv->channel[ix]->rq.rqn :
+				 priv->drop_rq.rqn);
 		}
 		break;
 	default: /* MLX5E_SINGLE_RQ_RQT */
 		MLX5_SET(rqtc, rqtc, rq_num[0],
-			 priv->channel[0]->rq.rqn);
+			 test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+			 priv->channel[0]->rq.rqn :
+			 priv->drop_rq.rqn);
 		break;
 	}
@@ -1347,19 +1351,95 @@ static int mlx5e_open_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
 	return err;
 }
 
+static int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 *in;
+	void *rqtc;
+	int inlen;
+	int log_sz;
+	int sz;
+	int err;
+
+	log_sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 0 :
+		  priv->params.rx_hash_log_tbl_sz;
+	sz = 1 << log_sz;
+	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
+
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
+	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
+	mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
+	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
+
+	err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen);
+
+	kvfree(in);
+	return err;
+}
+
 static void mlx5e_close_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
 {
 	mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
 }
 
+static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
+{
+	if (!priv->params.lro_en)
+		return;
+
+#define ROUGH_MAX_L2_L3_HDR_SZ 256
+
+	MLX5_SET(tirc, tirc, lro_enable_mask,
+		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
+	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
+		 (priv->params.lro_wqe_sz -
+		  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+	MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
+		 MLX5_CAP_ETH(priv->mdev,
+			      lro_timer_supported_periods[3]));
+}
+
+static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	void *in;
+	void *tirc;
+	int inlen;
+	int err;
+
+	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
+	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
+
+	mlx5e_build_tir_ctx_lro(tirc, priv);
+
+	err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
+
+	kvfree(in);
+	return err;
+}
+
 static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
 {
 	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
 
 	MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
 
-#define ROUGH_MAX_L2_L3_HDR_SZ 256
-
 #define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
 				 MLX5_HASH_FIELD_SEL_DST_IP)
@@ -1372,17 +1452,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
 				 MLX5_HASH_FIELD_SEL_DST_IP   |\
 				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
 
-	if (priv->params.lro_en) {
-		MLX5_SET(tirc, tirc, lro_enable_mask,
-			 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
-			 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
-		MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
-			 (priv->params.lro_wqe_sz -
-			  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
-		MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
-			 MLX5_CAP_ETH(priv->mdev,
-				      lro_timer_supported_periods[3]));
-	}
+	mlx5e_build_tir_ctx_lro(tirc, priv);
 
 	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
@@ -1568,12 +1638,20 @@ static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
 	return 0;
 }
 
+static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
+{
+	mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
+	mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+}
+
 int mlx5e_open_locked(struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	int num_txqs;
 	int err;
 
+	set_bit(MLX5E_STATE_OPENED, &priv->state);
+
 	num_txqs = priv->params.num_channels * priv->params.num_tc;
 	netif_set_real_num_tx_queues(netdev, num_txqs);
 	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
@@ -1582,83 +1660,32 @@ int mlx5e_open_locked(struct net_device *netdev)
 	if (err)
 		return err;
 
-	err = mlx5e_open_tises(priv);
-	if (err) {
-		netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n",
-			   __func__, err);
-		return err;
-	}
-
 	err = mlx5e_open_channels(priv);
 	if (err) {
 		netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
 			   __func__, err);
-		goto err_close_tises;
-	}
-
-	err = mlx5e_open_rqt(priv, MLX5E_INDIRECTION_RQT);
-	if (err) {
-		netdev_err(netdev, "%s: mlx5e_open_rqt(INDIR) failed, %d\n",
-			   __func__, err);
-		goto err_close_channels;
-	}
-
-	err = mlx5e_open_rqt(priv, MLX5E_SINGLE_RQ_RQT);
-	if (err) {
-		netdev_err(netdev, "%s: mlx5e_open_rqt(SINGLE) failed, %d\n",
-			   __func__, err);
-		goto err_close_rqt_indir;
-	}
-
-	err = mlx5e_open_tirs(priv);
-	if (err) {
-		netdev_err(netdev, "%s: mlx5e_open_tir failed, %d\n",
-			   __func__, err);
-		goto err_close_rqt_single;
-	}
-
-	err = mlx5e_open_flow_table(priv);
-	if (err) {
-		netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n",
-			   __func__, err);
-		goto err_close_tirs;
+		return err;
 	}
 
 	err = mlx5e_add_all_vlan_rules(priv);
 	if (err) {
 		netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
 			   __func__, err);
-		goto err_close_flow_table;
+		goto err_close_channels;
 	}
 
 	mlx5e_init_eth_addr(priv);
 
-	set_bit(MLX5E_STATE_OPENED, &priv->state);
-
 	mlx5e_update_carrier(priv);
+	mlx5e_redirect_rqts(priv);
 	mlx5e_set_rx_mode_core(priv);
 
 	schedule_delayed_work(&priv->update_stats_work, 0);
 
 	return 0;
 
-err_close_flow_table:
-	mlx5e_close_flow_table(priv);
-
-err_close_tirs:
-	mlx5e_close_tirs(priv);
-
-err_close_rqt_single:
-	mlx5e_close_rqt(priv, MLX5E_SINGLE_RQ_RQT);
-
-err_close_rqt_indir:
-	mlx5e_close_rqt(priv, MLX5E_INDIRECTION_RQT);
-
 err_close_channels:
 	mlx5e_close_channels(priv);
 
-err_close_tises:
-	mlx5e_close_tises(priv);
-
 	return err;
 }
@@ -1682,13 +1709,9 @@ int mlx5e_close_locked(struct net_device *netdev)
 	mlx5e_set_rx_mode_core(priv);
 	mlx5e_del_all_vlan_rules(priv);
+	mlx5e_redirect_rqts(priv);
 	netif_carrier_off(priv->netdev);
-	mlx5e_close_flow_table(priv);
-	mlx5e_close_tirs(priv);
-	mlx5e_close_rqt(priv, MLX5E_SINGLE_RQ_RQT);
-	mlx5e_close_rqt(priv, MLX5E_INDIRECTION_RQT);
 	mlx5e_close_channels(priv);
-	mlx5e_close_tises(priv);
 
 	return 0;
 }
@@ -1766,6 +1789,8 @@ static int mlx5e_set_features(struct net_device *netdev,
 			mlx5e_close_locked(priv->netdev);
 
 		priv->params.lro_en = !!(features & NETIF_F_LRO);
+		mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP);
+		mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP);
 
 		if (was_opened)
 			err = mlx5e_open_locked(priv->netdev);
@@ -2026,16 +2051,72 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 		goto err_dealloc_transport_domain;
 	}
 
+	err = mlx5e_open_tises(priv);
+	if (err) {
+		mlx5_core_warn(mdev, "open tises failed, %d\n", err);
+		goto err_destroy_mkey;
+	}
+
+	err = mlx5e_open_drop_rq(priv);
+	if (err) {
+		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
+		goto err_close_tises;
+	}
+
+	err = mlx5e_open_rqt(priv, MLX5E_INDIRECTION_RQT);
+	if (err) {
+		mlx5_core_warn(mdev, "open rqt(INDIR) failed, %d\n", err);
+		goto err_close_drop_rq;
+	}
+
+	err = mlx5e_open_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+	if (err) {
+		mlx5_core_warn(mdev, "open rqt(SINGLE) failed, %d\n", err);
+		goto err_close_rqt_indir;
+	}
+
+	err = mlx5e_open_tirs(priv);
+	if (err) {
+		mlx5_core_warn(mdev, "open tirs failed, %d\n", err);
+		goto err_close_rqt_single;
+	}
+
+	err = mlx5e_open_flow_table(priv);
+	if (err) {
+		mlx5_core_warn(mdev, "open flow table failed, %d\n", err);
+		goto err_close_tirs;
+	}
+
+	mlx5e_init_eth_addr(priv);
+
 	err = register_netdev(netdev);
 	if (err) {
 		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
-		goto err_destroy_mkey;
+		goto err_close_flow_table;
 	}
 
 	mlx5e_enable_async_events(priv);
 
 	return priv;
 
+err_close_flow_table:
+	mlx5e_close_flow_table(priv);
+
+err_close_tirs:
+	mlx5e_close_tirs(priv);
+
+err_close_rqt_single:
+	mlx5e_close_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+
+err_close_rqt_indir:
+	mlx5e_close_rqt(priv, MLX5E_INDIRECTION_RQT);
+
+err_close_drop_rq:
+	mlx5e_close_drop_rq(priv);
+
+err_close_tises:
+	mlx5e_close_tises(priv);
+
 err_destroy_mkey:
 	mlx5_core_destroy_mkey(mdev, &priv->mr);
@@ -2060,6 +2141,12 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
 	struct net_device *netdev = priv->netdev;
 
 	unregister_netdev(netdev);
+	mlx5e_close_flow_table(priv);
+	mlx5e_close_tirs(priv);
+	mlx5e_close_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+	mlx5e_close_rqt(priv, MLX5E_INDIRECTION_RQT);
+	mlx5e_close_drop_rq(priv);
+	mlx5e_close_tises(priv);
 	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
 	mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
 	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
...
@@ -387,6 +387,18 @@ int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
 	return err;
 }
 
+int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
+			 int inlen)
+{
+	u32 out[MLX5_ST_SZ_DW(modify_rqt_out)];
+
+	MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
+	MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
+
+	memset(out, 0, sizeof(out));
+	return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
 void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
 {
 	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
...
@@ -65,6 +65,8 @@ int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
 int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
 			 u32 *rqtn);
+int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
+			 int inlen);
 void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
 
 #endif /* __TRANSOBJ_H__ */
@@ -4123,6 +4123,13 @@ struct mlx5_ifc_modify_rqt_out_bits {
 	u8         reserved_1[0x40];
 };
 
+struct mlx5_ifc_rqt_bitmask_bits {
+	u8         reserved[0x20];
+
+	u8         reserved1[0x1f];
+	u8         rqn_list[0x1];
+};
+
 struct mlx5_ifc_modify_rqt_in_bits {
 	u8         opcode[0x10];
 	u8         reserved_0[0x10];
@@ -4135,7 +4142,7 @@ struct mlx5_ifc_modify_rqt_in_bits {
 	u8         reserved_3[0x20];
 
-	u8         modify_bitmask[0x40];
+	struct mlx5_ifc_rqt_bitmask_bits bitmask;
 
 	u8         reserved_4[0x40];
...