Commit 5fe2a6b4 authored by David S. Miller

Merge tag 'mlx5-fixes-2021-08-26' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2021-08-26

This series introduces some fixes to the mlx5 driver.
Please pull and let me know if there is any problem.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d0efb162 6cc64770
@@ -397,7 +397,7 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
 void mlx5_unregister_device(struct mlx5_core_dev *dev)
 {
 	mutex_lock(&mlx5_intf_mutex);
-	dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
+	dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
 	mlx5_rescan_drivers_locked(dev);
 	mutex_unlock(&mlx5_intf_mutex);
 }
@@ -147,7 +147,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
 	mlx5e_rep_queue_neigh_stats_work(priv);
 
 	list_for_each_entry(flow, flow_list, tmp_list) {
-		if (!mlx5e_is_offloaded_flow(flow))
+		if (!mlx5e_is_offloaded_flow(flow) || !flow_flag_test(flow, SLOW))
 			continue;
 		attr = flow->attr;
 		esw_attr = attr->esw_attr;
@@ -188,7 +188,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
 	int err;
 
 	list_for_each_entry(flow, flow_list, tmp_list) {
-		if (!mlx5e_is_offloaded_flow(flow))
+		if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW))
 			continue;
 		attr = flow->attr;
 		esw_attr = attr->esw_attr;
@@ -1338,6 +1338,7 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_
 int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
 {
 	struct mlx5e_priv *out_priv, *route_priv;
+	struct mlx5_devcom *devcom = NULL;
 	struct mlx5_core_dev *route_mdev;
 	struct mlx5_eswitch *esw;
 	u16 vhca_id;
@@ -1349,7 +1350,24 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
 	route_mdev = route_priv->mdev;
 
 	vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
+	if (mlx5_lag_is_active(out_priv->mdev)) {
+		/* In lag case we may get devices from different eswitch instances.
+		 * If we failed to get vport num, it means, mostly, that we on the wrong
+		 * eswitch.
+		 */
+		err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
+		if (err != -ENOENT)
+			return err;
+
+		devcom = out_priv->mdev->priv.devcom;
+		esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+		if (!esw)
+			return -ENODEV;
+	}
+
 	err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
+	if (devcom)
+		mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+
 	return err;
 }
@@ -364,6 +364,7 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
 	dest.vport.num = e->vport;
 	dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
+	dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
 	e->fwd_rule = mlx5_add_flow_rules(e->ft, spec, &flow_act, &dest, 1);
 	if (IS_ERR(e->fwd_rule)) {
 		mlx5_destroy_flow_group(e->fwd_grp);
@@ -277,6 +277,7 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
 	int err;
 
 	ldev->flags &= ~MLX5_LAG_MODE_FLAGS;
+	mlx5_lag_mp_reset(ldev);
 
 	MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
 	err = mlx5_cmd_exec_in(dev0, destroy_lag, in);
@@ -302,6 +302,14 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
 	return NOTIFY_DONE;
 }
 
+void mlx5_lag_mp_reset(struct mlx5_lag *ldev)
+{
+	/* Clear mfi, as it might become stale when a route delete event
+	 * has been missed, see mlx5_lag_fib_route_event().
+	 */
+	ldev->lag_mp.mfi = NULL;
+}
+
 int mlx5_lag_mp_init(struct mlx5_lag *ldev)
 {
 	struct lag_mp *mp = &ldev->lag_mp;
@@ -21,11 +21,13 @@ struct lag_mp {
 #ifdef CONFIG_MLX5_ESWITCH
 
+void mlx5_lag_mp_reset(struct mlx5_lag *ldev);
 int mlx5_lag_mp_init(struct mlx5_lag *ldev);
 void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev);
 
 #else /* CONFIG_MLX5_ESWITCH */
 
+static inline void mlx5_lag_mp_reset(struct mlx5_lag *ldev) {};
 static inline int mlx5_lag_mp_init(struct mlx5_lag *ldev) { return 0; }
 static inline void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev) {}
@@ -846,9 +846,9 @@ dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
 		new_htbl = dr_rule_rehash(rule, nic_rule, cur_htbl,
 					  ste_location, send_ste_list);
 		if (!new_htbl) {
-			mlx5dr_htbl_put(cur_htbl);
 			mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
 				   cur_htbl->chunk_size);
+			mlx5dr_htbl_put(cur_htbl);
 		} else {
 			cur_htbl = new_htbl;
 		}