Commit 64e4cf0d authored by Saeed Mahameed

Merge branch 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

mlx5-next is a branch shared with the rdma subtree, used to avoid
mlx5 rdma vs. netdev conflicts.

Highlights:
1) LAG refactoring and flow counter affinity bits.
2) mlx5 core cleanups

By Roi Dayan (2) and others
* 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux:
  net/mlx5: Fold the modify lag code into function
  net/mlx5: Add lag affinity info to log
  net/mlx5: Split the activate lag function into two routines
  net/mlx5: E-Switch, Introduce flow counter affinity
  IB/mlx5: Unify e-switch representors load approach between uplink and VFs
  net/mlx5: Use lowercase 'X' for hex values
  net/mlx5: Remove duplicated include from eswitch.c
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parents 522185d5 4c283e61
@@ -48,13 +48,21 @@ static const struct mlx5_ib_profile rep_profile = {
 static int
 mlx5_ib_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
+	struct mlx5_ib_dev *ibdev;
+
+	ibdev = mlx5_ib_rep_to_dev(rep);
+	if (!__mlx5_ib_add(ibdev, ibdev->profile))
+		return -EINVAL;
 	return 0;
 }
 
 static void
 mlx5_ib_nic_rep_unload(struct mlx5_eswitch_rep *rep)
 {
-	rep->rep_if[REP_IB].priv = NULL;
+	struct mlx5_ib_dev *ibdev;
+
+	ibdev = mlx5_ib_rep_to_dev(rep);
+	__mlx5_ib_remove(ibdev, ibdev->profile, MLX5_IB_STAGE_MAX);
 }
 
 static int
@@ -89,6 +97,7 @@ mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
 	dev = mlx5_ib_rep_to_dev(rep);
 	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
 	rep->rep_if[REP_IB].priv = NULL;
+	ib_dealloc_device(&dev->ib_dev);
 }
 
 static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
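The two hunks above complete the unification named in the commit list: the uplink (nic) representor now goes through the same profile-driven add/remove path as the VF representors, and each path frees only the device it allocated. For orientation, here is a hedged sketch of how such per-vport callbacks get wired up; the helper name and iteration bounds are assumptions, while the callback fields follow the mlx5_eswitch_rep_if interface of this kernel generation:

/* Hedged sketch, not the actual mlx5_ib_register_vport_reps() body:
 * register IB rep callbacks for the uplink vport and each VF vport.
 */
static void sketch_register_vport_reps(struct mlx5_ib_dev *dev)
{
	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
	struct mlx5_eswitch_rep_if rep_if = {};
	int vport;

	/* Uplink (vport 0) reuses the PF ibdev, so its load/unload do not
	 * allocate or free it (see mlx5_ib_nic_rep_load/unload above). */
	rep_if.load = mlx5_ib_nic_rep_load;
	rep_if.unload = mlx5_ib_nic_rep_unload;
	rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
	rep_if.priv = dev;
	mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_IB);

	/* Each VF rep allocates its own ibdev in load and frees it in
	 * unload via ib_dealloc_device(), as in the hunk above. */
	rep_if.load = mlx5_ib_vport_rep_load;
	rep_if.unload = mlx5_ib_vport_rep_unload;
	rep_if.priv = NULL;
	for (vport = 1; vport < MLX5_TOTAL_VPORTS(dev->mdev); vport++)
		mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_IB);
}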
@@ -6207,18 +6207,6 @@ static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
 	cancel_delay_drop(dev);
 }
 
-static int mlx5_ib_stage_rep_reg_init(struct mlx5_ib_dev *dev)
-{
-	mlx5_ib_register_vport_reps(dev);
-
-	return 0;
-}
-
-static void mlx5_ib_stage_rep_reg_cleanup(struct mlx5_ib_dev *dev)
-{
-	mlx5_ib_unregister_vport_reps(dev);
-}
-
 static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
 {
 	dev->mdev_events.notifier_call = mlx5_ib_event;
@@ -6257,8 +6245,6 @@ void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
 		if (profile->stage[stage].cleanup)
 			profile->stage[stage].cleanup(dev);
 	}
-
-	ib_dealloc_device((struct ib_device *)dev);
 }
 
 void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
@@ -6392,9 +6378,6 @@ static const struct mlx5_ib_profile nic_rep_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
 		     mlx5_ib_stage_post_ib_reg_umr_init,
 		     NULL),
-	STAGE_CREATE(MLX5_IB_STAGE_REP_REG,
-		     mlx5_ib_stage_rep_reg_init,
-		     mlx5_ib_stage_rep_reg_cleanup),
 };
 
 static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
@@ -6462,8 +6445,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	if (MLX5_ESWITCH_MANAGER(mdev) &&
 	    mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
 		dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
-
-		return __mlx5_ib_add(dev, &nic_rep_profile);
+		dev->profile = &nic_rep_profile;
+		mlx5_ib_register_vport_reps(dev);
+		return dev;
 	}
 
 	return __mlx5_ib_add(dev, &pf_profile);
@@ -6485,7 +6469,12 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
 	}
 
 	dev = context;
-	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
+	if (dev->profile == &nic_rep_profile)
+		mlx5_ib_unregister_vport_reps(dev);
+	else
+		__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
+
+	ib_dealloc_device((struct ib_device *)dev);
 }
 
 static struct mlx5_interface mlx5_ib_interface = {
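Net effect of the main.c hunks: __mlx5_ib_remove() now only unwinds profile stages, and ib_dealloc_device() moved to whoever allocated the device. A condensed sketch of the resulting teardown, assuming the names from the hunks above (the real function also handles multiport slave devices first):

/* Condensed teardown flow after this change; illustrative only. */
static void sketch_mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
	struct mlx5_ib_dev *dev = context;

	if (dev->profile == &nic_rep_profile)
		mlx5_ib_unregister_vport_reps(dev); /* runs the rep unload callbacks */
	else
		__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);

	/* The PF/uplink ibdev is freed here exactly once; per-VF rep
	 * devices were already freed in mlx5_ib_vport_rep_unload(). */
	ib_dealloc_device(&dev->ib_dev);
}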
@@ -790,7 +790,6 @@ enum mlx5_ib_stages {
 	MLX5_IB_STAGE_POST_IB_REG_UMR,
 	MLX5_IB_STAGE_DELAY_DROP,
 	MLX5_IB_STAGE_CLASS_ATTR,
-	MLX5_IB_STAGE_REP_REG,
 	MLX5_IB_STAGE_MAX,
 };
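Dropping MLX5_IB_STAGE_REP_REG from this enum is all the bookkeeping the stage machinery needs, because profiles are walked positionally: init callbacks run forward up to MLX5_IB_STAGE_MAX, and cleanup callbacks run backward from the last completed stage. A minimal sketch of that walk, consistent with the STAGE_CREATE tables in main.c (error handling condensed):

/* Minimal sketch of the profile-stage walk used by mlx5_ib. */
void *sketch___mlx5_ib_add(struct mlx5_ib_dev *dev,
			   const struct mlx5_ib_profile *profile)
{
	int err;
	int i;

	for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
		if (profile->stage[i].init) {
			err = profile->stage[i].init(dev);
			if (err)
				goto err_out;
		}
	}
	dev->profile = profile;
	return dev;

err_out:
	/* Unwind only the stages that completed, in reverse order; note
	 * that the dealloc no longer happens here after this merge. */
	__mlx5_ib_remove(dev, profile, i);
	return NULL;
}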
@@ -39,7 +39,6 @@
 #include "lib/eq.h"
 #include "eswitch.h"
 #include "fs_core.h"
-#include "lib/eq.h"
 
 #define UPLINK_VPORT 0xFFFF
@@ -186,22 +186,57 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
 		*port2 = 1;
 }
 
-static void mlx5_activate_lag(struct mlx5_lag *ldev,
-			      struct lag_tracker *tracker)
+static void mlx5_modify_lag(struct mlx5_lag *ldev,
+			    struct lag_tracker *tracker)
 {
 	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+	u8 v2p_port1, v2p_port2;
 	int err;
 
-	ldev->flags |= MLX5_LAG_FLAG_BONDED;
+	mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1,
+				       &v2p_port2);
+
+	if (v2p_port1 != ldev->v2p_map[0] ||
+	    v2p_port2 != ldev->v2p_map[1]) {
+		ldev->v2p_map[0] = v2p_port1;
+		ldev->v2p_map[1] = v2p_port2;
+
+		mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d",
+			       ldev->v2p_map[0], ldev->v2p_map[1]);
+
+		err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
+		if (err)
+			mlx5_core_err(dev0,
+				      "Failed to modify LAG (%d)\n",
+				      err);
+	}
+}
+
+static int mlx5_create_lag(struct mlx5_lag *ldev,
+			   struct lag_tracker *tracker)
+{
+	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+	int err;
 
 	mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[0],
 				       &ldev->v2p_map[1]);
 
+	mlx5_core_info(dev0, "lag map port 1:%d port 2:%d",
+		       ldev->v2p_map[0], ldev->v2p_map[1]);
+
 	err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[0], ldev->v2p_map[1]);
 	if (err)
 		mlx5_core_err(dev0,
 			      "Failed to create LAG (%d)\n",
 			      err);
+	return err;
+}
+
+static void mlx5_activate_lag(struct mlx5_lag *ldev,
+			      struct lag_tracker *tracker)
+{
+	ldev->flags |= MLX5_LAG_FLAG_BONDED;
+
+	mlx5_create_lag(ldev, tracker);
 }
 
 static void mlx5_deactivate_lag(struct mlx5_lag *ldev)
@@ -223,8 +258,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
 	struct mlx5_core_dev *dev1 = ldev->pf[1].dev;
 	struct lag_tracker tracker;
-	u8 v2p_port1, v2p_port2;
-	int i, err;
+	int i;
 	bool do_bond;
 
 	if (!dev0 || !dev1)
@@ -246,20 +280,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 		mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
 		mlx5_nic_vport_enable_roce(dev1);
 	} else if (do_bond && mlx5_lag_is_bonded(ldev)) {
-		mlx5_infer_tx_affinity_mapping(&tracker, &v2p_port1,
-					       &v2p_port2);
-
-		if ((v2p_port1 != ldev->v2p_map[0]) ||
-		    (v2p_port2 != ldev->v2p_map[1])) {
-			ldev->v2p_map[0] = v2p_port1;
-			ldev->v2p_map[1] = v2p_port2;
-
-			err = mlx5_cmd_modify_lag(dev0, v2p_port1,
-						  v2p_port2);
-			if (err)
-				mlx5_core_err(dev0,
-					      "Failed to modify LAG (%d)\n",
-					      err);
-		}
+		mlx5_modify_lag(ldev, &tracker);
 	} else if (!do_bond && mlx5_lag_is_bonded(ldev)) {
 		mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
 		mlx5_nic_vport_disable_roce(dev1);
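With the lag.c refactor applied, mlx5_do_bond() reduces to a three-way dispatch on bond state, and the port-affinity recomputation lives behind mlx5_modify_lag(), which only issues the firmware modify command when the vport-to-port mapping actually changed. Condensed flow, with the RoCE and protocol-device juggling from the hunk above elided:

/* Condensed control flow of mlx5_do_bond() after the split; the real
 * function also moves the IB device between protocols and toggles RoCE. */
static void sketch_do_bond(struct mlx5_lag *ldev, struct lag_tracker *tracker,
			   bool do_bond)
{
	if (do_bond && !mlx5_lag_is_bonded(ldev))
		mlx5_activate_lag(ldev, tracker);  /* set flag + create LAG */
	else if (do_bond && mlx5_lag_is_bonded(ldev))
		mlx5_modify_lag(ldev, tracker);    /* modify only on change */
	else if (!do_bond && mlx5_lag_is_bonded(ldev))
		mlx5_deactivate_lag(ldev);         /* clear flag + destroy LAG */
}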
@@ -608,13 +608,19 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
 	u8         reserved_at_800[0x7800];
 };
 
+enum {
+	MLX5_COUNTER_SOURCE_ESWITCH = 0x0,
+	MLX5_COUNTER_FLOW_ESWITCH = 0x1,
+};
+
 struct mlx5_ifc_e_switch_cap_bits {
 	u8         vport_svlan_strip[0x1];
 	u8         vport_cvlan_strip[0x1];
 	u8         vport_svlan_insert[0x1];
 	u8         vport_cvlan_insert_if_not_exist[0x1];
 	u8         vport_cvlan_insert_overwrite[0x1];
-	u8         reserved_at_5[0x18];
+	u8         reserved_at_5[0x17];
+	u8         counter_eswitch_affinity[0x1];
 	u8         merged_eswitch[0x1];
 	u8         nic_vport_node_guid_modify[0x1];
 	u8         nic_vport_port_guid_modify[0x1];
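Note the width bookkeeping in the hunk above: mlx5_ifc layouts declare fields by bit width, so the new counter_eswitch_affinity bit is carved out of reserved_at_5 by shrinking it from 0x18 to 0x17 bits, keeping merged_eswitch and everything after it at the same offsets. A consumer would gate on the capability through the usual cap macros; a hedged sketch follows (the actual flow-counter affinity consumer lands in follow-up eswitch patches):

/* Sketch: choose a flow counter affinity based on the new cap bit.
 * The policy below is illustrative; only the enum values and the
 * MLX5_CAP_ESW() accessor are taken as given.
 */
static int sketch_counter_affinity(struct mlx5_core_dev *dev)
{
	if (!MLX5_CAP_ESW(dev, counter_eswitch_affinity))
		return MLX5_COUNTER_SOURCE_ESWITCH; /* only option without the cap */

	return MLX5_COUNTER_FLOW_ESWITCH;
}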
@@ -3865,16 +3871,16 @@ enum {
 };
 
 enum mlx5_monitor_counter_ppcnt {
-	MLX5_QUERY_MONITOR_PPCNT_IN_RANGE_LENGTH_ERRORS = 0X0,
-	MLX5_QUERY_MONITOR_PPCNT_OUT_OF_RANGE_LENGTH_FIELD = 0X1,
-	MLX5_QUERY_MONITOR_PPCNT_FRAME_TOO_LONG_ERRORS = 0X2,
-	MLX5_QUERY_MONITOR_PPCNT_FRAME_CHECK_SEQUENCE_ERRORS = 0X3,
-	MLX5_QUERY_MONITOR_PPCNT_ALIGNMENT_ERRORS = 0X4,
-	MLX5_QUERY_MONITOR_PPCNT_IF_OUT_DISCARDS = 0X5,
+	MLX5_QUERY_MONITOR_PPCNT_IN_RANGE_LENGTH_ERRORS = 0x0,
+	MLX5_QUERY_MONITOR_PPCNT_OUT_OF_RANGE_LENGTH_FIELD = 0x1,
+	MLX5_QUERY_MONITOR_PPCNT_FRAME_TOO_LONG_ERRORS = 0x2,
+	MLX5_QUERY_MONITOR_PPCNT_FRAME_CHECK_SEQUENCE_ERRORS = 0x3,
+	MLX5_QUERY_MONITOR_PPCNT_ALIGNMENT_ERRORS = 0x4,
+	MLX5_QUERY_MONITOR_PPCNT_IF_OUT_DISCARDS = 0x5,
 };
 
 enum {
-	MLX5_QUERY_MONITOR_Q_COUNTER_RX_OUT_OF_BUFFER = 0X4,
+	MLX5_QUERY_MONITOR_Q_COUNTER_RX_OUT_OF_BUFFER = 0x4,
 };
 
 struct mlx5_ifc_monitor_counter_output_bits {
@@ -4780,7 +4786,7 @@ enum {
 	MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
 	MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
 	MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
-	MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0X3,
+	MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
 };
 
 struct mlx5_ifc_query_flow_group_out_bits {