Commit dc48516e authored by Maor Gottlieb, committed by Saeed Mahameed

net/mlx5: Lag, add support to create definers for LAG

Every definer consists of a flow table with a single hash flow group
holding exactly two flow table entries, one for each device port.
The destination of each entry is the uplink vport, chosen according to
the port state and the hash policy.
Signed-off-by: Maor Gottlieb <maorg@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent e465550b
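
For illustration, the sketch below is not part of the patch; it shows how the helpers added in this commit compose into the structure the message describes: one definer owns one flow table holding a single hash flow group with two entries, one per port. The caller example_setup_one_definer() is hypothetical, error handling is minimal, and MLX5_TT_IPV4_TCP is just one traffic type from lib/fs_ttc.h.

/* Illustrative sketch only -- a hypothetical caller of the helpers
 * added below. Builds and tears down a single outer-header definer
 * for TCP/IPv4 traffic, hashing flows across ports 1 and 2.
 */
static int example_setup_one_definer(struct mlx5_lag *ldev,
				     enum netdev_lag_hash hash)
{
	struct mlx5_lag_definer *def;

	/* Creates the match definer, then a flow table with one hash
	 * flow group and two FTEs, each steering to the uplink vport
	 * of its port (see mlx5_lag_create_port_sel_table() below).
	 */
	def = mlx5_lag_create_definer(ldev, hash, MLX5_TT_IPV4_TCP,
				      false /* outer headers */, 1, 2);
	if (IS_ERR(def))
		return PTR_ERR(def);

	/* Deletes the two rules, the group, the table and the definer. */
	mlx5_lag_destroy_definer(ldev, def);
	return 0;
}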
@@ -588,8 +588,10 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
	if (!(bond_status & 0x3))
		return 0;

	if (lag_upper_info) {
		tracker->tx_type = lag_upper_info->tx_type;
		tracker->hash_type = lag_upper_info->hash_type;
	}

	/* Determine bonding status:
	 * A device is considered bonded if both its physical ports are slaves
...
@@ -33,6 +33,7 @@ struct lag_tracker {
	enum netdev_lag_tx_type tx_type;
	struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS];
	unsigned int is_bonded:1;
	enum netdev_lag_hash hash_type;
};

/* LAG data of a ConnectX card.
...
@@ -4,6 +4,95 @@
#include <linux/netdevice.h>
#include "lag.h"
enum {
	MLX5_LAG_FT_LEVEL_DEFINER,
};

static struct mlx5_flow_group *
mlx5_create_hash_flow_group(struct mlx5_flow_table *ft,
			    struct mlx5_flow_definer *definer)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_definer_id,
		 mlx5_get_match_definer_id(definer));
	MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_MAX_PORTS - 1);
	MLX5_SET(create_flow_group_in, in, group_type,
		 MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT);

	fg = mlx5_create_flow_group(ft, in);
	kvfree(in);
	return fg;
}
static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
					  struct mlx5_lag_definer *lag_definer,
					  u8 port1, u8 port2)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_namespace *ns;
	int err, i;

	ft_attr.max_fte = MLX5_MAX_PORTS;
	ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_PORT_SEL);
	if (!ns) {
		mlx5_core_warn(dev, "Failed to get port selection namespace\n");
		return -EOPNOTSUPP;
	}

	lag_definer->ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(lag_definer->ft)) {
		mlx5_core_warn(dev, "Failed to create port selection table\n");
		return PTR_ERR(lag_definer->ft);
	}

	lag_definer->fg = mlx5_create_hash_flow_group(lag_definer->ft,
						      lag_definer->definer);
	if (IS_ERR(lag_definer->fg)) {
		err = PTR_ERR(lag_definer->fg);
		goto destroy_ft;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	flow_act.flags |= FLOW_ACT_NO_APPEND;
	for (i = 0; i < MLX5_MAX_PORTS; i++) {
		u8 affinity = i == 0 ? port1 : port2;

		dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[affinity - 1].dev,
						  vhca_id);
		lag_definer->rules[i] = mlx5_add_flow_rules(lag_definer->ft,
							    NULL, &flow_act,
							    &dest, 1);
		if (IS_ERR(lag_definer->rules[i])) {
			err = PTR_ERR(lag_definer->rules[i]);
			while (i--)
				mlx5_del_flow_rules(lag_definer->rules[i]);
			goto destroy_fg;
		}
	}

	return 0;

destroy_fg:
	mlx5_destroy_flow_group(lag_definer->fg);
destroy_ft:
	mlx5_destroy_flow_table(lag_definer->ft);
	return err;
}
static int mlx5_lag_set_definer_inner(u32 *match_definer_mask,
				      enum mlx5_traffic_types tt)
{
@@ -186,6 +275,120 @@ static int mlx5_lag_set_definer(u32 *match_definer_mask,
	return format_id;
}
static struct mlx5_lag_definer *
mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
			enum mlx5_traffic_types tt, bool tunnel, u8 port1,
			u8 port2)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_lag_definer *lag_definer;
	u32 *match_definer_mask;
	int format_id, err;

	lag_definer = kzalloc(sizeof(*lag_definer), GFP_KERNEL);
	if (!lag_definer)
		return ERR_PTR(-ENOMEM);

	match_definer_mask = kvzalloc(MLX5_FLD_SZ_BYTES(match_definer,
							match_mask),
				      GFP_KERNEL);
	if (!match_definer_mask) {
		err = -ENOMEM;
		goto free_lag_definer;
	}

	format_id = mlx5_lag_set_definer(match_definer_mask, tt, tunnel, hash);
	lag_definer->definer =
		mlx5_create_match_definer(dev, MLX5_FLOW_NAMESPACE_PORT_SEL,
					  format_id, match_definer_mask);
	if (IS_ERR(lag_definer->definer)) {
		err = PTR_ERR(lag_definer->definer);
		goto free_mask;
	}

	err = mlx5_lag_create_port_sel_table(ldev, lag_definer, port1, port2);
	if (err)
		goto destroy_match_definer;

	kvfree(match_definer_mask);

	return lag_definer;

destroy_match_definer:
	mlx5_destroy_match_definer(dev, lag_definer->definer);
free_mask:
	kvfree(match_definer_mask);
free_lag_definer:
	kfree(lag_definer);
	return ERR_PTR(err);
}
static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev,
				     struct mlx5_lag_definer *lag_definer)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		mlx5_del_flow_rules(lag_definer->rules[i]);
	mlx5_destroy_flow_group(lag_definer->fg);
	mlx5_destroy_flow_table(lag_definer->ft);
	mlx5_destroy_match_definer(dev, lag_definer->definer);
	kfree(lag_definer);
}

static void mlx5_lag_destroy_definers(struct mlx5_lag *ldev)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	int tt;

	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
		if (port_sel->outer.definers[tt])
			mlx5_lag_destroy_definer(ldev,
						 port_sel->outer.definers[tt]);
		if (port_sel->inner.definers[tt])
			mlx5_lag_destroy_definer(ldev,
						 port_sel->inner.definers[tt]);
	}
}
static int mlx5_lag_create_definers(struct mlx5_lag *ldev,
				    enum netdev_lag_hash hash_type,
				    u8 port1, u8 port2)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	struct mlx5_lag_definer *lag_definer;
	int tt, err;

	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
		lag_definer = mlx5_lag_create_definer(ldev, hash_type, tt,
						      false, port1, port2);
		if (IS_ERR(lag_definer)) {
			err = PTR_ERR(lag_definer);
			goto destroy_definers;
		}
		port_sel->outer.definers[tt] = lag_definer;

		if (!port_sel->tunnel)
			continue;

		lag_definer =
			mlx5_lag_create_definer(ldev, hash_type, tt,
						true, port1, port2);
		if (IS_ERR(lag_definer)) {
			err = PTR_ERR(lag_definer);
			goto destroy_definers;
		}
		port_sel->inner.definers[tt] = lag_definer;
	}

	return 0;

destroy_definers:
	mlx5_lag_destroy_definers(ldev);
	return err;
}
static void set_tt_map(struct mlx5_lag_port_sel *port_sel,
		       enum netdev_lag_hash hash)
{
...
@@ -6,9 +6,22 @@
#include "lib/fs_ttc.h"
struct mlx5_lag_definer {
	struct mlx5_flow_definer *definer;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_handle *rules[MLX5_MAX_PORTS];
};

struct mlx5_lag_ttc {
	struct mlx5_lag_definer *definers[MLX5_NUM_TT];
};

struct mlx5_lag_port_sel {
	DECLARE_BITMAP(tt_map, MLX5_NUM_TT);
	bool tunnel;
	struct mlx5_lag_ttc outer;
	struct mlx5_lag_ttc inner;
};

#endif /* __MLX5_LAG_FS_H__ */