Commit 487884eb authored by David S. Miller

Merge branch 'mlx5-ntuple-steering'

Saeed Mahameed says:

====================
Mellanox 100G mlx5 ethtool ntuple steering

This series adds ethtool ntuple steering ('ethtool -N|-U') to the mlx5 Ethernet
driver and exposes two more counter sets in the ethtool statistics: RDMA vport
and global flow control statistics.

The series starts with three refactoring patches of the flow steering
infrastructure:
    - mlx5_add_flow_rule() now receives a struct mlx5_flow_spec, which
      simplifies the API and reduces the number of parameters
    - All low-level steering objects are now wrapped in the
      mlx5_flow_steering structure for better encapsulation
    - Flow steering objects are now removed properly and generically,
      rather than by traversing a well-known tree of steering objects

Patch #4 adds the infrastructure and data structures needed for ethtool
ntuple steering, all implemented in a new file, 'en_fs_ethtool.c', and adds
support for the set_rxnfc ethtool callback to add/remove/replace flow specs
of ether type (L2).

Patch #5 adds support for L3/L4 flow specs, giving L3/L4 rules a higher
priority than L2 rules when the two are interleaved.

Patch #6 adds support for the get_rxnfc ethtool callback.

Patches #7 and #8 add the RDMA vport and global flow control statistics.

Applied on top: 8186f6e3 ('net-next: mediatek: fix compile error inside mtk_poll_controller()')
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 019d0c99 e989d5a5
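
For reference, the core API change introduced by the refactoring patches,
which every hunk below follows, moves the match buffers and the
criteria-enable bits into a single struct mlx5_flow_spec. A minimal
before/after sketch (using only the fields visible in this diff):

    /* before: match state passed as three separate parameters */
    rule = mlx5_add_flow_rule(ft, match_criteria_enable,
                              match_c, match_v,
                              action, flow_tag, &dest);

    /* after: one vzalloc'ed mlx5_flow_spec carries all match state */
    struct mlx5_flow_spec *spec = mlx5_vzalloc(sizeof(*spec));

    spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
    /* ... fill spec->match_criteria and spec->match_value ... */
    rule = mlx5_add_flow_rule(ft, spec, action, flow_tag, &dest);
    kvfree(spec);

On the user-facing side, the new set_rxnfc/get_rxnfc callbacks back the
standard ethtool ntuple commands, e.g. (interface name hypothetical):

    ethtool -N eth0 flow-type tcp4 dst-port 80 action 2 loc 0   # insert rule
    ethtool -n eth0                                             # dump rules
    ethtool -N eth0 delete 0                                    # remove rule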
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
@@ -1528,21 +1528,18 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
 {
     struct mlx5_flow_table *ft = ft_prio->flow_table;
     struct mlx5_ib_flow_handler *handler;
+    struct mlx5_flow_spec *spec;
     void *ib_flow = flow_attr + 1;
-    u8 match_criteria_enable = 0;
     unsigned int spec_index;
-    u32 *match_c;
-    u32 *match_v;
     u32 action;
     int err = 0;
 
     if (!is_valid_attr(flow_attr))
         return ERR_PTR(-EINVAL);
 
-    match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-    match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+    spec = mlx5_vzalloc(sizeof(*spec));
     handler = kzalloc(sizeof(*handler), GFP_KERNEL);
-    if (!handler || !match_c || !match_v) {
+    if (!handler || !spec) {
         err = -ENOMEM;
         goto free;
     }
@@ -1550,7 +1547,8 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
     INIT_LIST_HEAD(&handler->list);
 
     for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
-        err = parse_flow_attr(match_c, match_v, ib_flow);
+        err = parse_flow_attr(spec->match_criteria,
+                              spec->match_value, ib_flow);
         if (err < 0)
             goto free;
 
@@ -1558,11 +1556,11 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
     }
 
     /* Outer header support only */
-    match_criteria_enable = (!outer_header_zero(match_c)) << 0;
+    spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria))
+        << 0;
     action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
         MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
-    handler->rule = mlx5_add_flow_rule(ft, match_criteria_enable,
-                                       match_c, match_v,
+    handler->rule = mlx5_add_flow_rule(ft, spec,
                                        action,
                                        MLX5_FS_DEFAULT_FLOW_TAG,
                                        dst);
@@ -1578,8 +1576,7 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
 free:
     if (err)
         kfree(handler);
-    kfree(match_c);
-    kfree(match_v);
+    kvfree(spec);
     return err ? ERR_PTR(err) : handler;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -8,6 +8,6 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
 mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
         en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \
         en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \
-        en_tc.o en_arfs.o en_rep.o
+        en_tc.o en_arfs.o en_rep.o en_fs_ethtool.o
 
 mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -544,8 +544,24 @@ enum {
     MLX5E_ARFS_FT_LEVEL
 };
 
+struct mlx5e_ethtool_table {
+    struct mlx5_flow_table *ft;
+    int                    num_rules;
+};
+
+#define ETHTOOL_NUM_L3_L4_FTS 7
+#define ETHTOOL_NUM_L2_FTS 4
+
+struct mlx5e_ethtool_steering {
+    struct mlx5e_ethtool_table      l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
+    struct mlx5e_ethtool_table      l2_ft[ETHTOOL_NUM_L2_FTS];
+    struct list_head                rules;
+    int                             tot_num_rules;
+};
+
 struct mlx5e_flow_steering {
     struct mlx5_flow_namespace      *ns;
+    struct mlx5e_ethtool_steering   ethtool;
     struct mlx5e_tc_table           tc;
     struct mlx5e_vlan_table         vlan;
     struct mlx5e_l2_table           l2;
@@ -701,6 +717,16 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
 void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
 void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
 void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
+int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
+                           int location);
+int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
+                                struct ethtool_rxnfc *info, u32 *rule_locs);
+int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
+                               struct ethtool_rx_flow_spec *fs);
+int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
+                              int location);
+void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
+void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
 void mlx5e_set_rx_mode_work(struct work_struct *work);
 void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -175,15 +175,12 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
 {
     struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
     struct mlx5_flow_destination dest;
-    u8 match_criteria_enable = 0;
     struct mlx5e_tir *tir = priv->indir_tir;
-    u32 *match_criteria;
-    u32 *match_value;
+    struct mlx5_flow_spec *spec;
     int err = 0;
 
-    match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-    match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-    if (!match_value || !match_criteria) {
+    spec = mlx5_vzalloc(sizeof(*spec));
+    if (!spec) {
         netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
         err = -ENOMEM;
         goto out;
@@ -208,8 +205,7 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
         goto out;
     }
 
-    arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, match_criteria_enable,
-                                              match_criteria, match_value,
+    arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, spec,
                                               MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                               MLX5_FS_DEFAULT_FLOW_TAG,
                                               &dest);
@@ -220,8 +216,7 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
                    __func__, type);
     }
 out:
-    kvfree(match_criteria);
-    kvfree(match_value);
+    kvfree(spec);
     return err;
 }
@@ -475,23 +470,20 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
     struct mlx5_flow_rule *rule = NULL;
     struct mlx5_flow_destination dest;
     struct arfs_table *arfs_table;
-    u8 match_criteria_enable = 0;
+    struct mlx5_flow_spec *spec;
     struct mlx5_flow_table *ft;
-    u32 *match_criteria;
-    u32 *match_value;
     int err = 0;
 
-    match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-    match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-    if (!match_value || !match_criteria) {
+    spec = mlx5_vzalloc(sizeof(*spec));
+    if (!spec) {
         netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
         err = -ENOMEM;
         goto out;
     }
 
-    match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-    MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+    spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+    MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                      outer_headers.ethertype);
-    MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+    MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype,
              ntohs(tuple->etype));
     arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype);
     if (!arfs_table) {
@@ -501,59 +493,58 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
     ft = arfs_table->ft.t;
     if (tuple->ip_proto == IPPROTO_TCP) {
-        MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                          outer_headers.tcp_dport);
-        MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                          outer_headers.tcp_sport);
-        MLX5_SET(fte_match_param, match_value, outer_headers.tcp_dport,
+        MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport,
                  ntohs(tuple->dst_port));
-        MLX5_SET(fte_match_param, match_value, outer_headers.tcp_sport,
+        MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport,
                  ntohs(tuple->src_port));
     } else {
-        MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                          outer_headers.udp_dport);
-        MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                          outer_headers.udp_sport);
-        MLX5_SET(fte_match_param, match_value, outer_headers.udp_dport,
+        MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport,
                  ntohs(tuple->dst_port));
-        MLX5_SET(fte_match_param, match_value, outer_headers.udp_sport,
+        MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_sport,
                  ntohs(tuple->src_port));
     }
     if (tuple->etype == htons(ETH_P_IP)) {
-        memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+        memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
                             outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
                &tuple->src_ipv4,
                4);
-        memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+        memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
                             outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                &tuple->dst_ipv4,
                4);
-        MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                          outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
-        MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                          outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
     } else {
-        memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+        memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
                             outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
                &tuple->src_ipv6,
                16);
-        memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+        memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
                             outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
               &tuple->dst_ipv6,
               16);
-        memset(MLX5_ADDR_OF(fte_match_param, match_criteria,
+        memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                             outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
                0xff,
                16);
-        memset(MLX5_ADDR_OF(fte_match_param, match_criteria,
+        memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                             outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                0xff,
                16);
     }
     dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
     dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
-    rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
-                              match_value, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+    rule = mlx5_add_flow_rule(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                               MLX5_FS_DEFAULT_FLOW_TAG,
                               &dest);
     if (IS_ERR(rule)) {
@@ -563,8 +554,7 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
     }
 out:
-    kvfree(match_criteria);
-    kvfree(match_value);
+    kvfree(spec);
     return err ? ERR_PTR(err) : rule;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -139,6 +139,18 @@ static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
     return err ? 0 : pfc_en_tx | pfc_en_rx;
 }
 
+static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
+{
+    struct mlx5_core_dev *mdev = priv->mdev;
+    u32 rx_pause;
+    u32 tx_pause;
+    int err;
+
+    err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
+
+    return err ? false : rx_pause | tx_pause;
+}
+
 #define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter))
 #define MLX5E_NUM_RQ_STATS(priv) \
     (NUM_RQ_STATS * priv->params.num_channels * \
@@ -147,8 +159,8 @@ static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
     (NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \
      test_bit(MLX5E_STATE_OPENED, &priv->state))
 #define MLX5E_NUM_PFC_COUNTERS(priv) \
-    (hweight8(mlx5e_query_pfc_combined(priv)) * \
+    ((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \
      NUM_PPORT_PER_PRIO_PFC_COUNTERS)
 
 static int mlx5e_get_sset_count(struct net_device *dev, int sset)
 {
@@ -210,8 +222,18 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
     pfc_combined = mlx5e_query_pfc_combined(priv);
     for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
         for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+            char pfc_string[ETH_GSTRING_LEN];
+
+            snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
             sprintf(data + (idx++) * ETH_GSTRING_LEN,
-                pport_per_prio_pfc_stats_desc[i].format, prio);
+                pport_per_prio_pfc_stats_desc[i].format, pfc_string);
+        }
+    }
+
+    if (mlx5e_query_global_pause_combined(priv)) {
+        for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+            sprintf(data + (idx++) * ETH_GSTRING_LEN,
+                pport_per_prio_pfc_stats_desc[i].format, "global");
         }
     }
@@ -306,6 +328,13 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
         }
     }
 
+    if (mlx5e_query_global_pause_combined(priv)) {
+        for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+            data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
+                                              pport_per_prio_pfc_stats_desc, i);
+        }
+    }
+
     if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
         return;
@@ -931,6 +960,15 @@ static int mlx5e_get_rxnfc(struct net_device *netdev,
     case ETHTOOL_GRXRINGS:
         info->data = priv->params.num_channels;
         break;
+    case ETHTOOL_GRXCLSRLCNT:
+        info->rule_cnt = priv->fs.ethtool.tot_num_rules;
+        break;
+    case ETHTOOL_GRXCLSRULE:
+        err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
+        break;
+    case ETHTOOL_GRXCLSRLALL:
+        err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
+        break;
     default:
         err = -EOPNOTSUPP;
         break;
@@ -1368,6 +1406,26 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
     return priv->pflags;
 }
 
+static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+    int err = 0;
+    struct mlx5e_priv *priv = netdev_priv(dev);
+
+    switch (cmd->cmd) {
+    case ETHTOOL_SRXCLSRLINS:
+        err = mlx5e_ethtool_flow_replace(priv, &cmd->fs);
+        break;
+    case ETHTOOL_SRXCLSRLDEL:
+        err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
+        break;
+    default:
+        err = -EOPNOTSUPP;
+        break;
+    }
+
+    return err;
+}
+
 const struct ethtool_ops mlx5e_ethtool_ops = {
     .get_drvinfo       = mlx5e_get_drvinfo,
     .get_link          = ethtool_op_get_link,
@@ -1387,6 +1445,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
     .get_rxfh          = mlx5e_get_rxfh,
     .set_rxfh          = mlx5e_set_rxfh,
     .get_rxnfc         = mlx5e_get_rxnfc,
+    .set_rxnfc         = mlx5e_set_rxnfc,
     .get_tunable       = mlx5e_get_tunable,
     .set_tunable       = mlx5e_set_tunable,
     .get_pauseparam    = mlx5e_get_pauseparam,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -156,19 +156,18 @@ enum mlx5e_vlan_rule_type {
 
 static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
                                  enum mlx5e_vlan_rule_type rule_type,
-                                 u16 vid, u32 *mc, u32 *mv)
+                                 u16 vid, struct mlx5_flow_spec *spec)
 {
     struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
     struct mlx5_flow_destination dest;
-    u8 match_criteria_enable = 0;
     struct mlx5_flow_rule **rule_p;
     int err = 0;
 
     dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
     dest.ft = priv->fs.l2.ft.t;
 
-    match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-    MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
+    spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+    MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
 
     switch (rule_type) {
     case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
@@ -176,17 +175,19 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
         break;
     case MLX5E_VLAN_RULE_TYPE_ANY_VID:
         rule_p = &priv->fs.vlan.any_vlan_rule;
-        MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
+        MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
         break;
     default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
         rule_p = &priv->fs.vlan.active_vlans_rule[vid];
-        MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
-        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
-        MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
+        MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
+        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+                         outer_headers.first_vid);
+        MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
+                 vid);
         break;
     }
 
-    *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+    *rule_p = mlx5_add_flow_rule(ft, spec,
                                  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                  MLX5_FS_DEFAULT_FLOW_TAG,
                                  &dest);
@@ -203,27 +204,21 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
 static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
                                enum mlx5e_vlan_rule_type rule_type, u16 vid)
 {
-    u32 *match_criteria;
-    u32 *match_value;
+    struct mlx5_flow_spec *spec;
     int err = 0;
 
-    match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-    match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-    if (!match_value || !match_criteria) {
+    spec = mlx5_vzalloc(sizeof(*spec));
+    if (!spec) {
         netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
-        err = -ENOMEM;
-        goto add_vlan_rule_out;
+        return -ENOMEM;
     }
 
     if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
         mlx5e_vport_context_update_vlans(priv);
 
-    err = __mlx5e_add_vlan_rule(priv, rule_type, vid, match_criteria,
-                                match_value);
+    err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);
 
-add_vlan_rule_out:
-    kvfree(match_criteria);
-    kvfree(match_value);
+    kvfree(spec);
+
     return err;
 }
@@ -598,32 +593,27 @@ static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
                                        u8 proto)
 {
     struct mlx5_flow_rule *rule;
-    u8 match_criteria_enable = 0;
-    u32 *match_criteria;
-    u32 *match_value;
+    struct mlx5_flow_spec *spec;
     int err = 0;
 
-    match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-    match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-    if (!match_value || !match_criteria) {
+    spec = mlx5_vzalloc(sizeof(*spec));
+    if (!spec) {
         netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
-        err = -ENOMEM;
-        goto out;
+        return ERR_PTR(-ENOMEM);
     }
 
     if (proto) {
-        match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-        MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ip_protocol);
-        MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, proto);
+        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
+        MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
     }
     if (etype) {
-        match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-        MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ethertype);
-        MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, etype);
+        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
+        MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
     }
 
-    rule = mlx5_add_flow_rule(ft, match_criteria_enable,
-                              match_criteria, match_value,
+    rule = mlx5_add_flow_rule(ft, spec,
                               MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                               MLX5_FS_DEFAULT_FLOW_TAG,
                               dest);
@@ -631,9 +621,8 @@ static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
         err = PTR_ERR(rule);
         netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
     }
-out:
-    kvfree(match_criteria);
-    kvfree(match_value);
+
+    kvfree(spec);
     return err ? ERR_PTR(err) : rule;
 }
@@ -792,24 +781,20 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
 {
     struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
     struct mlx5_flow_destination dest;
-    u8 match_criteria_enable = 0;
-    u32 *match_criteria;
-    u32 *match_value;
+    struct mlx5_flow_spec *spec;
     int err = 0;
     u8 *mc_dmac;
     u8 *mv_dmac;
 
-    match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-    match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-    if (!match_value || !match_criteria) {
+    spec = mlx5_vzalloc(sizeof(*spec));
+    if (!spec) {
         netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
-        err = -ENOMEM;
-        goto add_l2_rule_out;
+        return -ENOMEM;
     }
 
-    mc_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
+    mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                            outer_headers.dmac_47_16);
-    mv_dmac = MLX5_ADDR_OF(fte_match_param, match_value,
+    mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                            outer_headers.dmac_47_16);
 
     dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
@@ -817,13 +802,13 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
 
     switch (type) {
     case MLX5E_FULLMATCH:
-        match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
         eth_broadcast_addr(mc_dmac);
         ether_addr_copy(mv_dmac, ai->addr);
         break;
 
     case MLX5E_ALLMULTI:
-        match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
         mc_dmac[0] = 0x01;
         mv_dmac[0] = 0x01;
         break;
@@ -832,8 +817,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
         break;
     }
 
-    ai->rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
-                                  match_value,
+    ai->rule = mlx5_add_flow_rule(ft, spec,
                                   MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                   MLX5_FS_DEFAULT_FLOW_TAG, &dest);
     if (IS_ERR(ai->rule)) {
@@ -843,9 +827,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
         ai->rule = NULL;
     }
 
-add_l2_rule_out:
-    kvfree(match_criteria);
-    kvfree(match_value);
+    kvfree(spec);
 
     return err;
 }
@@ -1102,6 +1084,8 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
         goto err_destroy_l2_table;
     }
 
+    mlx5e_ethtool_init_steering(priv);
+
     return 0;
 
 err_destroy_l2_table:
@@ -1121,4 +1105,5 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
     mlx5e_destroy_l2_table(priv);
     mlx5e_destroy_ttc_table(priv);
     mlx5e_arfs_destroy_tables(priv);
+    mlx5e_ethtool_cleanup_steering(priv);
 }

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
new file mode 100644 (the new file from patch #4, shown in full)
/*
* Copyright (c) 2016, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/mlx5/fs.h>
#include "en.h"
struct mlx5e_ethtool_rule {
struct list_head list;
struct ethtool_rx_flow_spec flow_spec;
struct mlx5_flow_rule *rule;
struct mlx5e_ethtool_table *eth_ft;
};
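
/* Drop one rule reference from the table; free the hardware flow table
 * once its last rule is gone.
 */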
static void put_flow_table(struct mlx5e_ethtool_table *eth_ft)
{
if (!--eth_ft->num_rules) {
mlx5_destroy_flow_table(eth_ft->ft);
eth_ft->ft = NULL;
}
}
#define MLX5E_ETHTOOL_L3_L4_PRIO 0
#define MLX5E_ETHTOOL_L2_PRIO (MLX5E_ETHTOOL_L3_L4_PRIO + ETHTOOL_NUM_L3_L4_FTS)
#define MLX5E_ETHTOOL_NUM_ENTRIES 64000
#define MLX5E_ETHTOOL_NUM_GROUPS 10
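
/* A rule with more match tuples lands in a table with a lower prio value,
 * i.e. a higher-priority flow table, so more specific rules are matched
 * before less specific ones.
 */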
static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
struct ethtool_rx_flow_spec *fs,
int num_tuples)
{
struct mlx5e_ethtool_table *eth_ft;
struct mlx5_flow_namespace *ns;
struct mlx5_flow_table *ft;
int max_tuples;
int table_size;
int prio;
switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
case TCP_V4_FLOW:
case UDP_V4_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
break;
case IP_USER_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
break;
case ETHER_FLOW:
max_tuples = ETHTOOL_NUM_L2_FTS;
prio = max_tuples - num_tuples;
eth_ft = &priv->fs.ethtool.l2_ft[prio];
prio += MLX5E_ETHTOOL_L2_PRIO;
break;
default:
return ERR_PTR(-EINVAL);
}
eth_ft->num_rules++;
if (eth_ft->ft)
return eth_ft;
ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_ETHTOOL);
if (!ns)
return ERR_PTR(-ENOTSUPP);
table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
flow_table_properties_nic_receive.log_max_ft_size)),
MLX5E_ETHTOOL_NUM_ENTRIES);
ft = mlx5_create_auto_grouped_flow_table(ns, prio,
table_size,
MLX5E_ETHTOOL_NUM_GROUPS, 0);
if (IS_ERR(ft))
return (void *)ft;
eth_ft->ft = ft;
return eth_ft;
}
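
/* Mask the value buffer in place so that bits outside the mask are cleared. */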
static void mask_spec(u8 *mask, u8 *val, size_t size)
{
unsigned int i;
for (i = 0; i < size; i++, mask++, val++)
*((u8 *)val) = *((u8 *)mask) & *((u8 *)val);
}
static void set_ips(void *outer_headers_v, void *outer_headers_c, __be32 ip4src_m,
__be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
{
if (ip4src_m) {
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
src_ipv4_src_ipv6.ipv4_layout.ipv4),
&ip4src_v, sizeof(ip4src_v));
memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
src_ipv4_src_ipv6.ipv4_layout.ipv4),
0xff, sizeof(ip4src_m));
}
if (ip4dst_m) {
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&ip4dst_v, sizeof(ip4dst_v));
memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
0xff, sizeof(ip4dst_m));
}
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
ethertype, ETH_P_IP);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
ethertype, 0xffff);
}
static int set_flow_attrs(u32 *match_c, u32 *match_v,
struct ethtool_rx_flow_spec *fs)
{
void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
outer_headers);
void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
outer_headers);
u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
struct ethtool_tcpip4_spec *l4_mask;
struct ethtool_tcpip4_spec *l4_val;
struct ethtool_usrip4_spec *l3_mask;
struct ethtool_usrip4_spec *l3_val;
struct ethhdr *eth_val;
struct ethhdr *eth_mask;
switch (flow_type) {
case TCP_V4_FLOW:
l4_mask = &fs->m_u.tcp_ip4_spec;
l4_val = &fs->h_u.tcp_ip4_spec;
set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
if (l4_mask->psrc) {
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
0xffff);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
ntohs(l4_val->psrc));
}
if (l4_mask->pdst) {
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
0xffff);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
ntohs(l4_val->pdst));
}
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
0xffff);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
IPPROTO_TCP);
break;
case UDP_V4_FLOW:
l4_mask = &fs->m_u.tcp_ip4_spec;
l4_val = &fs->h_u.tcp_ip4_spec;
set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
if (l4_mask->psrc) {
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
0xffff);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
ntohs(l4_val->psrc));
}
if (l4_mask->pdst) {
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
0xffff);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
ntohs(l4_val->pdst));
}
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
0xffff);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
IPPROTO_UDP);
break;
case IP_USER_FLOW:
l3_mask = &fs->m_u.usr_ip4_spec;
l3_val = &fs->h_u.usr_ip4_spec;
set_ips(outer_headers_v, outer_headers_c, l3_mask->ip4src,
l3_val->ip4src, l3_mask->ip4dst, l3_val->ip4dst);
break;
case ETHER_FLOW:
eth_mask = &fs->m_u.ether_spec;
eth_val = &fs->h_u.ether_spec;
mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
outer_headers_c, smac_47_16),
eth_mask->h_source);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
outer_headers_v, smac_47_16),
eth_val->h_source);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
outer_headers_c, dmac_47_16),
eth_mask->h_dest);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
outer_headers_v, dmac_47_16),
eth_val->h_dest);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ethertype,
ntohs(eth_mask->h_proto));
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ethertype,
ntohs(eth_val->h_proto));
break;
default:
return -EINVAL;
}
if ((fs->flow_type & FLOW_EXT) &&
(fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
vlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
vlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
first_vid, 0xfff);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
first_vid, ntohs(fs->h_ext.vlan_tci));
}
if (fs->flow_type & FLOW_MAC_EXT &&
!is_zero_ether_addr(fs->m_ext.h_dest)) {
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
outer_headers_c, dmac_47_16),
fs->m_ext.h_dest);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
outer_headers_v, dmac_47_16),
fs->h_ext.h_dest);
}
return 0;
}
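
/* Insert the rule so the list stays sorted by ascending ethtool location. */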
static void add_rule_to_list(struct mlx5e_priv *priv,
struct mlx5e_ethtool_rule *rule)
{
struct mlx5e_ethtool_rule *iter;
struct list_head *head = &priv->fs.ethtool.rules;
list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
if (iter->flow_spec.location > rule->flow_spec.location)
break;
head = &iter->list;
}
priv->fs.ethtool.tot_num_rules++;
list_add(&rule->list, head);
}
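
/* Return true if the match criteria buffer is entirely zero: the first
 * byte is zero and every following byte equals the byte before it.
 */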
static bool outer_header_zero(u32 *match_criteria)
{
int size = MLX5_ST_SZ_BYTES(fte_match_param);
char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
outer_headers);
return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
outer_headers_c + 1,
size - 1);
}
static struct mlx5_flow_rule *add_ethtool_flow_rule(struct mlx5e_priv *priv,
struct mlx5_flow_table *ft,
struct ethtool_rx_flow_spec *fs)
{
struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_spec *spec;
struct mlx5_flow_rule *rule;
int err = 0;
u32 action;
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec)
return ERR_PTR(-ENOMEM);
err = set_flow_attrs(spec->match_criteria, spec->match_value,
fs);
if (err)
goto free;
if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
action = MLX5_FLOW_CONTEXT_ACTION_DROP;
} else {
dst = kzalloc(sizeof(*dst), GFP_KERNEL);
if (!dst) {
err = -ENOMEM;
goto free;
}
dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dst->tir_num = priv->direct_tir[fs->ring_cookie].tirn;
action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
}
spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
rule = mlx5_add_flow_rule(ft, spec, action,
MLX5_FS_DEFAULT_FLOW_TAG, dst);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
__func__, err);
goto free;
}
free:
kvfree(spec);
kfree(dst);
return err ? ERR_PTR(err) : rule;
}
static void del_ethtool_rule(struct mlx5e_priv *priv,
struct mlx5e_ethtool_rule *eth_rule)
{
if (eth_rule->rule)
mlx5_del_flow_rule(eth_rule->rule);
list_del(&eth_rule->list);
priv->fs.ethtool.tot_num_rules--;
put_flow_table(eth_rule->eth_ft);
kfree(eth_rule);
}
static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
int location)
{
struct mlx5e_ethtool_rule *iter;
list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
if (iter->flow_spec.location == location)
return iter;
}
return NULL;
}
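
/* Ethtool insert has replace semantics: remove any existing rule at this
 * location, then allocate a fresh zeroed rule and link it into the sorted
 * list.
 */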
static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
int location)
{
struct mlx5e_ethtool_rule *eth_rule;
eth_rule = find_ethtool_rule(priv, location);
if (eth_rule)
del_ethtool_rule(priv, eth_rule);
eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
if (!eth_rule)
return ERR_PTR(-ENOMEM);
add_rule_to_list(priv, eth_rule);
return eth_rule;
}
#define MAX_NUM_OF_ETHTOOL_RULES BIT(10)
#define all_ones(field) (field == (__force typeof(field))-1)
#define all_zeros_or_all_ones(field) \
((field) == 0 || (field) == (__force typeof(field))-1)
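
/* Validate the ethtool flow spec and return the number of match tuples it
 * carries, or -EINVAL; partial masks are rejected for the IP/port fields
 * (each field used for matching must be fully masked, all ones).
 */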
static int validate_flow(struct mlx5e_priv *priv,
struct ethtool_rx_flow_spec *fs)
{
struct ethtool_tcpip4_spec *l4_mask;
struct ethtool_usrip4_spec *l3_mask;
struct ethhdr *eth_mask;
int num_tuples = 0;
if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
return -EINVAL;
if (fs->ring_cookie >= priv->params.num_channels &&
fs->ring_cookie != RX_CLS_FLOW_DISC)
return -EINVAL;
switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
case ETHER_FLOW:
eth_mask = &fs->m_u.ether_spec;
if (!is_zero_ether_addr(eth_mask->h_dest))
num_tuples++;
if (!is_zero_ether_addr(eth_mask->h_source))
num_tuples++;
if (eth_mask->h_proto)
num_tuples++;
break;
case TCP_V4_FLOW:
case UDP_V4_FLOW:
if (fs->m_u.tcp_ip4_spec.tos)
return -EINVAL;
l4_mask = &fs->m_u.tcp_ip4_spec;
if (l4_mask->ip4src) {
if (!all_ones(l4_mask->ip4src))
return -EINVAL;
num_tuples++;
}
if (l4_mask->ip4dst) {
if (!all_ones(l4_mask->ip4dst))
return -EINVAL;
num_tuples++;
}
if (l4_mask->psrc) {
if (!all_ones(l4_mask->psrc))
return -EINVAL;
num_tuples++;
}
if (l4_mask->pdst) {
if (!all_ones(l4_mask->pdst))
return -EINVAL;
num_tuples++;
}
/* Flow is TCP/UDP */
num_tuples++;
break;
case IP_USER_FLOW:
l3_mask = &fs->m_u.usr_ip4_spec;
if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
return -EINVAL;
if (l3_mask->ip4src) {
if (!all_ones(l3_mask->ip4src))
return -EINVAL;
num_tuples++;
}
if (l3_mask->ip4dst) {
if (!all_ones(l3_mask->ip4dst))
return -EINVAL;
num_tuples++;
}
/* Flow is IPv4 */
num_tuples++;
break;
default:
return -EINVAL;
}
if ((fs->flow_type & FLOW_EXT)) {
if (fs->m_ext.vlan_etype ||
(fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK)))
return -EINVAL;
if (fs->m_ext.vlan_tci) {
if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
return -EINVAL;
}
num_tuples++;
}
if (fs->flow_type & FLOW_MAC_EXT &&
!is_zero_ether_addr(fs->m_ext.h_dest))
num_tuples++;
return num_tuples;
}
int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
struct ethtool_rx_flow_spec *fs)
{
struct mlx5e_ethtool_table *eth_ft;
struct mlx5e_ethtool_rule *eth_rule;
struct mlx5_flow_rule *rule;
int num_tuples;
int err;
num_tuples = validate_flow(priv, fs);
if (num_tuples <= 0) {
netdev_warn(priv->netdev, "%s: flow is not valid\n", __func__);
return -EINVAL;
}
eth_ft = get_flow_table(priv, fs, num_tuples);
if (IS_ERR(eth_ft))
return PTR_ERR(eth_ft);
eth_rule = get_ethtool_rule(priv, fs->location);
if (IS_ERR(eth_rule)) {
put_flow_table(eth_ft);
return PTR_ERR(eth_rule);
}
eth_rule->flow_spec = *fs;
eth_rule->eth_ft = eth_ft;
if (!eth_ft->ft) {
err = -EINVAL;
goto del_ethtool_rule;
}
rule = add_ethtool_flow_rule(priv, eth_ft->ft, fs);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
goto del_ethtool_rule;
}
eth_rule->rule = rule;
return 0;
del_ethtool_rule:
del_ethtool_rule(priv, eth_rule);
return err;
}
int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
int location)
{
struct mlx5e_ethtool_rule *eth_rule;
int err = 0;
if (location >= MAX_NUM_OF_ETHTOOL_RULES)
return -ENOSPC;
eth_rule = find_ethtool_rule(priv, location);
if (!eth_rule) {
err = -ENOENT;
goto out;
}
del_ethtool_rule(priv, eth_rule);
out:
return err;
}
int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
int location)
{
struct mlx5e_ethtool_rule *eth_rule;
if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
return -EINVAL;
list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) {
if (eth_rule->flow_spec.location == location) {
info->fs = eth_rule->flow_spec;
return 0;
}
}
return -ENOENT;
}
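
/* Scan rule locations in ascending order, skipping empty slots (-ENOENT),
 * until info->rule_cnt rule locations have been collected.
 */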
int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
u32 *rule_locs)
{
int location = 0;
int idx = 0;
int err = 0;
while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
err = mlx5e_ethtool_get_flow(priv, info, location);
if (!err)
rule_locs[idx++] = location;
location++;
}
return err;
}
void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
{
struct mlx5e_ethtool_rule *iter;
struct mlx5e_ethtool_rule *temp;
list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list)
del_ethtool_rule(priv, iter);
}
void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
{
INIT_LIST_HEAD(&priv->fs.ethtool.rules);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -151,6 +151,22 @@ static const struct counter_desc vport_stats_desc[] = {
         VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
     { "tx_vport_broadcast_bytes",
         VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
+    { "rx_vport_rdma_unicast_packets",
+        VPORT_COUNTER_OFF(received_ib_unicast.packets) },
+    { "rx_vport_rdma_unicast_bytes",
+        VPORT_COUNTER_OFF(received_ib_unicast.octets) },
+    { "tx_vport_rdma_unicast_packets",
+        VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
+    { "tx_vport_rdma_unicast_bytes",
+        VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
+    { "rx_vport_rdma_multicast_packets",
+        VPORT_COUNTER_OFF(received_ib_multicast.packets) },
+    { "rx_vport_rdma_multicast_bytes",
+        VPORT_COUNTER_OFF(received_ib_multicast.octets) },
+    { "tx_vport_rdma_multicast_packets",
+        VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
+    { "tx_vport_rdma_multicast_bytes",
+        VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
 };
 
 #define PPORT_802_3_OFF(c) \
@@ -238,11 +254,12 @@ static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
 };
 
 static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
-    { "rx_prio%d_pause", PPORT_PER_PRIO_OFF(rx_pause) },
-    { "rx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
-    { "tx_prio%d_pause", PPORT_PER_PRIO_OFF(tx_pause) },
-    { "tx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
-    { "rx_prio%d_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
+    /* %s is "global" or "prio{i}" */
+    { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
+    { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
+    { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
+    { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
+    { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
 };
 
 struct mlx5e_rq_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -50,7 +50,7 @@ struct mlx5e_tc_flow {
 #define MLX5E_TC_TABLE_NUM_GROUPS 4
 
 static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
-                                                u32 *match_c, u32 *match_v,
+                                                struct mlx5_flow_spec *spec,
                                                 u32 action, u32 flow_tag)
 {
     struct mlx5_core_dev *dev = priv->mdev;
@@ -88,8 +88,8 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
         table_created = true;
     }
 
-    rule = mlx5_add_flow_rule(priv->fs.tc.t, MLX5_MATCH_OUTER_HEADERS,
-                              match_c, match_v,
+    spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+    rule = mlx5_add_flow_rule(priv->fs.tc.t, spec,
                               action, flow_tag,
                               &dest);
@@ -126,12 +126,13 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
     }
 }
 
-static int parse_cls_flower(struct mlx5e_priv *priv,
-                            u32 *match_c, u32 *match_v,
+static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
                             struct tc_cls_flower_offload *f)
 {
-    void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c, outer_headers);
-    void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers);
+    void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+                                   outer_headers);
+    void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+                                   outer_headers);
     u16 addr_type = 0;
     u8 ip_proto = 0;
 
@@ -342,12 +343,11 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
                            struct tc_cls_flower_offload *f)
 {
     struct mlx5e_tc_table *tc = &priv->fs.tc;
-    u32 *match_c;
-    u32 *match_v;
     int err = 0;
     u32 flow_tag;
     u32 action;
     struct mlx5e_tc_flow *flow;
+    struct mlx5_flow_spec *spec;
     struct mlx5_flow_rule *old = NULL;
 
     flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
@@ -357,16 +357,15 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
     else
         flow = kzalloc(sizeof(*flow), GFP_KERNEL);
 
-    match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-    match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-    if (!match_c || !match_v || !flow) {
+    spec = mlx5_vzalloc(sizeof(*spec));
+    if (!spec || !flow) {
         err = -ENOMEM;
         goto err_free;
     }
 
     flow->cookie = f->cookie;
 
-    err = parse_cls_flower(priv, match_c, match_v, f);
+    err = parse_cls_flower(priv, spec, f);
     if (err < 0)
         goto err_free;
 
@@ -379,8 +378,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
     if (err)
         goto err_free;
 
-    flow->rule = mlx5e_tc_add_flow(priv, match_c, match_v, action,
-                                   flow_tag);
+    flow->rule = mlx5e_tc_add_flow(priv, spec, action, flow_tag);
     if (IS_ERR(flow->rule)) {
         err = PTR_ERR(flow->rule);
         goto err_hash_del;
@@ -398,8 +396,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
     if (!old)
         kfree(flow);
 out:
-    kfree(match_c);
-    kfree(match_v);
+    kvfree(spec);
     return err;
 }
...@@ -329,25 +329,23 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, ...@@ -329,25 +329,23 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
MLX5_MATCH_OUTER_HEADERS); MLX5_MATCH_OUTER_HEADERS);
struct mlx5_flow_rule *flow_rule = NULL; struct mlx5_flow_rule *flow_rule = NULL;
struct mlx5_flow_destination dest; struct mlx5_flow_destination dest;
struct mlx5_flow_spec *spec;
void *mv_misc = NULL; void *mv_misc = NULL;
void *mc_misc = NULL; void *mc_misc = NULL;
u8 *dmac_v = NULL; u8 *dmac_v = NULL;
u8 *dmac_c = NULL; u8 *dmac_c = NULL;
u32 *match_v;
u32 *match_c;
if (rx_rule) if (rx_rule)
match_header |= MLX5_MATCH_MISC_PARAMETERS; match_header |= MLX5_MATCH_MISC_PARAMETERS;
match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); spec = mlx5_vzalloc(sizeof(*spec));
if (!match_v || !match_c) { if (!spec) {
pr_warn("FDB: Failed to alloc match parameters\n"); pr_warn("FDB: Failed to alloc match parameters\n");
goto out; return NULL;
} }
dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
dmac_v = MLX5_ADDR_OF(fte_match_param, match_v,
outer_headers.dmac_47_16); outer_headers.dmac_47_16);
dmac_c = MLX5_ADDR_OF(fte_match_param, match_c, dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers.dmac_47_16); outer_headers.dmac_47_16);
if (match_header & MLX5_MATCH_OUTER_HEADERS) { if (match_header & MLX5_MATCH_OUTER_HEADERS) {
...@@ -356,8 +354,10 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, ...@@ -356,8 +354,10 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
} }
if (match_header & MLX5_MATCH_MISC_PARAMETERS) { if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
mv_misc = MLX5_ADDR_OF(fte_match_param, match_v, misc_parameters); mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
mc_misc = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters); misc_parameters);
mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters);
MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT); MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port); MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
} }
...@@ -368,11 +368,9 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, ...@@ -368,11 +368,9 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
esw_debug(esw->dev, esw_debug(esw->dev,
"\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n", "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
dmac_v, dmac_c, vport); dmac_v, dmac_c, vport);
spec->match_criteria_enable = match_header;
flow_rule = flow_rule =
mlx5_add_flow_rule(esw->fdb_table.fdb, mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
match_header,
match_c,
match_v,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest); 0, &dest);
if (IS_ERR(flow_rule)) { if (IS_ERR(flow_rule)) {
...@@ -381,9 +379,8 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, ...@@ -381,9 +379,8 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
dmac_v, dmac_c, vport, PTR_ERR(flow_rule)); dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
flow_rule = NULL; flow_rule = NULL;
} }
out:
kfree(match_v); kvfree(spec);
kfree(match_c);
return flow_rule; return flow_rule;
} }
...@@ -1293,9 +1290,8 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, ...@@ -1293,9 +1290,8 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
static int esw_vport_ingress_config(struct mlx5_eswitch *esw, static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport) struct mlx5_vport *vport)
{ {
struct mlx5_flow_spec *spec;
u8 smac[ETH_ALEN]; u8 smac[ETH_ALEN];
u32 *match_v;
u32 *match_c;
int err = 0; int err = 0;
u8 *smac_v; u8 *smac_v;
...@@ -1329,9 +1325,8 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, ...@@ -1329,9 +1325,8 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
"vport[%d] configure ingress rules, vlan(%d) qos(%d)\n", "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->vlan, vport->qos); vport->vport, vport->vlan, vport->qos);
match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); spec = mlx5_vzalloc(sizeof(*spec));
match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); if (!spec) {
if (!match_v || !match_c) {
err = -ENOMEM; err = -ENOMEM;
esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n", esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
vport->vport, err); vport->vport, err);
...@@ -1339,22 +1334,20 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, ...@@ -1339,22 +1334,20 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
} }
if (vport->vlan || vport->qos) if (vport->vlan || vport->qos)
MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag); MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
if (vport->spoofchk) { if (vport->spoofchk) {
MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_47_16); MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_15_0); MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
smac_v = MLX5_ADDR_OF(fte_match_param, smac_v = MLX5_ADDR_OF(fte_match_param,
match_v, spec->match_value,
outer_headers.smac_47_16); outer_headers.smac_47_16);
ether_addr_copy(smac_v, smac); ether_addr_copy(smac_v, smac);
} }
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
vport->ingress.allow_rule = vport->ingress.allow_rule =
mlx5_add_flow_rule(vport->ingress.acl, mlx5_add_flow_rule(vport->ingress.acl, spec,
MLX5_MATCH_OUTER_HEADERS,
match_c,
match_v,
MLX5_FLOW_CONTEXT_ACTION_ALLOW, MLX5_FLOW_CONTEXT_ACTION_ALLOW,
0, NULL); 0, NULL);
if (IS_ERR(vport->ingress.allow_rule)) { if (IS_ERR(vport->ingress.allow_rule)) {
...@@ -1365,13 +1358,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, ...@@ -1365,13 +1358,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
goto out; goto out;
} }
memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param)); memset(spec, 0, sizeof(*spec));
memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
vport->ingress.drop_rule = vport->ingress.drop_rule =
mlx5_add_flow_rule(vport->ingress.acl, mlx5_add_flow_rule(vport->ingress.acl, spec,
0,
match_c,
match_v,
MLX5_FLOW_CONTEXT_ACTION_DROP, MLX5_FLOW_CONTEXT_ACTION_DROP,
0, NULL); 0, NULL);
if (IS_ERR(vport->ingress.drop_rule)) { if (IS_ERR(vport->ingress.drop_rule)) {
...@@ -1385,17 +1374,14 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, ...@@ -1385,17 +1374,14 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
out: out:
if (err) if (err)
esw_vport_cleanup_ingress_rules(esw, vport); esw_vport_cleanup_ingress_rules(esw, vport);
kvfree(spec);
kfree(match_v);
kfree(match_c);
return err; return err;
} }
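
Worth noting in the ingress ACL hunk: one allocation now backs both rules. After the allow rule is installed, a single memset() over the struct (replacing the two per-array memsets of the old code) wipes the spec so it can be reused for the catch-all drop rule; an all-zero spec with match_criteria_enable == 0 matches every packet. In outline, with 'acl' standing in for the vport ACL table:

        /* Sketch only: one spec serves an allow rule and a wildcard drop. */
        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        allow_rule = mlx5_add_flow_rule(acl, spec,
                                        MLX5_FLOW_CONTEXT_ACTION_ALLOW, 0, NULL);

        memset(spec, 0, sizeof(*spec));         /* zero mask == match-all */
        drop_rule = mlx5_add_flow_rule(acl, spec,
                                       MLX5_FLOW_CONTEXT_ACTION_DROP, 0, NULL);
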
static int esw_vport_egress_config(struct mlx5_eswitch *esw, static int esw_vport_egress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport) struct mlx5_vport *vport)
{ {
u32 *match_v; struct mlx5_flow_spec *spec;
u32 *match_c;
int err = 0; int err = 0;
esw_vport_cleanup_egress_rules(esw, vport); esw_vport_cleanup_egress_rules(esw, vport);
...@@ -1411,9 +1397,8 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, ...@@ -1411,9 +1397,8 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n", "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->vlan, vport->qos); vport->vport, vport->vlan, vport->qos);
match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); spec = mlx5_vzalloc(sizeof(*spec));
match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); if (!spec) {
if (!match_v || !match_c) {
err = -ENOMEM; err = -ENOMEM;
esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n", esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
vport->vport, err); vport->vport, err);
...@@ -1421,16 +1406,14 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, ...@@ -1421,16 +1406,14 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
} }
/* Allowed vlan rule */ /* Allowed vlan rule */
MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag); MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.vlan_tag); MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag);
MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.first_vid); MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
MLX5_SET(fte_match_param, match_v, outer_headers.first_vid, vport->vlan); MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->vlan);
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
vport->egress.allowed_vlan = vport->egress.allowed_vlan =
mlx5_add_flow_rule(vport->egress.acl, mlx5_add_flow_rule(vport->egress.acl, spec,
MLX5_MATCH_OUTER_HEADERS,
match_c,
match_v,
MLX5_FLOW_CONTEXT_ACTION_ALLOW, MLX5_FLOW_CONTEXT_ACTION_ALLOW,
0, NULL); 0, NULL);
if (IS_ERR(vport->egress.allowed_vlan)) { if (IS_ERR(vport->egress.allowed_vlan)) {
...@@ -1442,13 +1425,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, ...@@ -1442,13 +1425,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
} }
/* Drop others rule (star rule) */ /* Drop others rule (star rule) */
memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param)); memset(spec, 0, sizeof(*spec));
memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
vport->egress.drop_rule = vport->egress.drop_rule =
mlx5_add_flow_rule(vport->egress.acl, mlx5_add_flow_rule(vport->egress.acl, spec,
0,
match_c,
match_v,
MLX5_FLOW_CONTEXT_ACTION_DROP, MLX5_FLOW_CONTEXT_ACTION_DROP,
0, NULL); 0, NULL);
if (IS_ERR(vport->egress.drop_rule)) { if (IS_ERR(vport->egress.drop_rule)) {
...@@ -1458,8 +1437,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, ...@@ -1458,8 +1437,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
vport->egress.drop_rule = NULL; vport->egress.drop_rule = NULL;
} }
out: out:
kfree(match_v); kvfree(spec);
kfree(match_c);
return err; return err;
} }
......
...@@ -43,37 +43,35 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn ...@@ -43,37 +43,35 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
{ {
struct mlx5_flow_destination dest; struct mlx5_flow_destination dest;
struct mlx5_flow_rule *flow_rule; struct mlx5_flow_rule *flow_rule;
int match_header = MLX5_MATCH_MISC_PARAMETERS; struct mlx5_flow_spec *spec;
u32 *match_v, *match_c;
void *misc; void *misc;
match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); spec = mlx5_vzalloc(sizeof(*spec));
match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); if (!spec) {
if (!match_v || !match_c) {
esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n"); esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
flow_rule = ERR_PTR(-ENOMEM); flow_rule = ERR_PTR(-ENOMEM);
goto out; goto out;
} }
misc = MLX5_ADDR_OF(fte_match_param, match_v, misc_parameters); misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn); MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */ MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */
misc = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters); misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn); MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport_num = vport; dest.vport_num = vport;
flow_rule = mlx5_add_flow_rule(esw->fdb_table.fdb, match_header, match_c, flow_rule = mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
match_v, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest); 0, &dest);
if (IS_ERR(flow_rule)) if (IS_ERR(flow_rule))
esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule)); esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out: out:
kfree(match_v); kvfree(spec);
kfree(match_c);
return flow_rule; return flow_rule;
} }
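
The misc parameters set keeps the same value/mask split as the header sets: match_value holds the expected field values, match_criteria the mask, and match_criteria_enable tells the device which match sets to evaluate at all. A condensed sketch of the source-SQ/source-vport match built above ('sqn' is a placeholder):

        /* Sketch: match traffic sent from SQ 'sqn' by vport 0. */
        void *misc;

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
        MLX5_SET(fte_match_set_misc, misc, source_port, 0);

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
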
...@@ -138,12 +136,11 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) ...@@ -138,12 +136,11 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{ {
struct mlx5_flow_destination dest; struct mlx5_flow_destination dest;
struct mlx5_flow_rule *flow_rule = NULL; struct mlx5_flow_rule *flow_rule = NULL;
u32 *match_v, *match_c; struct mlx5_flow_spec *spec;
int err = 0; int err = 0;
match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); spec = mlx5_vzalloc(sizeof(*spec));
match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); if (!spec) {
if (!match_v || !match_c) {
esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n"); esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
err = -ENOMEM; err = -ENOMEM;
goto out; goto out;
...@@ -152,8 +149,9 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) ...@@ -152,8 +149,9 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport_num = 0; dest.vport_num = 0;
flow_rule = mlx5_add_flow_rule(esw->fdb_table.fdb, 0, match_c, match_v, flow_rule = mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 0, &dest); MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest);
if (IS_ERR(flow_rule)) { if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule); err = PTR_ERR(flow_rule);
esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err); esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
...@@ -162,8 +160,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) ...@@ -162,8 +160,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
esw->fdb_table.offloads.miss_rule = flow_rule; esw->fdb_table.offloads.miss_rule = flow_rule;
out: out:
kfree(match_v); kvfree(spec);
kfree(match_c);
return err; return err;
} }
...@@ -351,29 +348,28 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn) ...@@ -351,29 +348,28 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{ {
struct mlx5_flow_destination dest; struct mlx5_flow_destination dest;
struct mlx5_flow_rule *flow_rule; struct mlx5_flow_rule *flow_rule;
int match_header = MLX5_MATCH_MISC_PARAMETERS; struct mlx5_flow_spec *spec;
u32 *match_v, *match_c;
void *misc; void *misc;
match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); spec = mlx5_vzalloc(sizeof(*spec));
match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); if (!spec) {
if (!match_v || !match_c) {
esw_warn(esw->dev, "Failed to alloc match parameters\n"); esw_warn(esw->dev, "Failed to alloc match parameters\n");
flow_rule = ERR_PTR(-ENOMEM); flow_rule = ERR_PTR(-ENOMEM);
goto out; goto out;
} }
misc = MLX5_ADDR_OF(fte_match_param, match_v, misc_parameters); misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_port, vport); MLX5_SET(fte_match_set_misc, misc, source_port, vport);
misc = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters); misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dest.tir_num = tirn; dest.tir_num = tirn;
flow_rule = mlx5_add_flow_rule(esw->offloads.ft_offloads, match_header, match_c, flow_rule = mlx5_add_flow_rule(esw->offloads.ft_offloads, spec,
match_v, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest); 0, &dest);
if (IS_ERR(flow_rule)) { if (IS_ERR(flow_rule)) {
esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule)); esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
...@@ -381,8 +377,7 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn) ...@@ -381,8 +377,7 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
} }
out: out:
kfree(match_v); kvfree(spec);
kfree(match_c);
return flow_rule; return flow_rule;
} }
......
...@@ -67,13 +67,21 @@ ...@@ -67,13 +67,21 @@
#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \ #define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
.caps = (long[]) {__VA_ARGS__} } .caps = (long[]) {__VA_ARGS__} }
#define FS_CHAINING_CAPS FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
FS_CAP(flow_table_properties_nic_receive.modify_root), \
FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
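
FS_CHAINING_CAPS folds the capability list that was previously spelled out inline at both ADD_PRIO() sites (bypass and leftovers) into one macro; the new ethtool priority needs the same four capabilities, so the macro is used three times in root_fs below. Given the FS_REQUIRED_CAPS definition above it, it should expand to roughly:

        /* Approximate expansion, for orientation only: */
        { .arr_sz = 4,
          .caps = (long[]) {
                FS_CAP(flow_table_properties_nic_receive.flow_modify_en),
                FS_CAP(flow_table_properties_nic_receive.modify_root),
                FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode),
                FS_CAP(flow_table_properties_nic_receive.flow_table_modify) } }
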
#define LEFTOVERS_NUM_LEVELS 1 #define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1 #define LEFTOVERS_NUM_PRIOS 1
#define BY_PASS_PRIO_NUM_LEVELS 1 #define BY_PASS_PRIO_NUM_LEVELS 1
#define BY_PASS_MIN_LEVEL (KERNEL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\ #define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
LEFTOVERS_NUM_PRIOS) LEFTOVERS_NUM_PRIOS)
#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 10
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
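
The level arithmetic is what actually makes room for the new namespace. Each *_MIN_LEVEL must sit above every level that can exist beneath it, so slotting the ethtool namespace in under bypass and above the kernel namespace rebases BY_PASS_MIN_LEVEL from KERNEL_MIN_LEVEL onto ETHTOOL_MIN_LEVEL. A worked account of the deltas:

        /* Level budget after this patch:
         *
         *   ETHTOOL_MIN_LEVEL = KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS
         *                     = KERNEL_MIN_LEVEL + 10
         *   BY_PASS_MIN_LEVEL = ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS
         *                       + LEFTOVERS_NUM_PRIOS
         *
         * i.e. bypass rules now start 10 levels higher: one level per
         * ethtool priority (ETHTOOL_NUM_PRIOS = 10, each of them
         * ETHTOOL_PRIO_NUM_LEVELS = 1 level deep).
         */
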
/* Vlan, mac, ttc, aRFS */ /* Vlan, mac, ttc, aRFS */
#define KERNEL_NIC_PRIO_NUM_LEVELS 4 #define KERNEL_NIC_PRIO_NUM_LEVELS 4
#define KERNEL_NIC_NUM_PRIOS 1 #define KERNEL_NIC_NUM_PRIOS 1
...@@ -103,27 +111,24 @@ static struct init_tree_node { ...@@ -103,27 +111,24 @@ static struct init_tree_node {
int num_levels; int num_levels;
} root_fs = { } root_fs = {
.type = FS_TYPE_NAMESPACE, .type = FS_TYPE_NAMESPACE,
.ar_size = 5, .ar_size = 6,
.children = (struct init_tree_node[]) { .children = (struct init_tree_node[]) {
ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), FS_CHAINING_CAPS,
FS_CAP(flow_table_properties_nic_receive.modify_root),
FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode),
FS_CAP(flow_table_properties_nic_receive.flow_table_modify)),
ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))), BY_PASS_PRIO_NUM_LEVELS))),
ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {}, ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))), ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
FS_CHAINING_CAPS,
ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
ETHTOOL_PRIO_NUM_LEVELS))),
ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {}, ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
ADD_NS(ADD_MULTIPLE_PRIO(1, 1), ADD_NS(ADD_MULTIPLE_PRIO(1, 1),
ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS, ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
KERNEL_NIC_PRIO_NUM_LEVELS))), KERNEL_NIC_PRIO_NUM_LEVELS))),
ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), FS_CHAINING_CAPS,
FS_CAP(flow_table_properties_nic_receive.modify_root),
FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode),
FS_CAP(flow_table_properties_nic_receive.flow_table_modify)),
ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))), ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {}, ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))), ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
...@@ -1160,9 +1165,7 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest, ...@@ -1160,9 +1165,7 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest,
static struct mlx5_flow_rule * static struct mlx5_flow_rule *
_mlx5_add_flow_rule(struct mlx5_flow_table *ft, _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
u8 match_criteria_enable, struct mlx5_flow_spec *spec,
u32 *match_criteria,
u32 *match_value,
u32 action, u32 action,
u32 flow_tag, u32 flow_tag,
struct mlx5_flow_destination *dest) struct mlx5_flow_destination *dest)
...@@ -1176,22 +1179,23 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft, ...@@ -1176,22 +1179,23 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT); nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
fs_for_each_fg(g, ft) fs_for_each_fg(g, ft)
if (compare_match_criteria(g->mask.match_criteria_enable, if (compare_match_criteria(g->mask.match_criteria_enable,
match_criteria_enable, spec->match_criteria_enable,
g->mask.match_criteria, g->mask.match_criteria,
match_criteria)) { spec->match_criteria)) {
rule = add_rule_fg(g, match_value, rule = add_rule_fg(g, spec->match_value,
action, flow_tag, dest); action, flow_tag, dest);
if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC) if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC)
goto unlock; goto unlock;
} }
g = create_autogroup(ft, match_criteria_enable, match_criteria); g = create_autogroup(ft, spec->match_criteria_enable,
spec->match_criteria);
if (IS_ERR(g)) { if (IS_ERR(g)) {
rule = (void *)g; rule = (void *)g;
goto unlock; goto unlock;
} }
rule = add_rule_fg(g, match_value, rule = add_rule_fg(g, spec->match_value,
action, flow_tag, dest); action, flow_tag, dest);
if (IS_ERR(rule)) { if (IS_ERR(rule)) {
/* Remove assumes refcount > 0 and autogroup creates a group /* Remove assumes refcount > 0 and autogroup creates a group
...@@ -1215,9 +1219,7 @@ static bool fwd_next_prio_supported(struct mlx5_flow_table *ft) ...@@ -1215,9 +1219,7 @@ static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
struct mlx5_flow_rule * struct mlx5_flow_rule *
mlx5_add_flow_rule(struct mlx5_flow_table *ft, mlx5_add_flow_rule(struct mlx5_flow_table *ft,
u8 match_criteria_enable, struct mlx5_flow_spec *spec,
u32 *match_criteria,
u32 *match_value,
u32 action, u32 action,
u32 flow_tag, u32 flow_tag,
struct mlx5_flow_destination *dest) struct mlx5_flow_destination *dest)
...@@ -1248,8 +1250,7 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft, ...@@ -1248,8 +1250,7 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft,
} }
} }
rule = _mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria, rule = _mlx5_add_flow_rule(ft, spec, action, flow_tag, dest);
match_value, action, flow_tag, dest);
if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
if (!IS_ERR_OR_NULL(rule) && if (!IS_ERR_OR_NULL(rule) &&
...@@ -1367,41 +1368,47 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg) ...@@ -1367,41 +1368,47 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type) enum mlx5_flow_namespace_type type)
{ {
struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns; struct mlx5_flow_steering *steering = dev->priv.steering;
struct mlx5_flow_root_namespace *root_ns;
int prio; int prio;
struct fs_prio *fs_prio; struct fs_prio *fs_prio;
struct mlx5_flow_namespace *ns; struct mlx5_flow_namespace *ns;
if (!root_ns) if (!steering)
return NULL; return NULL;
switch (type) { switch (type) {
case MLX5_FLOW_NAMESPACE_BYPASS: case MLX5_FLOW_NAMESPACE_BYPASS:
case MLX5_FLOW_NAMESPACE_OFFLOADS: case MLX5_FLOW_NAMESPACE_OFFLOADS:
case MLX5_FLOW_NAMESPACE_ETHTOOL:
case MLX5_FLOW_NAMESPACE_KERNEL: case MLX5_FLOW_NAMESPACE_KERNEL:
case MLX5_FLOW_NAMESPACE_LEFTOVERS: case MLX5_FLOW_NAMESPACE_LEFTOVERS:
case MLX5_FLOW_NAMESPACE_ANCHOR: case MLX5_FLOW_NAMESPACE_ANCHOR:
prio = type; prio = type;
break; break;
case MLX5_FLOW_NAMESPACE_FDB: case MLX5_FLOW_NAMESPACE_FDB:
if (dev->priv.fdb_root_ns) if (steering->fdb_root_ns)
return &dev->priv.fdb_root_ns->ns; return &steering->fdb_root_ns->ns;
else else
return NULL; return NULL;
case MLX5_FLOW_NAMESPACE_ESW_EGRESS: case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
if (dev->priv.esw_egress_root_ns) if (steering->esw_egress_root_ns)
return &dev->priv.esw_egress_root_ns->ns; return &steering->esw_egress_root_ns->ns;
else else
return NULL; return NULL;
case MLX5_FLOW_NAMESPACE_ESW_INGRESS: case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
if (dev->priv.esw_ingress_root_ns) if (steering->esw_ingress_root_ns)
return &dev->priv.esw_ingress_root_ns->ns; return &steering->esw_ingress_root_ns->ns;
else else
return NULL; return NULL;
default: default:
return NULL; return NULL;
} }
root_ns = steering->root_ns;
if (!root_ns)
return NULL;
fs_prio = find_prio(&root_ns->ns, prio); fs_prio = find_prio(&root_ns->ns, prio);
if (!fs_prio) if (!fs_prio)
return NULL; return NULL;
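
With the enum value and the priority in place, consumers such as the new en_fs_ethtool.c can resolve the namespace through the same lookup as everyone else. A hedged usage sketch ('mdev' is the mlx5_core_dev handle; the error code choice is illustrative):

        /* Sketch: resolve the ethtool steering namespace before adding rules. */
        struct mlx5_flow_namespace *ns;

        ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_ETHTOOL);
        if (!ns)
                return ERR_PTR(-ENOTSUPP);      /* caps or ft support missing */
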
...@@ -1487,13 +1494,13 @@ static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps) ...@@ -1487,13 +1494,13 @@ static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
return true; return true;
} }
static int init_root_tree_recursive(struct mlx5_core_dev *dev, static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
struct init_tree_node *init_node, struct init_tree_node *init_node,
struct fs_node *fs_parent_node, struct fs_node *fs_parent_node,
struct init_tree_node *init_parent_node, struct init_tree_node *init_parent_node,
int prio) int prio)
{ {
int max_ft_level = MLX5_CAP_FLOWTABLE(dev, int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
flow_table_properties_nic_receive. flow_table_properties_nic_receive.
max_ft_level); max_ft_level);
struct mlx5_flow_namespace *fs_ns; struct mlx5_flow_namespace *fs_ns;
...@@ -1504,7 +1511,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev, ...@@ -1504,7 +1511,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
if (init_node->type == FS_TYPE_PRIO) { if (init_node->type == FS_TYPE_PRIO) {
if ((init_node->min_ft_level > max_ft_level) || if ((init_node->min_ft_level > max_ft_level) ||
!has_required_caps(dev, &init_node->caps)) !has_required_caps(steering->dev, &init_node->caps))
return 0; return 0;
fs_get_obj(fs_ns, fs_parent_node); fs_get_obj(fs_ns, fs_parent_node);
...@@ -1525,7 +1532,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev, ...@@ -1525,7 +1532,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
} }
prio = 0; prio = 0;
for (i = 0; i < init_node->ar_size; i++) { for (i = 0; i < init_node->ar_size; i++) {
err = init_root_tree_recursive(dev, &init_node->children[i], err = init_root_tree_recursive(steering, &init_node->children[i],
base, init_node, prio); base, init_node, prio);
if (err) if (err)
return err; return err;
...@@ -1538,7 +1545,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev, ...@@ -1538,7 +1545,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
return 0; return 0;
} }
static int init_root_tree(struct mlx5_core_dev *dev, static int init_root_tree(struct mlx5_flow_steering *steering,
struct init_tree_node *init_node, struct init_tree_node *init_node,
struct fs_node *fs_parent_node) struct fs_node *fs_parent_node)
{ {
...@@ -1548,7 +1555,7 @@ static int init_root_tree(struct mlx5_core_dev *dev, ...@@ -1548,7 +1555,7 @@ static int init_root_tree(struct mlx5_core_dev *dev,
fs_get_obj(fs_ns, fs_parent_node); fs_get_obj(fs_ns, fs_parent_node);
for (i = 0; i < init_node->ar_size; i++) { for (i = 0; i < init_node->ar_size; i++) {
err = init_root_tree_recursive(dev, &init_node->children[i], err = init_root_tree_recursive(steering, &init_node->children[i],
&fs_ns->node, &fs_ns->node,
init_node, i); init_node, i);
if (err) if (err)
...@@ -1557,7 +1564,7 @@ static int init_root_tree(struct mlx5_core_dev *dev, ...@@ -1557,7 +1564,7 @@ static int init_root_tree(struct mlx5_core_dev *dev,
return 0; return 0;
} }
static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev, static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering *steering,
enum fs_flow_table_type enum fs_flow_table_type
table_type) table_type)
{ {
...@@ -1569,7 +1576,7 @@ static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev ...@@ -1569,7 +1576,7 @@ static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev
if (!root_ns) if (!root_ns)
return NULL; return NULL;
root_ns->dev = dev; root_ns->dev = steering->dev;
root_ns->table_type = table_type; root_ns->table_type = table_type;
ns = &root_ns->ns; ns = &root_ns->ns;
...@@ -1624,212 +1631,126 @@ static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns) ...@@ -1624,212 +1631,126 @@ static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
#define ANCHOR_PRIO 0 #define ANCHOR_PRIO 0
#define ANCHOR_SIZE 1 #define ANCHOR_SIZE 1
#define ANCHOR_LEVEL 0 #define ANCHOR_LEVEL 0
static int create_anchor_flow_table(struct mlx5_core_dev static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
*dev)
{ {
struct mlx5_flow_namespace *ns = NULL; struct mlx5_flow_namespace *ns = NULL;
struct mlx5_flow_table *ft; struct mlx5_flow_table *ft;
ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ANCHOR); ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
if (!ns) if (!ns)
return -EINVAL; return -EINVAL;
ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL); ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL);
if (IS_ERR(ft)) { if (IS_ERR(ft)) {
mlx5_core_err(dev, "Failed to create last anchor flow table"); mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
return PTR_ERR(ft); return PTR_ERR(ft);
} }
return 0; return 0;
} }
static int init_root_ns(struct mlx5_core_dev *dev) static int init_root_ns(struct mlx5_flow_steering *steering)
{ {
dev->priv.root_ns = create_root_ns(dev, FS_FT_NIC_RX); steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
if (IS_ERR_OR_NULL(dev->priv.root_ns)) if (IS_ERR_OR_NULL(steering->root_ns))
goto cleanup; goto cleanup;
if (init_root_tree(dev, &root_fs, &dev->priv.root_ns->ns.node)) if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
goto cleanup; goto cleanup;
set_prio_attrs(dev->priv.root_ns); set_prio_attrs(steering->root_ns);
if (create_anchor_flow_table(dev)) if (create_anchor_flow_table(steering))
goto cleanup; goto cleanup;
return 0; return 0;
cleanup: cleanup:
mlx5_cleanup_fs(dev); mlx5_cleanup_fs(steering->dev);
return -ENOMEM; return -ENOMEM;
} }
static void cleanup_single_prio_root_ns(struct mlx5_core_dev *dev, static void clean_tree(struct fs_node *node)
struct mlx5_flow_root_namespace *root_ns)
{ {
struct fs_node *prio; if (node) {
struct fs_node *iter;
if (!root_ns) struct fs_node *temp;
return;
if (!list_empty(&root_ns->ns.node.children)) { list_for_each_entry_safe(iter, temp, &node->children, list)
prio = list_first_entry(&root_ns->ns.node.children, clean_tree(iter);
struct fs_node, tree_remove_node(node);
list);
if (tree_remove_node(prio))
mlx5_core_warn(dev,
"Flow steering priority wasn't destroyed, refcount > 1\n");
} }
if (tree_remove_node(&root_ns->ns.node))
mlx5_core_warn(dev,
"Flow steering namespace wasn't destroyed, refcount > 1\n");
root_ns = NULL;
} }
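
clean_tree() replaces dozens of lines of staged, type-aware teardown with a post-order walk: recurse into all children first, then drop the node's own reference via tree_remove_node(). Every fs_node — priority, namespace or flow table — is handled uniformly. The shape of the pattern on simplified stand-in types (the real code releases refcounts through tree_remove_node() rather than freeing directly):

        #include <linux/list.h>
        #include <linux/slab.h>

        struct node {                           /* stand-in for struct fs_node */
                struct list_head list;          /* sibling linkage */
                struct list_head children;      /* child list head */
        };

        static void free_tree(struct node *n)
        {
                struct node *child, *tmp;

                /* _safe variant: each child unlinks itself while we iterate */
                list_for_each_entry_safe(child, tmp, &n->children, list)
                        free_tree(child);
                list_del(&n->list);
                kfree(n);
        }
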
static void destroy_flow_tables(struct fs_prio *prio) static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
{ {
struct mlx5_flow_table *iter;
struct mlx5_flow_table *tmp;
fs_for_each_ft_safe(iter, tmp, prio)
mlx5_destroy_flow_table(iter);
}
static void cleanup_root_ns(struct mlx5_core_dev *dev)
{
struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
struct fs_prio *iter_prio;
if (!MLX5_CAP_GEN(dev, nic_flow_table))
return;
if (!root_ns) if (!root_ns)
return; return;
/* stage 1 */ clean_tree(&root_ns->ns.node);
fs_for_each_prio(iter_prio, &root_ns->ns) {
struct fs_node *node;
struct mlx5_flow_namespace *iter_ns;
fs_for_each_ns_or_ft(node, iter_prio) {
if (node->type == FS_TYPE_FLOW_TABLE)
continue;
fs_get_obj(iter_ns, node);
while (!list_empty(&iter_ns->node.children)) {
struct fs_prio *obj_iter_prio2;
struct fs_node *iter_prio2 =
list_first_entry(&iter_ns->node.children,
struct fs_node,
list);
fs_get_obj(obj_iter_prio2, iter_prio2);
destroy_flow_tables(obj_iter_prio2);
if (tree_remove_node(iter_prio2)) {
mlx5_core_warn(dev,
"Priority %d wasn't destroyed, refcount > 1\n",
obj_iter_prio2->prio);
return;
}
}
}
}
/* stage 2 */
fs_for_each_prio(iter_prio, &root_ns->ns) {
while (!list_empty(&iter_prio->node.children)) {
struct fs_node *iter_ns =
list_first_entry(&iter_prio->node.children,
struct fs_node,
list);
if (tree_remove_node(iter_ns)) {
mlx5_core_warn(dev,
"Namespace wasn't destroyed, refcount > 1\n");
return;
}
}
}
/* stage 3 */
while (!list_empty(&root_ns->ns.node.children)) {
struct fs_prio *obj_prio_node;
struct fs_node *prio_node =
list_first_entry(&root_ns->ns.node.children,
struct fs_node,
list);
fs_get_obj(obj_prio_node, prio_node);
if (tree_remove_node(prio_node)) {
mlx5_core_warn(dev,
"Priority %d wasn't destroyed, refcount > 1\n",
obj_prio_node->prio);
return;
}
}
if (tree_remove_node(&root_ns->ns.node)) {
mlx5_core_warn(dev,
"root namespace wasn't destroyed, refcount > 1\n");
return;
}
dev->priv.root_ns = NULL;
} }
void mlx5_cleanup_fs(struct mlx5_core_dev *dev) void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{ {
struct mlx5_flow_steering *steering = dev->priv.steering;
if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return; return;
cleanup_root_ns(dev); cleanup_root_ns(steering->root_ns);
cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); cleanup_root_ns(steering->esw_egress_root_ns);
cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns); cleanup_root_ns(steering->esw_ingress_root_ns);
cleanup_single_prio_root_ns(dev, dev->priv.esw_ingress_root_ns); cleanup_root_ns(steering->fdb_root_ns);
mlx5_cleanup_fc_stats(dev); mlx5_cleanup_fc_stats(dev);
kfree(steering);
} }
static int init_fdb_root_ns(struct mlx5_core_dev *dev) static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{ {
struct fs_prio *prio; struct fs_prio *prio;
dev->priv.fdb_root_ns = create_root_ns(dev, FS_FT_FDB); steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
if (!dev->priv.fdb_root_ns) if (!steering->fdb_root_ns)
return -ENOMEM; return -ENOMEM;
/* Create single prio */ /* Create single prio */
prio = fs_create_prio(&dev->priv.fdb_root_ns->ns, 0, 1); prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 1);
if (IS_ERR(prio)) { if (IS_ERR(prio)) {
cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); cleanup_root_ns(steering->fdb_root_ns);
steering->fdb_root_ns = NULL;
return PTR_ERR(prio); return PTR_ERR(prio);
} else { } else {
return 0; return 0;
} }
} }
static int init_egress_acl_root_ns(struct mlx5_core_dev *dev) static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering)
{ {
struct fs_prio *prio; struct fs_prio *prio;
dev->priv.esw_egress_root_ns = create_root_ns(dev, FS_FT_ESW_EGRESS_ACL); steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
if (!dev->priv.esw_egress_root_ns) if (!steering->esw_egress_root_ns)
return -ENOMEM; return -ENOMEM;
/* create 1 prio*/ /* create 1 prio*/
prio = fs_create_prio(&dev->priv.esw_egress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev)); prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0,
MLX5_TOTAL_VPORTS(steering->dev));
if (IS_ERR(prio)) if (IS_ERR(prio))
return PTR_ERR(prio); return PTR_ERR(prio);
else else
return 0; return 0;
} }
static int init_ingress_acl_root_ns(struct mlx5_core_dev *dev) static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering)
{ {
struct fs_prio *prio; struct fs_prio *prio;
dev->priv.esw_ingress_root_ns = create_root_ns(dev, FS_FT_ESW_INGRESS_ACL); steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
if (!dev->priv.esw_ingress_root_ns) if (!steering->esw_ingress_root_ns)
return -ENOMEM; return -ENOMEM;
/* create 1 prio*/ /* create 1 prio*/
prio = fs_create_prio(&dev->priv.esw_ingress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev)); prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0,
MLX5_TOTAL_VPORTS(steering->dev));
if (IS_ERR(prio)) if (IS_ERR(prio))
return PTR_ERR(prio); return PTR_ERR(prio);
else else
...@@ -1838,6 +1759,7 @@ static int init_ingress_acl_root_ns(struct mlx5_core_dev *dev) ...@@ -1838,6 +1759,7 @@ static int init_ingress_acl_root_ns(struct mlx5_core_dev *dev)
int mlx5_init_fs(struct mlx5_core_dev *dev) int mlx5_init_fs(struct mlx5_core_dev *dev)
{ {
struct mlx5_flow_steering *steering;
int err = 0; int err = 0;
if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
...@@ -1847,26 +1769,32 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) ...@@ -1847,26 +1769,32 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
if (err) if (err)
return err; return err;
steering = kzalloc(sizeof(*steering), GFP_KERNEL);
if (!steering)
return -ENOMEM;
steering->dev = dev;
dev->priv.steering = steering;
if (MLX5_CAP_GEN(dev, nic_flow_table) && if (MLX5_CAP_GEN(dev, nic_flow_table) &&
MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) { MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
err = init_root_ns(dev); err = init_root_ns(steering);
if (err) if (err)
goto err; goto err;
} }
if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) { if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
err = init_fdb_root_ns(dev); err = init_fdb_root_ns(steering);
if (err) if (err)
goto err; goto err;
} }
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
err = init_egress_acl_root_ns(dev); err = init_egress_acl_root_ns(steering);
if (err) if (err)
goto err; goto err;
} }
if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
err = init_ingress_acl_root_ns(dev); err = init_ingress_acl_root_ns(steering);
if (err) if (err)
goto err; goto err;
} }
......
...@@ -55,6 +55,14 @@ enum fs_fte_status { ...@@ -55,6 +55,14 @@ enum fs_fte_status {
FS_FTE_STATUS_EXISTING = 1UL << 0, FS_FTE_STATUS_EXISTING = 1UL << 0,
}; };
struct mlx5_flow_steering {
struct mlx5_core_dev *dev;
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
struct mlx5_flow_root_namespace *esw_egress_root_ns;
struct mlx5_flow_root_namespace *esw_ingress_root_ns;
};
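
Moving these four root-namespace pointers out of struct mlx5_priv (see the hunk below) and into the driver-private fs_core.h means only the steering core can reach them; the rest of the driver holds an opaque struct mlx5_flow_steering pointer. Internally, access now goes through one extra hop, e.g. a hypothetical fs_core helper:

        /* Hypothetical accessor illustrating the new indirection: */
        static struct mlx5_flow_root_namespace *
        get_nic_root_ns(struct mlx5_core_dev *dev)
        {
                struct mlx5_flow_steering *steering = dev->priv.steering;

                return steering ? steering->root_ns : NULL;
        }
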
struct fs_node { struct fs_node {
struct list_head list; struct list_head list;
struct list_head children; struct list_head children;
......
...@@ -550,14 +550,10 @@ struct mlx5_priv { ...@@ -550,14 +550,10 @@ struct mlx5_priv {
struct list_head ctx_list; struct list_head ctx_list;
spinlock_t ctx_lock; spinlock_t ctx_lock;
struct mlx5_flow_steering *steering;
struct mlx5_eswitch *eswitch; struct mlx5_eswitch *eswitch;
struct mlx5_core_sriov sriov; struct mlx5_core_sriov sriov;
unsigned long pci_dev_data; unsigned long pci_dev_data;
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
struct mlx5_flow_root_namespace *esw_egress_root_ns;
struct mlx5_flow_root_namespace *esw_ingress_root_ns;
struct mlx5_fc_stats fc_stats; struct mlx5_fc_stats fc_stats;
struct mlx5_rl_table rl_table; struct mlx5_rl_table rl_table;
}; };
......
...@@ -55,6 +55,7 @@ static inline void build_leftovers_ft_param(int *priority, ...@@ -55,6 +55,7 @@ static inline void build_leftovers_ft_param(int *priority,
enum mlx5_flow_namespace_type { enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_BYPASS, MLX5_FLOW_NAMESPACE_BYPASS,
MLX5_FLOW_NAMESPACE_OFFLOADS, MLX5_FLOW_NAMESPACE_OFFLOADS,
MLX5_FLOW_NAMESPACE_ETHTOOL,
MLX5_FLOW_NAMESPACE_KERNEL, MLX5_FLOW_NAMESPACE_KERNEL,
MLX5_FLOW_NAMESPACE_LEFTOVERS, MLX5_FLOW_NAMESPACE_LEFTOVERS,
MLX5_FLOW_NAMESPACE_ANCHOR, MLX5_FLOW_NAMESPACE_ANCHOR,
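
The position of MLX5_FLOW_NAMESPACE_ETHTOOL in this enum is not cosmetic: mlx5_get_flow_namespace() maps the type straight to a priority index with prio = type, so the enum order must mirror the children order of root_fs (bypass, offloads, ethtool, kernel, leftovers, anchor). Adding the value anywhere else would silently shift every lookup. A hypothetical compile-time guard (not in this patch) would make the contract explicit:

        /* Hypothetical guard: ethtool is the third child of root_fs. */
        BUILD_BUG_ON(MLX5_FLOW_NAMESPACE_ETHTOOL != 2);
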
...@@ -68,6 +69,12 @@ struct mlx5_flow_group; ...@@ -68,6 +69,12 @@ struct mlx5_flow_group;
struct mlx5_flow_rule; struct mlx5_flow_rule;
struct mlx5_flow_namespace; struct mlx5_flow_namespace;
struct mlx5_flow_spec {
u8 match_criteria_enable;
u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
u32 match_value[MLX5_ST_SZ_DW(fte_match_param)];
};
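
MLX5_ST_SZ_DW() yields the size of the firmware's fte_match_param layout in 32-bit words, so the spec carries a full mask image and a full value image — about half a kilobyte each — which is why every call site above allocates it with mlx5_vzalloc() (kzalloc with a vzalloc fallback) and frees it with kvfree() rather than putting it on the stack. A quick, illustrative sanity check:

        /* Illustrative only; exact sizes come from mlx5_ifc.h. */
        BUILD_BUG_ON(sizeof(struct mlx5_flow_spec) <
                     2 * MLX5_ST_SZ_BYTES(fte_match_param));
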
struct mlx5_flow_destination { struct mlx5_flow_destination {
enum mlx5_flow_destination_type type; enum mlx5_flow_destination_type type;
union { union {
...@@ -116,9 +123,7 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg); ...@@ -116,9 +123,7 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
*/ */
struct mlx5_flow_rule * struct mlx5_flow_rule *
mlx5_add_flow_rule(struct mlx5_flow_table *ft, mlx5_add_flow_rule(struct mlx5_flow_table *ft,
u8 match_criteria_enable, struct mlx5_flow_spec *spec,
u32 *match_criteria,
u32 *match_value,
u32 action, u32 action,
u32 flow_tag, u32 flow_tag,
struct mlx5_flow_destination *dest); struct mlx5_flow_destination *dest);
......