Commit 95ae2d1d authored by David S. Miller

Merge branch 'for-net-next' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 E-Switch chains and prios

This series has two parts:

1) A merge of the mlx5-next branch, which includes updates to the mlx5
HW layouts needed by this and upcoming submissions.

2) From Paul: increase the number of supported tc chains and prios.

Currently the Mellanox driver supports offloading tc rules only on the
first 4 chains and the first 16 priorities. The restriction stems from
the firmware's flow level enforcement, which requires a flow table of a
given level to point only to flow tables of a higher level. This
enforcement can be bypassed by setting the ignore_flow_level bit when
creating flow table entries. Use unmanaged tables and ignore_flow_level
to create more tables than declared by fs_core steering, and manually
manage the connections between the tables themselves.
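
As a concrete illustration, a driver-internal caller could combine the
two new knobs roughly as follows. This is a sketch only: the helper name
is hypothetical and error handling is elided; the identifiers used
(MLX5_FLOW_TABLE_UNMANAGED, next_ft, and the autogroup attributes) are
the ones introduced by the diff below:

    /* Sketch: create a table that fs_core neither level-checks nor
     * auto-connects, with one entry reserved at the end for a miss
     * rule and the firmware-level miss pointing at next_ft.
     */
    static struct mlx5_flow_table *
    create_unmanaged_table(struct mlx5_flow_namespace *ns, int prio,
                           struct mlx5_flow_table *next_ft)
    {
            struct mlx5_flow_table_attr ft_attr = {};

            ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
            ft_attr.prio = prio;
            ft_attr.max_fte = 2;
            ft_attr.next_ft = next_ft;
            ft_attr.autogroup.max_num_groups = 1;
            ft_attr.autogroup.num_reserved_entries = 1;

            return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
    }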

A HW table is instantiated for every tc <chain,prio> tuple. The miss
rule of every table either jumps to the next <chain,prio> table or
continues to the slow_fdb. This logic is realized by the following
sequence (a code sketch of the miss-rule step follows the list):

1. Create an auto-grouped flow table for the specified priority with
    reserved entries

Reserved entries are allocated at the end of the flow table.
Flow groups are evaluated in sequence, so the flow group defined on the
last FTEs is guaranteed to be evaluated last.

Define a "match all" flow group on the reserved entries, giving the
platform a place to add table miss actions.

2. Set the miss rule action to jump to the next <chain,prio> table
    or the slow_fdb.

3. Link the previous priority table to point to the new table by
    updating its miss rule.
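
For steps 2 and 3, the miss rule can be sketched as below. Again this is
illustrative only (the helper name is made up), but the action, the
destination type, and the FLOW_ACT_IGNORE_FLOW_LEVEL / FLOW_ACT_NO_APPEND
flags are the ones used by this series. An empty match spec falls into
the "match all" group defined on the reserved entries, and
ignore_flow_level lets the rule target a table of any level:

    /* Sketch: point ft's miss at next_ft, i.e. the next <chain,prio>
     * table or the slow_fdb.
     */
    static struct mlx5_flow_handle *
    set_miss_rule(struct mlx5_flow_table *ft,
                  struct mlx5_flow_table *next_ft)
    {
            static const struct mlx5_flow_spec spec = {}; /* match all */
            struct mlx5_flow_destination dest = {};
            struct mlx5_flow_act act = {};

            act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
            act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
            dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
            dest.ft = next_ft;

            return mlx5_add_flow_rules(ft, &spec, &act, &dest, 1);
    }

Relinking the previous priority table (step 3) is then a matter of
replacing its miss rule's destination with the newly created table.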

Please pull and let me know if there's any problem.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0c58ac1e 278d51f2
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3276,12 +3276,14 @@ static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
                                            int num_entries, int num_groups,
                                            u32 flags)
 {
+       struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_flow_table *ft;
 
-       ft = mlx5_create_auto_grouped_flow_table(ns, priority,
-                                                num_entries,
-                                                num_groups,
-                                                0, flags);
+       ft_attr.prio = priority;
+       ft_attr.max_fte = num_entries;
+       ft_attr.flags = flags;
+       ft_attr.autogroup.max_num_groups = num_groups;
+       ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
        if (IS_ERR(ft))
                return ERR_CAST(ft);
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -42,7 +42,7 @@ mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
 # Core extra
 #
 mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
-                                    ecpf.o rdma.o
+                                    ecpf.o rdma.o eswitch_offloads_chains.o
 mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
 mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
 mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -58,6 +58,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
                                                   struct ethtool_rx_flow_spec *fs,
                                                   int num_tuples)
 {
+       struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5e_ethtool_table *eth_ft;
        struct mlx5_flow_namespace *ns;
        struct mlx5_flow_table *ft;
@@ -102,9 +103,11 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
        table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
                                                       flow_table_properties_nic_receive.log_max_ft_size)),
                           MLX5E_ETHTOOL_NUM_ENTRIES);
-       ft = mlx5_create_auto_grouped_flow_table(ns, prio,
-                                                table_size,
-                                                MLX5E_ETHTOOL_NUM_GROUPS, 0, 0);
+
+       ft_attr.prio = prio;
+       ft_attr.max_fte = table_size;
+       ft_attr.autogroup.max_num_groups = MLX5E_ETHTOOL_NUM_GROUPS;
+       ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
        if (IS_ERR(ft))
                return (void *)ft;
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -41,6 +41,7 @@
 #include <net/ipv6_stubs.h>
 
 #include "eswitch.h"
+#include "eswitch_offloads_chains.h"
 #include "en.h"
 #include "en_rep.h"
 #include "en_tc.h"
@@ -1247,8 +1248,7 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
 static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
                                  void *cb_priv)
 {
-       struct flow_cls_offload *f = type_data;
-       struct flow_cls_offload cls_flower;
+       struct flow_cls_offload tmp, *f = type_data;
        struct mlx5e_priv *priv = cb_priv;
        struct mlx5_eswitch *esw;
        unsigned long flags;
@@ -1261,16 +1261,30 @@ static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
 
        switch (type) {
        case TC_SETUP_CLSFLOWER:
-               if (!mlx5_eswitch_prios_supported(esw) || f->common.chain_index)
+               memcpy(&tmp, f, sizeof(*f));
+
+               if (!mlx5_esw_chains_prios_supported(esw) ||
+                   tmp.common.chain_index)
                        return -EOPNOTSUPP;
 
                /* Re-use tc offload path by moving the ft flow to the
                 * reserved ft chain.
+                *
+                * FT offload can use prio range [0, INT_MAX], so we normalize
+                * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
+                * as with tc, where prio 0 isn't supported.
+                *
+                * We only support chain 0 of FT offload.
                 */
-               memcpy(&cls_flower, f, sizeof(*f));
-               cls_flower.common.chain_index = FDB_FT_CHAIN;
-               err = mlx5e_rep_setup_tc_cls_flower(priv, &cls_flower, flags);
-               memcpy(&f->stats, &cls_flower.stats, sizeof(f->stats));
+               if (tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw))
+                       return -EOPNOTSUPP;
+               if (tmp.common.chain_index != 0)
+                       return -EOPNOTSUPP;
+
+               tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
+               tmp.common.prio++;
+               err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags);
+               memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
                return err;
        default:
                return -EOPNOTSUPP;
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -1133,6 +1133,7 @@ static void mlx5e_grp_per_port_buffer_congest_update_stats(struct mlx5e_priv *priv)
 static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
        { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
        { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
+       { "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
        { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
        { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
 };
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -51,6 +51,7 @@
 #include "en_rep.h"
 #include "en_tc.h"
 #include "eswitch.h"
+#include "eswitch_offloads_chains.h"
 #include "fs_core.h"
 #include "en/port.h"
 #include "en/tc_tun.h"
@@ -960,7 +961,8 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 
        mutex_lock(&priv->fs.tc.t_lock);
        if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
-               int tc_grp_size, tc_tbl_size;
+               struct mlx5_flow_table_attr ft_attr = {};
+               int tc_grp_size, tc_tbl_size, tc_num_grps;
                u32 max_flow_counter;
 
                max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
@@ -970,13 +972,15 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 
                tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
                                    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
+               tc_num_grps = MLX5E_TC_TABLE_NUM_GROUPS;
 
+               ft_attr.prio = MLX5E_TC_PRIO;
+               ft_attr.max_fte = tc_tbl_size;
+               ft_attr.level = MLX5E_TC_FT_LEVEL;
+               ft_attr.autogroup.max_num_groups = tc_num_grps;
                priv->fs.tc.t =
                        mlx5_create_auto_grouped_flow_table(priv->fs.ns,
-                                                           MLX5E_TC_PRIO,
-                                                           tc_tbl_size,
-                                                           MLX5E_TC_TABLE_NUM_GROUPS,
-                                                           MLX5E_TC_FT_LEVEL, 0);
+                                                           &ft_attr);
                if (IS_ERR(priv->fs.tc.t)) {
                        mutex_unlock(&priv->fs.tc.t_lock);
                        NL_SET_ERR_MSG_MOD(extack,
@@ -1080,7 +1084,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
        memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
        slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        slow_attr->split_count = 0;
-       slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN;
+       slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
 
        rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
        if (!IS_ERR(rule))
@@ -1097,7 +1101,7 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
        memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
        slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        slow_attr->split_count = 0;
-       slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN;
+       slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
        mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
        flow_flag_clear(flow, SLOW);
 }
@@ -1157,19 +1161,18 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                      struct netlink_ext_ack *extack)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-       u32 max_chain = mlx5_eswitch_get_chain_range(esw);
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;
        struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
-       u16 max_prio = mlx5_eswitch_get_prio_range(esw);
        struct net_device *out_dev, *encap_dev = NULL;
        struct mlx5_fc *counter = NULL;
        struct mlx5e_rep_priv *rpriv;
        struct mlx5e_priv *out_priv;
        bool encap_valid = true;
+       u32 max_prio, max_chain;
        int err = 0;
        int out_index;
 
-       if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) {
+       if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) {
                NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
                return -EOPNOTSUPP;
        }
@@ -1179,11 +1182,13 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
         * FDB_FT_CHAIN which is outside tc range.
         * See mlx5e_rep_setup_ft_cb().
         */
+       max_chain = mlx5_esw_chains_get_chain_range(esw);
        if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
                NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
                return -EOPNOTSUPP;
        }
 
+       max_prio = mlx5_esw_chains_get_prio_range(esw);
        if (attr->prio > max_prio) {
                NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
                return -EOPNOTSUPP;
@@ -3466,7 +3471,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
                        break;
                case FLOW_ACTION_GOTO: {
                        u32 dest_chain = act->chain_index;
-                       u32 max_chain = mlx5_eswitch_get_chain_range(esw);
+                       u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
 
                        if (ft_flow) {
                                NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -277,6 +277,7 @@ enum {
 
 static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
 {
+       struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_namespace *root_ns;
        struct mlx5_flow_table *fdb;
@@ -289,8 +290,10 @@ static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
        }
 
        /* num FTE 2, num FG 2 */
-       fdb = mlx5_create_auto_grouped_flow_table(root_ns, LEGACY_VEPA_PRIO,
-                                                 2, 2, 0, 0);
+       ft_attr.prio = LEGACY_VEPA_PRIO;
+       ft_attr.max_fte = 2;
+       ft_attr.autogroup.max_num_groups = 2;
+       fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
        if (IS_ERR(fdb)) {
                err = PTR_ERR(fdb);
                esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -157,7 +157,7 @@ enum offloads_fdb_flags {
        ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED = BIT(0),
 };
 
-extern const unsigned int ESW_POOLS[4];
+struct mlx5_esw_chains_priv;
 
 struct mlx5_eswitch_fdb {
        union {
@@ -182,14 +182,7 @@ struct mlx5_eswitch_fdb {
                        struct mlx5_flow_handle *miss_rule_multi;
                        int vlan_push_pop_refcount;
 
-                       struct {
-                               struct mlx5_flow_table *fdb;
-                               u32 num_rules;
-                       } fdb_prio[FDB_NUM_CHAINS][FDB_TC_MAX_PRIO + 1][FDB_TC_LEVELS_PER_PRIO];
-                       /* Protects fdb_prio table */
-                       struct mutex fdb_prio_lock;
-
-                       int fdb_left[ARRAY_SIZE(ESW_POOLS)];
+                       struct mlx5_esw_chains_priv *esw_chains_priv;
                } offloads;
        };
        u32 flags;
@@ -355,15 +348,6 @@ mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
                          struct mlx5_flow_handle *rule,
                          struct mlx5_esw_flow_attr *attr);
 
-bool
-mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw);
-
-u16
-mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw);
-
-u32
-mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw);
-
 struct mlx5_flow_handle *
 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
                                   struct mlx5_flow_destination *dest);
@@ -388,6 +372,11 @@ enum {
        MLX5_ESW_DEST_ENCAP_VALID = BIT(1),
 };
 
+enum {
+       MLX5_ESW_ATTR_FLAG_VLAN_HANDLED = BIT(0),
+       MLX5_ESW_ATTR_FLAG_SLOW_PATH = BIT(1),
+};
+
 struct mlx5_esw_flow_attr {
        struct mlx5_eswitch_rep *in_rep;
        struct mlx5_core_dev *in_mdev;
@@ -401,7 +390,6 @@ struct mlx5_esw_flow_attr {
        u16 vlan_vid[MLX5_FS_VLAN_DEPTH];
        u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
        u8 total_vlan;
-       bool vlan_handled;
        struct {
                u32 flags;
                struct mlx5_eswitch_rep *rep;
@@ -416,6 +404,7 @@ struct mlx5_esw_flow_attr {
        u32 chain;
        u16 prio;
        u32 dest_chain;
+       u32 flags;
        struct mlx5e_tc_flow_parse_attr *parse_attr;
 };
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies. */
+
+#ifndef __ML5_ESW_CHAINS_H__
+#define __ML5_ESW_CHAINS_H__
+
+bool
+mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw);
+u32
+mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw);
+u32
+mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw);
+u32
+mlx5_esw_chains_get_ft_chain(struct mlx5_eswitch *esw);
+
+struct mlx5_flow_table *
+mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
+                          u32 level);
+void
+mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
+                          u32 level);
+
+struct mlx5_flow_table *
+mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw);
+
+int mlx5_esw_chains_create(struct mlx5_eswitch *esw);
+void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw);
+
+#endif /* __ML5_ESW_CHAINS_H__ */
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -50,8 +50,8 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
                             struct mlx5_flow_act *flow_act)
 {
        static const struct mlx5_flow_spec spec = {};
+       struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_flow_namespace *root_ns;
-       int prio, flags;
        int err;
 
        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
@@ -63,10 +63,11 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
        /* As this is the terminating action then the termination table is the
         * same prio as the slow path
         */
-       prio = FDB_SLOW_PATH;
-       flags = MLX5_FLOW_TABLE_TERMINATION;
-       tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, prio, 1, 1,
-                                                         0, flags);
+       ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION;
+       ft_attr.prio = FDB_SLOW_PATH;
+       ft_attr.max_fte = 1;
+       ft_attr.autogroup.max_num_groups = 1;
+       tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
        if (IS_ERR(tt->termtbl)) {
                esw_warn(dev, "Failed to create termination table\n");
                return -EOPNOTSUPP;
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -432,6 +432,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
        MLX5_SET(set_fte_in, in, table_type, ft->type);
        MLX5_SET(set_fte_in, in, table_id, ft->id);
        MLX5_SET(set_fte_in, in, flow_index, fte->index);
+       MLX5_SET(set_fte_in, in, ignore_flow_level,
+                !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));
+
        if (ft->vport) {
                MLX5_SET(set_fte_in, in, vport_number, ft->vport);
                MLX5_SET(set_fte_in, in, other_vport, 1);
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -579,7 +579,9 @@ static void del_sw_flow_group(struct fs_node *node)
 
        rhashtable_destroy(&fg->ftes_hash);
        ida_destroy(&fg->fte_allocator);
-       if (ft->autogroup.active && fg->max_ftes == ft->autogroup.group_size)
+       if (ft->autogroup.active &&
+           fg->max_ftes == ft->autogroup.group_size &&
+           fg->start_index < ft->autogroup.max_fte)
                ft->autogroup.num_groups--;
        err = rhltable_remove(&ft->fgs_hash,
                              &fg->hash,
@@ -1006,7 +1008,8 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
                                                         u16 vport)
 {
        struct mlx5_flow_root_namespace *root = find_root(&ns->node);
-       struct mlx5_flow_table *next_ft = NULL;
+       bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
+       struct mlx5_flow_table *next_ft;
        struct fs_prio *fs_prio = NULL;
        struct mlx5_flow_table *ft;
        int log_table_sz;
@@ -1023,14 +1026,21 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
                err = -EINVAL;
                goto unlock_root;
        }
-       if (ft_attr->level >= fs_prio->num_levels) {
-               err = -ENOSPC;
-               goto unlock_root;
+
+       if (!unmanaged) {
+               /* The level is related to the
+                * priority level range.
+                */
+               if (ft_attr->level >= fs_prio->num_levels) {
+                       err = -ENOSPC;
+                       goto unlock_root;
+               }
+
+               ft_attr->level += fs_prio->start_level;
        }
-       /* The level is related to the
-        * priority level range.
-        */
-       ft_attr->level += fs_prio->start_level;
+
        ft = alloc_flow_table(ft_attr->level,
                              vport,
                              ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
@@ -1043,19 +1053,27 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
        tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
        log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
-       next_ft = find_next_chained_ft(fs_prio);
+       next_ft = unmanaged ? ft_attr->next_ft :
+                             find_next_chained_ft(fs_prio);
        ft->def_miss_action = ns->def_miss_action;
        err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
        if (err)
                goto free_ft;
 
-       err = connect_flow_table(root->dev, ft, fs_prio);
-       if (err)
-               goto destroy_ft;
+       if (!unmanaged) {
+               err = connect_flow_table(root->dev, ft, fs_prio);
+               if (err)
+                       goto destroy_ft;
+       }
+
        ft->node.active = true;
        down_write_ref_node(&fs_prio->node, false);
-       tree_add_node(&ft->node, &fs_prio->node);
-       list_add_flow_table(ft, fs_prio);
+       if (!unmanaged) {
+               tree_add_node(&ft->node, &fs_prio->node);
+               list_add_flow_table(ft, fs_prio);
+       } else {
+               ft->node.root = fs_prio->node.root;
+       }
        fs_prio->num_ft++;
        up_write_ref_node(&fs_prio->node, false);
        mutex_unlock(&root->chain_lock);
@@ -1103,31 +1121,27 @@ EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
 
 struct mlx5_flow_table*
 mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
-                                    int prio,
-                                    int num_flow_table_entries,
-                                    int max_num_groups,
-                                    u32 level,
-                                    u32 flags)
+                                    struct mlx5_flow_table_attr *ft_attr)
 {
-       struct mlx5_flow_table_attr ft_attr = {};
+       int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
+       int autogroups_max_fte = ft_attr->max_fte - num_reserved_entries;
+       int max_num_groups = ft_attr->autogroup.max_num_groups;
        struct mlx5_flow_table *ft;
 
-       if (max_num_groups > num_flow_table_entries)
+       if (max_num_groups > autogroups_max_fte)
+               return ERR_PTR(-EINVAL);
+       if (num_reserved_entries > ft_attr->max_fte)
                return ERR_PTR(-EINVAL);
 
-       ft_attr.max_fte = num_flow_table_entries;
-       ft_attr.prio = prio;
-       ft_attr.level = level;
-       ft_attr.flags = flags;
-
-       ft = mlx5_create_flow_table(ns, &ft_attr);
+       ft = mlx5_create_flow_table(ns, ft_attr);
        if (IS_ERR(ft))
                return ft;
 
        ft->autogroup.active = true;
        ft->autogroup.required_groups = max_num_groups;
+       ft->autogroup.max_fte = autogroups_max_fte;
        /* We save place for flow groups in addition to max types */
-       ft->autogroup.group_size = ft->max_fte / (max_num_groups + 1);
+       ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);
 
        return ft;
 }
@@ -1149,7 +1163,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
        struct mlx5_flow_group *fg;
        int err;
 
-       if (ft->autogroup.active)
+       if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
                return ERR_PTR(-EPERM);
 
        down_write_ref_node(&ft->node, false);
@@ -1322,9 +1336,10 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
                                                      const struct mlx5_flow_spec *spec)
 {
        struct list_head *prev = &ft->node.children;
-       struct mlx5_flow_group *fg;
+       u32 max_fte = ft->autogroup.max_fte;
        unsigned int candidate_index = 0;
        unsigned int group_size = 0;
+       struct mlx5_flow_group *fg;
 
        if (!ft->autogroup.active)
                return ERR_PTR(-ENOENT);
@@ -1332,7 +1347,7 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
        if (ft->autogroup.num_groups < ft->autogroup.required_groups)
                group_size = ft->autogroup.group_size;
 
-       /* ft->max_fte == ft->autogroup.max_types */
+       /* max_fte == ft->autogroup.max_types */
        if (group_size == 0)
                group_size = 1;
 
@@ -1345,7 +1360,7 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
                prev = &fg->node.list;
        }
 
-       if (candidate_index + group_size > ft->max_fte)
+       if (candidate_index + group_size > max_fte)
                return ERR_PTR(-ENOSPC);
 
        fg = alloc_insert_flow_group(ft,
@@ -1529,18 +1544,30 @@ static bool counter_is_valid(u32 action)
 }
 
 static bool dest_is_valid(struct mlx5_flow_destination *dest,
-                          u32 action,
+                          struct mlx5_flow_act *flow_act,
                           struct mlx5_flow_table *ft)
 {
+       bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
+       u32 action = flow_act->action;
+
        if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
                return counter_is_valid(action);
 
        if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
                return true;
 
+       if (ignore_level) {
+               if (ft->type != FS_FT_FDB)
+                       return false;
+
+               if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
+                   dest->ft->type != FS_FT_FDB)
+                       return false;
+       }
+
        if (!dest || ((dest->type ==
            MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
-           (dest->ft->level <= ft->level)))
+           (dest->ft->level <= ft->level && !ignore_level)))
                return false;
        return true;
 }
@@ -1770,7 +1797,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
                return ERR_PTR(-EINVAL);
 
        for (i = 0; i < dest_num; i++) {
-               if (!dest_is_valid(&dest[i], flow_act->action, ft))
+               if (!dest_is_valid(&dest[i], flow_act, ft))
                        return ERR_PTR(-EINVAL);
        }
        nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
@@ -2033,6 +2060,7 @@ int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
        int err = 0;
 
        mutex_lock(&root->chain_lock);
-       err = disconnect_flow_table(ft);
+       if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
+               err = disconnect_flow_table(ft);
        if (err) {
                mutex_unlock(&root->chain_lock);
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -164,6 +164,7 @@ struct mlx5_flow_table {
                unsigned int required_groups;
                unsigned int group_size;
                unsigned int num_groups;
+               unsigned int max_fte;
        } autogroup;
        /* Protect fwd_rules */
        struct mutex lock;
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -131,11 +131,11 @@ static int mlx5_get_pcam_reg(struct mlx5_core_dev *dev)
                                   MLX5_PCAM_REGS_5000_TO_507F);
 }
 
-static int mlx5_get_mcam_reg(struct mlx5_core_dev *dev)
+static int mlx5_get_mcam_access_reg_group(struct mlx5_core_dev *dev,
+                                          enum mlx5_mcam_reg_groups group)
 {
-       return mlx5_query_mcam_reg(dev, dev->caps.mcam,
-                                  MLX5_MCAM_FEATURE_ENHANCED_FEATURES,
-                                  MLX5_MCAM_REGS_FIRST_128);
+       return mlx5_query_mcam_reg(dev, dev->caps.mcam[group],
+                                  MLX5_MCAM_FEATURE_ENHANCED_FEATURES, group);
 }
 
 static int mlx5_get_qcam_reg(struct mlx5_core_dev *dev)
@@ -221,8 +221,11 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
        if (MLX5_CAP_GEN(dev, pcam_reg))
                mlx5_get_pcam_reg(dev);
 
-       if (MLX5_CAP_GEN(dev, mcam_reg))
-               mlx5_get_mcam_reg(dev);
+       if (MLX5_CAP_GEN(dev, mcam_reg)) {
+               mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_FIRST_128);
+               mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9080_0x90FF);
+               mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9100_0x917F);
+       }
 
        if (MLX5_CAP_GEN(dev, qcam_reg))
                mlx5_get_qcam_reg(dev);
@@ -245,6 +248,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
                        return err;
        }
 
+       if (MLX5_CAP_GEN_64(dev, general_obj_types) &
+           MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
+               err = mlx5_core_get_caps(dev, MLX5_CAP_VDPA_EMULATION);
+               if (err)
+                       return err;
+       }
+
        return 0;
 }
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1105,6 +1105,7 @@ enum mlx5_cap_type {
        MLX5_CAP_DEV_MEM,
        MLX5_CAP_RESERVED_16,
        MLX5_CAP_TLS,
+       MLX5_CAP_VDPA_EMULATION = 0x13,
        MLX5_CAP_DEV_EVENT = 0x14,
        /* NUM OF CAP Types */
        MLX5_CAP_NUM
@@ -1120,6 +1121,9 @@ enum mlx5_pcam_feature_groups {
 
 enum mlx5_mcam_reg_groups {
        MLX5_MCAM_REGS_FIRST_128 = 0x0,
+       MLX5_MCAM_REGS_0x9080_0x90FF = 0x1,
+       MLX5_MCAM_REGS_0x9100_0x917F = 0x2,
+       MLX5_MCAM_REGS_NUM = 0x3,
 };
 
 enum mlx5_mcam_feature_groups {
@@ -1268,7 +1272,16 @@ enum mlx5_qcam_feature_groups {
        MLX5_GET(pcam_reg, (mdev)->caps.pcam, port_access_reg_cap_mask.regs_5000_to_507f.reg)
 
 #define MLX5_CAP_MCAM_REG(mdev, reg) \
-       MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_access_reg_cap_mask.access_regs.reg)
+       MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_FIRST_128], \
+                mng_access_reg_cap_mask.access_regs.reg)
+
+#define MLX5_CAP_MCAM_REG1(mdev, reg) \
+       MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9080_0x90FF], \
+                mng_access_reg_cap_mask.access_regs1.reg)
+
+#define MLX5_CAP_MCAM_REG2(mdev, reg) \
+       MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \
+                mng_access_reg_cap_mask.access_regs2.reg)
 
 #define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
        MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
@@ -1297,6 +1310,14 @@ enum mlx5_qcam_feature_groups {
 #define MLX5_CAP_DEV_EVENT(mdev, cap)\
        MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap)
 
+#define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
+       MLX5_GET(device_virtio_emulation_cap, \
+                (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
+
+#define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
+       MLX5_GET64(device_virtio_emulation_cap, \
+                  (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
+
 enum {
        MLX5_CMD_STAT_OK = 0x0,
        MLX5_CMD_STAT_INT_ERR = 0x1,
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -145,6 +145,8 @@ enum {
        MLX5_REG_MCC = 0x9062,
        MLX5_REG_MCDA = 0x9063,
        MLX5_REG_MCAM = 0x907f,
+       MLX5_REG_MIRC = 0x9162,
+       MLX5_REG_RESOURCE_DUMP = 0xC000,
 };
 
 enum mlx5_qpts_trust_state {
@@ -684,7 +686,7 @@ struct mlx5_core_dev {
                u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
                u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
                u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
-               u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
+               u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)];
                u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
                u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
                u8 embedded_cpu;
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -48,6 +48,7 @@ enum {
        MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0),
        MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1),
        MLX5_FLOW_TABLE_TERMINATION = BIT(2),
+       MLX5_FLOW_TABLE_UNMANAGED = BIT(3),
 };
 
 #define LEFTOVERS_RULE_NUM 2
@@ -145,25 +146,27 @@ mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
                                   enum mlx5_flow_namespace_type type,
                                   int vport);
 
-struct mlx5_flow_table *
-mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
-                                    int prio,
-                                    int num_flow_table_entries,
-                                    int max_num_groups,
-                                    u32 level,
-                                    u32 flags);
-
 struct mlx5_flow_table_attr {
        int prio;
        int max_fte;
        u32 level;
        u32 flags;
+       struct mlx5_flow_table *next_ft;
+
+       struct {
+               int max_num_groups;
+               int num_reserved_entries;
+       } autogroup;
 };
 
 struct mlx5_flow_table *
 mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
                        struct mlx5_flow_table_attr *ft_attr);
 
+struct mlx5_flow_table *
+mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
+                                    struct mlx5_flow_table_attr *ft_attr);
+
 struct mlx5_flow_table *
 mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
                              int prio,
@@ -194,6 +197,7 @@ struct mlx5_fs_vlan {
 
 enum {
        FLOW_ACT_NO_APPEND = BIT(0),
+       FLOW_ACT_IGNORE_FLOW_LEVEL = BIT(1),
 };
 
 struct mlx5_flow_act {