Commit 96e32687 authored by Eli Cohen, committed by Saeed Mahameed

net/mlx5e: Eswitch, Use per vport tables for mirroring

When using port mirroring, we forward the traffic to another table and
use that table to forward to the mirrored vport. Since the hardware
loses the values of reg c, and in particular reg c0, we fail the match
on the input vport which previously existed in reg c0. To overcome this
situation, we use a set of per vport tables, positioned at the lowest
priority, and forward traffic to those tables. Since these tables are
per vport, we can avoid matching on reg c0.

Fixes: c01cfd0f ("net/mlx5: E-Switch, Add match on vport metadata for rule in fast path")
Signed-off-by: Eli Cohen <eli@mellanox.com>
Reviewed-by: Mark Bloch <markb@mellanox.com>
Reviewed-by: Paul Blakey <paulb@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 1708dd54
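
The core of the change, visible in the diff below, is a reference-counted hash table of per-vport FDB tables keyed by (chain, prio, vport, vhca_id): the first rule for a given key creates the table, later rules reuse it, and the last put destroys it. The following is a minimal, self-contained sketch of that get/put pattern in plain C; the names (vport_key, vport_tbl_get, vport_tbl_put), the chained hash table, and the FNV-1a hash are illustrative stand-ins for the kernel's jhash() and hashtable.h machinery, not the actual driver code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>

/* Stand-in for the patch's mlx5_vport_key: packed, so the hash is
 * computed over a well-defined byte layout. */
struct vport_key {
	uint32_t chain;
	uint16_t prio;
	uint16_t vport;
	uint16_t vhca_id;
} __attribute__((packed));

struct vport_entry {
	struct vport_key key;
	unsigned int num_rules;     /* reference count */
	void *table;                /* stand-in for the flow table */
	struct vport_entry *next;   /* hash-bucket chaining */
};

#define NBUCKETS 256
static struct vport_entry *buckets[NBUCKETS];

/* FNV-1a over the packed key; the kernel code uses jhash() instead. */
static uint32_t key_hash(const struct vport_key *k)
{
	const unsigned char *p = (const unsigned char *)k;
	uint32_t h = 2166136261u;
	size_t i;

	for (i = 0; i < sizeof(*k); i++)
		h = (h ^ p[i]) * 16777619u;
	return h % NBUCKETS;
}

/* Get-or-create: the first caller allocates the table, later callers
 * only bump the refcount (cf. esw_vport_tbl_get() in the diff). */
static void *vport_tbl_get(const struct vport_key *k)
{
	uint32_t b = key_hash(k);
	struct vport_entry *e;

	for (e = buckets[b]; e; e = e->next)
		if (!memcmp(&e->key, k, sizeof(*k))) {
			e->num_rules++;
			return e->table;
		}

	e = calloc(1, sizeof(*e));
	if (!e)
		return NULL;    /* the kernel version returns ERR_PTR(-ENOMEM) */
	e->key = *k;
	e->num_rules = 1;
	e->table = malloc(1);   /* placeholder for the real FDB table */
	e->next = buckets[b];
	buckets[b] = e;
	return e->table;
}

/* Put: destroy the table only when the last rule is gone
 * (cf. esw_vport_tbl_put() in the diff). */
static void vport_tbl_put(const struct vport_key *k)
{
	uint32_t b = key_hash(k);
	struct vport_entry **pp;

	for (pp = &buckets[b]; *pp; pp = &(*pp)->next) {
		struct vport_entry *e = *pp;

		if (!memcmp(&e->key, k, sizeof(*k))) {
			if (--e->num_rules)
				return;
			*pp = e->next;
			free(e->table);
			free(e);
			return;
		}
	}
}

int main(void)
{
	struct vport_key k = { .chain = 0, .prio = 1, .vport = 3, .vhca_id = 7 };
	void *t1 = vport_tbl_get(&k);
	void *t2 = vport_tbl_get(&k);   /* same key -> same table, refcount 2 */

	printf("shared table: %s\n", t1 == t2 ? "yes" : "no");
	vport_tbl_put(&k);
	vport_tbl_put(&k);              /* last put frees the table */
	return 0;
}
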
@@ -49,6 +49,7 @@
 /* The index of the last real chain (FT) + 1 as chain zero is valid as well */
 #define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1)
+#define ESW_OFFLOADS_NUM_GROUPS 4
 #define FDB_TC_MAX_PRIO 16
 #define FDB_TC_LEVELS_PER_PRIO 2
@@ -183,6 +184,12 @@ struct mlx5_eswitch_fdb {
 			int vlan_push_pop_refcount;
 			struct mlx5_esw_chains_priv *esw_chains_priv;
+			struct {
+				DECLARE_HASHTABLE(table, 8);
+				/* Protects vports.table */
+				struct mutex lock;
+			} vports;
+
 		} offloads;
 	};
 	u32 flags;
@@ -623,6 +630,9 @@ void
 esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				       struct mlx5_vport *vport);
+int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw);
+void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw);
+
 #else  /* CONFIG_MLX5_ESWITCH */
 /* eswitch API stubs */
 static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
...
@@ -50,6 +50,179 @@
 #define MLX5_ESW_MISS_FLOWS (2)
 #define UPLINK_REP_INDEX 0
 
+/* Per vport tables */
+
+#define MLX5_ESW_VPORT_TABLE_SIZE 128
+
+/* This struct is used as a key to the hash table and we need it to be packed
+ * so hash result is consistent
+ */
+struct mlx5_vport_key {
+	u32 chain;
+	u16 prio;
+	u16 vport;
+	u16 vhca_id;
+} __packed;
+
+struct mlx5_vport_table {
+	struct hlist_node hlist;
+	struct mlx5_flow_table *fdb;
+	u32 num_rules;
+	struct mlx5_vport_key key;
+};
+
+static struct mlx5_flow_table *
+esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
+{
+	struct mlx5_flow_table_attr ft_attr = {};
+	struct mlx5_flow_table *fdb;
+
+	ft_attr.autogroup.max_num_groups = ESW_OFFLOADS_NUM_GROUPS;
+	ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
+	ft_attr.prio = FDB_PER_VPORT;
+	fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+	if (IS_ERR(fdb)) {
+		esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
+			 PTR_ERR(fdb));
+	}
+
+	return fdb;
+}
+
+static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
+				  struct mlx5_esw_flow_attr *attr,
+				  struct mlx5_vport_key *key)
+{
+	key->vport = attr->in_rep->vport;
+	key->chain = attr->chain;
+	key->prio = attr->prio;
+	key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
+	return jhash(key, sizeof(*key), 0);
+}
+
+/* caller must hold vports.lock */
+static struct mlx5_vport_table *
+esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
+{
+	struct mlx5_vport_table *e;
+
+	hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
+		if (!memcmp(&e->key, skey, sizeof(*skey)))
+			return e;
+
+	return NULL;
+}
+
+static void
+esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
+{
+	struct mlx5_vport_table *e;
+	struct mlx5_vport_key key;
+	u32 hkey;
+
+	mutex_lock(&esw->fdb_table.offloads.vports.lock);
+	hkey = flow_attr_to_vport_key(esw, attr, &key);
+	e = esw_vport_tbl_lookup(esw, &key, hkey);
+	if (!e || --e->num_rules)
+		goto out;
+
+	hash_del(&e->hlist);
+	mlx5_destroy_flow_table(e->fdb);
+	kfree(e);
+out:
+	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
+}
+
+static struct mlx5_flow_table *
+esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
+{
+	struct mlx5_core_dev *dev = esw->dev;
+	struct mlx5_flow_namespace *ns;
+	struct mlx5_flow_table *fdb;
+	struct mlx5_vport_table *e;
+	struct mlx5_vport_key skey;
+	u32 hkey;
+
+	mutex_lock(&esw->fdb_table.offloads.vports.lock);
+	hkey = flow_attr_to_vport_key(esw, attr, &skey);
+	e = esw_vport_tbl_lookup(esw, &skey, hkey);
+	if (e) {
+		e->num_rules++;
+		goto out;
+	}
+
+	e = kzalloc(sizeof(*e), GFP_KERNEL);
+	if (!e) {
+		fdb = ERR_PTR(-ENOMEM);
+		goto err_alloc;
+	}
+
+	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+	if (!ns) {
+		esw_warn(dev, "Failed to get FDB namespace\n");
+		fdb = ERR_PTR(-ENOENT);
+		goto err_ns;
+	}
+
+	fdb = esw_vport_tbl_create(esw, ns);
+	if (IS_ERR(fdb))
+		goto err_ns;
+
+	e->fdb = fdb;
+	e->num_rules = 1;
+	e->key = skey;
+	hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
out:
+	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
+	return e->fdb;
err_ns:
+	kfree(e);
err_alloc:
+	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
+	return fdb;
+}
+
+int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
+{
+	struct mlx5_esw_flow_attr attr = {};
+	struct mlx5_eswitch_rep rep = {};
+	struct mlx5_flow_table *fdb;
+	struct mlx5_vport *vport;
+	int i;
+
+	attr.prio = 1;
+	attr.in_rep = &rep;
+	mlx5_esw_for_all_vports(esw, i, vport) {
+		attr.in_rep->vport = vport->vport;
+		fdb = esw_vport_tbl_get(esw, &attr);
+		if (IS_ERR(fdb))
+			goto out;
+	}
+
+	return 0;
+
+out:
+	mlx5_esw_vport_tbl_put(esw);
+	return PTR_ERR(fdb);
+}
+
+void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
+{
+	struct mlx5_esw_flow_attr attr = {};
+	struct mlx5_eswitch_rep rep = {};
+	struct mlx5_vport *vport;
+	int i;
+
+	attr.prio = 1;
+	attr.in_rep = &rep;
+	mlx5_esw_for_all_vports(esw, i, vport) {
+		attr.in_rep->vport = vport->vport;
+		esw_vport_tbl_put(esw, &attr);
+	}
+}
+
+/* End: Per vport tables */
+
 static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						      u16 vport_num)
 {
@@ -191,8 +364,6 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 		i++;
 	}
 
-	mlx5_eswitch_set_rule_source_port(esw, spec, attr);
-
 	if (attr->outer_match_level != MLX5_MATCH_NONE)
 		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
 	if (attr->inner_match_level != MLX5_MATCH_NONE)
@@ -201,8 +372,13 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
 		flow_act.modify_hdr = attr->modify_hdr;
 
-	fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio,
-					!!split);
+	if (split) {
+		fdb = esw_vport_tbl_get(esw, attr);
+	} else {
+		fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio,
+						0);
+		mlx5_eswitch_set_rule_source_port(esw, spec, attr);
+	}
 	if (IS_ERR(fdb)) {
 		rule = ERR_CAST(fdb);
 		goto err_esw_get;
@@ -221,7 +397,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	return rule;
 
 err_add_rule:
-	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, !!split);
+	if (split)
+		esw_vport_tbl_put(esw, attr);
+	else
+		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
 err_esw_get:
 	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
 		mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
@@ -247,7 +426,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
 		goto err_get_fast;
 	}
 
-	fwd_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 1);
+	fwd_fdb = esw_vport_tbl_get(esw, attr);
 	if (IS_ERR(fwd_fdb)) {
 		rule = ERR_CAST(fwd_fdb);
 		goto err_get_fwd;
@@ -285,7 +464,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
 	return rule;
 
 add_err:
-	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
+	esw_vport_tbl_put(esw, attr);
 err_get_fwd:
 	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
 err_get_fast:
@@ -312,11 +491,14 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
 	atomic64_dec(&esw->offloads.num_flows);
 
 	if (fwd_rule) {
-		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
+		esw_vport_tbl_put(esw, attr);
 		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
 	} else {
-		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
-					  !!split);
+		if (split)
+			esw_vport_tbl_put(esw, attr);
+		else
+			mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
+						  0);
 		if (attr->dest_chain)
 			mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
 	}
@@ -1923,6 +2105,9 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
 	if (err)
 		goto create_fg_err;
 
+	mutex_init(&esw->fdb_table.offloads.vports.lock);
+	hash_init(esw->fdb_table.offloads.vports.table);
+
 	return 0;
 
 create_fg_err:
@@ -1939,6 +2124,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
 static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
 {
+	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
 	esw_destroy_vport_rx_group(esw);
 	esw_destroy_offloads_table(esw);
 	esw_destroy_offloads_fdb_tables(esw);
...
@@ -21,8 +21,6 @@
 #define fdb_ignore_flow_level_supported(esw) \
 	(MLX5_CAP_ESW_FLOWTABLE_FDB((esw)->dev, ignore_flow_level))
 
-#define ESW_OFFLOADS_NUM_GROUPS 4
-
 /* Firmware currently has 4 pool of 4 sizes that it supports (ESW_POOLS),
  * and a virtual memory region of 16M (ESW_SIZE), this region is duplicated
  * for each flow table pool. We can allocate up to 16M of each pool,
@@ -704,12 +702,9 @@ mlx5_esw_chains_open(struct mlx5_eswitch *esw)
 
 	/* Open level 1 for split rules now if prios isn't supported */
 	if (!mlx5_esw_chains_prios_supported(esw)) {
-		ft = mlx5_esw_chains_get_table(esw, 0, 1, 1);
-
-		if (IS_ERR(ft)) {
-			err = PTR_ERR(ft);
+		err = mlx5_esw_vport_tbl_get(esw);
+		if (err)
 			goto level_1_err;
-		}
 	}
 
 	return 0;
@@ -725,7 +720,7 @@ static void
 mlx5_esw_chains_close(struct mlx5_eswitch *esw)
 {
 	if (!mlx5_esw_chains_prios_supported(esw))
-		mlx5_esw_chains_put_table(esw, 0, 1, 1);
+		mlx5_esw_vport_tbl_put(esw);
 	mlx5_esw_chains_put_table(esw, 0, 1, 0);
 	mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0);
 }
...
@@ -2700,6 +2700,17 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
 		goto out_err;
 	}
 
+	/* We put this priority last, knowing that nothing will get here
+	 * unless explicitly forwarded to. This is possible because the
+	 * slow path tables have catch all rules and nothing gets passed
+	 * those tables.
+	 */
+	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
+	if (IS_ERR(maj_prio)) {
+		err = PTR_ERR(maj_prio);
+		goto out_err;
+	}
+
 	set_prio_attrs(steering->fdb_root_ns);
 	return 0;
...
@@ -84,6 +84,7 @@ enum {
 	FDB_TC_OFFLOAD,
 	FDB_FT_OFFLOAD,
 	FDB_SLOW_PATH,
+	FDB_PER_VPORT,
 };
 
 struct mlx5_pkt_reformat;
...
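
Why does forwarding into a per-vport table remove the need to match on reg_c0? Because the first-level lookup already selects the second-level table by input vport, the table identity itself encodes the source port, and the second-level rules can match on packet fields alone. Below is a small, hypothetical model of that two-level dispatch in plain C; every name in it is invented for illustration and none of it is driver code.

#include <stdio.h>

#define NVPORTS 4

struct rule {
	int match_dst;          /* packet field to match (dst port) */
	const char *action;
};

/* One second-level table per input vport; the index *is* the vport,
 * so no rule below needs a source-port (reg_c0) match. */
static struct rule per_vport_tbl[NVPORTS] = {
	[2] = { .match_dst = 80, .action = "mirror to vport 3, then forward" },
};

static const char *dispatch(int in_vport, int dst_port)
{
	/* Level 1: forward purely by input vport. Any metadata such as
	 * reg_c0 would be lost across this hop, which is the hardware
	 * limitation the patch works around. */
	const struct rule *r = &per_vport_tbl[in_vport];

	/* Level 2: match on packet fields only. */
	if (r->action && r->match_dst == dst_port)
		return r->action;
	return "miss -> slow path";
}

int main(void)
{
	printf("vport 2, dport 80: %s\n", dispatch(2, 80));
	printf("vport 1, dport 80: %s\n", dispatch(1, 80));
	return 0;
}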