Commit b7826076 authored by Parav Pandit, committed by Saeed Mahameed

net/mlx5e: E-switch, Fix Ingress ACL groups in switchdev mode for prio tag

In the cited commit, when prio tag mode is enabled, FTE creation fails
because the single ingress ACL group is created without match criteria
and therefore cannot hold the prio tag FTE, which matches on cvlan_tag.

Hence,
(a) create a prio tag group, metadata_prio_tag_grp, when prio tag is
enabled, with match criteria for the vlan push FTE;
(b) rename metadata_grp to metadata_allmatch_grp to reflect its purpose.
A condensed sketch of the resulting group layout follows.
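
For reference, here is a condensed sketch of the two-group layout (all
identifiers are taken from esw_vport_create_ingress_acl_group() in the
diff below; declarations, zeroing of flow_group_in, and error handling
are omitted):

    /* Group 0 exists only when prio tag is enabled. Its match criteria
     * cover outer_headers.cvlan_tag, so the group can hold the
     * untagged-packet vlan push FTE added by
     * esw_vport_ingress_prio_tag_config().
     */
    match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
                                  match_criteria);
    MLX5_SET(create_flow_group_in, flow_group_in,
             match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
    MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
    MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
    MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
    vport->ingress.offloads.metadata_prio_tag_grp =
            mlx5_create_flow_group(vport->ingress.acl, flow_group_in);

    /* Group 1 (metadata_allmatch_grp) is created from a zeroed
     * flow_group_in, i.e. with no match criteria, at the next flow
     * index; it holds the match-all metadata-tagging FTE.
     */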

Also, when priority tag is enabled, delete the metadata settings only
after deleting the ingress rules that use them (see the sketch below).
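
Sketch of the corrected teardown order (these are exactly the calls
reordered in the hunks below):

    /* modify_metadata_rule references the modify_metadata header
     * object, so destroy the rules before deallocating the header;
     * the groups and the table are destroyed last.
     */
    esw_vport_cleanup_ingress_rules(esw, vport);
    esw_vport_del_ingress_acl_modify_metadata(esw, vport);
    esw_vport_destroy_ingress_acl_group(vport);
    esw_vport_destroy_ingress_acl_table(vport);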

Tidy up the rest of the ingress config code by removing unnecessary labels.

Fixes: 10652f39 ("net/mlx5: Refactor ingress acl configuration")
Signed-off-by: Parav Pandit <parav@mellanox.com>
Reviewed-by: Eli Britstein <elibr@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 3d7cadae
@@ -81,7 +81,14 @@ struct vport_ingress {
                 struct mlx5_fc *drop_counter;
         } legacy;
         struct {
-                struct mlx5_flow_group *metadata_grp;
+                /* Optional group to add an FTE to do internal priority
+                 * tagging on ingress packets.
+                 */
+                struct mlx5_flow_group *metadata_prio_tag_grp;
+                /* Group to add default match-all FTE entry to tag ingress
+                 * packet with metadata.
+                 */
+                struct mlx5_flow_group *metadata_allmatch_grp;
                 struct mlx5_modify_hdr *modify_metadata;
                 struct mlx5_flow_handle *modify_metadata_rule;
         } offloads;
@@ -88,6 +88,14 @@ u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
         return 1;
 }
 
+static bool
+esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
+                                   const struct mlx5_vport *vport)
+{
+        return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
+                mlx5_eswitch_is_vf_vport(esw, vport->vport));
+}
+
 static void
 mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
                                   struct mlx5_flow_spec *spec,
@@ -1760,12 +1768,9 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
          * required, allow
          * Unmatched traffic is allowed by default
          */
-
         spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
-        if (!spec) {
-                err = -ENOMEM;
-                goto out_no_mem;
-        }
+        if (!spec)
+                return -ENOMEM;
 
         /* Untagged packets - push prio tag VLAN, allow */
         MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
@@ -1791,14 +1796,9 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
                          "vport[%d] configure ingress untagged allow rule, err(%d)\n",
                          vport->vport, err);
                 vport->ingress.allow_rule = NULL;
-                goto out;
         }
 
-out:
         kvfree(spec);
-out_no_mem:
-        if (err)
-                esw_vport_cleanup_ingress_rules(esw, vport);
 
         return err;
 }
@@ -1836,13 +1836,9 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
                 esw_warn(esw->dev,
                          "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
                          vport->vport, err);
+                mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
                 vport->ingress.offloads.modify_metadata_rule = NULL;
-                goto out;
         }
 
-out:
-        if (err)
-                mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
-
         return err;
 }
@@ -1862,50 +1858,103 @@ static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
 {
         int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
         struct mlx5_flow_group *g;
+        void *match_criteria;
         u32 *flow_group_in;
+        u32 flow_index = 0;
         int ret = 0;
 
         flow_group_in = kvzalloc(inlen, GFP_KERNEL);
         if (!flow_group_in)
                 return -ENOMEM;
 
-        memset(flow_group_in, 0, inlen);
-        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
-        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
-
-        g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
-        if (IS_ERR(g)) {
-                ret = PTR_ERR(g);
-                esw_warn(esw->dev,
-                         "Failed to create vport[%d] ingress metadata group, err(%d)\n",
-                         vport->vport, ret);
-                goto grp_err;
+        if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
+                /* This group is to hold FTE to match untagged packets when prio_tag
+                 * is enabled.
+                 */
+                memset(flow_group_in, 0, inlen);
+
+                match_criteria = MLX5_ADDR_OF(create_flow_group_in,
+                                              flow_group_in, match_criteria);
+                MLX5_SET(create_flow_group_in, flow_group_in,
+                         match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+                MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
+                MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
+                MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
+
+                g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+                if (IS_ERR(g)) {
+                        ret = PTR_ERR(g);
+                        esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
+                                 vport->vport, ret);
+                        goto prio_tag_err;
+                }
+                vport->ingress.offloads.metadata_prio_tag_grp = g;
+                flow_index++;
+        }
+
+        if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+                /* This group holds an FTE with no matches for add metadata for
+                 * tagged packets, if prio-tag is enabled (as a fallthrough),
+                 * or all traffic in case prio-tag is disabled.
+                 */
+                memset(flow_group_in, 0, inlen);
+                MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
+                MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
+
+                g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+                if (IS_ERR(g)) {
+                        ret = PTR_ERR(g);
+                        esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
+                                 vport->vport, ret);
+                        goto metadata_err;
+                }
+                vport->ingress.offloads.metadata_allmatch_grp = g;
         }
-        vport->ingress.offloads.metadata_grp = g;
-grp_err:
+
+        kvfree(flow_group_in);
+        return 0;
+
+metadata_err:
+        if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
+                mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
+                vport->ingress.offloads.metadata_prio_tag_grp = NULL;
+        }
+prio_tag_err:
         kvfree(flow_group_in);
         return ret;
 }
 
 static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
 {
-        if (vport->ingress.offloads.metadata_grp) {
-                mlx5_destroy_flow_group(vport->ingress.offloads.metadata_grp);
-                vport->ingress.offloads.metadata_grp = NULL;
+        if (vport->ingress.offloads.metadata_allmatch_grp) {
+                mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp);
+                vport->ingress.offloads.metadata_allmatch_grp = NULL;
+        }
+
+        if (vport->ingress.offloads.metadata_prio_tag_grp) {
+                mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
+                vport->ingress.offloads.metadata_prio_tag_grp = NULL;
         }
 }
 
 static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
                                     struct mlx5_vport *vport)
 {
+        int num_ftes = 0;
         int err;
 
         if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
-            !MLX5_CAP_GEN(esw->dev, prio_tag_required))
+            !esw_check_ingress_prio_tag_enabled(esw, vport))
                 return 0;
 
         esw_vport_cleanup_ingress_rules(esw, vport);
-        err = esw_vport_create_ingress_acl_table(esw, vport, 1);
+
+        if (mlx5_eswitch_vport_match_metadata_enabled(esw))
+                num_ftes++;
+        if (esw_check_ingress_prio_tag_enabled(esw, vport))
+                num_ftes++;
+
+        err = esw_vport_create_ingress_acl_table(esw, vport, num_ftes);
         if (err) {
                 esw_warn(esw->dev,
                          "failed to enable ingress acl (%d) on vport[%d]\n",
@@ -1926,8 +1975,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
                         goto metadata_err;
         }
 
-        if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
-            mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
+        if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
                 err = esw_vport_ingress_prio_tag_config(esw, vport);
                 if (err)
                         goto prio_tag_err;
@@ -1937,7 +1985,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 prio_tag_err:
         esw_vport_del_ingress_acl_modify_metadata(esw, vport);
 metadata_err:
-        esw_vport_cleanup_ingress_rules(esw, vport);
         esw_vport_destroy_ingress_acl_group(vport);
 group_err:
         esw_vport_destroy_ingress_acl_table(vport);
@@ -2008,8 +2055,9 @@ esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
         if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
                 err = esw_vport_egress_config(esw, vport);
                 if (err) {
-                        esw_vport_del_ingress_acl_modify_metadata(esw, vport);
                         esw_vport_cleanup_ingress_rules(esw, vport);
+                        esw_vport_del_ingress_acl_modify_metadata(esw, vport);
+                        esw_vport_destroy_ingress_acl_group(vport);
                         esw_vport_destroy_ingress_acl_table(vport);
                 }
         }
@@ -2021,8 +2069,8 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
                                       struct mlx5_vport *vport)
 {
         esw_vport_disable_egress_acl(esw, vport);
-        esw_vport_del_ingress_acl_modify_metadata(esw, vport);
         esw_vport_cleanup_ingress_rules(esw, vport);
+        esw_vport_del_ingress_acl_modify_metadata(esw, vport);
         esw_vport_destroy_ingress_acl_group(vport);
         esw_vport_destroy_ingress_acl_table(vport);
 }