Commit 66958ed9 authored by Hadar Hen Zion, committed by David S. Miller

net/mlx5: Support encap id when setting new steering entry

In order to support steering rules which add encapsulation headers, an
encap_id parameter is needed.

Add a new mlx5_flow_act struct which holds the action-related parameters:
action, flow_tag and encap_id. Use the mlx5_flow_act struct when adding a
new steering rule.
This patch doesn't change any functionality.
Signed-off-by: Hadar Hen Zion <hadarh@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c9f1b073
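
For context, here is a minimal caller-side sketch (not part of the patch) of the calling convention this change introduces: action, flow_tag and encap_id now travel together in a struct mlx5_flow_act instead of being passed to mlx5_add_flow_rules() as separate scalar arguments. The wrapper function name and its parameters below are hypothetical; the struct fields, constants and new signature are taken from the diff.

/* Hypothetical caller: forward matching traffic to one destination,
 * using the default flow tag and no encapsulation header (encap_id = 0).
 */
static struct mlx5_flow_handle *
example_add_fwd_rule(struct mlx5_flow_table *ft,
                     struct mlx5_flow_spec *spec,
                     struct mlx5_flow_destination *dest)
{
        struct mlx5_flow_act flow_act = {
                .action   = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
                .encap_id = 0,
        };

        /* Old signature was: mlx5_add_flow_rules(ft, spec, action, flow_tag, dest, 1) */
        return mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
}
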
@@ -1877,10 +1877,10 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
 {
         struct mlx5_flow_table *ft = ft_prio->flow_table;
         struct mlx5_ib_flow_handler *handler;
+        struct mlx5_flow_act flow_act = {0};
         struct mlx5_flow_spec *spec;
         const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
         unsigned int spec_index;
-        u32 action;
         int err = 0;
         if (!is_valid_attr(flow_attr))
@@ -1905,12 +1905,12 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
         }
         spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
-        action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
+        flow_act.action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
                 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
+        flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
         handler->rule = mlx5_add_flow_rules(ft, spec,
-                                            action,
-                                            MLX5_FS_DEFAULT_FLOW_TAG,
-                                            dst, 1);
+                                            &flow_act,
+                                            dst, 1);
         if (IS_ERR(handler->rule)) {
                 err = PTR_ERR(handler->rule);
...
@@ -174,6 +174,11 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
                                  enum arfs_type type)
 {
         struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
+        struct mlx5_flow_act flow_act = {
+                .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+                .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
+                .encap_id = 0,
+        };
         struct mlx5_flow_destination dest;
         struct mlx5e_tir *tir = priv->indir_tir;
         struct mlx5_flow_spec *spec;
@@ -206,8 +211,7 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
         }
         arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, spec,
-                                                   MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-                                                   MLX5_FS_DEFAULT_FLOW_TAG,
+                                                   &flow_act,
                                                    &dest, 1);
         if (IS_ERR(arfs_t->default_rule)) {
                 err = PTR_ERR(arfs_t->default_rule);
@@ -465,6 +469,11 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
 static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
                                               struct arfs_rule *arfs_rule)
 {
+        struct mlx5_flow_act flow_act = {
+                .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+                .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
+                .encap_id = 0,
+        };
         struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
         struct arfs_tuple *tuple = &arfs_rule->tuple;
         struct mlx5_flow_handle *rule = NULL;
@@ -544,9 +553,7 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
         }
         dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
         dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
-        rule = mlx5_add_flow_rules(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-                                   MLX5_FS_DEFAULT_FLOW_TAG,
-                                   &dest, 1);
+        rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
         if (IS_ERR(rule)) {
                 err = PTR_ERR(rule);
                 netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n",
...
@@ -158,6 +158,11 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
                                  enum mlx5e_vlan_rule_type rule_type,
                                  u16 vid, struct mlx5_flow_spec *spec)
 {
+        struct mlx5_flow_act flow_act = {
+                .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+                .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
+                .encap_id = 0,
+        };
         struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
         struct mlx5_flow_destination dest;
         struct mlx5_flow_handle **rule_p;
@@ -187,10 +192,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
                 break;
         }
-        *rule_p = mlx5_add_flow_rules(ft, spec,
-                                      MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-                                      MLX5_FS_DEFAULT_FLOW_TAG,
-                                      &dest, 1);
+        *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
         if (IS_ERR(*rule_p)) {
                 err = PTR_ERR(*rule_p);
@@ -623,6 +625,11 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
                         u16 etype,
                         u8 proto)
 {
+        struct mlx5_flow_act flow_act = {
+                .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+                .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
+                .encap_id = 0,
+        };
         struct mlx5_flow_handle *rule;
         struct mlx5_flow_spec *spec;
         int err = 0;
@@ -644,10 +651,7 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
                 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
         }
-        rule = mlx5_add_flow_rules(ft, spec,
-                                   MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-                                   MLX5_FS_DEFAULT_FLOW_TAG,
-                                   dest, 1);
+        rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
         if (IS_ERR(rule)) {
                 err = PTR_ERR(rule);
                 netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
@@ -810,6 +814,11 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
 static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
                                   struct mlx5e_l2_rule *ai, int type)
 {
+        struct mlx5_flow_act flow_act = {
+                .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+                .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
+                .encap_id = 0,
+        };
         struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
         struct mlx5_flow_destination dest;
         struct mlx5_flow_spec *spec;
@@ -848,9 +857,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
                 break;
         }
-        ai->rule = mlx5_add_flow_rules(ft, spec,
-                                       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-                                       MLX5_FS_DEFAULT_FLOW_TAG, &dest, 1);
+        ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
         if (IS_ERR(ai->rule)) {
                 netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
                            __func__, mv_dmac);
...
@@ -290,10 +290,10 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
                       struct ethtool_rx_flow_spec *fs)
 {
         struct mlx5_flow_destination *dst = NULL;
+        struct mlx5_flow_act flow_act = {0};
         struct mlx5_flow_spec *spec;
         struct mlx5_flow_handle *rule;
         int err = 0;
-        u32 action;
         spec = mlx5_vzalloc(sizeof(*spec));
         if (!spec)
@@ -304,7 +304,7 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
                 goto free;
         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
-                action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+                flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
         } else {
                 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
                 if (!dst) {
@@ -314,12 +314,12 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
                 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
                 dst->tir_num = priv->direct_tir[fs->ring_cookie].tirn;
-                action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+                flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
         }
         spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
-        rule = mlx5_add_flow_rules(ft, spec, action,
-                                   MLX5_FS_DEFAULT_FLOW_TAG, dst, 1);
+        flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
+        rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1);
         if (IS_ERR(rule)) {
                 err = PTR_ERR(rule);
                 netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
...
@@ -61,6 +61,11 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 {
         struct mlx5_core_dev *dev = priv->mdev;
         struct mlx5_flow_destination dest = { 0 };
+        struct mlx5_flow_act flow_act = {
+                .action = action,
+                .flow_tag = flow_tag,
+                .encap_id = 0,
+        };
         struct mlx5_fc *counter = NULL;
         struct mlx5_flow_handle *rule;
         bool table_created = false;
@@ -95,9 +100,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
         }
         spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-        rule = mlx5_add_flow_rules(priv->fs.tc.t, spec,
-                                   action, flow_tag,
-                                   &dest, 1);
+        rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);
         if (IS_ERR(rule))
                 goto err_add_rule;
...
@@ -244,6 +244,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
         int match_header = (is_zero_ether_addr(mac_c) ? 0 :
                             MLX5_MATCH_OUTER_HEADERS);
         struct mlx5_flow_handle *flow_rule = NULL;
+        struct mlx5_flow_act flow_act = {0};
         struct mlx5_flow_destination dest;
         struct mlx5_flow_spec *spec;
         void *mv_misc = NULL;
@@ -285,10 +286,10 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
                   "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
                   dmac_v, dmac_c, vport);
         spec->match_criteria_enable = match_header;
+        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
         flow_rule =
                 mlx5_add_flow_rules(esw->fdb_table.fdb, spec,
-                                    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-                                    0, &dest, 1);
+                                    &flow_act, &dest, 1);
         if (IS_ERR(flow_rule)) {
                 esw_warn(esw->dev,
                          "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
@@ -1212,6 +1213,7 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
 static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
                                     struct mlx5_vport *vport)
 {
+        struct mlx5_flow_act flow_act = {0};
         struct mlx5_flow_spec *spec;
         int err = 0;
         u8 *smac_v;
@@ -1264,10 +1266,10 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
         }
         spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
         vport->ingress.allow_rule =
                 mlx5_add_flow_rules(vport->ingress.acl, spec,
-                                    MLX5_FLOW_CONTEXT_ACTION_ALLOW,
-                                    0, NULL, 0);
+                                    &flow_act, NULL, 0);
         if (IS_ERR(vport->ingress.allow_rule)) {
                 err = PTR_ERR(vport->ingress.allow_rule);
                 esw_warn(esw->dev,
@@ -1278,10 +1280,10 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
         }
         memset(spec, 0, sizeof(*spec));
+        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
         vport->ingress.drop_rule =
                 mlx5_add_flow_rules(vport->ingress.acl, spec,
-                                    MLX5_FLOW_CONTEXT_ACTION_DROP,
-                                    0, NULL, 0);
+                                    &flow_act, NULL, 0);
         if (IS_ERR(vport->ingress.drop_rule)) {
                 err = PTR_ERR(vport->ingress.drop_rule);
                 esw_warn(esw->dev,
@@ -1301,6 +1303,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 static int esw_vport_egress_config(struct mlx5_eswitch *esw,
                                    struct mlx5_vport *vport)
 {
+        struct mlx5_flow_act flow_act = {0};
         struct mlx5_flow_spec *spec;
         int err = 0;
@@ -1338,10 +1341,10 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
         MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
         spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
         vport->egress.allowed_vlan =
                 mlx5_add_flow_rules(vport->egress.acl, spec,
-                                    MLX5_FLOW_CONTEXT_ACTION_ALLOW,
-                                    0, NULL, 0);
+                                    &flow_act, NULL, 0);
         if (IS_ERR(vport->egress.allowed_vlan)) {
                 err = PTR_ERR(vport->egress.allowed_vlan);
                 esw_warn(esw->dev,
@@ -1353,10 +1356,10 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
         /* Drop others rule (star rule) */
         memset(spec, 0, sizeof(*spec));
+        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
         vport->egress.drop_rule =
                 mlx5_add_flow_rules(vport->egress.acl, spec,
-                                    MLX5_FLOW_CONTEXT_ACTION_DROP,
-                                    0, NULL, 0);
+                                    &flow_act, NULL, 0);
         if (IS_ERR(vport->egress.drop_rule)) {
                 err = PTR_ERR(vport->egress.drop_rule);
                 esw_warn(esw->dev,
...
@@ -49,23 +49,23 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
                                 struct mlx5_esw_flow_attr *attr)
 {
         struct mlx5_flow_destination dest[2] = {};
+        struct mlx5_flow_act flow_act = {0};
         struct mlx5_fc *counter = NULL;
         struct mlx5_flow_handle *rule;
         void *misc;
-        int action;
         int i = 0;
         if (esw->mode != SRIOV_OFFLOADS)
                 return ERR_PTR(-EOPNOTSUPP);
-        action = attr->action;
-        if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+        flow_act.action = attr->action;
+        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
                 dest[i].vport_num = attr->out_rep->vport;
                 i++;
         }
-        if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                 counter = mlx5_fc_create(esw->dev, true);
                 if (IS_ERR(counter))
                         return ERR_CAST(counter);
@@ -84,7 +84,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
                                       MLX5_MATCH_MISC_PARAMETERS;
         rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
-                                   spec, action, 0, dest, i);
+                                   spec, &flow_act, dest, i);
         if (IS_ERR(rule))
                 mlx5_fc_destroy(esw->dev, counter);
@@ -274,6 +274,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
 static struct mlx5_flow_handle *
 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
 {
+        struct mlx5_flow_act flow_act = {0};
         struct mlx5_flow_destination dest;
         struct mlx5_flow_handle *flow_rule;
         struct mlx5_flow_spec *spec;
@@ -297,10 +298,10 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
         spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
         dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
         dest.vport_num = vport;
+        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
         flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
-                                        MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-                                        0, &dest, 1);
+                                        &flow_act, &dest, 1);
         if (IS_ERR(flow_rule))
                 esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
 out:
@@ -363,6 +364,7 @@ int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
 static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
 {
+        struct mlx5_flow_act flow_act = {0};
         struct mlx5_flow_destination dest;
         struct mlx5_flow_handle *flow_rule = NULL;
         struct mlx5_flow_spec *spec;
@@ -377,10 +379,10 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
         dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
         dest.vport_num = 0;
+        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
         flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
-                                        MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-                                        0, &dest, 1);
+                                        &flow_act, &dest, 1);
         if (IS_ERR(flow_rule)) {
                 err = PTR_ERR(flow_rule);
                 esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
@@ -591,6 +593,7 @@ static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
 struct mlx5_flow_handle *
 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
 {
+        struct mlx5_flow_act flow_act = {0};
         struct mlx5_flow_destination dest;
         struct mlx5_flow_handle *flow_rule;
         struct mlx5_flow_spec *spec;
@@ -613,9 +616,9 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
         dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
         dest.tir_num = tirn;
+        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
         flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
-                                        MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-                                        0, &dest, 1);
+                                        &flow_act, &dest, 1);
         if (IS_ERR(flow_rule)) {
                 esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
                 goto out;
...
@@ -248,6 +248,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
         MLX5_SET(flow_context, in_flow_context, group_id, group_id);
         MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
         MLX5_SET(flow_context, in_flow_context, action, fte->action);
+        MLX5_SET(flow_context, in_flow_context, encap_id, fte->encap_id);
         in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
                                       match_value);
         memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));
...
@@ -460,8 +460,7 @@ static void del_flow_group(struct fs_node *node)
                                fg->id, ft->id);
 }
-static struct fs_fte *alloc_fte(u8 action,
-                                u32 flow_tag,
+static struct fs_fte *alloc_fte(struct mlx5_flow_act *flow_act,
                                 u32 *match_value,
                                 unsigned int index)
 {
@@ -473,9 +472,10 @@ static struct fs_fte *alloc_fte(u8 action,
         memcpy(fte->val, match_value, sizeof(fte->val));
         fte->node.type = FS_TYPE_FLOW_ENTRY;
-        fte->flow_tag = flow_tag;
+        fte->flow_tag = flow_act->flow_tag;
         fte->index = index;
-        fte->action = action;
+        fte->action = flow_act->action;
+        fte->encap_id = flow_act->encap_id;
         return fte;
 }
@@ -1117,15 +1117,14 @@ static unsigned int get_free_fte_index(struct mlx5_flow_group *fg,
 /* prev is output, prev->next = new_fte */
 static struct fs_fte *create_fte(struct mlx5_flow_group *fg,
                                  u32 *match_value,
-                                 u8 action,
-                                 u32 flow_tag,
+                                 struct mlx5_flow_act *flow_act,
                                  struct list_head **prev)
 {
         struct fs_fte *fte;
         int index;
         index = get_free_fte_index(fg, prev);
-        fte = alloc_fte(action, flow_tag, match_value, index);
+        fte = alloc_fte(flow_act, match_value, index);
         if (IS_ERR(fte))
                 return fte;
@@ -1219,8 +1218,7 @@ static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
 static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
                                             u32 *match_value,
-                                            u8 action,
-                                            u32 flow_tag,
+                                            struct mlx5_flow_act *flow_act,
                                             struct mlx5_flow_destination *dest,
                                             int dest_num)
 {
@@ -1234,12 +1232,13 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
         fs_for_each_fte(fte, fg) {
                 nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
                 if (compare_match_value(&fg->mask, match_value, &fte->val) &&
-                    (action & fte->action) && flow_tag == fte->flow_tag) {
+                    (flow_act->action & fte->action) &&
+                    flow_act->flow_tag == fte->flow_tag) {
                         int old_action = fte->action;
-                        fte->action |= action;
+                        fte->action |= flow_act->action;
                         handle = add_rule_fte(fte, fg, dest, dest_num,
-                                              old_action != action);
+                                              old_action != flow_act->action);
                         if (IS_ERR(handle)) {
                                 fte->action = old_action;
                                 goto unlock_fte;
@@ -1255,7 +1254,7 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
                 goto unlock_fg;
         }
-        fte = create_fte(fg, match_value, action, flow_tag, &prev);
+        fte = create_fte(fg, match_value, flow_act, &prev);
         if (IS_ERR(fte)) {
                 handle = (void *)fte;
                 goto unlock_fg;
@@ -1332,17 +1331,17 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest,
 static struct mlx5_flow_handle *
 _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
                      struct mlx5_flow_spec *spec,
-                     u32 action,
-                     u32 flow_tag,
+                     struct mlx5_flow_act *flow_act,
                      struct mlx5_flow_destination *dest,
                      int dest_num)
 {
         struct mlx5_flow_group *g;
         struct mlx5_flow_handle *rule;
         int i;
         for (i = 0; i < dest_num; i++) {
-                if (!dest_is_valid(&dest[i], action, ft))
+                if (!dest_is_valid(&dest[i], flow_act->action, ft))
                         return ERR_PTR(-EINVAL);
         }
@@ -1353,7 +1352,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
                                            g->mask.match_criteria,
                                            spec->match_criteria)) {
                         rule = add_rule_fg(g, spec->match_value,
-                                           action, flow_tag, dest, dest_num);
+                                           flow_act, dest, dest_num);
                         if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC)
                                 goto unlock;
                 }
@@ -1365,8 +1364,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
                 goto unlock;
         }
-        rule = add_rule_fg(g, spec->match_value,
-                           action, flow_tag, dest, dest_num);
+        rule = add_rule_fg(g, spec->match_value, flow_act, dest, dest_num);
         if (IS_ERR(rule)) {
                 /* Remove assumes refcount > 0 and autogroup creates a group
                  * with a refcount = 0.
@@ -1390,8 +1388,7 @@ static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
 struct mlx5_flow_handle *
 mlx5_add_flow_rules(struct mlx5_flow_table *ft,
                     struct mlx5_flow_spec *spec,
-                    u32 action,
-                    u32 flow_tag,
+                    struct mlx5_flow_act *flow_act,
                     struct mlx5_flow_destination *dest,
                     int dest_num)
 {
@@ -1399,11 +1396,11 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
         struct mlx5_flow_destination gen_dest;
         struct mlx5_flow_table *next_ft = NULL;
         struct mlx5_flow_handle *handle = NULL;
-        u32 sw_action = action;
+        u32 sw_action = flow_act->action;
         struct fs_prio *prio;
         fs_get_obj(prio, ft->node.parent);
-        if (action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
+        if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
                 if (!fwd_next_prio_supported(ft))
                         return ERR_PTR(-EOPNOTSUPP);
                 if (dest)
@@ -1415,15 +1412,14 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
                         gen_dest.ft = next_ft;
                         dest = &gen_dest;
                         dest_num = 1;
-                        action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+                        flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
                 } else {
                         mutex_unlock(&root->chain_lock);
                         return ERR_PTR(-EOPNOTSUPP);
                 }
         }
-        handle = _mlx5_add_flow_rules(ft, spec, action, flow_tag, dest,
-                                      dest_num);
+        handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, dest_num);
         if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
                 if (!IS_ERR_OR_NULL(handle) &&
...
@@ -151,6 +151,7 @@ struct fs_fte {
         u32 flow_tag;
         u32 index;
         u32 action;
+        u32 encap_id;
         enum fs_fte_status status;
         struct mlx5_fc *counter;
 };
...
@@ -130,14 +130,19 @@ struct mlx5_flow_group *
 mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
 void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
+struct mlx5_flow_act {
+        u32 action;
+        u32 flow_tag;
+        u32 encap_id;
+};
 /* Single destination per rule.
  * Group ID is implied by the match criteria.
  */
 struct mlx5_flow_handle *
 mlx5_add_flow_rules(struct mlx5_flow_table *ft,
                     struct mlx5_flow_spec *spec,
-                    u32 action,
-                    u32 flow_tag,
+                    struct mlx5_flow_act *flow_act,
                     struct mlx5_flow_destination *dest,
                     int dest_num);
 void mlx5_del_flow_rules(struct mlx5_flow_handle *fr);
...