Commit 79e28519 authored by David S. Miller

Merge tag 'mlx5-updates-2020-03-17' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2020-03-17

1) Compiler warnings and cleanup for the connection tracking series
2) Bug fixes for the connection tracking series
3) Fix devlink port register sequence
4) Last five patches in the series, by Eli Cohen:
   Add support for forwarding traffic between two eswitch uplink
   representors (hairpin for eswitch), using mlx5 termination tables
   to change the direction of a packet in HW from the RX to the TX pipeline.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d445dff2 87b51810
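
To make the hairpin idea above concrete, here is a standalone sketch (plain C; every name is invented for illustration, the real checks live in the en_tc.c and eswitch_offloads_termtbl.c hunks below): forwarding between two uplink representors is only offloadable when the device can terminate raw traffic in a termination table, because that is what flips a packet from the RX to the TX pipeline.

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for the firmware capability bit the series keys off. */
struct fw_caps {
        bool termination_table_raw_traffic;
};

/* A flow between two uplink representors must "hairpin": the packet
 * arrives on an uplink's RX pipeline and must leave through the TX
 * pipeline, which the HW only does via a termination table, and only
 * when the raw-traffic capability is present.
 */
static bool uplink_hairpin_offloadable(const struct fw_caps *caps,
                                       bool in_is_uplink,
                                       bool out_is_uplink)
{
        if (!(in_is_uplink && out_is_uplink))
                return true;    /* not a hairpin flow, no extra requirement */

        return caps->termination_table_raw_traffic;
}

int main(void)
{
        struct fw_caps caps = { .termination_table_raw_traffic = false };

        /* Without the capability, uplink-to-uplink forwarding is rejected. */
        printf("offloadable: %d\n",
               uplink_hairpin_offloadable(&caps, true, true));
        return 0;
}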
@@ -3,20 +3,14 @@
 #include "en/devlink.h"
 
-int mlx5e_devlink_port_register(struct net_device *netdev)
+int mlx5e_devlink_port_register(struct mlx5e_priv *priv)
 {
-        struct mlx5_core_dev *dev;
-        struct mlx5e_priv *priv;
-        struct devlink *devlink;
-        int err;
+        struct devlink *devlink = priv_to_devlink(priv->mdev);
 
-        priv = netdev_priv(netdev);
-        dev = priv->mdev;
-
-        if (mlx5_core_is_pf(dev))
+        if (mlx5_core_is_pf(priv->mdev))
                 devlink_port_attrs_set(&priv->dl_port,
                                        DEVLINK_PORT_FLAVOUR_PHYSICAL,
-                                       PCI_FUNC(dev->pdev->devfn),
+                                       PCI_FUNC(priv->mdev->pdev->devfn),
                                        false, 0,
                                        NULL, 0);
         else
@@ -24,12 +18,12 @@ int mlx5e_devlink_port_register(struct net_device *netdev)
                                        DEVLINK_PORT_FLAVOUR_VIRTUAL,
                                        0, false, 0, NULL, 0);
 
-        devlink = priv_to_devlink(dev);
-        err = devlink_port_register(devlink, &priv->dl_port, 1);
-        if (err)
-                return err;
-        devlink_port_type_eth_set(&priv->dl_port, netdev);
-        return 0;
+        return devlink_port_register(devlink, &priv->dl_port, 1);
+}
+
+void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv)
+{
+        devlink_port_type_eth_set(&priv->dl_port, priv->netdev);
 }
 
 void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv)
...
@@ -7,8 +7,9 @@
 #include <net/devlink.h>
 #include "en.h"
 
-int mlx5e_devlink_port_register(struct net_device *dev);
+int mlx5e_devlink_port_register(struct mlx5e_priv *priv);
 void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv);
+void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv);
 struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev);
 
 #endif
@@ -484,19 +484,23 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
         struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
         struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
         struct mlx5_eswitch *esw = ct_priv->esw;
-        struct mlx5_flow_spec spec = {};
+        struct mlx5_flow_spec *spec = NULL;
         u32 tupleid = 1;
         int err;
 
         zone_rule->nat = nat;
 
+        spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+        if (!spec)
+                return -ENOMEM;
+
         /* Get tuple unique id */
         err = idr_alloc_u32(&ct_priv->tuple_ids, zone_rule, &tupleid,
                             TUPLE_ID_MAX, GFP_KERNEL);
         if (err) {
                 netdev_warn(ct_priv->netdev,
                             "Failed to allocate tuple id, err: %d\n", err);
-                return err;
+                goto err_idr_alloc;
         }
         zone_rule->tupleid = tupleid;
 
@@ -517,18 +521,19 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
         attr->counter = entry->counter;
         attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
 
-        mlx5_tc_ct_set_tuple_match(&spec, flow_rule);
-        mlx5e_tc_match_to_reg_match(&spec, ZONE_TO_REG,
+        mlx5_tc_ct_set_tuple_match(spec, flow_rule);
+        mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
                                     entry->zone & MLX5_CT_ZONE_MASK,
                                     MLX5_CT_ZONE_MASK);
 
-        zone_rule->rule = mlx5_eswitch_add_offloaded_rule(esw, &spec, attr);
+        zone_rule->rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
         if (IS_ERR(zone_rule->rule)) {
                 err = PTR_ERR(zone_rule->rule);
                 ct_dbg("Failed to add ct entry rule, nat: %d", nat);
                 goto err_rule;
         }
 
+        kfree(spec);
         ct_dbg("Offloaded ct entry rule in zone %d", entry->zone);
 
         return 0;
@@ -537,6 +542,8 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
         mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr);
 err_mod_hdr:
         idr_remove(&ct_priv->tuple_ids, zone_rule->tupleid);
+err_idr_alloc:
+        kfree(spec);
         return err;
 }
 
@@ -696,7 +703,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
 {
         struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
         struct flow_dissector_key_ct *mask, *key;
-        bool trk, est, untrk, unest, new, unnew;
+        bool trk, est, untrk, unest, new;
         u32 ctstate = 0, ctstate_mask = 0;
         u16 ct_state_on, ct_state_off;
         u16 ct_state, ct_state_mask;
@@ -739,7 +746,6 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
         new = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_NEW;
         est = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
         untrk = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
-        unnew = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_NEW;
         unest = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
 
         ctstate |= trk ? MLX5_CT_STATE_TRK_BIT : 0;
@@ -885,8 +891,8 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
         struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
         bool nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
         struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {};
+        struct mlx5_flow_spec *post_ct_spec = NULL;
         struct mlx5_eswitch *esw = ct_priv->esw;
-        struct mlx5_flow_spec post_ct_spec = {};
         struct mlx5_esw_flow_attr *pre_ct_attr;
         struct mlx5_modify_hdr *mod_hdr;
         struct mlx5_flow_handle *rule;
@@ -895,9 +901,13 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
         struct mlx5_ct_ft *ft;
         u32 fte_id = 1;
 
+        post_ct_spec = kzalloc(sizeof(*post_ct_spec), GFP_KERNEL);
         ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
-        if (!ct_flow)
+        if (!post_ct_spec || !ct_flow) {
+                kfree(post_ct_spec);
+                kfree(ct_flow);
                 return -ENOMEM;
+        }
 
         /* Register for CT established events */
         ft = mlx5_tc_ct_add_ft_cb(ct_priv, attr->ct_attr.zone,
@@ -992,7 +1002,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
         /* Post ct rule matches on fte_id and executes original rule's
          * tc rule action
          */
-        mlx5e_tc_match_to_reg_match(&post_ct_spec, FTEID_TO_REG,
+        mlx5e_tc_match_to_reg_match(post_ct_spec, FTEID_TO_REG,
                                     fte_id, MLX5_FTE_ID_MASK);
 
         /* Put post_ct rule on post_ct fdb */
@@ -1003,7 +1013,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
         ct_flow->post_ct_attr.inner_match_level = MLX5_MATCH_NONE;
         ct_flow->post_ct_attr.outer_match_level = MLX5_MATCH_NONE;
         ct_flow->post_ct_attr.action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP);
-        rule = mlx5_eswitch_add_offloaded_rule(esw, &post_ct_spec,
+        rule = mlx5_eswitch_add_offloaded_rule(esw, post_ct_spec,
                                                &ct_flow->post_ct_attr);
         ct_flow->post_ct_rule = rule;
         if (IS_ERR(ct_flow->post_ct_rule)) {
@@ -1027,6 +1037,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
         attr->ct_attr.ct_flow = ct_flow;
         *flow_rule = ct_flow->post_ct_rule;
         dealloc_mod_hdr_actions(&pre_mod_acts);
+        kfree(post_ct_spec);
 
         return 0;
 
@@ -1043,6 +1054,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
 err_idr:
         mlx5_tc_ct_del_ft_cb(ct_priv, ft);
 err_ft:
+        kfree(post_ct_spec);
         kfree(ct_flow);
         netdev_warn(priv->netdev, "Failed to offload ct flow, err %d\n", err);
         return err;
...
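
The tc_ct.c hunks above all apply one pattern, matching the "compiler warnings" item in the commit message: struct mlx5_flow_spec is large enough that keeping it on the kernel stack can trip -Wframe-larger-than, so it moves to a kzalloc'd buffer that must be freed on every exit path, success and error alike. A standalone userspace sketch of the same pattern (struct contents and sizes are stand-ins):

#include <stdlib.h>
#include <string.h>

/* Stand-in for a struct too large to keep on the kernel stack. */
struct big_spec {
        unsigned char match_criteria[512];
        unsigned char match_value[512];
};

static int add_rule(void)
{
        struct big_spec *spec;
        int err = 0;

        /* Allocate instead of `struct big_spec spec = {};` on the stack. */
        spec = calloc(1, sizeof(*spec));
        if (!spec)
                return -1;

        /* ... fill the spec and install the rule ... */
        memset(spec->match_value, 0xff, sizeof(spec->match_value));

        /* Free on every exit path, success and error alike. */
        free(spec);
        return err;
}

int main(void)
{
        return add_rule() ? 1 : 0;
}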
@@ -8,6 +8,8 @@
 #include <linux/mlx5/fs.h>
 #include <net/tc_act/tc_ct.h>
 
+#include "en.h"
+
 struct mlx5_esw_flow_attr;
 struct mlx5e_tc_mod_hdr_acts;
 struct mlx5_rep_uplink_priv;
@@ -128,6 +130,11 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
                        struct flow_cls_offload *f,
                        struct netlink_ext_ack *extack)
 {
+        if (!flow_rule_match_key(f->rule, FLOW_DISSECTOR_KEY_CT))
+                return 0;
+
+        NL_SET_ERR_MSG_MOD(extack, "mlx5 tc ct offload isn't enabled.");
+        netdev_warn(priv->netdev, "mlx5 tc ct offload isn't enabled.\n");
         return -EOPNOTSUPP;
 }
 
@@ -137,6 +144,8 @@ mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
                         const struct flow_action_entry *act,
                         struct netlink_ext_ack *extack)
 {
+        NL_SET_ERR_MSG_MOD(extack, "mlx5 tc ct offload isn't enabled.");
+        netdev_warn(priv->netdev, "mlx5 tc ct offload isn't enabled.\n");
         return -EOPNOTSUPP;
 }
...
@@ -66,6 +66,9 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
               mlx5e_is_uplink_rep(netdev_priv(*out_dev))))
                 return -EOPNOTSUPP;
 
+        if (mlx5e_eswitch_uplink_rep(priv->netdev) && *out_dev != priv->netdev)
+                return -EOPNOTSUPP;
+
         return 0;
 }
...
@@ -5467,25 +5467,27 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
                 goto err_destroy_netdev;
         }
 
-        err = register_netdev(netdev);
+        err = mlx5e_devlink_port_register(priv);
         if (err) {
-                mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
+                mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err);
                 goto err_detach;
         }
 
-        err = mlx5e_devlink_port_register(netdev);
+        err = register_netdev(netdev);
         if (err) {
-                mlx5_core_err(mdev, "mlx5e_devlink_phy_port_register failed, %d\n", err);
-                goto err_unregister_netdev;
+                mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
+                goto err_devlink_port_unregister;
         }
 
+        mlx5e_devlink_port_type_eth_set(priv);
+
 #ifdef CONFIG_MLX5_CORE_EN_DCB
         mlx5e_dcbnl_init_app(priv);
 #endif
 
         return priv;
 
-err_unregister_netdev:
-        unregister_netdev(netdev);
+err_devlink_port_unregister:
+        mlx5e_devlink_port_unregister(priv);
 err_detach:
         mlx5e_detach(mdev, priv);
 err_destroy_netdev:
...
@@ -1985,11 +1985,11 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
         *match_inner = !needs_mapping;
 
         if ((needs_mapping || sets_mapping) &&
-            !mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+            !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
                 NL_SET_ERR_MSG(extack,
-                               "Chains on tunnel devices isn't supported without register metadata support");
+                               "Chains on tunnel devices isn't supported without register loopback support");
                 netdev_warn(priv->netdev,
-                            "Chains on tunnel devices isn't supported without register metadata support");
+                            "Chains on tunnel devices isn't supported without register loopback support");
                 return -EOPNOTSUPP;
         }
@@ -3044,8 +3044,7 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
                                     struct mlx5e_tc_flow *flow,
                                     struct netlink_ext_ack *extack)
 {
-        struct net_device *filter_dev = parse_attr->filter_dev;
-        bool drop_action, pop_action, ct_flow;
+        bool ct_flow;
         u32 actions;
 
         ct_flow = flow_flag_test(flow, CT);
@@ -3064,18 +3063,6 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
                 actions = flow->nic_attr->action;
         }
 
-        drop_action = actions & MLX5_FLOW_CONTEXT_ACTION_DROP;
-        pop_action = actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
-
-        if (flow_flag_test(flow, EGRESS) && !drop_action) {
-                /* We only support filters on tunnel device, or on vlan
-                 * devices if they have pop/drop action
-                 */
-                if (!mlx5e_get_tc_tun(filter_dev) ||
-                    (is_vlan_dev(filter_dev) && !pop_action))
-                        return false;
-        }
-
         if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                 return modify_header_match_supported(&parse_attr->spec,
                                                      flow_action, actions,
@@ -3654,6 +3641,46 @@ static int mlx5_validate_goto_chain(struct mlx5_eswitch *esw,
         return 0;
 }
 
+static int verify_uplink_forwarding(struct mlx5e_priv *priv,
+                                    struct mlx5e_tc_flow *flow,
+                                    struct net_device *out_dev,
+                                    struct netlink_ext_ack *extack)
+{
+        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+        struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+        struct mlx5e_rep_priv *rep_priv;
+
+        /* Forwarding non encapsulated traffic between
+         * uplink ports is allowed only if
+         * termination_table_raw_traffic cap is set.
+         *
+         * Input vport was stored esw_attr->in_rep.
+         * In LAG case, *priv* is the private data of
+         * uplink which may be not the input vport.
+         */
+        rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep);
+        if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
+              mlx5e_eswitch_uplink_rep(out_dev)))
+                return 0;
+
+        if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
+                                        termination_table_raw_traffic)) {
+                NL_SET_ERR_MSG_MOD(extack,
+                                   "devices are both uplink, can't offload forwarding");
+                pr_err("devices %s %s are both uplink, can't offload forwarding\n",
+                       priv->netdev->name, out_dev->name);
+                return -EOPNOTSUPP;
+        } else if (out_dev != rep_priv->netdev) {
+                NL_SET_ERR_MSG_MOD(extack,
+                                   "devices are not the same uplink, can't offload forwarding");
+                pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n",
+                       priv->netdev->name, out_dev->name);
+                return -EOPNOTSUPP;
+        }
+        return 0;
+}
+
 static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
                                 struct flow_action *flow_action,
                                 struct mlx5e_tc_flow *flow,
@@ -3751,7 +3778,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
                         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
                         struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
                         struct net_device *uplink_upper;
-                        struct mlx5e_rep_priv *rep_priv;
 
                         if (is_duplicated_output_device(priv->netdev,
                                                         out_dev,
@@ -3787,21 +3813,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
                                         return err;
                         }
 
-                        /* Don't allow forwarding between uplink.
-                         *
-                         * Input vport was stored esw_attr->in_rep.
-                         * In LAG case, *priv* is the private data of
-                         * uplink which may be not the input vport.
-                         */
-                        rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep);
-                        if (mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
-                            mlx5e_eswitch_uplink_rep(out_dev)) {
-                                NL_SET_ERR_MSG_MOD(extack,
-                                                   "devices are both uplink, can't offload forwarding");
-                                pr_err("devices %s %s are both uplink, can't offload forwarding\n",
-                                       priv->netdev->name, out_dev->name);
-                                return -EOPNOTSUPP;
-                        }
+                        err = verify_uplink_forwarding(priv, flow, out_dev, extack);
+                        if (err)
+                                return err;
 
                         if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
                                 NL_SET_ERR_MSG_MOD(extack,
@@ -4534,8 +4548,14 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
 int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
                                 struct tc_cls_matchall_offload *ma)
 {
+        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
         struct netlink_ext_ack *extack = ma->common.extack;
 
+        if (!mlx5_esw_qos_enabled(esw)) {
+                NL_SET_ERR_MSG_MOD(extack, "QoS is not supported on this device");
+                return -EOPNOTSUPP;
+        }
+
         if (ma->common.prio != 1) {
                 NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
                 return -EINVAL;
...
@@ -332,6 +332,7 @@ struct mlx5_termtbl_handle;
 
 bool
 mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
+                              struct mlx5_esw_flow_attr *attr,
                               struct mlx5_flow_act *flow_act,
                               struct mlx5_flow_spec *spec);
 
@@ -393,6 +394,7 @@ enum {
         MLX5_ESW_ATTR_FLAG_VLAN_HANDLED = BIT(0),
         MLX5_ESW_ATTR_FLAG_SLOW_PATH = BIT(1),
         MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2),
+        MLX5_ESW_ATTR_FLAG_HAIRPIN = BIT(3),
 };
 
 struct mlx5_esw_flow_attr {
@@ -453,6 +455,11 @@ int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw,
                                           struct mlx5_vport *vport,
                                           u16 vlan_id, u32 flow_action);
 
+static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw)
+{
+        return esw->qos.enabled;
+}
+
 static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
                                                        u8 vlan_depth)
 {
@@ -677,7 +684,7 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
 static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {}
 
-static struct mlx5_flow_handle *
+static inline struct mlx5_flow_handle *
 esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
 {
         return ERR_PTR(-EOPNOTSUPP);
...
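
One subtle fix above is the `static` to `static inline` change on the esw_add_restore_rule stub: a plain static function defined in a header emits a private copy in every .c file that includes it, and -Wunused-function fires in every translation unit that never calls it, while `static inline` is the idiomatic form for an always-available header fallback. A standalone sketch (invented names):

#include <stdio.h>

/* Imagine this definition lives in a header included by many .c files.
 * As plain `static` it would warn with -Wunused-function wherever it
 * goes uncalled; `static inline` tells the compiler an unused copy in
 * a given translation unit is fine.
 */
static inline int feature_stub(void)
{
        return -95;     /* -EOPNOTSUPP: feature compiled out */
}

int main(void)
{
        printf("stub returns %d\n", feature_stub());
        return 0;
}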
@@ -300,6 +300,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
         bool split = !!(attr->split_count);
         struct mlx5_flow_handle *rule;
         struct mlx5_flow_table *fdb;
+        bool hairpin = false;
         int j, i = 0;
 
         if (esw->mode != MLX5_ESWITCH_OFFLOADS)
@@ -397,16 +398,21 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
                 goto err_esw_get;
         }
 
-        if (mlx5_eswitch_termtbl_required(esw, &flow_act, spec))
+        if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec)) {
                 rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
                                                      &flow_act, dest, i);
-        else
+                hairpin = true;
+        } else {
                 rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
+        }
         if (IS_ERR(rule))
                 goto err_add_rule;
         else
                 atomic64_inc(&esw->offloads.num_flows);
 
+        if (hairpin)
+                attr->flags |= MLX5_ESW_ATTR_FLAG_HAIRPIN;
+
         return rule;
 
 err_add_rule:
@@ -495,10 +501,12 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
 
         mlx5_del_flow_rules(rule);
 
-        /* unref the term table */
-        for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
-                if (attr->dests[i].termtbl)
-                        mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
+        if (attr->flags & MLX5_ESW_ATTR_FLAG_HAIRPIN) {
+                /* unref the term table */
+                for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
+                        if (attr->dests[i].termtbl)
+                                mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
+                }
         }
 
         atomic64_dec(&esw->offloads.num_flows);
...
@@ -23,6 +23,8 @@
 #define tc_end_fdb(esw) (esw_chains_priv(esw)->tc_end_fdb)
 #define fdb_ignore_flow_level_supported(esw) \
         (MLX5_CAP_ESW_FLOWTABLE_FDB((esw)->dev, ignore_flow_level))
+#define fdb_modify_header_fwd_to_table_supported(esw) \
+        (MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
 
 /* Firmware currently has 4 pool of 4 sizes that it supports (ESW_POOLS),
  * and a virtual memory region of 16M (ESW_SIZE), this region is duplicated
@@ -107,7 +109,8 @@ bool mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw)
 
 bool mlx5_esw_chains_backwards_supported(struct mlx5_eswitch *esw)
 {
-        return fdb_ignore_flow_level_supported(esw);
+        return mlx5_esw_chains_prios_supported(esw) &&
+               fdb_ignore_flow_level_supported(esw);
 }
 
 u32 mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw)
@@ -419,7 +422,8 @@ mlx5_esw_chains_add_miss_rule(struct fdb_chain *fdb_chain,
         dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
         dest.ft = next_fdb;
 
-        if (fdb_chain->chain != mlx5_esw_chains_get_ft_chain(esw)) {
+        if (next_fdb == tc_end_fdb(esw) &&
+            fdb_modify_header_fwd_to_table_supported(esw)) {
                 act.modify_hdr = fdb_chain->miss_modify_hdr;
                 act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
         }
@@ -779,6 +783,13 @@ mlx5_esw_chains_init(struct mlx5_eswitch *esw)
                    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
                 esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
                 esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
+        } else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
+                /* Disabled when ttl workaround is needed, e.g
+                 * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
+                 */
+                esw_warn(dev,
+                         "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
+                esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
         } else {
                 esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
                 esw_info(dev, "Supported tc offload range - chains: %u, prios: %u\n",
...
@@ -3,6 +3,7 @@
 
 #include <linux/mlx5/fs.h>
 #include "eswitch.h"
+#include "fs_core.h"
 
 struct mlx5_termtbl_handle {
         struct hlist_node termtbl_hlist;
@@ -28,6 +29,10 @@ mlx5_eswitch_termtbl_hash(struct mlx5_flow_act *flow_act,
                      sizeof(dest->vport.num), hash);
         hash = jhash((const void *)&dest->vport.vhca_id,
                      sizeof(dest->vport.num), hash);
+        if (dest->vport.pkt_reformat)
+                hash = jhash(dest->vport.pkt_reformat,
+                             sizeof(*dest->vport.pkt_reformat),
+                             hash);
         return hash;
 }
 
@@ -37,11 +42,19 @@ mlx5_eswitch_termtbl_cmp(struct mlx5_flow_act *flow_act1,
                          struct mlx5_flow_act *flow_act2,
                          struct mlx5_flow_destination *dest2)
 {
-        return flow_act1->action != flow_act2->action ||
-               dest1->vport.num != dest2->vport.num ||
-               dest1->vport.vhca_id != dest2->vport.vhca_id ||
-               memcmp(&flow_act1->vlan, &flow_act2->vlan,
-                      sizeof(flow_act1->vlan));
+        int ret;
+
+        ret = flow_act1->action != flow_act2->action ||
+              dest1->vport.num != dest2->vport.num ||
+              dest1->vport.vhca_id != dest2->vport.vhca_id ||
+              memcmp(&flow_act1->vlan, &flow_act2->vlan,
+                     sizeof(flow_act1->vlan));
+        if (ret)
+                return ret;
+
+        return dest1->vport.pkt_reformat && dest2->vport.pkt_reformat ?
+               memcmp(dest1->vport.pkt_reformat, dest2->vport.pkt_reformat,
+                      sizeof(*dest1->vport.pkt_reformat)) : 0;
 }
 
 static int
@@ -62,7 +75,8 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
         /* As this is the terminating action then the termination table is the
          * same prio as the slow path
          */
-        ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION;
+        ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION |
+                        MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
         ft_attr.prio = FDB_SLOW_PATH;
         ft_attr.max_fte = 1;
         ft_attr.autogroup.max_num_groups = 1;
@@ -74,7 +88,6 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
         tt->rule = mlx5_add_flow_rules(tt->termtbl, NULL, flow_act,
                                        &tt->dest, 1);
-
         if (IS_ERR(tt->rule)) {
                 esw_warn(dev, "Failed to create termination table rule\n");
                 goto add_flow_err;
@@ -92,7 +105,8 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
 static struct mlx5_termtbl_handle *
 mlx5_eswitch_termtbl_get_create(struct mlx5_eswitch *esw,
                                 struct mlx5_flow_act *flow_act,
-                                struct mlx5_flow_destination *dest)
+                                struct mlx5_flow_destination *dest,
+                                struct mlx5_esw_flow_attr *attr)
 {
         struct mlx5_termtbl_handle *tt;
         bool found = false;
@@ -100,7 +114,6 @@ mlx5_eswitch_termtbl_get_create(struct mlx5_eswitch *esw,
         int err;
 
         mutex_lock(&esw->offloads.termtbl_mutex);
-
         hash_key = mlx5_eswitch_termtbl_hash(flow_act, dest);
         hash_for_each_possible(esw->offloads.termtbl_tbl, tt,
                                termtbl_hlist, hash_key) {
@@ -122,6 +135,7 @@ mlx5_eswitch_termtbl_get_create(struct mlx5_eswitch *esw,
         tt->dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
         tt->dest.vport.num = dest->vport.num;
         tt->dest.vport.vhca_id = dest->vport.vhca_id;
+        tt->dest.vport.flags = dest->vport.flags;
         memcpy(&tt->flow_act, flow_act, sizeof(*flow_act));
 
         err = mlx5_eswitch_termtbl_create(esw->dev, tt, flow_act);
@@ -156,25 +170,44 @@ mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
         }
 }
 
+static bool mlx5_eswitch_termtbl_is_encap_reformat(struct mlx5_pkt_reformat *rt)
+{
+        switch (rt->reformat_type) {
+        case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
+        case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
+        case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+        case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+                return true;
+        default:
+                return false;
+        }
+}
+
 static void
 mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
                                   struct mlx5_flow_act *dst)
 {
-        if (!(src->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH))
-                return;
-
-        src->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
-        dst->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
-        memcpy(&dst->vlan[0], &src->vlan[0], sizeof(src->vlan[0]));
-        memset(&src->vlan[0], 0, sizeof(src->vlan[0]));
-
-        if (!(src->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
-                return;
-
-        src->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
-        dst->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
-        memcpy(&dst->vlan[1], &src->vlan[1], sizeof(src->vlan[1]));
-        memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
+        if (src->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+                src->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+                dst->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+                memcpy(&dst->vlan[0], &src->vlan[0], sizeof(src->vlan[0]));
+                memset(&src->vlan[0], 0, sizeof(src->vlan[0]));
+
+                if (src->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
+                        src->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
+                        dst->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
+                        memcpy(&dst->vlan[1], &src->vlan[1], sizeof(src->vlan[1]));
+                        memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
+                }
+        }
+
+        if (src->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
+            mlx5_eswitch_termtbl_is_encap_reformat(src->pkt_reformat)) {
+                src->action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+                dst->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+                dst->pkt_reformat = src->pkt_reformat;
+                src->pkt_reformat = NULL;
+        }
 }
 
 static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
@@ -195,15 +228,27 @@ static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
 
 bool
 mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
+                              struct mlx5_esw_flow_attr *attr,
                               struct mlx5_flow_act *flow_act,
                               struct mlx5_flow_spec *spec)
 {
-        if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table))
+        int i;
+
+        if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) ||
+            attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH ||
+            !mlx5_eswitch_offload_is_uplink_port(esw, spec))
                 return false;
 
         /* push vlan on RX */
-        return (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) &&
-                mlx5_eswitch_offload_is_uplink_port(esw, spec);
+        if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)
+                return true;
+
+        /* hairpin */
+        for (i = attr->split_count; i < attr->out_count; i++)
+                if (attr->dests[i].rep->vport == MLX5_VPORT_UPLINK)
+                        return true;
+
+        return false;
 }
 
 struct mlx5_flow_handle *
@@ -233,7 +278,7 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
 
                 /* get the terminating table for the action list */
                 tt = mlx5_eswitch_termtbl_get_create(esw, &term_tbl_act,
-                                                     &dest[i]);
+                                                     &dest[i], attr);
                 if (IS_ERR(tt)) {
                         esw_warn(esw->dev, "Failed to create termination table\n");
                         goto revert_changes;
...
@@ -416,7 +416,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
         u8         termination_table[0x1];
         u8         reformat_and_fwd_to_table[0x1];
         u8         reserved_at_1a[0x6];
-        u8         reserved_at_20[0x2];
+        u8         termination_table_raw_traffic[0x1];
+        u8         reserved_at_21[0x1];
         u8         log_max_ft_size[0x6];
         u8         log_max_modify_header_context[0x8];
         u8         max_modify_header_actions[0x8];
...
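
The mlx5_ifc.h hunk above follows that file's layout convention: each member is a u8 array whose length is a bit count, so carving the new termination_table_raw_traffic bit out of reserved_at_20[0x2] leaves a one-bit remainder that is renamed reserved_at_21[0x1] to keep every offset accounted for. A standalone sketch of the convention (fields condensed and illustrative, not the real struct):

#include <stddef.h>
#include <stdio.h>

typedef unsigned char u8;

/* Condensed, illustrative version of the mlx5_ifc convention: every
 * member is a u8 array whose LENGTH IS A BIT COUNT, so offsetof() on
 * the struct yields a field's bit offset in the firmware layout, and
 * the reserved_at_<hex> names record those offsets.
 */
struct flow_table_prop_layout_bits {
        u8 reserved_at_0[0x20];                 /* fields up to bit 0x20 */
        u8 termination_table_raw_traffic[0x1];  /* new bit, at offset 0x20 */
        u8 reserved_at_21[0x1];                 /* rest of old reserved_at_20[0x2] */
        u8 log_max_ft_size[0x6];
};

int main(void)
{
        printf("termination_table_raw_traffic is bit 0x%zx\n",
               offsetof(struct flow_table_prop_layout_bits,
                        termination_table_raw_traffic));
        return 0;
}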