Commit c9c079b4, authored by Paul Blakey, committed by David S. Miller

net/mlx5: CT: Set flow source hint from provided tuple device

Get the originating device from the tuple's offload metadata
ingress_ifindex match, and set the flow_source hint to LOCAL for VF/SF
representors, UPLINK for uplink/wire/tunnel/bond devices, or ANY (as
before this patch) for all others.

This allows lower layer (software steering or firmware) to insert the tuple
rule only in one table (either rx or tx) instead of two (rx and tx).
Signed-off-by: Paul Blakey <paulb@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b702436a
...@@ -538,7 +538,7 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev) ...@@ -538,7 +538,7 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
return add_drivers(dev); return add_drivers(dev);
} }
static bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev) bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
{ {
u64 fsystem_guid, psystem_guid; u64 fsystem_guid, psystem_guid;
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <linux/refcount.h> #include <linux/refcount.h>
#include <linux/xarray.h> #include <linux/xarray.h>
#include <linux/if_macvlan.h>
#include "lib/fs_chains.h" #include "lib/fs_chains.h"
#include "en/tc_ct.h" #include "en/tc_ct.h"
...@@ -326,7 +327,33 @@ mlx5_tc_ct_rule_to_tuple_nat(struct mlx5_ct_tuple *tuple, ...@@ -326,7 +327,33 @@ mlx5_tc_ct_rule_to_tuple_nat(struct mlx5_ct_tuple *tuple,
} }
static int static int
mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, mlx5_tc_ct_get_flow_source_match(struct mlx5_tc_ct_priv *ct_priv,
struct net_device *ndev)
{
struct mlx5e_priv *other_priv = netdev_priv(ndev);
struct mlx5_core_dev *mdev = ct_priv->dev;
bool vf_rep, uplink_rep;
vf_rep = mlx5e_eswitch_vf_rep(ndev) && mlx5_same_hw_devs(mdev, other_priv->mdev);
uplink_rep = mlx5e_eswitch_uplink_rep(ndev) && mlx5_same_hw_devs(mdev, other_priv->mdev);
if (vf_rep)
return MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
if (uplink_rep)
return MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
if (is_vlan_dev(ndev))
return mlx5_tc_ct_get_flow_source_match(ct_priv, vlan_dev_real_dev(ndev));
if (netif_is_macvlan(ndev))
return mlx5_tc_ct_get_flow_source_match(ct_priv, macvlan_dev_real_dev(ndev));
if (mlx5e_get_tc_tun(ndev) || netif_is_lag_master(ndev))
return MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
return MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT;
}
static int
mlx5_tc_ct_set_tuple_match(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_flow_spec *spec,
struct flow_rule *rule) struct flow_rule *rule)
{ {
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
...@@ -341,8 +368,7 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, ...@@ -341,8 +368,7 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
flow_rule_match_basic(rule, &match); flow_rule_match_basic(rule, &match);
mlx5e_tc_set_ethertype(priv->mdev, &match, true, headers_c, mlx5e_tc_set_ethertype(ct_priv->dev, &match, true, headers_c, headers_v);
headers_v);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
match.mask->ip_proto); match.mask->ip_proto);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
...@@ -438,6 +464,23 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, ...@@ -438,6 +464,23 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
ntohs(match.key->flags)); ntohs(match.key->flags));
} }
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
struct flow_match_meta match;
flow_rule_match_meta(rule, &match);
if (match.key->ingress_ifindex & match.mask->ingress_ifindex) {
struct net_device *dev;
dev = dev_get_by_index(&init_net, match.key->ingress_ifindex);
if (dev && MLX5_CAP_ESW_FLOWTABLE(ct_priv->dev, flow_source))
spec->flow_context.flow_source =
mlx5_tc_ct_get_flow_source_match(ct_priv, dev);
dev_put(dev);
}
}
return 0; return 0;
} }
...@@ -770,7 +813,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, ...@@ -770,7 +813,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB) if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB)
attr->esw_attr->in_mdev = priv->mdev; attr->esw_attr->in_mdev = priv->mdev;
mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule); mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule);
mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK); mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
zone_rule->rule = mlx5_tc_rule_insert(priv, spec, attr); zone_rule->rule = mlx5_tc_rule_insert(priv, spec, attr);
......
...@@ -305,5 +305,6 @@ static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev) ...@@ -305,5 +305,6 @@ static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev)
bool mlx5_eth_supported(struct mlx5_core_dev *dev); bool mlx5_eth_supported(struct mlx5_core_dev *dev);
bool mlx5_rdma_supported(struct mlx5_core_dev *dev); bool mlx5_rdma_supported(struct mlx5_core_dev *dev);
bool mlx5_vnet_supported(struct mlx5_core_dev *dev); bool mlx5_vnet_supported(struct mlx5_core_dev *dev);
bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev);
#endif /* __MLX5_CORE_H__ */ #endif /* __MLX5_CORE_H__ */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment