Commit 04b206b8 authored by David S. Miller

Merge branch 'mlx5-SRIOV-offload-tunnel_key-set-release'

Saeed Mahameed says:

====================
Mellanox 100G SRIOV offloads tunnel_key set/release

From Hadar Hen Zion:

This series further enhances the SRIOV TC offloads of mlx5 to handle the
TC tunnel_key release and set actions.

This serves a common use-case in virtualization systems, where the virtual
switch encapsulates packets sent from VMs (tunnel_key set action) with
outer headers carrying the local/remote host IPs, and de-capsulates
(tunnel_key release) the outer headers before the packets are received by
the VM.

We use the new E-Switch switchdev mode and the TC tunnel_key set/release
actions to achieve the same in SW-defined SRIOV environments, by
offloading TC rules that combine these actions with packet forwarding
(TC mirred/redirect action).
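As a rough illustration of the driver side, the offload path can walk a
flower rule's action list with the helpers added in this series and map
each TC action to a HW action flag. A minimal sketch, assuming mlx5-style
action flags (my_parse_actions() is hypothetical; the real logic lives in
mlx5e_tc.c):

#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <linux/mlx5/fs.h>

static int my_parse_actions(struct tcf_exts *exts, u32 *hw_action,
			    struct ip_tunnel_info **encap_info)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_tunnel_set(a)) {
			/* tunnel_key set: keep the tunnel parameters so an
			 * encap header can be built before forwarding
			 */
			*encap_info = tcf_tunnel_info(a);
			*hw_action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP;
		} else if (is_tcf_tunnel_release(a)) {
			/* tunnel_key release: strip outer headers in HW */
			*hw_action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
		} else if (is_tcf_mirred_egress_redirect(a)) {
			/* mirred/redirect: forward to the peer e-switch port */
			*hw_action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		} else {
			return -EOPNOTSUPP;	/* keep the rule in SW */
		}
	}
	return 0;
}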

The first six patches add the needed support in the flow dissector,
flower and tc for offloading tunnel_key actions:
    - The first three patches add the needed helper functions
      and enums.
    - The next three patches add a UDP port attribute to the
      tunnel_key set and release actions.

The addition of UDP ports allows the HW driver to make sure it is given
(say) a VXLAN tunnel to offload (mlx5e uses that).
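For example, given a flower match on the new FLOW_DISSECTOR_KEY_ENC_PORTS
key, a VXLAN-only driver can bail out on unknown outer UDP ports. A
minimal sketch (my_vxlan_port_known() is a hypothetical stand-in for the
driver's port table; mlx5e consults the ports registered through
ndo_udp_tunnel_add):

#include <net/flow_dissector.h>
#include <net/pkt_cls.h>

/* hypothetical stand-in for the driver's VXLAN UDP port table */
static bool my_vxlan_port_known(u16 port)
{
	return port == 4789;	/* IANA VXLAN port, for illustration */
}

static int my_check_tunnel_port(struct tc_cls_flower_offload *f)
{
	struct flow_dissector_key_ports *key;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS))
		return -EOPNOTSUPP;

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_ENC_PORTS,
					f->key);
	/* offload only when the outer UDP dst port is a known VXLAN port */
	if (!my_vxlan_port_known(be16_to_cpu(key->dst)))
		return -EOPNOTSUPP;

	return 0;
}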

Patches 7-10 are mlx5 preparations for supporting the tunnel_key action offloads.

Patch #11 adds mlx5e support for offloading the tunnel_key release action, and
the last two patches (#12-13) add mlx5e support for the tc tunnel_key set action.

Currently, in order to offload the tc tunnel_key release action, the tc rule
must be placed on the mlx5e offloading (uplink) interface rather than on the
shared tunnel interface. Resolving from the tunnel interface to the HW
netdevice will be implemented in a follow-up series.

This series was generated against commit
94edc86b ("Merge branch 'dwmac-sti-refactor-cleanup'")
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 35b80733 a54e20b4
@@ -1857,7 +1857,7 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
ft = mlx5_create_auto_grouped_flow_table(ns, priority,
num_entries,
num_groups,
0);
0, 0);
if (!IS_ERR(ft)) {
prio->refcount = 0;
@@ -1877,10 +1877,10 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
{
struct mlx5_flow_table *ft = ft_prio->flow_table;
struct mlx5_ib_flow_handler *handler;
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
unsigned int spec_index;
u32 action;
int err = 0;
if (!is_valid_attr(flow_attr))
@@ -1905,12 +1905,12 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
}
spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
flow_act.action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
handler->rule = mlx5_add_flow_rules(ft, spec,
action,
MLX5_FS_DEFAULT_FLOW_TAG,
dst, 1);
&flow_act,
dst, 1);
if (IS_ERR(handler->rule)) {
err = PTR_ERR(handler->rule);
@@ -893,5 +893,9 @@ void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
void mlx5e_add_vxlan_port(struct net_device *netdev,
struct udp_tunnel_info *ti);
void mlx5e_del_vxlan_port(struct net_device *netdev,
struct udp_tunnel_info *ti);
#endif /* __MLX5_EN_H__ */
@@ -174,6 +174,11 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
enum arfs_type type)
{
struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
struct mlx5_flow_act flow_act = {
.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
.encap_id = 0,
};
struct mlx5_flow_destination dest;
struct mlx5e_tir *tir = priv->indir_tir;
struct mlx5_flow_spec *spec;
@@ -206,8 +211,7 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
}
arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
&flow_act,
&dest, 1);
if (IS_ERR(arfs_t->default_rule)) {
err = PTR_ERR(arfs_t->default_rule);
@@ -324,7 +328,7 @@ static int arfs_create_table(struct mlx5e_priv *priv,
int err;
ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
MLX5E_ARFS_TABLE_SIZE, MLX5E_ARFS_FT_LEVEL);
MLX5E_ARFS_TABLE_SIZE, MLX5E_ARFS_FT_LEVEL, 0);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -465,6 +469,11 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct arfs_rule *arfs_rule)
{
struct mlx5_flow_act flow_act = {
.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
.encap_id = 0,
};
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
struct arfs_tuple *tuple = &arfs_rule->tuple;
struct mlx5_flow_handle *rule = NULL;
@@ -544,9 +553,7 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
}
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
rule = mlx5_add_flow_rules(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
&dest, 1);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n",
@@ -158,6 +158,11 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
enum mlx5e_vlan_rule_type rule_type,
u16 vid, struct mlx5_flow_spec *spec)
{
struct mlx5_flow_act flow_act = {
.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
.encap_id = 0,
};
struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
struct mlx5_flow_destination dest;
struct mlx5_flow_handle **rule_p;
@@ -187,10 +192,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
break;
}
*rule_p = mlx5_add_flow_rules(ft, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
&dest, 1);
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
@@ -623,6 +625,11 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
u16 etype,
u8 proto)
{
struct mlx5_flow_act flow_act = {
.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
.encap_id = 0,
};
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
int err = 0;
@@ -644,10 +651,7 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
}
rule = mlx5_add_flow_rules(ft, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
dest, 1);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
@@ -777,7 +781,7 @@ static int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
int err;
ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL);
MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL, 0);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -810,6 +814,11 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
struct mlx5e_l2_rule *ai, int type)
{
struct mlx5_flow_act flow_act = {
.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
.encap_id = 0,
};
struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
struct mlx5_flow_destination dest;
struct mlx5_flow_spec *spec;
@@ -848,9 +857,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
break;
}
ai->rule = mlx5_add_flow_rules(ft, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG, &dest, 1);
ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(ai->rule)) {
netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
__func__, mv_dmac);
@@ -948,7 +955,7 @@ static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
ft->num_groups = 0;
ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL);
MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL, 0);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
@@ -1038,7 +1045,7 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
ft->num_groups = 0;
ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL);
MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL, 0);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
@@ -99,7 +99,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
MLX5E_ETHTOOL_NUM_ENTRIES);
ft = mlx5_create_auto_grouped_flow_table(ns, prio,
table_size,
MLX5E_ETHTOOL_NUM_GROUPS, 0);
MLX5E_ETHTOOL_NUM_GROUPS, 0, 0);
if (IS_ERR(ft))
return (void *)ft;
@@ -290,10 +290,10 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
struct ethtool_rx_flow_spec *fs)
{
struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
struct mlx5_flow_handle *rule;
int err = 0;
u32 action;
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec)
@@ -304,7 +304,7 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
goto free;
if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
action = MLX5_FLOW_CONTEXT_ACTION_DROP;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
} else {
dst = kzalloc(sizeof(*dst), GFP_KERNEL);
if (!dst) {
@@ -314,12 +314,12 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dst->tir_num = priv->direct_tir[fs->ring_cookie].tirn;
action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
}
spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
rule = mlx5_add_flow_rules(ft, spec, action,
MLX5_FS_DEFAULT_FLOW_TAG, dst, 1);
flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
@@ -2995,8 +2995,8 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
vf_stats);
}
static void mlx5e_add_vxlan_port(struct net_device *netdev,
struct udp_tunnel_info *ti)
void mlx5e_add_vxlan_port(struct net_device *netdev,
struct udp_tunnel_info *ti)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3009,8 +3009,8 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev,
mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
}
static void mlx5e_del_vxlan_port(struct net_device *netdev,
struct udp_tunnel_info *ti)
void mlx5e_del_vxlan_port(struct net_device *netdev,
struct udp_tunnel_info *ti)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -256,6 +256,8 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
.ndo_setup_tc = mlx5e_rep_ndo_setup_tc,
.ndo_get_stats64 = mlx5e_get_stats,
.ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
.ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
};
static void mlx5e_build_rep_netdev_priv(struct mlx5_core_dev *mdev,
@@ -244,6 +244,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
int match_header = (is_zero_ether_addr(mac_c) ? 0 :
MLX5_MATCH_OUTER_HEADERS);
struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_destination dest;
struct mlx5_flow_spec *spec;
void *mv_misc = NULL;
@@ -285,10 +286,10 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
"\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
dmac_v, dmac_c, vport);
spec->match_criteria_enable = match_header;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule =
mlx5_add_flow_rules(esw->fdb_table.fdb, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest, 1);
&flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
esw_warn(esw->dev,
"FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
@@ -361,7 +362,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
memset(flow_group_in, 0, inlen);
table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0, 0);
if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create FDB Table err %d\n", err);
@@ -1212,6 +1213,7 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
int err = 0;
u8 *smac_v;
@@ -1264,10 +1266,10 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
}
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
vport->ingress.allow_rule =
mlx5_add_flow_rules(vport->ingress.acl, spec,
MLX5_FLOW_CONTEXT_ACTION_ALLOW,
0, NULL, 0);
&flow_act, NULL, 0);
if (IS_ERR(vport->ingress.allow_rule)) {
err = PTR_ERR(vport->ingress.allow_rule);
esw_warn(esw->dev,
@@ -1278,10 +1280,10 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
}
memset(spec, 0, sizeof(*spec));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
vport->ingress.drop_rule =
mlx5_add_flow_rules(vport->ingress.acl, spec,
MLX5_FLOW_CONTEXT_ACTION_DROP,
0, NULL, 0);
&flow_act, NULL, 0);
if (IS_ERR(vport->ingress.drop_rule)) {
err = PTR_ERR(vport->ingress.drop_rule);
esw_warn(esw->dev,
@@ -1301,6 +1303,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
int err = 0;
@@ -1338,10 +1341,10 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
vport->egress.allowed_vlan =
mlx5_add_flow_rules(vport->egress.acl, spec,
MLX5_FLOW_CONTEXT_ACTION_ALLOW,
0, NULL, 0);
&flow_act, NULL, 0);
if (IS_ERR(vport->egress.allowed_vlan)) {
err = PTR_ERR(vport->egress.allowed_vlan);
esw_warn(esw->dev,
@@ -1353,10 +1356,10 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
/* Drop others rule (star rule) */
memset(spec, 0, sizeof(*spec));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
vport->egress.drop_rule =
mlx5_add_flow_rules(vport->egress.acl, spec,
MLX5_FLOW_CONTEXT_ACTION_DROP,
0, NULL, 0);
&flow_act, NULL, 0);
if (IS_ERR(vport->egress.drop_rule)) {
err = PTR_ERR(vport->egress.drop_rule);
esw_warn(esw->dev,
@@ -1779,6 +1782,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto abort;
}
hash_init(esw->offloads.encap_tbl);
mutex_init(&esw->state_lock);
for (vport_num = 0; vport_num < total_vports; vport_num++) {
@@ -199,6 +199,7 @@ struct mlx5_esw_offload {
struct mlx5_flow_table *ft_offloads;
struct mlx5_flow_group *vport_rx_group;
struct mlx5_eswitch_rep *vport_reps;
DECLARE_HASHTABLE(encap_tbl, 8);
};
struct mlx5_eswitch {
@@ -272,6 +273,24 @@ enum {
#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x40
#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80
struct mlx5_encap_info {
__be32 daddr;
__be32 tun_id;
__be16 tp_dst;
};
struct mlx5_encap_entry {
struct hlist_node encap_hlist;
struct list_head flows;
u32 encap_id;
struct neighbour *n;
struct mlx5_encap_info tun_info;
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
struct net_device *out_dev;
int tunnel_type;
};
struct mlx5_esw_flow_attr {
struct mlx5_eswitch_rep *in_rep;
struct mlx5_eswitch_rep *out_rep;
@@ -279,6 +298,7 @@ struct mlx5_esw_flow_attr {
int action;
u16 vlan;
bool vlan_handled;
struct mlx5_encap_entry *encap;
};
int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
@@ -49,23 +49,23 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_esw_flow_attr *attr)
{
struct mlx5_flow_destination dest[2] = {};
struct mlx5_flow_act flow_act = {0};
struct mlx5_fc *counter = NULL;
struct mlx5_flow_handle *rule;
void *misc;
int action;
int i = 0;
if (esw->mode != SRIOV_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
action = attr->action;
flow_act.action = attr->action;
if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest[i].vport_num = attr->out_rep->vport;
i++;
}
if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(esw->dev, true);
if (IS_ERR(counter))
return ERR_CAST(counter);
@@ -82,9 +82,14 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
MLX5_MATCH_MISC_PARAMETERS;
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
if (attr->encap)
flow_act.encap_id = attr->encap->encap_id;
rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
spec, action, 0, dest, i);
spec, &flow_act, dest, i);
if (IS_ERR(rule))
mlx5_fc_destroy(esw->dev, counter);
@@ -274,6 +279,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
static struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_destination dest;
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;
@@ -297,10 +303,10 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport_num = vport;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest, 1);
&flow_act, &dest, 1);
if (IS_ERR(flow_rule))
esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
@@ -363,6 +369,7 @@ int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_destination dest;
struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_spec *spec;
@@ -377,10 +384,10 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport_num = 0;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest, 1);
&flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
@@ -407,6 +414,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
u32 *flow_group_in;
void *match_criteria;
int table_size, ix, err = 0;
u32 flags = 0;
flow_group_in = mlx5_vzalloc(inlen);
if (!flow_group_in)
@@ -421,9 +429,14 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n",
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
flags |= MLX5_FLOW_TABLE_TUNNEL_EN;
fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
ESW_OFFLOADS_NUM_ENTRIES,
ESW_OFFLOADS_NUM_GROUPS, 0);
ESW_OFFLOADS_NUM_GROUPS, 0,
flags);
if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
@@ -432,7 +445,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
esw->fdb_table.fdb = fdb;
table_size = nvports + MAX_PF_SQ + 1;
fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0);
fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0, 0);
if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
@@ -524,7 +537,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
return -ENOMEM;
}
ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0);
ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
if (IS_ERR(ft_offloads)) {
err = PTR_ERR(ft_offloads);
esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
@@ -590,6 +603,7 @@ static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_destination dest;
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;
@@ -612,9 +626,9 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dest.tir_num = tirn;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest, 1);
&flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
goto out;
@@ -37,6 +37,7 @@
#include "fs_core.h"
#include "fs_cmd.h"
#include "mlx5_core.h"
#include "eswitch.h"
int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft)
@@ -61,8 +62,9 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
enum fs_flow_table_op_mod op_mod,
enum fs_flow_table_type type, unsigned int level,
unsigned int log_size, struct mlx5_flow_table
*next_ft, unsigned int *table_id)
*next_ft, unsigned int *table_id, u32 flags)
{
int en_encap_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN);
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
int err;
@@ -78,6 +80,9 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
MLX5_SET(create_flow_table_in, in, other_vport, 1);
}
MLX5_SET(create_flow_table_in, in, decap_en, en_encap_decap);
MLX5_SET(create_flow_table_in, in, encap_en, en_encap_decap);
switch (op_mod) {
case FS_FT_OP_MOD_NORMAL:
if (next_ft) {
@@ -243,6 +248,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
MLX5_SET(flow_context, in_flow_context, action, fte->action);
MLX5_SET(flow_context, in_flow_context, encap_id, fte->encap_id);
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));
@@ -453,27 +459,32 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
*bytes = MLX5_GET64(traffic_counter, stats, octets);
}
#define MAX_ENCAP_SIZE (128)
int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev,
int header_type,
size_t size,
void *encap_header,
u32 *encap_id)
int mlx5_encap_alloc(struct mlx5_core_dev *dev,
int header_type,
size_t size,
void *encap_header,
u32 *encap_id)
{
int max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)];
u32 in[MLX5_ST_SZ_DW(alloc_encap_header_in) +
(MAX_ENCAP_SIZE / sizeof(u32))];
void *encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in,
encap_header);
void *header = MLX5_ADDR_OF(encap_header_in, encap_header_in,
encap_header);
int inlen = header - (void *)in + size;
void *encap_header_in;
void *header;
int inlen;
int err;
u32 *in;
if (size > MAX_ENCAP_SIZE)
if (size > MLX5_CAP_ESW(dev, max_encap_header_size))
return -EINVAL;
in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + max_encap_size,
GFP_KERNEL);
if (!in)
return -ENOMEM;
encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in, encap_header);
header = MLX5_ADDR_OF(encap_header_in, encap_header_in, encap_header);
inlen = header - (void *)in + size;
memset(in, 0, inlen);
MLX5_SET(alloc_encap_header_in, in, opcode,
MLX5_CMD_OP_ALLOC_ENCAP_HEADER);
@@ -485,10 +496,11 @@ int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev,
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
*encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id);
kfree(in);
return err;
}
void mlx5_cmd_dealloc_encap(struct mlx5_core_dev *dev, u32 encap_id)
void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id)
{
u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)];
u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)];
@@ -38,7 +38,7 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
enum fs_flow_table_op_mod op_mod,
enum fs_flow_table_type type, unsigned int level,
unsigned int log_size, struct mlx5_flow_table
*next_ft, unsigned int *table_id);
*next_ft, unsigned int *table_id, u32 flags);
int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft);
@@ -89,11 +89,4 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
struct mlx5_cmd_fc_bulk *b, u16 id,
u64 *packets, u64 *bytes);
int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev,
int header_type,
size_t size,
void *encap_header,
u32 *encap_id);
void mlx5_cmd_dealloc_encap(struct mlx5_core_dev *dev, u32 encap_id);
#endif
@@ -460,8 +460,7 @@ static void del_flow_group(struct fs_node *node)
fg->id, ft->id);
}
static struct fs_fte *alloc_fte(u8 action,
u32 flow_tag,
static struct fs_fte *alloc_fte(struct mlx5_flow_act *flow_act,
u32 *match_value,
unsigned int index)
{
@@ -473,9 +472,10 @@ static struct fs_fte *alloc_fte(u8 action,
memcpy(fte->val, match_value, sizeof(fte->val));
fte->node.type = FS_TYPE_FLOW_ENTRY;
fte->flow_tag = flow_tag;
fte->flow_tag = flow_act->flow_tag;
fte->index = index;
fte->action = action;
fte->action = flow_act->action;
fte->encap_id = flow_act->encap_id;
return fte;
}
@@ -505,7 +505,8 @@ static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
enum fs_flow_table_type table_type,
enum fs_flow_table_op_mod op_mod)
enum fs_flow_table_op_mod op_mod,
u32 flags)
{
struct mlx5_flow_table *ft;
@@ -519,6 +520,7 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_ft
ft->type = table_type;
ft->vport = vport;
ft->max_fte = max_fte;
ft->flags = flags;
INIT_LIST_HEAD(&ft->fwd_rules);
mutex_init(&ft->lock);
@@ -777,7 +779,8 @@ static void list_add_flow_table(struct mlx5_flow_table *ft,
static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
enum fs_flow_table_op_mod op_mod,
u16 vport, int prio,
int max_fte, u32 level)
int max_fte, u32 level,
u32 flags)
{
struct mlx5_flow_table *next_ft = NULL;
struct mlx5_flow_table *ft;
@@ -810,7 +813,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
vport,
max_fte ? roundup_pow_of_two(max_fte) : 0,
root->table_type,
op_mod);
op_mod, flags);
if (!ft) {
err = -ENOMEM;
goto unlock_root;
@@ -820,7 +823,8 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
next_ft = find_next_chained_ft(fs_prio);
err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type,
ft->level, log_table_sz, next_ft, &ft->id);
ft->level, log_table_sz, next_ft, &ft->id,
ft->flags);
if (err)
goto free_ft;
@@ -845,10 +849,11 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
int prio, int max_fte,
u32 level)
u32 level,
u32 flags)
{
return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, 0, prio,
max_fte, level);
max_fte, level, flags);
}
struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
@@ -856,7 +861,7 @@ struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace
u32 level, u16 vport)
{
return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, vport, prio,
max_fte, level);
max_fte, level, 0);
}
struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
@@ -864,7 +869,7 @@ struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
int prio, u32 level)
{
return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_LAG_DEMUX, 0, prio, 0,
level);
level, 0);
}
EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
@@ -872,14 +877,15 @@ struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_nam
int prio,
int num_flow_table_entries,
int max_num_groups,
u32 level)
u32 level,
u32 flags)
{
struct mlx5_flow_table *ft;
if (max_num_groups > num_flow_table_entries)
return ERR_PTR(-EINVAL);
ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries, level);
ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries, level, flags);
if (IS_ERR(ft))
return ft;
@@ -1111,15 +1117,14 @@ static unsigned int get_free_fte_index(struct mlx5_flow_group *fg,
/* prev is output, prev->next = new_fte */
static struct fs_fte *create_fte(struct mlx5_flow_group *fg,
u32 *match_value,
u8 action,
u32 flow_tag,
struct mlx5_flow_act *flow_act,
struct list_head **prev)
{
struct fs_fte *fte;
int index;
index = get_free_fte_index(fg, prev);
fte = alloc_fte(action, flow_tag, match_value, index);
fte = alloc_fte(flow_act, match_value, index);
if (IS_ERR(fte))
return fte;
@@ -1213,8 +1218,7 @@ static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
u32 *match_value,
u8 action,
u32 flow_tag,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest,
int dest_num)
{
@@ -1228,12 +1232,13 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
fs_for_each_fte(fte, fg) {
nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
if (compare_match_value(&fg->mask, match_value, &fte->val) &&
(action & fte->action) && flow_tag == fte->flow_tag) {
(flow_act->action & fte->action) &&
flow_act->flow_tag == fte->flow_tag) {
int old_action = fte->action;
fte->action |= action;
fte->action |= flow_act->action;
handle = add_rule_fte(fte, fg, dest, dest_num,
old_action != action);
old_action != flow_act->action);
if (IS_ERR(handle)) {
fte->action = old_action;
goto unlock_fte;
@@ -1249,7 +1254,7 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
goto unlock_fg;
}
fte = create_fte(fg, match_value, action, flow_tag, &prev);
fte = create_fte(fg, match_value, flow_act, &prev);
if (IS_ERR(fte)) {
handle = (void *)fte;
goto unlock_fg;
@@ -1326,17 +1331,17 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest,
static struct mlx5_flow_handle *
_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
struct mlx5_flow_spec *spec,
u32 action,
u32 flow_tag,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest,
int dest_num)
{
struct mlx5_flow_group *g;
struct mlx5_flow_handle *rule;
int i;
for (i = 0; i < dest_num; i++) {
if (!dest_is_valid(&dest[i], action, ft))
if (!dest_is_valid(&dest[i], flow_act->action, ft))
return ERR_PTR(-EINVAL);
}
@@ -1347,7 +1352,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
g->mask.match_criteria,
spec->match_criteria)) {
rule = add_rule_fg(g, spec->match_value,
action, flow_tag, dest, dest_num);
flow_act, dest, dest_num);
if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC)
goto unlock;
}
@@ -1359,8 +1364,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
goto unlock;
}
rule = add_rule_fg(g, spec->match_value,
action, flow_tag, dest, dest_num);
rule = add_rule_fg(g, spec->match_value, flow_act, dest, dest_num);
if (IS_ERR(rule)) {
/* Remove assumes refcount > 0 and autogroup creates a group
* with a refcount = 0.
@@ -1384,8 +1388,7 @@ static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
struct mlx5_flow_spec *spec,
u32 action,
u32 flow_tag,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest,
int dest_num)
{
@@ -1393,11 +1396,11 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
struct mlx5_flow_destination gen_dest;
struct mlx5_flow_table *next_ft = NULL;
struct mlx5_flow_handle *handle = NULL;
u32 sw_action = action;
u32 sw_action = flow_act->action;
struct fs_prio *prio;
fs_get_obj(prio, ft->node.parent);
if (action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
if (!fwd_next_prio_supported(ft))
return ERR_PTR(-EOPNOTSUPP);
if (dest)
@@ -1409,15 +1412,14 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
gen_dest.ft = next_ft;
dest = &gen_dest;
dest_num = 1;
action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
} else {
mutex_unlock(&root->chain_lock);
return ERR_PTR(-EOPNOTSUPP);
}
}
handle = _mlx5_add_flow_rules(ft, spec, action, flow_tag, dest,
dest_num);
handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, dest_num);
if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
if (!IS_ERR_OR_NULL(handle) &&
@@ -1822,7 +1824,7 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
if (!ns)
return -EINVAL;
ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL);
ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0);
if (IS_ERR(ft)) {
mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
return PTR_ERR(ft);
@@ -117,6 +117,7 @@ struct mlx5_flow_table {
struct mutex lock;
/* FWD rules that point on this flow table */
struct list_head fwd_rules;
u32 flags;
};
struct mlx5_fc_cache {
@@ -150,6 +151,7 @@ struct fs_fte {
u32 flow_tag;
u32 index;
u32 action;
u32 encap_id;
enum fs_fte_status status;
struct mlx5_fc *counter;
};
@@ -121,6 +121,12 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
void mlx5_dev_list_lock(void);
void mlx5_dev_list_unlock(void);
int mlx5_dev_list_trylock(void);
int mlx5_encap_alloc(struct mlx5_core_dev *dev,
int header_type,
size_t size,
void *encap_header,
u32 *encap_id);
void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id);
bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);
@@ -42,6 +42,10 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16,
};
enum {
MLX5_FLOW_TABLE_TUNNEL_EN = BIT(0),
};
#define LEFTOVERS_RULE_NUM 2
static inline void build_leftovers_ft_param(int *priority,
int *n_ent,
@@ -97,13 +101,15 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
int prio,
int num_flow_table_entries,
int max_num_groups,
u32 level);
u32 level,
u32 flags);
struct mlx5_flow_table *
mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
int prio,
int num_flow_table_entries,
u32 level);
u32 level,
u32 flags);
struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
int prio,
@@ -124,14 +130,19 @@ struct mlx5_flow_group *
mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
struct mlx5_flow_act {
u32 action;
u32 flow_tag;
u32 encap_id;
};
/* Single destination per rule.
* Group ID is implied by the match criteria.
*/
struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
struct mlx5_flow_spec *spec,
u32 action,
u32 flow_tag,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest,
int dest_num);
void mlx5_del_flow_rules(struct mlx5_flow_handle *fr);
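The flow steering API change above means every mlx5_add_flow_rules()
caller now passes one struct mlx5_flow_act instead of separate action and
flow_tag arguments. A minimal sketch of the new calling convention
(my_add_fwd_rule() is hypothetical; ft, spec and dest are assumed to be
set up by the caller):

#include <linux/err.h>
#include <linux/mlx5/fs.h>

static int my_add_fwd_rule(struct mlx5_flow_table *ft,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_flow_destination *dest)
{
	/* action, flow tag and encap id now travel in one struct */
	struct mlx5_flow_act flow_act = {
		.action   = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
		.encap_id = 0,
	};
	struct mlx5_flow_handle *rule;

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	return PTR_ERR_OR_ZERO(rule);
}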
@@ -115,6 +115,7 @@ static inline struct ip_tunnel_info *skb_tunnel_info_unclone(struct sk_buff *skb
static inline struct metadata_dst *__ip_tun_set_dst(__be32 saddr,
__be32 daddr,
__u8 tos, __u8 ttl,
__be16 tp_dst,
__be16 flags,
__be64 tunnel_id,
int md_size)
@@ -127,7 +128,7 @@ static inline struct metadata_dst *__ip_tun_set_dst(__be32 saddr,
ip_tunnel_key_init(&tun_dst->u.tun_info.key,
saddr, daddr, tos, ttl,
0, 0, 0, tunnel_id, flags);
0, 0, tp_dst, tunnel_id, flags);
return tun_dst;
}
@@ -139,12 +140,13 @@ static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
const struct iphdr *iph = ip_hdr(skb);
return __ip_tun_set_dst(iph->saddr, iph->daddr, iph->tos, iph->ttl,
flags, tunnel_id, md_size);
0, flags, tunnel_id, md_size);
}
static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u8 tos, __u8 ttl,
__be16 tp_dst,
__be32 label,
__be16 flags,
__be64 tunnel_id,
@@ -162,7 +164,7 @@ static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *sad
info->key.tun_flags = flags;
info->key.tun_id = tunnel_id;
info->key.tp_src = 0;
info->key.tp_dst = 0;
info->key.tp_dst = tp_dst;
info->key.u.ipv6.src = *saddr;
info->key.u.ipv6.dst = *daddr;
@@ -183,7 +185,7 @@ static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
return __ipv6_tun_set_dst(&ip6h->saddr, &ip6h->daddr,
ipv6_get_dsfield(ip6h), ip6h->hop_limit,
ip6_flowlabel(ip6h), flags, tunnel_id,
0, ip6_flowlabel(ip6h), flags, tunnel_id,
md_size);
}
#endif /* __NET_DST_METADATA_H */
@@ -128,6 +128,11 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_flow_tags */
FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */
FLOW_DISSECTOR_KEY_MPLS_ENTROPY, /* struct flow_dissector_key_keyid */
FLOW_DISSECTOR_KEY_ENC_KEYID, /* struct flow_dissector_key_keyid */
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, /* struct flow_dissector_key_ipv4_addrs */
FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */
FLOW_DISSECTOR_KEY_ENC_CONTROL, /* struct flow_dissector_key_control */
FLOW_DISSECTOR_KEY_ENC_PORTS, /* struct flow_dissector_key_ports */
FLOW_DISSECTOR_KEY_MAX,
};
@@ -12,6 +12,8 @@
#define __NET_TC_TUNNEL_KEY_H
#include <net/act_api.h>
#include <linux/tc_act/tc_tunnel_key.h>
#include <net/dst_metadata.h>
struct tcf_tunnel_key_params {
struct rcu_head rcu;
@@ -27,4 +29,39 @@ struct tcf_tunnel_key {
#define to_tunnel_key(a) ((struct tcf_tunnel_key *)a)
static inline bool is_tcf_tunnel_set(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
struct tcf_tunnel_key *t = to_tunnel_key(a);
struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
if (a->ops && a->ops->type == TCA_ACT_TUNNEL_KEY)
return params->tcft_action == TCA_TUNNEL_KEY_ACT_SET;
#endif
return false;
}
static inline bool is_tcf_tunnel_release(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
struct tcf_tunnel_key *t = to_tunnel_key(a);
struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
if (a->ops && a->ops->type == TCA_ACT_TUNNEL_KEY)
return params->tcft_action == TCA_TUNNEL_KEY_ACT_RELEASE;
#endif
return false;
}
static inline struct ip_tunnel_info *tcf_tunnel_info(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
struct tcf_tunnel_key *t = to_tunnel_key(a);
struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
return &params->tcft_enc_metadata->u.tun_info;
#else
return NULL;
#endif
}
#endif /* __NET_TC_TUNNEL_KEY_H */
@@ -452,6 +452,11 @@ enum {
TCA_FLOWER_KEY_SCTP_SRC, /* be16 */
TCA_FLOWER_KEY_SCTP_DST, /* be16 */
TCA_FLOWER_KEY_ENC_UDP_SRC_PORT, /* be16 */
TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK, /* be16 */
TCA_FLOWER_KEY_ENC_UDP_DST_PORT, /* be16 */
TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, /* be16 */
__TCA_FLOWER_MAX,
};
@@ -33,6 +33,7 @@ enum {
TCA_TUNNEL_KEY_ENC_IPV6_DST, /* struct in6_addr */
TCA_TUNNEL_KEY_ENC_KEY_ID, /* be64 */
TCA_TUNNEL_KEY_PAD,
TCA_TUNNEL_KEY_ENC_DST_PORT, /* be16 */
__TCA_TUNNEL_KEY_MAX,
};
@@ -16,7 +16,6 @@
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <linux/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_tunnel_key.h>
@@ -67,6 +66,7 @@ static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
[TCA_TUNNEL_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
[TCA_TUNNEL_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
[TCA_TUNNEL_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
[TCA_TUNNEL_KEY_ENC_DST_PORT] = {.type = NLA_U16},
};
static int tunnel_key_init(struct net *net, struct nlattr *nla,
@@ -81,6 +81,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
struct tc_tunnel_key *parm;
struct tcf_tunnel_key *t;
bool exists = false;
__be16 dst_port = 0;
__be64 key_id;
int ret = 0;
int err;
@@ -111,6 +112,9 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
key_id = key32_to_tunnel_id(nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]));
if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);
if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] &&
tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) {
__be32 saddr;
@@ -120,7 +124,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]);
metadata = __ip_tun_set_dst(saddr, daddr, 0, 0,
TUNNEL_KEY, key_id, 0);
dst_port, TUNNEL_KEY,
key_id, 0);
} else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
struct in6_addr saddr;
@@ -130,7 +135,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);
metadata = __ipv6_tun_set_dst(&saddr, &daddr, 0, 0, 0,
TUNNEL_KEY, key_id, 0);
dst_port, TUNNEL_KEY,
key_id, 0);
}
if (!metadata) {
@@ -258,7 +264,8 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
if (nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id) ||
tunnel_key_dump_addresses(skb,
&params->tcft_enc_metadata->u.tun_info))
&params->tcft_enc_metadata->u.tun_info) ||
nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT, key->tp_dst))
goto nla_put_failure;
}
@@ -43,6 +43,7 @@ struct fl_flow_key {
struct flow_dissector_key_ipv4_addrs enc_ipv4;
struct flow_dissector_key_ipv6_addrs enc_ipv6;
};
struct flow_dissector_key_ports enc_tp;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
struct fl_flow_mask_range {
@@ -155,6 +156,8 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
}
skb_key.enc_key_id.keyid = tunnel_id_to_key32(key->tun_id);
skb_key.enc_tp.src = key->tp_src;
skb_key.enc_tp.dst = key->tp_dst;
}
skb_key.indev_ifindex = skb->skb_iif;
@@ -348,6 +351,10 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
[TCA_FLOWER_KEY_SCTP_DST_MASK] = { .type = NLA_U16 },
[TCA_FLOWER_KEY_SCTP_SRC] = { .type = NLA_U16 },
[TCA_FLOWER_KEY_SCTP_DST] = { .type = NLA_U16 },
[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 },
[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 },
[TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 },
[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 },
};
static void fl_set_key_val(struct nlattr **tb,
@@ -500,6 +507,14 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
&mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
sizeof(key->enc_key_id.keyid));
fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
&mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
sizeof(key->enc_tp.src));
fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
&mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
sizeof(key->enc_tp.dst));
return 0;
}
@@ -567,6 +582,18 @@ static void fl_init_dissector(struct cls_fl_head *head,
FLOW_DISSECTOR_KEY_PORTS, tp);
FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
FLOW_DISSECTOR_KEY_VLAN, vlan);
FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
if (FL_KEY_IS_MASKED(&mask->key, enc_ipv4) ||
FL_KEY_IS_MASKED(&mask->key, enc_ipv6))
FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
enc_control);
FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
skb_flow_dissector_init(&head->dissector, keys, cnt);
}
@@ -941,7 +968,17 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
&mask->enc_key_id, TCA_FLOWER_UNSPEC,
sizeof(key->enc_key_id)))
sizeof(key->enc_key_id)) ||
fl_dump_key_val(skb, &key->enc_tp.src,
TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
&mask->enc_tp.src,
TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
sizeof(key->enc_tp.src)) ||
fl_dump_key_val(skb, &key->enc_tp.dst,
TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
&mask->enc_tp.dst,
TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
sizeof(key->enc_tp.dst)))
goto nla_put_failure;
nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags);