Commit 94ceffb4 authored by Vlad Buslov, committed by Saeed Mahameed

net/mlx5e: Implement CT entry update

With support for UDP NEW offload, the flow table may now send updates
for existing flows. Support properly replacing existing entries by
updating the flow restore_cookie and replacing the rule with a new one
that has the same match but a new mod_hdr action setting the updated
ctinfo.
Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
Reviewed-by: Oz Shlomo <ozsh@nvidia.com>
Reviewed-by: Paul Blakey <paulb@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 3ac0b6aa
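
The replacement path added below follows a make-before-break pattern:
for each zone rule, the new modify-header action and the new hardware
rule are created first, and only after both succeed are the old rule
and old modify header torn down, so the entry keeps a valid offloaded
rule at every point. A minimal sketch of that pattern, where struct
object, struct params, create_obj() and destroy_obj() are hypothetical
stand-ins for the mlx5 fs_ops and mod_hdr calls, not real APIs:

#include <linux/err.h>

/* Hypothetical sketch of make-before-break replacement; only the
 * ordering matters: build the new object, publish it, then free
 * the old one.
 */
static int replace_object(struct object **slot, const struct params *p)
{
	struct object *fresh, *stale;

	fresh = create_obj(p);		/* build the replacement first */
	if (IS_ERR(fresh))
		return PTR_ERR(fresh);	/* failure leaves the old object installed */

	stale = *slot;
	*slot = fresh;			/* publish the new object... */
	destroy_obj(stale);		/* ...then release the old one */
	return 0;
}

This mirrors mlx5_tc_ct_entry_replace_rule() below, where ct_rule_add()
of the new rule precedes ct_rule_del() of zone_rule->rule, and the old
modify header is destroyed only after the swap.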
@@ -871,6 +871,68 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 	return err;
 }
 
+static int
+mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv,
+			      struct flow_rule *flow_rule,
+			      struct mlx5_ct_entry *entry,
+			      bool nat, u8 zone_restore_id)
+{
+	struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
+	struct mlx5_flow_attr *attr = zone_rule->attr, *old_attr;
+	struct mlx5e_mod_hdr_handle *mh;
+	struct mlx5_ct_fs_rule *rule;
+	struct mlx5_flow_spec *spec;
+	int err;
+
+	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+	if (!spec)
+		return -ENOMEM;
+
+	old_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
+	if (!old_attr) {
+		err = -ENOMEM;
+		goto err_attr;
+	}
+	*old_attr = *attr;
+
+	err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, &mh, zone_restore_id,
+					      nat, mlx5_tc_ct_entry_has_nat(entry));
+	if (err) {
+		ct_dbg("Failed to create ct entry mod hdr");
+		goto err_mod_hdr;
+	}
+
+	mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule);
+	mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
+
+	rule = ct_priv->fs_ops->ct_rule_add(ct_priv->fs, spec, attr, flow_rule);
+	if (IS_ERR(rule)) {
+		err = PTR_ERR(rule);
+		ct_dbg("Failed to add replacement ct entry rule, nat: %d", nat);
+		goto err_rule;
+	}
+
+	ct_priv->fs_ops->ct_rule_del(ct_priv->fs, zone_rule->rule);
+	zone_rule->rule = rule;
+	mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, old_attr, zone_rule->mh);
+	zone_rule->mh = mh;
+
+	kfree(old_attr);
+	kvfree(spec);
+	ct_dbg("Replaced ct entry rule in zone %d", entry->tuple.zone);
+
+	return 0;
+
+err_rule:
+	mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, mh);
+	mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
+err_mod_hdr:
+	kfree(old_attr);
+err_attr:
+	kvfree(spec);
+	return err;
+}
+
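Note the old_attr snapshot above: mlx5_tc_ct_entry_create_mod_hdr()
updates attr in place for the new modify-header action, which is why
the function copies *attr into old_attr first; the copy preserves the
old state so mlx5_tc_ct_entry_destroy_mod_hdr() can release the old
header after the rules are swapped. On the error paths only the newly
created state is undone (the new mod_hdr plus the label mapping
referenced by the updated attr), leaving the original rule and header
installed.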
 static bool
 mlx5_tc_ct_entry_valid(struct mlx5_ct_entry *entry)
 {
@@ -1065,6 +1127,52 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
 	return err;
 }
 
+static int
+mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv,
+			       struct flow_rule *flow_rule,
+			       struct mlx5_ct_entry *entry,
+			       u8 zone_restore_id)
+{
+	int err;
+
+	err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false,
+					    zone_restore_id);
+	if (err)
+		return err;
+
+	err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true,
+					    zone_restore_id);
+	if (err)
+		mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+	return err;
+}
+
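The NAT and non-NAT zone rules are replaced as a pair. If the second
(NAT) replacement fails, the already-replaced non-NAT rule is deleted
outright rather than rolled back to its previous version, so a failed
update always surfaces as an error and the caller tears the whole
entry down, as mlx5_tc_ct_block_flow_offload_replace() below does.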
+static int
+mlx5_tc_ct_block_flow_offload_replace(struct mlx5_ct_ft *ft, struct flow_rule *flow_rule,
+				      struct mlx5_ct_entry *entry, unsigned long cookie)
+{
+	struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
+	int err;
+
+	err = mlx5_tc_ct_entry_replace_rules(ct_priv, flow_rule, entry, ft->zone_restore_id);
+	if (!err)
+		return 0;
+
+	/* If failed to update the entry, then look it up again under ht_lock
+	 * protection and properly delete it.
+	 */
+	spin_lock_bh(&ct_priv->ht_lock);
+	entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
+	if (entry) {
+		rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
+		spin_unlock_bh(&ct_priv->ht_lock);
+		mlx5_tc_ct_entry_put(entry);
+	} else {
+		spin_unlock_bh(&ct_priv->ht_lock);
+	}
+
+	return err;
+}
+
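On a failed replace, the entry may already have been unlinked
concurrently, so it is looked up again under ht_lock and only removed
from ct_entries_ht and released if the lookup still finds it. The
unlock precedes mlx5_tc_ct_entry_put() on the found path, presumably
because the final put can free the entry and trigger teardown work
that should not run under the spinlock.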
 static int
 mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
 				  struct flow_cls_offload *flow)
@@ -1087,9 +1195,17 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
 	spin_lock_bh(&ct_priv->ht_lock);
 	entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
 	if (entry && refcount_inc_not_zero(&entry->refcnt)) {
+		if (entry->restore_cookie == meta_action->ct_metadata.cookie) {
+			spin_unlock_bh(&ct_priv->ht_lock);
+			mlx5_tc_ct_entry_put(entry);
+			return -EEXIST;
+		}
+
+		entry->restore_cookie = meta_action->ct_metadata.cookie;
 		spin_unlock_bh(&ct_priv->ht_lock);
+
+		err = mlx5_tc_ct_block_flow_offload_replace(ft, flow_rule, entry, cookie);
 		mlx5_tc_ct_entry_put(entry);
-		return -EEXIST;
+		return err;
 	}
 	spin_unlock_bh(&ct_priv->ht_lock);
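
With this change, a FLOW_CLS_REPLACE callback for an already-offloaded
connection is treated as a duplicate only when the ct_metadata cookie
is unchanged; otherwise the stored restore_cookie is updated and the
rules are replaced. A simplified restatement of that dispatch (the
ht_lock and refcount handling shown in the hunk above are omitted, and
the restore-cookie type is assumed to be unsigned long):

/* Simplified sketch of the new dispatch, not verbatim kernel code. */
static int handle_existing_entry(struct mlx5_ct_ft *ft,
				 struct flow_rule *flow_rule,
				 struct mlx5_ct_entry *entry,
				 unsigned long cookie,
				 unsigned long new_restore_cookie)
{
	if (entry->restore_cookie == new_restore_cookie)
		return -EEXIST;		/* same ctinfo: a true duplicate */

	entry->restore_cookie = new_restore_cookie;
	return mlx5_tc_ct_block_flow_offload_replace(ft, flow_rule, entry, cookie);
}

This is how, per the commit message, a UDP flow first offloaded in NEW
state can later be updated in place when the flow table re-sends it
with new conntrack metadata.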
...