Commit 53c7bb55 authored by David S. Miller

Merge branch 'nfp-ct-offload'

Simon Horman says:

====================
Introduce conntrack offloading to the nfp driver

Louis Peens says:

This is the first in a series of patches to offload conntrack
to the nfp. The approach followed is to flatten out three
different flow rules into a single offloaded flow. The three
different flows are:

1) The rule sending the packet to conntrack (pre_ct)
2) The rule matching on +trk+est after a packet has been through
   conntrack. (post_ct)
3) The rule received via callback from the netfilter (nft)

In order to offload a flow we need a combination of all three flows, but
they could be added/deleted at different times and in different order.

To solve this we save potential offloadable CT flows in the driver,
and every time we receive a callback we check against these saved flows
for valid merges. Once we have a valid combination of all three flows
this will be offloaded to the NFP. This is demonstrated in the diagram
below.

	+-------------+                      +----------+
	| pre_ct flow +--------+             | nft flow |
	+-------------+        v             +------+---+
	                  +----------+              |
	                  | tc_merge +--------+     |
	                  +----------+        v     v
	+--------------+       ^           +-------------+
	| post_ct flow +-------+       +---+nft_tc merge |
	+--------------+               |   +-------------+
	                               |
	                               |
	                               |
	                               v
	                        Offload to nfp

This series is only up to the point of the pre_ct and post_ct
merges into the tc_merge. Follow up series will continue
to add the nft flows and merging of these flows with the result
of the pre_ct and post_ct merged flows.

Changes since v2:
- nfp: flower-ct: add zone table entry when handling pre/post_ct flows
    Fixed another docstring. Should finally have the patch check
    environment properly configured now to avoid more of these.
- nfp: flower-ct: add tc merge functionality
    Fixed warning found by "kernel test robot <lkp@intel.com>"
    Added code comment explaining chain_index comparison

Changes since v1:
- nfp: flower-ct: add ct zone table
    Fixed unused variable compile warning
    Fixed missing colon in struct description
====================
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5debe0b3 3c863c30
......@@ -51,7 +51,8 @@ nfp-objs += \
flower/metadata.o \
flower/offload.o \
flower/tunnel_conf.o \
flower/qos_conf.o
flower/qos_conf.o \
flower/conntrack.o
endif
ifeq ($(CONFIG_BPF_SYSCALL),y)
......
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2021 Corigine, Inc. */
#include "conntrack.h"
/* The tc merge table is keyed by the pair of parent flow cookies,
 * cookie[0] = pre_ct, cookie[1] = post_ct — hence two unsigned longs.
 */
const struct rhashtable_params nfp_tc_ct_merge_params = {
	.head_offset = offsetof(struct nfp_fl_ct_tc_merge,
				hash_node),
	.key_len = sizeof(unsigned long) * 2,
	.key_offset = offsetof(struct nfp_fl_ct_tc_merge, cookie),
	.automatic_shrinking = true,
};
/**
 * get_hashentry() - Wrapper around hashtable lookup.
 * @ht:		hashtable where entry could be found
 * @key:	key to lookup
 * @params:	hashtable params
 * @size:	size of entry to allocate if not in table
 *
 * Look @key up in @ht. On a miss, allocate a zeroed entry of @size
 * bytes instead; the caller is expected to initialize and insert it.
 *
 * Return: existing or freshly allocated entry, or ERR_PTR(-ENOMEM).
 */
static void *get_hashentry(struct rhashtable *ht, void *key,
			   const struct rhashtable_params params, size_t size)
{
	void *entry;

	entry = rhashtable_lookup_fast(ht, key, params);
	if (!entry) {
		entry = kzalloc(size, GFP_KERNEL);
		if (!entry)
			entry = ERR_PTR(-ENOMEM);
	}

	return entry;
}
/* A flow is a pre_ct flow if it carries a conntrack action whose
 * ct.action field is clear (i.e. a plain "send to conntrack" rule).
 */
bool is_pre_ct_flow(struct flow_cls_offload *flow)
{
	struct flow_action_entry *act;
	int i;

	flow_action_for_each(i, act, &flow->rule->action) {
		if (act->id != FLOW_ACTION_CT)
			continue;
		if (!act->ct.action)
			return true;
	}

	return false;
}
/* A flow is a post_ct flow if it matches on the CT key and requires
 * the "established" conntrack state bit.
 */
bool is_post_ct_flow(struct flow_cls_offload *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	struct flow_match_ct ct;

	if (!(rule->match.dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)))
		return false;

	flow_rule_match_ct(rule, &ct);
	return !!(ct.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED);
}
/* Placeholder for validating that two flow entries are mergeable.
 * Always succeeds for now; the real match-conflict checks are added by
 * follow-up patches in this series.
 */
static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
			      struct nfp_fl_ct_flow_entry *entry2)
{
	return 0;
}
/* nfp_ct_do_tc_merge() - Merge a pre_ct and post_ct flow entry pair.
 * @zt:		zone table the merged entry belongs to
 * @ct_entry1:	one of the two parents, in either order
 * @ct_entry2:	the other parent
 *
 * Creates (or finds) an nfp_fl_ct_tc_merge entry keyed on the pair of
 * parent cookies and links it into both parents' children lists.
 *
 * Return: 0 on success or if the pair was already merged, -errno otherwise.
 */
static int nfp_ct_do_tc_merge(struct nfp_fl_ct_zone_entry *zt,
			      struct nfp_fl_ct_flow_entry *ct_entry1,
			      struct nfp_fl_ct_flow_entry *ct_entry2)
{
	struct nfp_fl_ct_flow_entry *post_ct_entry, *pre_ct_entry;
	struct nfp_fl_ct_tc_merge *m_entry;
	unsigned long new_cookie[2];
	int err;

	/* Callers pass the pair in either order; sort out which is which. */
	if (ct_entry1->type == CT_TYPE_PRE_CT) {
		pre_ct_entry = ct_entry1;
		post_ct_entry = ct_entry2;
	} else {
		post_ct_entry = ct_entry1;
		pre_ct_entry = ct_entry2;
	}

	/* Both halves must have arrived via the same netdev to merge. */
	if (post_ct_entry->netdev != pre_ct_entry->netdev)
		return -EINVAL;
	/* Checks that the chain_index of the filter matches the
	 * chain_index of the GOTO action.
	 */
	if (post_ct_entry->chain_index != pre_ct_entry->chain_index)
		return -EINVAL;

	err = nfp_ct_merge_check(post_ct_entry, pre_ct_entry);
	if (err)
		return err;

	/* The merge key is (pre cookie, post cookie) in that fixed order. */
	new_cookie[0] = pre_ct_entry->cookie;
	new_cookie[1] = post_ct_entry->cookie;
	m_entry = get_hashentry(&zt->tc_merge_tb, &new_cookie,
				nfp_tc_ct_merge_params, sizeof(*m_entry));
	if (IS_ERR(m_entry))
		return PTR_ERR(m_entry);

	/* m_entry already present, not merging again */
	if (!memcmp(&new_cookie, m_entry->cookie, sizeof(new_cookie)))
		return 0;

	memcpy(&m_entry->cookie, &new_cookie, sizeof(new_cookie));
	m_entry->zt = zt;
	m_entry->post_ct_parent = post_ct_entry;
	m_entry->pre_ct_parent = pre_ct_entry;

	/* Add this entry to the pre_ct and post_ct lists */
	list_add(&m_entry->post_ct_list, &post_ct_entry->children);
	list_add(&m_entry->pre_ct_list, &pre_ct_entry->children);
	INIT_LIST_HEAD(&m_entry->children);

	err = rhashtable_insert_fast(&zt->tc_merge_tb, &m_entry->hash_node,
				     nfp_tc_ct_merge_params);
	if (err)
		goto err_ct_tc_merge_insert;
	zt->tc_merge_count++;

	return 0;

err_ct_tc_merge_insert:
	/* Unlink from both parents before freeing the half-built entry. */
	list_del(&m_entry->post_ct_list);
	list_del(&m_entry->pre_ct_list);
	kfree(m_entry);
	return err;
}
/* get_nfp_zone_entry() - Get or create the zone table entry for @zone.
 * @priv:	flower app private data
 * @zone:	conntrack zone number (ignored as a key when wildcarded)
 * @wildcarded:	true if the match wildcards the ct_zone
 *
 * Exact-zone entries live in priv->ct_zone_table; a single special
 * entry (priv->ct_zone_wc) serves all wildcarded matches.
 *
 * Return: the (possibly pre-existing) zone entry, or ERR_PTR on failure.
 */
static struct
nfp_fl_ct_zone_entry *get_nfp_zone_entry(struct nfp_flower_priv *priv,
					 u16 zone, bool wildcarded)
{
	struct nfp_fl_ct_zone_entry *zt;
	int err;

	if (wildcarded && priv->ct_zone_wc)
		return priv->ct_zone_wc;

	if (!wildcarded) {
		zt = get_hashentry(&priv->ct_zone_table, &zone,
				   nfp_zone_table_params, sizeof(*zt));

		/* If priv is set this is an existing entry, just return it */
		if (IS_ERR(zt) || zt->priv)
			return zt;
	} else {
		zt = kzalloc(sizeof(*zt), GFP_KERNEL);
		if (!zt)
			return ERR_PTR(-ENOMEM);
	}

	zt->zone = zone;
	zt->priv = priv;
	zt->nft = NULL;

	/* init the various hash tables and lists */
	INIT_LIST_HEAD(&zt->pre_ct_list);
	INIT_LIST_HEAD(&zt->post_ct_list);

	err = rhashtable_init(&zt->tc_merge_tb, &nfp_tc_ct_merge_params);
	if (err)
		goto err_tc_merge_tb_init;

	if (wildcarded) {
		/* The wildcard entry is never hashed, only cached in priv. */
		priv->ct_zone_wc = zt;
	} else {
		err = rhashtable_insert_fast(&priv->ct_zone_table,
					     &zt->hash_node,
					     nfp_zone_table_params);
		if (err)
			goto err_zone_insert;
	}

	return zt;

err_zone_insert:
	rhashtable_destroy(&zt->tc_merge_tb);
err_tc_merge_tb_init:
	kfree(zt);
	return ERR_PTR(err);
}
/* nfp_fl_ct_add_flow() - Save a TC flow as an nfp_fl_ct_flow_entry.
 * @zt:		zone table this flow belongs to
 * @netdev:	ingress netdev of the flow
 * @flow:	TC flower classifier offload structure
 * @extack:	extack pointer for errors
 *
 * Copies the rule's action entries (the TC flow object does not outlive
 * this callback) and registers a cookie->entry mapping in the flower
 * priv ct_map_table.
 *
 * Return: the new entry, or ERR_PTR on failure.
 */
static struct
nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,
					 struct net_device *netdev,
					 struct flow_cls_offload *flow,
					 struct netlink_ext_ack *extack)
{
	struct nfp_fl_ct_flow_entry *entry;
	struct nfp_fl_ct_map_entry *map;
	struct flow_action_entry *act;
	int err, i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	entry->zt = zt;
	entry->netdev = netdev;
	entry->cookie = flow->cookie;
	entry->rule = flow_rule_alloc(flow->rule->action.num_entries);
	if (!entry->rule) {
		err = -ENOMEM;
		goto err_pre_ct_act;
	}
	/* NOTE(review): the match dissector/mask/key are shared by pointer,
	 * not deep-copied — assumes they outlive this entry; TODO confirm.
	 */
	entry->rule->match.dissector = flow->rule->match.dissector;
	entry->rule->match.mask = flow->rule->match.mask;
	entry->rule->match.key = flow->rule->match.key;
	entry->chain_index = flow->common.chain_index;
	entry->tun_offset = NFP_FL_CT_NO_TUN;

	/* Copy over action data. Unfortunately we do not get a handle to the
	 * original tcf_action data, and the flow objects gets destroyed, so we
	 * cannot just save a pointer to this either, so need to copy over the
	 * data unfortunately.
	 */
	entry->rule->action.num_entries = flow->rule->action.num_entries;
	flow_action_for_each(i, act, &flow->rule->action) {
		struct flow_action_entry *new_act;

		new_act = &entry->rule->action.entries[i];
		memcpy(new_act, act, sizeof(struct flow_action_entry));
		/* Entunnel is a special case, need to allocate and copy
		 * tunnel info.
		 */
		if (act->id == FLOW_ACTION_TUNNEL_ENCAP) {
			struct ip_tunnel_info *tun = act->tunnel;
			size_t tun_size = sizeof(*tun) + tun->options_len;

			new_act->tunnel = kmemdup(tun, tun_size, GFP_ATOMIC);
			if (!new_act->tunnel) {
				err = -ENOMEM;
				goto err_pre_ct_tun_cp;
			}
			/* Only the offset of the last encap action is kept;
			 * the cleanup paths free just that one copy.
			 */
			entry->tun_offset = i;
		}
	}

	INIT_LIST_HEAD(&entry->children);

	/* Now add a ct map entry to flower-priv */
	map = get_hashentry(&zt->priv->ct_map_table, &flow->cookie,
			    nfp_ct_map_params, sizeof(*map));
	if (IS_ERR(map)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "offload error: ct map entry creation failed");
		err = -ENOMEM;
		goto err_ct_flow_insert;
	}
	map->cookie = flow->cookie;
	map->ct_entry = entry;
	err = rhashtable_insert_fast(&zt->priv->ct_map_table,
				     &map->hash_node,
				     nfp_ct_map_params);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "offload error: ct map entry table add failed");
		goto err_map_insert;
	}

	return entry;

err_map_insert:
	kfree(map);
err_ct_flow_insert:
	if (entry->tun_offset != NFP_FL_CT_NO_TUN)
		kfree(entry->rule->action.entries[entry->tun_offset].tunnel);
err_pre_ct_tun_cp:
	kfree(entry->rule);
err_pre_ct_act:
	kfree(entry);
	return ERR_PTR(err);
}
/* Placeholder for freeing nft merge entries that are children of @entry.
 * Empty for now; nft merge support is added by follow-up patches.
 * @is_nft_flow: true when @entry is a CT_TYPE_NFT flow entry.
 */
static void nfp_free_nft_merge_children(void *entry, bool is_nft_flow)
{
}
/* nfp_del_tc_merge_entry() - Remove a tc merge entry from its zone table.
 * @m_ent: merge entry to delete
 *
 * Unhashes the entry, unlinks it from both parents' children lists,
 * frees any nft merge children and then the entry itself.
 */
static void nfp_del_tc_merge_entry(struct nfp_fl_ct_tc_merge *m_ent)
{
	struct nfp_fl_ct_zone_entry *zt;
	int err;

	zt = m_ent->zt;
	err = rhashtable_remove_fast(&zt->tc_merge_tb,
				     &m_ent->hash_node,
				     nfp_tc_ct_merge_params);
	if (err)
		pr_warn("WARNING: could not remove merge_entry from hashtable\n");
	zt->tc_merge_count--;
	/* These list_dels detach m_ent from its pre/post parents. */
	list_del(&m_ent->post_ct_list);
	list_del(&m_ent->pre_ct_list);

	if (!list_empty(&m_ent->children))
		nfp_free_nft_merge_children(m_ent, false);
	kfree(m_ent);
}
static void nfp_free_tc_merge_children(struct nfp_fl_ct_flow_entry *entry)
{
struct nfp_fl_ct_tc_merge *m_ent, *tmp;
switch (entry->type) {
case CT_TYPE_PRE_CT:
list_for_each_entry_safe(m_ent, tmp, &entry->children, pre_ct_list) {
nfp_del_tc_merge_entry(m_ent);
}
break;
case CT_TYPE_POST_CT:
list_for_each_entry_safe(m_ent, tmp, &entry->children, post_ct_list) {
nfp_del_tc_merge_entry(m_ent);
}
break;
default:
break;
}
}
/* Free a flow entry: unlink it from its zone list, tear down any merge
 * entries it participates in, and release the copied rule data.
 * (Kernel-doc for this export lives in conntrack.h.)
 */
void nfp_fl_ct_clean_flow_entry(struct nfp_fl_ct_flow_entry *entry)
{
	list_del(&entry->list_node);

	if (!list_empty(&entry->children)) {
		if (entry->type == CT_TYPE_NFT)
			nfp_free_nft_merge_children(entry, true);
		else
			nfp_free_tc_merge_children(entry);
	}
	/* The tunnel info was kmemdup'ed in nfp_fl_ct_add_flow(); free it. */
	if (entry->tun_offset != NFP_FL_CT_NO_TUN)
		kfree(entry->rule->action.entries[entry->tun_offset].tunnel);
	kfree(entry->rule);
	kfree(entry);
}
static struct flow_action_entry *get_flow_act(struct flow_cls_offload *flow,
enum flow_action_id act_id)
{
struct flow_action_entry *act = NULL;
int i;
flow_action_for_each(i, act, &flow->rule->action) {
if (act->id == act_id)
return act;
}
return NULL;
}
static void
nfp_ct_merge_tc_entries(struct nfp_fl_ct_flow_entry *ct_entry1,
struct nfp_fl_ct_zone_entry *zt_src,
struct nfp_fl_ct_zone_entry *zt_dst)
{
struct nfp_fl_ct_flow_entry *ct_entry2, *ct_tmp;
struct list_head *ct_list;
if (ct_entry1->type == CT_TYPE_PRE_CT)
ct_list = &zt_src->post_ct_list;
else if (ct_entry1->type == CT_TYPE_POST_CT)
ct_list = &zt_src->pre_ct_list;
else
return;
list_for_each_entry_safe(ct_entry2, ct_tmp, ct_list,
list_node) {
nfp_ct_do_tc_merge(zt_dst, ct_entry2, ct_entry1);
}
}
/* Handle a -trk (pre_ct) rule: validate its actions, register it in the
 * relevant zone table and attempt merges with known post_ct entries.
 * (Kernel-doc for this export lives in conntrack.h.)
 */
int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
			    struct net_device *netdev,
			    struct flow_cls_offload *flow,
			    struct netlink_ext_ack *extack)
{
	struct flow_action_entry *ct_act, *ct_goto;
	struct nfp_fl_ct_flow_entry *ct_entry;
	struct nfp_fl_ct_zone_entry *zt;

	/* A pre_ct rule must carry both a CT action and a GOTO action. */
	ct_act = get_flow_act(flow, FLOW_ACTION_CT);
	if (!ct_act) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: Conntrack action empty in conntrack offload");
		return -EOPNOTSUPP;
	}

	ct_goto = get_flow_act(flow, FLOW_ACTION_GOTO);
	if (!ct_goto) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: Conntrack requires ACTION_GOTO");
		return -EOPNOTSUPP;
	}

	zt = get_nfp_zone_entry(priv, ct_act->ct.zone, false);
	if (IS_ERR(zt)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "offload error: Could not create zone table entry");
		return PTR_ERR(zt);
	}

	/* First pre_ct rule for this zone donates its nf_flowtable. */
	if (!zt->nft)
		zt->nft = ct_act->ct.flow_table;

	/* Add entry to pre_ct_list */
	ct_entry = nfp_fl_ct_add_flow(zt, netdev, flow, extack);
	if (IS_ERR(ct_entry))
		return PTR_ERR(ct_entry);
	ct_entry->type = CT_TYPE_PRE_CT;
	/* Merging keys off the GOTO target chain, not the filter's chain. */
	ct_entry->chain_index = ct_goto->chain_index;
	list_add(&ct_entry->list_node, &zt->pre_ct_list);
	zt->pre_ct_count++;

	nfp_ct_merge_tc_entries(ct_entry, zt, zt);

	/* Need to check and merge with tables in the wc_zone as well */
	if (priv->ct_zone_wc)
		nfp_ct_merge_tc_entries(ct_entry, priv->ct_zone_wc, zt);

	/* Actual offload is not implemented in this part of the series,
	 * so report the rule as unsupported for now.
	 */
	NL_SET_ERR_MSG_MOD(extack, "unsupported offload: Conntrack action not supported");
	return -EOPNOTSUPP;
}
/* Handle a +trk+est (post_ct) rule: register it in the relevant zone
 * table and attempt merges with known pre_ct entries.
 * (Kernel-doc for this export lives in conntrack.h.)
 */
int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
			     struct net_device *netdev,
			     struct flow_cls_offload *flow,
			     struct netlink_ext_ack *extack)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	struct nfp_fl_ct_flow_entry *ct_entry;
	struct nfp_fl_ct_zone_entry *zt;
	bool wildcarded = false;
	struct flow_match_ct ct;

	flow_rule_match_ct(rule, &ct);
	/* Only a fully wildcarded or fully specified ct_zone is supported. */
	if (!ct.mask->ct_zone) {
		wildcarded = true;
	} else if (ct.mask->ct_zone != U16_MAX) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: partially wildcarded ct_zone is not supported");
		return -EOPNOTSUPP;
	}

	zt = get_nfp_zone_entry(priv, ct.key->ct_zone, wildcarded);
	if (IS_ERR(zt)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "offload error: Could not create zone table entry");
		return PTR_ERR(zt);
	}

	/* Add entry to post_ct_list */
	ct_entry = nfp_fl_ct_add_flow(zt, netdev, flow, extack);
	if (IS_ERR(ct_entry))
		return PTR_ERR(ct_entry);

	ct_entry->type = CT_TYPE_POST_CT;
	ct_entry->chain_index = flow->common.chain_index;
	list_add(&ct_entry->list_node, &zt->post_ct_list);
	zt->post_ct_count++;

	if (wildcarded) {
		/* Iterate through all zone tables if not empty, look for merges with
		 * pre_ct entries and merge them.
		 */
		struct rhashtable_iter iter;
		struct nfp_fl_ct_zone_entry *zone_table;

		rhashtable_walk_enter(&priv->ct_zone_table, &iter);
		rhashtable_walk_start(&iter);
		while ((zone_table = rhashtable_walk_next(&iter)) != NULL) {
			/* Walker resize interruptions surface as ERR_PTR. */
			if (IS_ERR(zone_table))
				continue;
			/* Pause the walk around the merge, per rhashtable
			 * walker rules — presumably because merging can
			 * sleep; confirm against the rhashtable API docs.
			 */
			rhashtable_walk_stop(&iter);
			nfp_ct_merge_tc_entries(ct_entry, zone_table, zone_table);
			rhashtable_walk_start(&iter);
		}
		rhashtable_walk_stop(&iter);
		rhashtable_walk_exit(&iter);
	} else {
		nfp_ct_merge_tc_entries(ct_entry, zt, zt);
	}

	/* Actual offload is not implemented in this part of the series,
	 * so report the rule as unsupported for now.
	 */
	NL_SET_ERR_MSG_MOD(extack, "unsupported offload: Conntrack match not supported");
	return -EOPNOTSUPP;
}
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2021 Corigine, Inc. */
#ifndef __NFP_FLOWER_CONNTRACK_H__
#define __NFP_FLOWER_CONNTRACK_H__ 1
#include "main.h"
#define NFP_FL_CT_NO_TUN 0xff
extern const struct rhashtable_params nfp_zone_table_params;
extern const struct rhashtable_params nfp_ct_map_params;
extern const struct rhashtable_params nfp_tc_ct_merge_params;
/**
 * struct nfp_fl_ct_zone_entry - Zone entry containing conntrack flow information
 * @zone:	The zone number, used as lookup key in hashtable
 * @hash_node:	Used by the hashtable
 * @priv:	Pointer to nfp_flower_priv data
 * @nft:	Pointer to nf_flowtable for this zone; NULL until the first
 *		pre_ct rule for the zone provides one
 *
 * @pre_ct_list:	The pre_ct_list of nfp_fl_ct_flow_entry entries
 * @pre_ct_count:	Keep count of the number of pre_ct entries
 *
 * @post_ct_list:	The post_ct_list of nfp_fl_ct_flow_entry entries
 * @post_ct_count:	Keep count of the number of post_ct entries
 *
 * @tc_merge_tb:	The table of merged tc flows
 * @tc_merge_count:	Keep count of the number of merged tc entries
 */
struct nfp_fl_ct_zone_entry {
	u16 zone;
	struct rhash_head hash_node;
	struct nfp_flower_priv *priv;
	struct nf_flowtable *nft;

	struct list_head pre_ct_list;
	unsigned int pre_ct_count;

	struct list_head post_ct_list;
	unsigned int post_ct_count;

	struct rhashtable tc_merge_tb;
	unsigned int tc_merge_count;
};
/**
 * enum ct_entry_type - Type of a conntrack flow entry
 * @CT_TYPE_PRE_CT:	Rule that sends the packet to conntrack
 * @CT_TYPE_NFT:	Rule received via the netfilter callback
 * @CT_TYPE_POST_CT:	Rule matching +trk+est after conntrack
 */
enum ct_entry_type {
	CT_TYPE_PRE_CT,
	CT_TYPE_NFT,
	CT_TYPE_POST_CT,
};
/**
 * struct nfp_fl_ct_flow_entry - Flow entry containing conntrack flow information
 * @cookie:	Flow cookie, same as original TC flow, used as key
 * @list_node:	Used by the list
 * @chain_index:	Chain index of the original flow
 * @type:	Type of pre-entry from enum ct_entry_type
 * @netdev:	netdev structure.
 * @zt:	Reference to the zone table this belongs to
 * @children:	List of tc_merge flows this flow forms part of
 * @rule:	Reference to the original TC flow rule
 * @stats:	Used to cache stats for updating
 * @tun_offset:	Used to indicate tunnel action offset in action list;
 *		NFP_FL_CT_NO_TUN when no tunnel action was copied
 */
struct nfp_fl_ct_flow_entry {
	unsigned long cookie;
	struct list_head list_node;
	u32 chain_index;
	enum ct_entry_type type;
	struct net_device *netdev;
	struct nfp_fl_ct_zone_entry *zt;
	struct list_head children;
	struct flow_rule *rule;
	struct flow_stats stats;
	u8 tun_offset; // Set to NFP_FL_CT_NO_TUN if no tun
};
/**
 * struct nfp_fl_ct_tc_merge - Merge of two flows from tc
 * @cookie:	Flow cookie, combination of the pre ([0]) and post ([1])
 *		ct parent cookies, used as the hashtable key
 * @hash_node:	Used by the hashtable
 * @pre_ct_list:	This entry is part of a pre_ct_list
 * @post_ct_list:	This entry is part of a post_ct_list
 * @zt:	Reference to the zone table this belongs to
 * @pre_ct_parent:	The pre_ct_parent
 * @post_ct_parent:	The post_ct_parent
 * @children:	List of nft merged entries
 */
struct nfp_fl_ct_tc_merge {
	unsigned long cookie[2];
	struct rhash_head hash_node;
	struct list_head pre_ct_list;
	struct list_head post_ct_list;
	struct nfp_fl_ct_zone_entry *zt;
	struct nfp_fl_ct_flow_entry *pre_ct_parent;
	struct nfp_fl_ct_flow_entry *post_ct_parent;
	struct list_head children;
};
/**
 * struct nfp_fl_ct_map_entry - Map between flow cookie and specific ct_flow
 * @cookie:	Flow cookie, same as original TC flow, used as key into
 *		the flower priv ct_map_table
 * @hash_node:	Used by the hashtable
 * @ct_entry:	Pointer to corresponding ct_entry
 */
struct nfp_fl_ct_map_entry {
	unsigned long cookie;
	struct rhash_head hash_node;
	struct nfp_fl_ct_flow_entry *ct_entry;
};
/* Classification helpers: detect pre_ct (-trk with CT action) and
 * post_ct (+trk+est match) flower rules respectively.
 */
bool is_pre_ct_flow(struct flow_cls_offload *flow);
bool is_post_ct_flow(struct flow_cls_offload *flow);

/**
 * nfp_fl_ct_handle_pre_ct() - Handles -trk conntrack rules
 * @priv:	Pointer to app priv
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 * @extack:	Extack pointer for errors
 *
 * Adds a new entry to the relevant zone table and tries to
 * merge with other +trk+est entries and offload if possible.
 * Note: offload itself is not implemented yet in this series, so the
 * current implementation always ends with -EOPNOTSUPP.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
			    struct net_device *netdev,
			    struct flow_cls_offload *flow,
			    struct netlink_ext_ack *extack);
/**
 * nfp_fl_ct_handle_post_ct() - Handles +trk+est conntrack rules
 * @priv:	Pointer to app priv
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 * @extack:	Extack pointer for errors
 *
 * Adds a new entry to the relevant zone table and tries to
 * merge with other -trk entries and offload if possible.
 * Note: offload itself is not implemented yet in this series, so the
 * current implementation always ends with -EOPNOTSUPP.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
			     struct net_device *netdev,
			     struct flow_cls_offload *flow,
			     struct netlink_ext_ack *extack);

/**
 * nfp_fl_ct_clean_flow_entry() - Free a nfp_fl_ct_flow_entry
 * @entry:	Flow entry to cleanup
 */
void nfp_fl_ct_clean_flow_entry(struct nfp_fl_ct_flow_entry *entry);
#endif
......@@ -193,6 +193,9 @@ struct nfp_fl_internal_ports {
* @qos_stats_lock: Lock on qos stats updates
* @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded
* @merge_table: Hash table to store merged flows
* @ct_zone_table: Hash table used to store the different zones
* @ct_zone_wc: Special zone entry for wildcarded zone matches
* @ct_map_table: Hash table used to reference ct flows
*/
struct nfp_flower_priv {
struct nfp_app *app;
......@@ -227,6 +230,9 @@ struct nfp_flower_priv {
spinlock_t qos_stats_lock; /* Protect the qos stats */
int pre_tun_rule_cnt;
struct rhashtable merge_table;
struct rhashtable ct_zone_table;
struct nfp_fl_ct_zone_entry *ct_zone_wc;
struct rhashtable ct_map_table;
};
/**
......
......@@ -9,6 +9,7 @@
#include <net/pkt_cls.h>
#include "cmsg.h"
#include "conntrack.h"
#include "main.h"
#include "../nfp_app.h"
......@@ -496,6 +497,20 @@ const struct rhashtable_params merge_table_params = {
.key_len = sizeof(u64),
};
/* Zone table is keyed by the 16-bit conntrack zone number. */
const struct rhashtable_params nfp_zone_table_params = {
	.head_offset = offsetof(struct nfp_fl_ct_zone_entry, hash_node),
	.key_len = sizeof(u16),
	.key_offset = offsetof(struct nfp_fl_ct_zone_entry, zone),
	.automatic_shrinking = false,
};
/* Cookie->ct_entry map table is keyed by the original TC flow cookie. */
const struct rhashtable_params nfp_ct_map_params = {
	.head_offset = offsetof(struct nfp_fl_ct_map_entry, hash_node),
	.key_len = sizeof(unsigned long),
	.key_offset = offsetof(struct nfp_fl_ct_map_entry, cookie),
	.automatic_shrinking = true,
};
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
unsigned int host_num_mems)
{
......@@ -516,6 +531,14 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
if (err)
goto err_free_stats_ctx_table;
err = rhashtable_init(&priv->ct_zone_table, &nfp_zone_table_params);
if (err)
goto err_free_merge_table;
err = rhashtable_init(&priv->ct_map_table, &nfp_ct_map_params);
if (err)
goto err_free_ct_zone_table;
get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
/* Init ring buffer and unallocated mask_ids. */
......@@ -523,7 +546,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
if (!priv->mask_ids.mask_id_free_list.buf)
goto err_free_merge_table;
goto err_free_ct_map_table;
priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
......@@ -560,6 +583,10 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
kfree(priv->mask_ids.last_used);
err_free_mask_id:
kfree(priv->mask_ids.mask_id_free_list.buf);
err_free_ct_map_table:
rhashtable_destroy(&priv->ct_map_table);
err_free_ct_zone_table:
rhashtable_destroy(&priv->ct_zone_table);
err_free_merge_table:
rhashtable_destroy(&priv->merge_table);
err_free_stats_ctx_table:
......@@ -569,6 +596,72 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
return -ENOMEM;
}
/* Remove every flow entry on @list from the ct map table and free it,
 * together with its map entry. Shared by the pre_ct and post_ct
 * cleanup paths, which were previously duplicated.
 */
static void nfp_ct_flow_list_destroy(struct rhashtable *m_table,
				     struct list_head *list)
{
	struct nfp_fl_ct_flow_entry *entry, *tmp;
	struct nfp_fl_ct_map_entry *map;

	list_for_each_entry_safe(entry, tmp, list, list_node) {
		map = rhashtable_lookup_fast(m_table,
					     &entry->cookie,
					     nfp_ct_map_params);
		/* Guard against a missing map entry; the previous code
		 * dereferenced a NULL lookup result unconditionally.
		 */
		if (!WARN_ON_ONCE(!map)) {
			WARN_ON_ONCE(rhashtable_remove_fast(m_table,
							    &map->hash_node,
							    nfp_ct_map_params));
			kfree(map);
		}
		nfp_fl_ct_clean_flow_entry(entry);
	}
}

/* Free a zone table entry, cleaning up any flow entries still linked to
 * it (which indicates a bookkeeping bug and is warned about) and
 * destroying its tc merge table.
 */
static void nfp_zone_table_entry_destroy(struct nfp_fl_ct_zone_entry *zt)
{
	if (!zt)
		return;

	if (!list_empty(&zt->pre_ct_list)) {
		WARN_ONCE(1, "pre_ct_list not empty as expected, cleaning up\n");
		nfp_ct_flow_list_destroy(&zt->priv->ct_map_table,
					 &zt->pre_ct_list);
	}

	if (!list_empty(&zt->post_ct_list)) {
		WARN_ONCE(1, "post_ct_list not empty as expected, cleaning up\n");
		nfp_ct_flow_list_destroy(&zt->priv->ct_map_table,
					 &zt->post_ct_list);
	}

	rhashtable_free_and_destroy(&zt->tc_merge_tb,
				    nfp_check_rhashtable_empty, NULL);
	kfree(zt);
}
/* rhashtable_free_and_destroy() callback for the ct zone table. */
static void nfp_free_zone_table_entry(void *ptr, void *arg)
{
	struct nfp_fl_ct_zone_entry *zt = ptr;

	nfp_zone_table_entry_destroy(zt);
}
/* rhashtable_free_and_destroy() callback for the ct map table.
 * kfree(NULL) is a no-op, so no explicit NULL check is needed.
 */
static void nfp_free_map_table_entry(void *ptr, void *arg)
{
	kfree(ptr);
}
void nfp_flower_metadata_cleanup(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
......@@ -582,6 +675,12 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
nfp_check_rhashtable_empty, NULL);
rhashtable_free_and_destroy(&priv->merge_table,
nfp_check_rhashtable_empty, NULL);
rhashtable_free_and_destroy(&priv->ct_zone_table,
nfp_free_zone_table_entry, NULL);
nfp_zone_table_entry_destroy(priv->ct_zone_wc);
rhashtable_free_and_destroy(&priv->ct_map_table,
nfp_free_map_table_entry, NULL);
kvfree(priv->stats);
kfree(priv->mask_ids.mask_id_free_list.buf);
kfree(priv->mask_ids.last_used);
......
......@@ -7,6 +7,7 @@
#include "cmsg.h"
#include "main.h"
#include "conntrack.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
......@@ -1276,6 +1277,20 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
return 0;
}
/* Gate non-conntrack offload: reject rules that match on the CT key or
 * live on a non-zero chain; those are handled by the conntrack paths.
 */
static bool offload_pre_check(struct flow_cls_offload *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);

	if (rule->match.dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT))
		return false;

	return flow->common.chain_index == 0;
}
/**
* nfp_flower_add_offload() - Adds a new flow to hardware.
* @app: Pointer to the APP handle
......@@ -1302,6 +1317,15 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (nfp_netdev_is_nfp_repr(netdev))
port = nfp_port_from_netdev(netdev);
if (is_pre_ct_flow(flow))
return nfp_fl_ct_handle_pre_ct(priv, netdev, flow, extack);
if (is_post_ct_flow(flow))
return nfp_fl_ct_handle_post_ct(priv, netdev, flow, extack);
if (!offload_pre_check(flow))
return -EOPNOTSUPP;
key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
if (!key_layer)
return -ENOMEM;
......@@ -1646,9 +1670,10 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
struct flow_cls_common_offload *common = type_data;
struct nfp_repr *repr = cb_priv;
if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
if (!tc_can_offload_extack(repr->netdev, common->extack))
return -EOPNOTSUPP;
switch (type) {
......@@ -1746,10 +1771,6 @@ static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
struct flow_cls_offload *flower = type_data;
if (flower->common.chain_index)
return -EOPNOTSUPP;
switch (type) {
case TC_SETUP_CLSFLOWER:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment