Commit 46a83c85 authored by Wentao Jia, committed by Jakub Kicinski

nfp: flower: prepare for parameterisation of number of offload rules

A fixed number of offload flow rules is only supported in the scenario of a
single ct zone. In the scenario of multiple ct zones, a dynamic and larger
number of offload flow rules is required. In order to support multiple ct
zones, the parameter num_rules is added for offloading flow rules.
Signed-off-by: Wentao Jia <wentao.jia@corigine.com>
Acked-by: Simon Horman <simon.horman@corigine.com>
Signed-off-by: Louis Peens <louis.peens@corigine.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 3e44d199
...@@ -693,34 +693,34 @@ static void nfp_fl_get_csum_flag(struct flow_action_entry *a_in, u8 ip_proto, u3 ...@@ -693,34 +693,34 @@ static void nfp_fl_get_csum_flag(struct flow_action_entry *a_in, u8 ip_proto, u3
static int nfp_fl_merge_actions_offload(struct flow_rule **rules, static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
struct nfp_flower_priv *priv, struct nfp_flower_priv *priv,
struct net_device *netdev, struct net_device *netdev,
struct nfp_fl_payload *flow_pay) struct nfp_fl_payload *flow_pay,
int num_rules)
{ {
enum flow_action_hw_stats tmp_stats = FLOW_ACTION_HW_STATS_DONT_CARE; enum flow_action_hw_stats tmp_stats = FLOW_ACTION_HW_STATS_DONT_CARE;
struct flow_action_entry *a_in; struct flow_action_entry *a_in;
int i, j, num_actions, id; int i, j, id, num_actions = 0;
struct flow_rule *a_rule; struct flow_rule *a_rule;
int err = 0, offset = 0; int err = 0, offset = 0;
num_actions = rules[CT_TYPE_PRE_CT]->action.num_entries + for (i = 0; i < num_rules; i++)
rules[CT_TYPE_NFT]->action.num_entries + num_actions += rules[i]->action.num_entries;
rules[CT_TYPE_POST_CT]->action.num_entries;
/* Add one action to make sure there is enough room to add an checksum action /* Add one action to make sure there is enough room to add an checksum action
* when do nat. * when do nat.
*/ */
a_rule = flow_rule_alloc(num_actions + 1); a_rule = flow_rule_alloc(num_actions + (num_rules / 2));
if (!a_rule) if (!a_rule)
return -ENOMEM; return -ENOMEM;
/* Actions need a BASIC dissector. */
a_rule->match = rules[CT_TYPE_PRE_CT]->match;
/* post_ct entry have one action at least. */ /* post_ct entry have one action at least. */
if (rules[CT_TYPE_POST_CT]->action.num_entries != 0) { if (rules[num_rules - 1]->action.num_entries != 0)
tmp_stats = rules[CT_TYPE_POST_CT]->action.entries[0].hw_stats; tmp_stats = rules[num_rules - 1]->action.entries[0].hw_stats;
}
/* Actions need a BASIC dissector. */
a_rule->match = rules[0]->match;
/* Copy actions */ /* Copy actions */
for (j = 0; j < _CT_TYPE_MAX; j++) { for (j = 0; j < num_rules; j++) {
u32 csum_updated = 0; u32 csum_updated = 0;
u8 ip_proto = 0; u8 ip_proto = 0;
...@@ -758,8 +758,9 @@ static int nfp_fl_merge_actions_offload(struct flow_rule **rules, ...@@ -758,8 +758,9 @@ static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
/* nft entry is generated by tc ct, which mangle action do not care /* nft entry is generated by tc ct, which mangle action do not care
* the stats, inherit the post entry stats to meet the * the stats, inherit the post entry stats to meet the
* flow_action_hw_stats_check. * flow_action_hw_stats_check.
* nft entry flow rules are at odd array index.
*/ */
if (j == CT_TYPE_NFT) { if (j & 0x01) {
if (a_in->hw_stats == FLOW_ACTION_HW_STATS_DONT_CARE) if (a_in->hw_stats == FLOW_ACTION_HW_STATS_DONT_CARE)
a_in->hw_stats = tmp_stats; a_in->hw_stats = tmp_stats;
nfp_fl_get_csum_flag(a_in, ip_proto, &csum_updated); nfp_fl_get_csum_flag(a_in, ip_proto, &csum_updated);
...@@ -801,6 +802,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry) ...@@ -801,6 +802,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
struct nfp_fl_payload *flow_pay; struct nfp_fl_payload *flow_pay;
struct flow_rule *rules[_CT_TYPE_MAX]; struct flow_rule *rules[_CT_TYPE_MAX];
int num_rules = _CT_TYPE_MAX;
u8 *key, *msk, *kdata, *mdata; u8 *key, *msk, *kdata, *mdata;
struct nfp_port *port = NULL; struct nfp_port *port = NULL;
struct net_device *netdev; struct net_device *netdev;
...@@ -820,7 +822,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry) ...@@ -820,7 +822,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
memset(&key_map, 0, sizeof(key_map)); memset(&key_map, 0, sizeof(key_map));
/* Calculate the resultant key layer and size for offload */ /* Calculate the resultant key layer and size for offload */
for (i = 0; i < _CT_TYPE_MAX; i++) { for (i = 0; i < num_rules; i++) {
err = nfp_flower_calculate_key_layers(priv->app, err = nfp_flower_calculate_key_layers(priv->app,
m_entry->netdev, m_entry->netdev,
&tmp_layer, rules[i], &tmp_layer, rules[i],
...@@ -886,7 +888,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry) ...@@ -886,7 +888,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
* that the layer is not present. * that the layer is not present.
*/ */
if (!qinq_sup) { if (!qinq_sup) {
for (i = 0; i < _CT_TYPE_MAX; i++) { for (i = 0; i < num_rules; i++) {
offset = key_map[FLOW_PAY_META_TCI]; offset = key_map[FLOW_PAY_META_TCI];
key = kdata + offset; key = kdata + offset;
msk = mdata + offset; msk = mdata + offset;
...@@ -900,7 +902,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry) ...@@ -900,7 +902,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
offset = key_map[FLOW_PAY_MAC_MPLS]; offset = key_map[FLOW_PAY_MAC_MPLS];
key = kdata + offset; key = kdata + offset;
msk = mdata + offset; msk = mdata + offset;
for (i = 0; i < _CT_TYPE_MAX; i++) { for (i = 0; i < num_rules; i++) {
nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)key, nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)key,
(struct nfp_flower_mac_mpls *)msk, (struct nfp_flower_mac_mpls *)msk,
rules[i]); rules[i]);
...@@ -916,7 +918,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry) ...@@ -916,7 +918,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
offset = key_map[FLOW_PAY_IPV4]; offset = key_map[FLOW_PAY_IPV4];
key = kdata + offset; key = kdata + offset;
msk = mdata + offset; msk = mdata + offset;
for (i = 0; i < _CT_TYPE_MAX; i++) { for (i = 0; i < num_rules; i++) {
nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)key, nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)key,
(struct nfp_flower_ipv4 *)msk, (struct nfp_flower_ipv4 *)msk,
rules[i]); rules[i]);
...@@ -927,7 +929,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry) ...@@ -927,7 +929,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
offset = key_map[FLOW_PAY_IPV6]; offset = key_map[FLOW_PAY_IPV6];
key = kdata + offset; key = kdata + offset;
msk = mdata + offset; msk = mdata + offset;
for (i = 0; i < _CT_TYPE_MAX; i++) { for (i = 0; i < num_rules; i++) {
nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)key, nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)key,
(struct nfp_flower_ipv6 *)msk, (struct nfp_flower_ipv6 *)msk,
rules[i]); rules[i]);
...@@ -938,7 +940,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry) ...@@ -938,7 +940,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
offset = key_map[FLOW_PAY_L4]; offset = key_map[FLOW_PAY_L4];
key = kdata + offset; key = kdata + offset;
msk = mdata + offset; msk = mdata + offset;
for (i = 0; i < _CT_TYPE_MAX; i++) { for (i = 0; i < num_rules; i++) {
nfp_flower_compile_tport((struct nfp_flower_tp_ports *)key, nfp_flower_compile_tport((struct nfp_flower_tp_ports *)key,
(struct nfp_flower_tp_ports *)msk, (struct nfp_flower_tp_ports *)msk,
rules[i]); rules[i]);
...@@ -949,7 +951,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry) ...@@ -949,7 +951,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
offset = key_map[FLOW_PAY_QINQ]; offset = key_map[FLOW_PAY_QINQ];
key = kdata + offset; key = kdata + offset;
msk = mdata + offset; msk = mdata + offset;
for (i = 0; i < _CT_TYPE_MAX; i++) { for (i = 0; i < num_rules; i++) {
nfp_flower_compile_vlan((struct nfp_flower_vlan *)key, nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
(struct nfp_flower_vlan *)msk, (struct nfp_flower_vlan *)msk,
rules[i]); rules[i]);
...@@ -965,7 +967,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry) ...@@ -965,7 +967,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
struct nfp_ipv6_addr_entry *entry; struct nfp_ipv6_addr_entry *entry;
struct in6_addr *dst; struct in6_addr *dst;
for (i = 0; i < _CT_TYPE_MAX; i++) { for (i = 0; i < num_rules; i++) {
nfp_flower_compile_ipv6_gre_tun((void *)key, nfp_flower_compile_ipv6_gre_tun((void *)key,
(void *)msk, rules[i]); (void *)msk, rules[i]);
} }
...@@ -982,7 +984,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry) ...@@ -982,7 +984,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
} else { } else {
__be32 dst; __be32 dst;
for (i = 0; i < _CT_TYPE_MAX; i++) { for (i = 0; i < num_rules; i++) {
nfp_flower_compile_ipv4_gre_tun((void *)key, nfp_flower_compile_ipv4_gre_tun((void *)key,
(void *)msk, rules[i]); (void *)msk, rules[i]);
} }
...@@ -1006,7 +1008,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry) ...@@ -1006,7 +1008,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
struct nfp_ipv6_addr_entry *entry; struct nfp_ipv6_addr_entry *entry;
struct in6_addr *dst; struct in6_addr *dst;
for (i = 0; i < _CT_TYPE_MAX; i++) { for (i = 0; i < num_rules; i++) {
nfp_flower_compile_ipv6_udp_tun((void *)key, nfp_flower_compile_ipv6_udp_tun((void *)key,
(void *)msk, rules[i]); (void *)msk, rules[i]);
} }
...@@ -1023,7 +1025,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry) ...@@ -1023,7 +1025,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
} else { } else {
__be32 dst; __be32 dst;
for (i = 0; i < _CT_TYPE_MAX; i++) { for (i = 0; i < num_rules; i++) {
nfp_flower_compile_ipv4_udp_tun((void *)key, nfp_flower_compile_ipv4_udp_tun((void *)key,
(void *)msk, rules[i]); (void *)msk, rules[i]);
} }
...@@ -1040,13 +1042,13 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry) ...@@ -1040,13 +1042,13 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
offset = key_map[FLOW_PAY_GENEVE_OPT]; offset = key_map[FLOW_PAY_GENEVE_OPT];
key = kdata + offset; key = kdata + offset;
msk = mdata + offset; msk = mdata + offset;
for (i = 0; i < _CT_TYPE_MAX; i++) for (i = 0; i < num_rules; i++)
nfp_flower_compile_geneve_opt(key, msk, rules[i]); nfp_flower_compile_geneve_opt(key, msk, rules[i]);
} }
} }
/* Merge actions into flow_pay */ /* Merge actions into flow_pay */
err = nfp_fl_merge_actions_offload(rules, priv, netdev, flow_pay); err = nfp_fl_merge_actions_offload(rules, priv, netdev, flow_pay, num_rules);
if (err) if (err)
goto ct_offload_err; goto ct_offload_err;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment