Commit 9c590490 authored by David S. Miller

Merge branch 'nfp-offload-LAG-for-tc-flower-egress'

Jakub Kicinski says:

====================
nfp: offload LAG for tc flower egress

This series from John adds bond offload to the nfp driver.  Patch 5
exposes the hash type for NETDEV_LAG_TX_TYPE_HASH to make sure nfp
hashing matches that of the software LAG.  This may be unnecessarily
conservative; let's see what LAG maintainers think :)

John says:

This patchset sets up the infrastructure and offloads output actions for
when a TC flower rule attempts to egress a packet to a LAG port.

Firstly it adds some of the infrastructure required to the flower app and
to the nfp core. This includes the ability to change the MAC address of a
repr, a function for combining lookup and write to a FW symbol, and the
addition of private data to a repr on a per app basis.

Patch 6 continues by implementing notifiers that track Linux bonds and
communicate to the FW those which enslave reprs, along with the current
state of reprs within the bond.

Patch 7 ensures bonds are synchronised with FW by receiving and acting
upon cmsgs sent to the kernel. These may request that a bond message is
retransmitted when FW can process it, or may request a full sync of the
bonds defined in the kernel.

Patch 8 offloads a flower action when that action requires egressing to a
pre-defined Linux bond.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d97cde6a 7e24a593
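Before the diff, a brief illustrative sketch (not part of the series): when a flower rule redirects to a bond, the compiled action list begins with a pre_lag action and the output action carries the LAG group ID in its port word. The struct shapes below only mirror nfp_fl_pre_lag and the NFP_FL_LAG_OUT encoding from the patches; all example_* names, the placeholder output opcode, and the omitted endian conversions are simplifications, not driver code.

#include <stdint.h>
#include <string.h>

struct example_act_head {          /* mirrors struct nfp_fl_act_head */
	uint8_t jump_id;
	uint8_t len_lw;
};

struct example_pre_lag {           /* mirrors struct nfp_fl_pre_lag */
	struct example_act_head head;
	uint16_t group_id;         /* big-endian on the wire in the real driver */
	uint8_t lag_version[3];
	uint8_t instance;
};

struct example_output {            /* shape only; the real struct nfp_fl_output differs */
	struct example_act_head head;
	uint16_t flags;
	uint32_t port;             /* NFP_FL_LAG_OUT | group id */
};

#define EXAMPLE_LAG_OUT 0xC0DE0000u   /* same value as NFP_FL_LAG_OUT below */

/* Lay out [pre_lag][output] for a redirect to LAG group 'gid'. */
static int example_compile_lag_egress(uint8_t *buf, size_t len, uint16_t gid)
{
	struct example_pre_lag pre = { { 16 /* NFP_FL_ACTION_OPCODE_PRE_LAG */, 0 } };
	struct example_output out = { { 0 /* output opcode: placeholder */, 0 } };

	if (len < sizeof(pre) + sizeof(out))
		return -1;

	pre.group_id = gid;                 /* byte-swapped by the real driver */
	out.port = EXAMPLE_LAG_OUT | gid;   /* firmware resolves gid to active members */

	memcpy(buf, &pre, sizeof(pre));
	memcpy(buf + sizeof(pre), &out, sizeof(out));
	return (int)(sizeof(pre) + sizeof(out));
}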
@@ -1218,12 +1218,37 @@ static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond)
}
}
static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond,
enum netdev_lag_tx_type type)
{
if (type != NETDEV_LAG_TX_TYPE_HASH)
return NETDEV_LAG_HASH_NONE;
switch (bond->params.xmit_policy) {
case BOND_XMIT_POLICY_LAYER2:
return NETDEV_LAG_HASH_L2;
case BOND_XMIT_POLICY_LAYER34:
return NETDEV_LAG_HASH_L34;
case BOND_XMIT_POLICY_LAYER23:
return NETDEV_LAG_HASH_L23;
case BOND_XMIT_POLICY_ENCAP23:
return NETDEV_LAG_HASH_E23;
case BOND_XMIT_POLICY_ENCAP34:
return NETDEV_LAG_HASH_E34;
default:
return NETDEV_LAG_HASH_UNKNOWN;
}
}
static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave,
struct netlink_ext_ack *extack)
{
struct netdev_lag_upper_info lag_upper_info;
enum netdev_lag_tx_type type;
type = bond_lag_tx_type(bond);
lag_upper_info.tx_type = type;
lag_upper_info.hash_type = bond_lag_hash_type(bond, type);
return netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
&lag_upper_info, extack);
......
@@ -37,6 +37,7 @@ ifeq ($(CONFIG_NFP_APP_FLOWER),y)
nfp-objs += \
flower/action.o \
flower/cmsg.o \
flower/lag_conf.o \
flower/main.o \
flower/match.o \
flower/metadata.o \
......
@@ -72,6 +72,42 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}
static int
nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
struct nfp_fl_payload *nfp_flow, int act_len)
{
size_t act_size = sizeof(struct nfp_fl_pre_lag);
struct nfp_fl_pre_lag *pre_lag;
struct net_device *out_dev;
int err;
out_dev = tcf_mirred_dev(action);
if (!out_dev || !netif_is_lag_master(out_dev))
return 0;
if (act_len + act_size > NFP_FL_MAX_A_SIZ)
return -EOPNOTSUPP;
/* Pre_lag action must be first on action list.
* If other actions already exist they need to be pushed forward.
*/
if (act_len)
memmove(nfp_flow->action_data + act_size,
nfp_flow->action_data, act_len);
pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag);
if (err)
return err;
pre_lag->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_LAG;
pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ;
nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
return act_size;
}
static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
enum nfp_flower_tun_type tun_type)
{
@@ -88,12 +124,13 @@ static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
}
static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
bool last, struct net_device *in_dev,
enum nfp_flower_tun_type tun_type, int *tun_out_cnt)
{
size_t act_size = sizeof(struct nfp_fl_output);
struct nfp_flower_priv *priv = app->priv;
struct net_device *out_dev;
u16 tmp_flags;
@@ -118,6 +155,15 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
output->flags = cpu_to_be16(tmp_flags |
NFP_FL_OUT_FLAGS_USE_TUN);
output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
} else if (netif_is_lag_master(out_dev) &&
priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
int gid;
output->flags = cpu_to_be16(tmp_flags);
gid = nfp_flower_lag_get_output_id(app, out_dev);
if (gid < 0)
return gid;
output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
} else {
/* Set action output parameters. */
output->flags = cpu_to_be16(tmp_flags);
@@ -164,7 +210,7 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
struct nfp_fl_pre_tunnel *pre_tun_act;
/* Pre_tunnel action must be first on action list.
* If other actions already exist they need to be pushed forward.
*/
if (act_len)
memmove(act_data + act_size, act_data, act_len);
@@ -443,42 +489,73 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
}
static int
nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev, bool last,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
int *out_cnt)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_output *output;
int err, prelag_size;
if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
return -EOPNOTSUPP;
output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
err = nfp_fl_output(app, output, a, nfp_fl, last, netdev, *tun_type,
tun_out_cnt);
if (err)
return err;
*a_len += sizeof(struct nfp_fl_output);
if (priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
/* nfp_fl_pre_lag returns -err or size of prelag action added.
* This will be 0 if it is not egressing to a lag dev.
*/
prelag_size = nfp_fl_pre_lag(app, a, nfp_fl, *a_len);
if (prelag_size < 0)
return prelag_size;
else if (prelag_size > 0 && (!last || *out_cnt))
return -EOPNOTSUPP;
*a_len += prelag_size;
}
(*out_cnt)++;
return 0;
}
static int
nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
int *out_cnt)
{
struct nfp_fl_set_ipv4_udp_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
struct nfp_fl_push_vlan *psh_v;
struct nfp_fl_pop_vlan *pop_v;
int err;
if (is_tcf_gact_shot(a)) {
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
} else if (is_tcf_mirred_egress_redirect(a)) {
err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
true, tun_type, tun_out_cnt,
out_cnt);
if (err)
return err;
} else if (is_tcf_mirred_egress_mirror(a)) {
err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
false, tun_type, tun_out_cnt,
out_cnt);
if (err)
return err;
} else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ)
return -EOPNOTSUPP;
@@ -535,11 +612,12 @@ nfp_flower_loop_action(const struct tc_action *a,
return 0;
}
int nfp_flower_compile_action(struct nfp_app *app,
struct tc_cls_flower_offload *flow,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow)
{
int act_len, act_cnt, err, tun_out_cnt, out_cnt;
enum nfp_flower_tun_type tun_type;
const struct tc_action *a;
LIST_HEAD(actions);
@@ -550,11 +628,12 @@ int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
act_len = 0;
act_cnt = 0;
tun_out_cnt = 0;
out_cnt = 0;
tcf_exts_to_list(flow->exts, &actions);
list_for_each_entry(a, &actions, list) {
err = nfp_flower_loop_action(app, a, nfp_flow, &act_len, netdev,
&tun_type, &tun_out_cnt, &out_cnt);
if (err)
return err;
act_cnt++;
......
@@ -239,8 +239,10 @@ nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
static void
nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_flower_priv *app_priv = app->priv;
struct nfp_flower_cmsg_hdr *cmsg_hdr;
enum nfp_flower_cmsg_type_port type;
bool skb_stored = false;
cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
@@ -258,13 +260,20 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
nfp_tunnel_keep_alive(app, skb);
break;
case NFP_FLOWER_CMSG_TYPE_LAG_CONFIG:
if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
skb_stored = nfp_flower_lag_unprocessed_msg(app, skb);
break;
}
/* fall through */
default:
nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
type);
goto out;
}
if (!skb_stored)
dev_consume_skb_any(skb);
return;
out:
dev_kfree_skb_any(skb);
......
@@ -92,6 +92,7 @@
#define NFP_FL_ACTION_OPCODE_SET_IPV6_DST 12
#define NFP_FL_ACTION_OPCODE_SET_UDP 14
#define NFP_FL_ACTION_OPCODE_SET_TCP 15
#define NFP_FL_ACTION_OPCODE_PRE_LAG 16
#define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17
#define NFP_FL_ACTION_OPCODE_NUM 32
@@ -103,6 +104,9 @@
#define NFP_FL_PUSH_VLAN_CFI BIT(12)
#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
/* LAG ports */
#define NFP_FL_LAG_OUT 0xC0DE0000
/* Tunnel ports */
#define NFP_FL_PORT_TYPE_TUN 0x50000000
#define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4)
@@ -177,6 +181,15 @@ struct nfp_fl_pop_vlan {
__be16 reserved;
};
struct nfp_fl_pre_lag {
struct nfp_fl_act_head head;
__be16 group_id;
u8 lag_version[3];
u8 instance;
};
#define NFP_FL_PRE_LAG_VER_OFF 8
struct nfp_fl_pre_tunnel {
struct nfp_fl_act_head head;
__be16 reserved;
@@ -366,6 +379,7 @@ struct nfp_flower_cmsg_hdr {
enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_FLOW_ADD = 0,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2,
NFP_FLOWER_CMSG_TYPE_LAG_CONFIG = 4,
NFP_FLOWER_CMSG_TYPE_PORT_REIFY = 6,
NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7,
NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8,
......
/*
* Copyright (C) 2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "main.h"
/* LAG group config flags. */
#define NFP_FL_LAG_LAST BIT(1)
#define NFP_FL_LAG_FIRST BIT(2)
#define NFP_FL_LAG_DATA BIT(3)
#define NFP_FL_LAG_XON BIT(4)
#define NFP_FL_LAG_SYNC BIT(5)
#define NFP_FL_LAG_SWITCH BIT(6)
#define NFP_FL_LAG_RESET BIT(7)
/* LAG port state flags. */
#define NFP_PORT_LAG_LINK_UP BIT(0)
#define NFP_PORT_LAG_TX_ENABLED BIT(1)
#define NFP_PORT_LAG_CHANGED BIT(2)
enum nfp_fl_lag_batch {
NFP_FL_LAG_BATCH_FIRST,
NFP_FL_LAG_BATCH_MEMBER,
NFP_FL_LAG_BATCH_FINISHED
};
/**
* struct nfp_flower_cmsg_lag_config - control message payload for LAG config
* @ctrl_flags: Configuration flags
* @reserved: Reserved for future use
* @ttl: Time to live of packet - host always sets to 0xff
* @pkt_number: Config message packet number - increment for each message
* @batch_ver: Batch version of messages - increment for each batch of messages
* @group_id: Group ID applicable
* @group_inst: Group instance number - increment when group is reused
* @members: Array of 32-bit words listing all active group members
*/
struct nfp_flower_cmsg_lag_config {
u8 ctrl_flags;
u8 reserved[2];
u8 ttl;
__be32 pkt_number;
__be32 batch_ver;
__be32 group_id;
__be32 group_inst;
__be32 members[];
};
/**
* struct nfp_fl_lag_group - list entry for each LAG group
* @group_id: Assigned group ID for host/kernel sync
* @group_inst: Group instance in case of ID reuse
* @list: List entry
* @master_ndev: Group master Netdev
* @dirty: Marked if the group needs to be synced to HW
* @offloaded: Marked if the group is currently offloaded to NIC
* @to_remove: Marked if the group should be removed from NIC
* @to_destroy: Marked if the group should be removed from driver
* @slave_cnt: Number of slaves in group
*/
struct nfp_fl_lag_group {
unsigned int group_id;
u8 group_inst;
struct list_head list;
struct net_device *master_ndev;
bool dirty;
bool offloaded;
bool to_remove;
bool to_destroy;
unsigned int slave_cnt;
};
#define NFP_FL_LAG_PKT_NUMBER_MASK GENMASK(30, 0)
#define NFP_FL_LAG_VERSION_MASK GENMASK(22, 0)
#define NFP_FL_LAG_HOST_TTL 0xff
/* Use this ID with zero members to ack a batch config */
#define NFP_FL_LAG_SYNC_ID 0
#define NFP_FL_LAG_GROUP_MIN 1 /* ID 0 reserved */
#define NFP_FL_LAG_GROUP_MAX 32 /* IDs 1 to 31 are valid */
/* wait for more config */
#define NFP_FL_LAG_DELAY (msecs_to_jiffies(2))
#define NFP_FL_LAG_RETRANS_LIMIT 100 /* max retrans cmsgs to store */
static unsigned int nfp_fl_get_next_pkt_number(struct nfp_fl_lag *lag)
{
lag->pkt_num++;
lag->pkt_num &= NFP_FL_LAG_PKT_NUMBER_MASK;
return lag->pkt_num;
}
static void nfp_fl_increment_version(struct nfp_fl_lag *lag)
{
/* LSB is not considered by firmware so add 2 for each increment. */
lag->batch_ver += 2;
lag->batch_ver &= NFP_FL_LAG_VERSION_MASK;
/* Zero is reserved by firmware. */
if (!lag->batch_ver)
lag->batch_ver += 2;
}
static struct nfp_fl_lag_group *
nfp_fl_lag_group_create(struct nfp_fl_lag *lag, struct net_device *master)
{
struct nfp_fl_lag_group *group;
struct nfp_flower_priv *priv;
int id;
priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
id = ida_simple_get(&lag->ida_handle, NFP_FL_LAG_GROUP_MIN,
NFP_FL_LAG_GROUP_MAX, GFP_KERNEL);
if (id < 0) {
nfp_flower_cmsg_warn(priv->app,
"No more bonding groups available\n");
return ERR_PTR(id);
}
group = kmalloc(sizeof(*group), GFP_KERNEL);
if (!group) {
ida_simple_remove(&lag->ida_handle, id);
return ERR_PTR(-ENOMEM);
}
group->group_id = id;
group->master_ndev = master;
group->dirty = true;
group->offloaded = false;
group->to_remove = false;
group->to_destroy = false;
group->slave_cnt = 0;
group->group_inst = ++lag->global_inst;
list_add_tail(&group->list, &lag->group_list);
return group;
}
static struct nfp_fl_lag_group *
nfp_fl_lag_find_group_for_master_with_lag(struct nfp_fl_lag *lag,
struct net_device *master)
{
struct nfp_fl_lag_group *entry;
if (!master)
return NULL;
list_for_each_entry(entry, &lag->group_list, list)
if (entry->master_ndev == master)
return entry;
return NULL;
}
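/* Fill a pre_lag action with the group ID, batch version and instance for
 * the bond acting as @master; fails if the bond is not currently tracked.
 */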
int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
struct net_device *master,
struct nfp_fl_pre_lag *pre_act)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_lag_group *group = NULL;
__be32 temp_vers;
mutex_lock(&priv->nfp_lag.lock);
group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag,
master);
if (!group) {
mutex_unlock(&priv->nfp_lag.lock);
return -ENOENT;
}
pre_act->group_id = cpu_to_be16(group->group_id);
temp_vers = cpu_to_be32(priv->nfp_lag.batch_ver <<
NFP_FL_PRE_LAG_VER_OFF);
memcpy(pre_act->lag_version, &temp_vers, 3);
pre_act->instance = group->group_inst;
mutex_unlock(&priv->nfp_lag.lock);
return 0;
}
int nfp_flower_lag_get_output_id(struct nfp_app *app, struct net_device *master)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_lag_group *group = NULL;
int group_id = -ENOENT;
mutex_lock(&priv->nfp_lag.lock);
group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag,
master);
if (group)
group_id = group->group_id;
mutex_unlock(&priv->nfp_lag.lock);
return group_id;
}
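/* Build and send one LAG config cmsg for a group. A zero member count
 * requests deletion of the group on the NFP; a batch in the FINISHED state
 * is closed using the reserved sync group ID.
 */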
static int
nfp_fl_lag_config_group(struct nfp_fl_lag *lag, struct nfp_fl_lag_group *group,
struct net_device **active_members,
unsigned int member_cnt, enum nfp_fl_lag_batch *batch)
{
struct nfp_flower_cmsg_lag_config *cmsg_payload;
struct nfp_flower_priv *priv;
unsigned long int flags;
unsigned int size, i;
struct sk_buff *skb;
priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
size = sizeof(*cmsg_payload) + sizeof(__be32) * member_cnt;
skb = nfp_flower_cmsg_alloc(priv->app, size,
NFP_FLOWER_CMSG_TYPE_LAG_CONFIG,
GFP_KERNEL);
if (!skb)
return -ENOMEM;
cmsg_payload = nfp_flower_cmsg_get_data(skb);
flags = 0;
/* Increment batch version for each new batch of config messages. */
if (*batch == NFP_FL_LAG_BATCH_FIRST) {
flags |= NFP_FL_LAG_FIRST;
nfp_fl_increment_version(lag);
*batch = NFP_FL_LAG_BATCH_MEMBER;
}
/* If it is a reset msg then it is also the end of the batch. */
if (lag->rst_cfg) {
flags |= NFP_FL_LAG_RESET;
*batch = NFP_FL_LAG_BATCH_FINISHED;
}
/* To signal the end of a batch, both the switch and last flags are set
* and the reserved SYNC group ID is used.
*/
if (*batch == NFP_FL_LAG_BATCH_FINISHED) {
flags |= NFP_FL_LAG_SWITCH | NFP_FL_LAG_LAST;
lag->rst_cfg = false;
cmsg_payload->group_id = cpu_to_be32(NFP_FL_LAG_SYNC_ID);
cmsg_payload->group_inst = 0;
} else {
cmsg_payload->group_id = cpu_to_be32(group->group_id);
cmsg_payload->group_inst = cpu_to_be32(group->group_inst);
}
cmsg_payload->reserved[0] = 0;
cmsg_payload->reserved[1] = 0;
cmsg_payload->ttl = NFP_FL_LAG_HOST_TTL;
cmsg_payload->ctrl_flags = flags;
cmsg_payload->batch_ver = cpu_to_be32(lag->batch_ver);
cmsg_payload->pkt_number = cpu_to_be32(nfp_fl_get_next_pkt_number(lag));
for (i = 0; i < member_cnt; i++)
cmsg_payload->members[i] =
cpu_to_be32(nfp_repr_get_port_id(active_members[i]));
nfp_ctrl_tx(priv->app->ctrl, skb);
return 0;
}
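/* Delayed work handler: walks the group list and, for each group that is
 * dirty or pending removal, sends the appropriate config cmsgs to firmware
 * as a single batch. Failed updates are rescheduled.
 */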
static void nfp_fl_lag_do_work(struct work_struct *work)
{
enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;
struct nfp_fl_lag_group *entry, *storage;
struct delayed_work *delayed_work;
struct nfp_flower_priv *priv;
struct nfp_fl_lag *lag;
int err;
delayed_work = to_delayed_work(work);
lag = container_of(delayed_work, struct nfp_fl_lag, work);
priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
mutex_lock(&lag->lock);
list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
struct net_device *iter_netdev, **acti_netdevs;
struct nfp_flower_repr_priv *repr_priv;
int active_count = 0, slaves = 0;
struct nfp_repr *repr;
unsigned long *flags;
if (entry->to_remove) {
/* Active count of 0 deletes group on hw. */
err = nfp_fl_lag_config_group(lag, entry, NULL, 0,
&batch);
if (!err) {
entry->to_remove = false;
entry->offloaded = false;
} else {
nfp_flower_cmsg_warn(priv->app,
"group delete failed\n");
schedule_delayed_work(&lag->work,
NFP_FL_LAG_DELAY);
continue;
}
if (entry->to_destroy) {
ida_simple_remove(&lag->ida_handle,
entry->group_id);
list_del(&entry->list);
kfree(entry);
}
continue;
}
acti_netdevs = kmalloc_array(entry->slave_cnt,
sizeof(*acti_netdevs), GFP_KERNEL);
/* Include sanity check in the loop. It may be that a bond has
* changed between processing the last notification and the
* work queue triggering. If the number of slaves has changed
* or it now contains netdevs that cannot be offloaded, ignore
* the group until pending notifications are processed.
*/
rcu_read_lock();
for_each_netdev_in_bond_rcu(entry->master_ndev, iter_netdev) {
if (!nfp_netdev_is_nfp_repr(iter_netdev)) {
slaves = 0;
break;
}
repr = netdev_priv(iter_netdev);
if (repr->app != priv->app) {
slaves = 0;
break;
}
slaves++;
if (slaves > entry->slave_cnt)
break;
/* Check the ports for state changes. */
repr_priv = repr->app_priv;
flags = &repr_priv->lag_port_flags;
if (*flags & NFP_PORT_LAG_CHANGED) {
*flags &= ~NFP_PORT_LAG_CHANGED;
entry->dirty = true;
}
if ((*flags & NFP_PORT_LAG_TX_ENABLED) &&
(*flags & NFP_PORT_LAG_LINK_UP))
acti_netdevs[active_count++] = iter_netdev;
}
rcu_read_unlock();
if (slaves != entry->slave_cnt || !entry->dirty) {
kfree(acti_netdevs);
continue;
}
err = nfp_fl_lag_config_group(lag, entry, acti_netdevs,
active_count, &batch);
if (!err) {
entry->offloaded = true;
entry->dirty = false;
} else {
nfp_flower_cmsg_warn(priv->app,
"group offload failed\n");
schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}
kfree(acti_netdevs);
}
/* End the config batch if at least one packet has been batched. */
if (batch == NFP_FL_LAG_BATCH_MEMBER) {
batch = NFP_FL_LAG_BATCH_FINISHED;
err = nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
if (err)
nfp_flower_cmsg_warn(priv->app,
"group batch end cmsg failed\n");
}
mutex_unlock(&lag->lock);
}
static int
nfp_fl_lag_put_unprocessed(struct nfp_fl_lag *lag, struct sk_buff *skb)
{
struct nfp_flower_cmsg_lag_config *cmsg_payload;
cmsg_payload = nfp_flower_cmsg_get_data(skb);
if (be32_to_cpu(cmsg_payload->group_id) >= NFP_FL_LAG_GROUP_MAX)
return -EINVAL;
/* Drop cmsg retrans if storage limit is exceeded to prevent
* overloading. If the fw notices that expected messages have not been
* received in a given time block, it will request a full resync.
*/
if (skb_queue_len(&lag->retrans_skbs) >= NFP_FL_LAG_RETRANS_LIMIT)
return -ENOSPC;
__skb_queue_tail(&lag->retrans_skbs, skb);
return 0;
}
static void nfp_fl_send_unprocessed(struct nfp_fl_lag *lag)
{
struct nfp_flower_priv *priv;
struct sk_buff *skb;
priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
while ((skb = __skb_dequeue(&lag->retrans_skbs)))
nfp_ctrl_tx(priv->app->ctrl, skb);
}
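/* Entry point for LAG_CONFIG cmsgs from firmware: store retransmission
 * requests, flush stored cmsgs on XON, and trigger a full group resync when
 * SYNC is set. Returns true if the skb has been queued for later use.
 */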
bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_flower_cmsg_lag_config *cmsg_payload;
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_lag_group *group_entry;
unsigned long int flags;
bool store_skb = false;
int err;
cmsg_payload = nfp_flower_cmsg_get_data(skb);
flags = cmsg_payload->ctrl_flags;
/* Note the intentional fall through below. If DATA and XON are both
* set, the message will be stored and sent again with the rest of the
* unprocessed messages list.
*/
/* Store */
if (flags & NFP_FL_LAG_DATA)
if (!nfp_fl_lag_put_unprocessed(&priv->nfp_lag, skb))
store_skb = true;
/* Send stored */
if (flags & NFP_FL_LAG_XON)
nfp_fl_send_unprocessed(&priv->nfp_lag);
/* Resend all */
if (flags & NFP_FL_LAG_SYNC) {
/* To resend all config:
* 1) Clear all unprocessed messages
* 2) Mark all groups dirty
* 3) Reset NFP group config
* 4) Schedule a LAG config update
*/
__skb_queue_purge(&priv->nfp_lag.retrans_skbs);
mutex_lock(&priv->nfp_lag.lock);
list_for_each_entry(group_entry, &priv->nfp_lag.group_list,
list)
group_entry->dirty = true;
err = nfp_flower_lag_reset(&priv->nfp_lag);
if (err)
nfp_flower_cmsg_warn(priv->app,
"mem err in group reset msg\n");
mutex_unlock(&priv->nfp_lag.lock);
schedule_delayed_work(&priv->nfp_lag.work, 0);
}
return store_skb;
}
static void
nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag,
struct nfp_fl_lag_group *group)
{
group->to_remove = true;
schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}
static int
nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag,
struct net_device *master)
{
struct nfp_fl_lag_group *group;
mutex_lock(&lag->lock);
group = nfp_fl_lag_find_group_for_master_with_lag(lag, master);
if (!group) {
mutex_unlock(&lag->lock);
return -ENOENT;
}
group->to_remove = true;
group->to_destroy = true;
mutex_unlock(&lag->lock);
schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
return 0;
}
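/* NETDEV_CHANGEUPPER handler: verify that every slave of the bond is a repr
 * owned by this app and that the TX/hash policy can be offloaded, then mark
 * the group dirty (creating it if needed) and schedule a firmware sync.
 */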
static int
nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
struct netdev_notifier_changeupper_info *info)
{
struct net_device *upper = info->upper_dev, *iter_netdev;
struct netdev_lag_upper_info *lag_upper_info;
struct nfp_fl_lag_group *group;
struct nfp_flower_priv *priv;
unsigned int slave_count = 0;
bool can_offload = true;
struct nfp_repr *repr;
if (!netif_is_lag_master(upper))
return 0;
priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
rcu_read_lock();
for_each_netdev_in_bond_rcu(upper, iter_netdev) {
if (!nfp_netdev_is_nfp_repr(iter_netdev)) {
can_offload = false;
break;
}
repr = netdev_priv(iter_netdev);
/* Ensure all ports are created by the same app/on same card. */
if (repr->app != priv->app) {
can_offload = false;
break;
}
slave_count++;
}
rcu_read_unlock();
lag_upper_info = info->upper_info;
/* Firmware supports active/backup and L3/L4 hash bonds. */
if (lag_upper_info &&
lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
(lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH ||
(lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 &&
lag_upper_info->hash_type != NETDEV_LAG_HASH_E34))) {
can_offload = false;
nfp_flower_cmsg_warn(priv->app,
"Unable to offload tx_type %u hash %u\n",
lag_upper_info->tx_type,
lag_upper_info->hash_type);
}
mutex_lock(&lag->lock);
group = nfp_fl_lag_find_group_for_master_with_lag(lag, upper);
if (slave_count == 0 || !can_offload) {
/* Cannot offload the group - remove if previously offloaded. */
if (group && group->offloaded)
nfp_fl_lag_schedule_group_remove(lag, group);
mutex_unlock(&lag->lock);
return 0;
}
if (!group) {
group = nfp_fl_lag_group_create(lag, upper);
if (IS_ERR(group)) {
mutex_unlock(&lag->lock);
return PTR_ERR(group);
}
}
group->dirty = true;
group->slave_cnt = slave_count;
/* Group may have been queued for removal but is now offloadable. */
group->to_remove = false;
mutex_unlock(&lag->lock);
schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
return 0;
}
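/* NETDEV_CHANGELOWERSTATE handler: record the link and TX-enable state of a
 * repr slave and schedule a config update so firmware sees the change.
 */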
static int
nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
struct netdev_notifier_changelowerstate_info *info)
{
struct netdev_lag_lower_state_info *lag_lower_info;
struct nfp_flower_repr_priv *repr_priv;
struct nfp_flower_priv *priv;
struct nfp_repr *repr;
unsigned long *flags;
if (!netif_is_lag_port(netdev) || !nfp_netdev_is_nfp_repr(netdev))
return 0;
lag_lower_info = info->lower_state_info;
if (!lag_lower_info)
return 0;
priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
repr = netdev_priv(netdev);
/* Verify that the repr is associated with this app. */
if (repr->app != priv->app)
return 0;
repr_priv = repr->app_priv;
flags = &repr_priv->lag_port_flags;
mutex_lock(&lag->lock);
if (lag_lower_info->link_up)
*flags |= NFP_PORT_LAG_LINK_UP;
else
*flags &= ~NFP_PORT_LAG_LINK_UP;
if (lag_lower_info->tx_enabled)
*flags |= NFP_PORT_LAG_TX_ENABLED;
else
*flags &= ~NFP_PORT_LAG_TX_ENABLED;
*flags |= NFP_PORT_LAG_CHANGED;
mutex_unlock(&lag->lock);
schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
return 0;
}
static int
nfp_fl_lag_netdev_event(struct notifier_block *nb, unsigned long event,
void *ptr)
{
struct net_device *netdev;
struct nfp_fl_lag *lag;
int err;
netdev = netdev_notifier_info_to_dev(ptr);
lag = container_of(nb, struct nfp_fl_lag, lag_nb);
switch (event) {
case NETDEV_CHANGEUPPER:
err = nfp_fl_lag_changeupper_event(lag, ptr);
if (err)
return NOTIFY_BAD;
return NOTIFY_OK;
case NETDEV_CHANGELOWERSTATE:
err = nfp_fl_lag_changels_event(lag, netdev, ptr);
if (err)
return NOTIFY_BAD;
return NOTIFY_OK;
case NETDEV_UNREGISTER:
if (netif_is_bond_master(netdev)) {
err = nfp_fl_lag_schedule_group_delete(lag, netdev);
if (err)
return NOTIFY_BAD;
return NOTIFY_OK;
}
}
return NOTIFY_DONE;
}
int nfp_flower_lag_reset(struct nfp_fl_lag *lag)
{
enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;
lag->rst_cfg = true;
return nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
}
void nfp_flower_lag_init(struct nfp_fl_lag *lag)
{
INIT_DELAYED_WORK(&lag->work, nfp_fl_lag_do_work);
INIT_LIST_HEAD(&lag->group_list);
mutex_init(&lag->lock);
ida_init(&lag->ida_handle);
__skb_queue_head_init(&lag->retrans_skbs);
/* 0 is a reserved batch version so increment to first valid value. */
nfp_fl_increment_version(lag);
lag->lag_nb.notifier_call = nfp_fl_lag_netdev_event;
}
void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag)
{
struct nfp_fl_lag_group *entry, *storage;
cancel_delayed_work_sync(&lag->work);
__skb_queue_purge(&lag->retrans_skbs);
/* Remove all groups. */
mutex_lock(&lag->lock);
list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
list_del(&entry->list);
kfree(entry);
}
mutex_unlock(&lag->lock);
mutex_destroy(&lag->lock);
ida_destroy(&lag->ida_handle);
}
@@ -185,6 +185,10 @@ nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev)
static void
nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
{
struct nfp_repr *repr = netdev_priv(netdev);
kfree(repr->app_priv);
tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb,
netdev_priv(netdev));
}
@@ -225,7 +229,9 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
struct nfp_flower_priv *priv = app->priv;
atomic_t *replies = &priv->reify_replies;
struct nfp_flower_repr_priv *repr_priv;
enum nfp_port_type port_type;
struct nfp_repr *nfp_repr;
struct nfp_reprs *reprs;
int i, err, reify_cnt;
const u8 queue = 0;
@@ -248,6 +254,15 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
goto err_reprs_clean;
}
repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
if (!repr_priv) {
err = -ENOMEM;
goto err_reprs_clean;
}
nfp_repr = netdev_priv(repr);
nfp_repr->app_priv = repr_priv;
/* For now we only support 1 PF */
WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);
@@ -324,6 +339,8 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
{
struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
atomic_t *replies = &priv->reify_replies;
struct nfp_flower_repr_priv *repr_priv;
struct nfp_repr *nfp_repr;
struct sk_buff *ctrl_skb;
struct nfp_reprs *reprs;
int err, reify_cnt;
@@ -351,6 +368,15 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
goto err_reprs_clean;
}
repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
if (!repr_priv) {
err = -ENOMEM;
goto err_reprs_clean;
}
nfp_repr = netdev_priv(repr);
nfp_repr->app_priv = repr_priv;
port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
if (IS_ERR(port)) {
err = PTR_ERR(port);
@@ -546,8 +572,22 @@ static int nfp_flower_init(struct nfp_app *app)
else
app_priv->flower_ext_feats = features;
/* Tell the firmware that the driver supports lag. */
err = nfp_rtsym_write_le(app->pf->rtbl,
"_abi_flower_balance_sync_enable", 1);
if (!err) {
app_priv->flower_ext_feats |= NFP_FL_FEATS_LAG;
nfp_flower_lag_init(&app_priv->nfp_lag);
} else if (err == -ENOENT) {
nfp_warn(app->cpp, "LAG not supported by FW.\n");
} else {
goto err_cleanup_metadata;
}
return 0;
err_cleanup_metadata:
nfp_flower_metadata_cleanup(app);
err_free_app_priv:
vfree(app->priv);
return err;
@@ -561,6 +601,9 @@ static void nfp_flower_clean(struct nfp_app *app)
skb_queue_purge(&app_priv->cmsg_skbs_low);
flush_work(&app_priv->cmsg_work);
if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
nfp_flower_lag_cleanup(&app_priv->nfp_lag);
nfp_flower_metadata_cleanup(app);
vfree(app->priv);
app->priv = NULL;
@@ -627,11 +670,29 @@ nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
static int nfp_flower_start(struct nfp_app *app)
{
struct nfp_flower_priv *app_priv = app->priv;
int err;
if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
err = nfp_flower_lag_reset(&app_priv->nfp_lag);
if (err)
return err;
err = register_netdevice_notifier(&app_priv->nfp_lag.lag_nb);
if (err)
return err;
}
return nfp_tunnel_config_start(app);
}
static void nfp_flower_stop(struct nfp_app *app)
{
struct nfp_flower_priv *app_priv = app->priv;
if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
unregister_netdevice_notifier(&app_priv->nfp_lag.lag_nb);
nfp_tunnel_config_stop(app);
}
......
@@ -43,7 +43,9 @@
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <linux/workqueue.h>
#include <linux/idr.h>
struct nfp_fl_pre_lag;
struct net_device;
struct nfp_app;
@@ -67,6 +69,7 @@ struct nfp_app;
/* Extra features bitmap. */
#define NFP_FL_FEATS_GENEVE BIT(0)
#define NFP_FL_NBI_MTU_SETTING BIT(1)
#define NFP_FL_FEATS_LAG BIT(31)
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
@@ -96,6 +99,33 @@ struct nfp_mtu_conf {
spinlock_t lock;
};
/**
* struct nfp_fl_lag - Flower APP priv data for link aggregation
* @lag_nb: Notifier to track master/slave events
* @work: Work queue for writing configs to the HW
* @lock: Lock to protect lag_group_list
* @group_list: List of all master/slave groups offloaded
* @ida_handle: IDA to handle group ids
* @pkt_num: Incremented for each config packet sent
* @batch_ver: Incremented for each batch of config packets
* @global_inst: Instance allocator for groups
* @rst_cfg: Marker to reset HW LAG config
* @retrans_skbs: Cmsgs that could not be processed by HW and require
* retransmission
*/
struct nfp_fl_lag {
struct notifier_block lag_nb;
struct delayed_work work;
struct mutex lock;
struct list_head group_list;
struct ida ida_handle;
unsigned int pkt_num;
unsigned int batch_ver;
u8 global_inst;
bool rst_cfg;
struct sk_buff_head retrans_skbs;
};
/**
* struct nfp_flower_priv - Flower APP per-vNIC priv data
* @app: Back pointer to app
@@ -128,6 +158,7 @@ struct nfp_mtu_conf {
* from firmware for repr reify
* @reify_wait_queue: wait queue for repr reify response counting
* @mtu_conf: Configuration of repr MTU value
* @nfp_lag: Link aggregation data block
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -157,6 +188,15 @@ struct nfp_flower_priv {
atomic_t reify_replies;
wait_queue_head_t reify_wait_queue;
struct nfp_mtu_conf mtu_conf;
struct nfp_fl_lag nfp_lag;
};
/**
* struct nfp_flower_repr_priv - Flower APP per-repr priv data
* @lag_port_flags: Extended port flags to record lag state of repr
*/
struct nfp_flower_repr_priv {
unsigned long lag_port_flags;
};
struct nfp_fl_key_ls {
@@ -214,7 +254,8 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
enum nfp_flower_tun_type tun_type);
int nfp_flower_compile_action(struct nfp_app *app,
struct tc_cls_flower_offload *flow,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow);
int nfp_compile_flow_metadata(struct nfp_app *app,
@@ -241,5 +282,14 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
void *cb_priv);
void nfp_flower_lag_init(struct nfp_fl_lag *lag);
void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag);
int nfp_flower_lag_reset(struct nfp_fl_lag *lag);
bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb);
int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
struct net_device *master,
struct nfp_fl_pre_lag *pre_act);
int nfp_flower_lag_get_output_id(struct nfp_app *app,
struct net_device *master);
#endif
@@ -440,7 +440,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_destroy_flow;
err = nfp_flower_compile_action(app, flow, netdev, flow_pay);
if (err)
goto err_destroy_flow;
......
@@ -277,6 +277,7 @@ const struct net_device_ops nfp_repr_netdev_ops = {
.ndo_get_vf_config = nfp_app_get_vf_config,
.ndo_set_vf_link_state = nfp_app_set_vf_link_state,
.ndo_set_features = nfp_port_set_features,
.ndo_set_mac_address = eth_mac_addr,
};
static void nfp_repr_clean(struct nfp_repr *repr)
......
@@ -100,6 +100,8 @@ nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name);
u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name,
int *error);
int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name,
u64 value);
u8 __iomem *
nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id,
unsigned int min_size, struct nfp_cpp_area **area);
......
@@ -286,6 +286,49 @@ u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name,
return val;
}
/**
* nfp_rtsym_write_le() - Write an unsigned scalar value to a symbol
* @rtbl: NFP RTsym table
* @name: Symbol name
* @value: Value to write
*
* Lookup a symbol and write a value to it. Symbol can be 4 or 8 bytes in size.
* If 4 bytes then the lower 32 bits of 'value' are used. The value will be
* written as a simple little-endian unsigned value.
*
* Return: 0 on success or error code.
*/
int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name,
u64 value)
{
const struct nfp_rtsym *sym;
int err;
u32 id;
sym = nfp_rtsym_lookup(rtbl, name);
if (!sym)
return -ENOENT;
id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain);
switch (sym->size) {
case 4:
err = nfp_cpp_writel(rtbl->cpp, id, sym->addr, value);
break;
case 8:
err = nfp_cpp_writeq(rtbl->cpp, id, sym->addr, value);
break;
default:
nfp_err(rtbl->cpp,
"rtsym '%s' unsupported or non-scalar size: %lld\n",
name, sym->size);
err = -EINVAL;
break;
}
return err;
}
u8 __iomem *
nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id,
unsigned int min_size, struct nfp_cpp_area **area)
......
@@ -1129,6 +1129,7 @@ static int team_upper_dev_link(struct team *team, struct team_port *port,
int err;
lag_upper_info.tx_type = team->mode->lag_tx_type;
lag_upper_info.hash_type = NETDEV_LAG_HASH_UNKNOWN;
err = netdev_master_upper_dev_link(port->dev, team->dev, NULL,
&lag_upper_info, extack);
if (err)
......
@@ -2332,8 +2332,19 @@ enum netdev_lag_tx_type {
NETDEV_LAG_TX_TYPE_HASH,
};
enum netdev_lag_hash {
NETDEV_LAG_HASH_NONE,
NETDEV_LAG_HASH_L2,
NETDEV_LAG_HASH_L34,
NETDEV_LAG_HASH_L23,
NETDEV_LAG_HASH_E23,
NETDEV_LAG_HASH_E34,
NETDEV_LAG_HASH_UNKNOWN,
};
struct netdev_lag_upper_info {
enum netdev_lag_tx_type tx_type;
enum netdev_lag_hash hash_type;
};
struct netdev_lag_lower_state_info {
......