Commit b6ed55cb authored by David S. Miller

Merge branch 'nfp-Flower-flow-merging'

Simon Horman says:

====================
nfp: Flower flow merging

John Hurley says:

These patches deal with 'implicit recirculation' on the NFP. This is a
firmware feature whereby a packet that egresses to an 'internal' port
recirculates back to the header extract phase with that internal port now
marked as its ingress port. The internal port can then be matched on by
another rule. This process simulates how the OvS datapath outputs to an
internal port. The FW traces the packet's recirculation route and sends a
'merge hint' to the driver telling it which flows the packet matched
against. The driver can then decide whether these flows can be merged into
a single rule and offloaded.

The patches deal with the following issues:

- assigning/freeing IDs to/from each of these new internal ports
- offloading rules that match on internal ports
- offloading neighbour table entries whose egress port is internal
- handling fallback traffic with an internal port as ingress
- using merge hints to create 'faster path' flows and tracking their stats etc.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 47a1a225 8af56f40
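
To make the mechanism above concrete, here is a minimal user-space C model of what happens when a merge hint arrives: the hint names two flows by their host (stats) contexts, the driver resolves each context to an offloaded flow payload, and then attempts to merge the pair. All names are loose stand-ins for the driver's structures (nfp_fl_payload, nfp_flower_get_fl_payload_from_ctx(), nfp_flower_merge_offloaded_flows()); the table and the merge function are illustrative, not the kernel implementation.

/* Minimal user-space model of merge-hint processing (illustrative only). */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct fl_payload {                 /* stand-in for struct nfp_fl_payload */
	uint32_t host_ctx;          /* stats context id assigned at offload */
	const char *name;
};

/* Stand-in for the stats_ctx -> flow table added by the series. */
static struct fl_payload flows[] = {
	{ .host_ctx = 7, .name = "rule A: phys port -> internal port" },
	{ .host_ctx = 9, .name = "rule B: internal port -> VF repr" },
};

static struct fl_payload *get_payload_from_ctx(uint32_t ctx)
{
	for (size_t i = 0; i < sizeof(flows) / sizeof(flows[0]); i++)
		if (flows[i].host_ctx == ctx)
			return &flows[i];
	return NULL;
}

/* Placeholder for nfp_flower_merge_offloaded_flows(). */
static int merge_flows(struct fl_payload *a, struct fl_payload *b)
{
	printf("merging '%s' + '%s' into one offloaded rule\n",
	       a->name, b->name);
	return 0;
}

int main(void)
{
	/* A merge hint names exactly two flows by their host contexts. */
	uint32_t hint[2] = { 7, 9 };
	struct fl_payload *sub[2];

	for (int i = 0; i < 2; i++) {
		sub[i] = get_payload_from_ctx(hint[i]);
		if (!sub[i])
			return 1;   /* invalid flow in merge hint */
	}
	return merge_flows(sub[0], sub[1]);
}

The merged rule then matches the first flow's ingress and carries the second flow's final actions, so recirculation through the internal port is skipped entirely; the diff below implements the real version of each step.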
@@ -53,7 +53,8 @@ nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
 	}
 }
 
-static struct net_device *nfp_abm_repr_get(struct nfp_app *app, u32 port_id)
+static struct net_device *
+nfp_abm_repr_get(struct nfp_app *app, u32 port_id, bool *redir_egress)
 {
 	enum nfp_repr_type rtype;
 	struct nfp_reprs *reprs;
@@ -549,5 +550,5 @@ const struct nfp_app_type app_abm = {
 	.eswitch_mode_get = nfp_abm_eswitch_mode_get,
 	.eswitch_mode_set = nfp_abm_eswitch_mode_set,
-	.repr_get = nfp_abm_repr_get,
+	.dev_get = nfp_abm_repr_get,
 };
@@ -159,7 +159,7 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
 	rtnl_lock();
 	rcu_read_lock();
-	netdev = nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
+	netdev = nfp_app_dev_get(app, be32_to_cpu(msg->portnum), NULL);
 	rcu_read_unlock();
 	if (!netdev) {
 		nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
@@ -192,7 +192,7 @@ nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
 	msg = nfp_flower_cmsg_get_data(skb);
 	rcu_read_lock();
-	exists = !!nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
+	exists = !!nfp_app_dev_get(app, be32_to_cpu(msg->portnum), NULL);
 	rcu_read_unlock();
 	if (!exists) {
 		nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
@@ -204,6 +204,50 @@ nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
 	wake_up(&priv->reify_wait_queue);
 }
 
+static void
+nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
+{
+	unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
+	struct nfp_flower_cmsg_merge_hint *msg;
+	struct nfp_fl_payload *sub_flows[2];
+	int err, i, flow_cnt;
+
+	msg = nfp_flower_cmsg_get_data(skb);
+	/* msg->count starts at 0 and always assumes at least 1 entry. */
+	flow_cnt = msg->count + 1;
+
+	if (msg_len < struct_size(msg, flow, flow_cnt)) {
+		nfp_flower_cmsg_warn(app, "Merge hint ctrl msg too short - %d bytes but expect %ld\n",
+				     msg_len, struct_size(msg, flow, flow_cnt));
+		return;
+	}
+
+	if (flow_cnt != 2) {
+		nfp_flower_cmsg_warn(app, "Merge hint contains %d flows - two are expected\n",
+				     flow_cnt);
+		return;
+	}
+
+	rtnl_lock();
+	for (i = 0; i < flow_cnt; i++) {
+		u32 ctx = be32_to_cpu(msg->flow[i].host_ctx);
+
+		sub_flows[i] = nfp_flower_get_fl_payload_from_ctx(app, ctx);
+		if (!sub_flows[i]) {
+			nfp_flower_cmsg_warn(app, "Invalid flow in merge hint\n");
+			goto err_rtnl_unlock;
+		}
+	}
+
+	err = nfp_flower_merge_offloaded_flows(app, sub_flows[0], sub_flows[1]);
+	/* Only warn on memory fail. Hint veto will not break functionality. */
+	if (err == -ENOMEM)
+		nfp_flower_cmsg_warn(app, "Flow merge memory fail.\n");
+
+err_rtnl_unlock:
+	rtnl_unlock();
+}
+
 static void
 nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
 {
@@ -222,6 +266,12 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
 	case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
 		nfp_flower_cmsg_portmod_rx(app, skb);
 		break;
+	case NFP_FLOWER_CMSG_TYPE_MERGE_HINT:
+		if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE) {
+			nfp_flower_cmsg_merge_hint_rx(app, skb);
+			break;
+		}
+		goto err_default;
 	case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
 		nfp_tunnel_request_route(app, skb);
 		break;
@@ -235,6 +285,7 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
 		}
 		/* fall through */
 	default:
+err_default:
 		nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
 				     type);
 		goto out;
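
The handler above guards against truncated control messages with struct_size(), the kernel's overflow-aware helper for a header plus a trailing flexible array. Below is a user-space sketch of the equivalent check with the arithmetic spelled out; the struct layout mirrors nfp_flower_cmsg_merge_hint, but the field types are simplified host-order stand-ins and the GCC-style packed attribute is assumed.

/* Illustrative re-check of the merge-hint length validation. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct merge_hint {
	uint8_t reserved[3];
	uint8_t count;              /* entries - 1, as in the cmsg format */
	struct {
		uint32_t host_ctx;  /* __be32 in the real message */
		uint64_t host_cookie;
	} __attribute__((packed)) flow[];
};

static int hint_len_ok(size_t msg_len, unsigned int flow_cnt)
{
	size_t need = sizeof(struct merge_hint) +
		      flow_cnt * sizeof(((struct merge_hint *)0)->flow[0]);

	return msg_len >= need;
}

int main(void)
{
	/* count = 1 means two 12-byte flow entries follow the 4-byte header. */
	printf("len 28 for 2 flows: %s\n", hint_len_ok(28, 2) ? "ok" : "too short");
	printf("len 16 for 2 flows: %s\n", hint_len_ok(16, 2) ? "ok" : "too short");
	return 0;
}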
@@ -402,11 +402,13 @@ struct nfp_flower_cmsg_hdr {
 /* Types defined for port related control messages */
 enum nfp_flower_cmsg_type_port {
 	NFP_FLOWER_CMSG_TYPE_FLOW_ADD = 0,
+	NFP_FLOWER_CMSG_TYPE_FLOW_MOD = 1,
 	NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2,
 	NFP_FLOWER_CMSG_TYPE_LAG_CONFIG = 4,
 	NFP_FLOWER_CMSG_TYPE_PORT_REIFY = 6,
 	NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7,
 	NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8,
+	NFP_FLOWER_CMSG_TYPE_MERGE_HINT = 9,
 	NFP_FLOWER_CMSG_TYPE_NO_NEIGH = 10,
 	NFP_FLOWER_CMSG_TYPE_TUN_MAC = 11,
 	NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS = 12,
@@ -451,6 +453,16 @@ struct nfp_flower_cmsg_portreify {
 #define NFP_FLOWER_CMSG_PORTREIFY_INFO_EXIST BIT(0)
 
+/* NFP_FLOWER_CMSG_TYPE_FLOW_MERGE_HINT */
+struct nfp_flower_cmsg_merge_hint {
+	u8 reserved[3];
+	u8 count;
+	struct {
+		__be32 host_ctx;
+		__be64 host_cookie;
+	} __packed flow[0];
+};
+
 enum nfp_flower_cmsg_port_type {
 	NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC = 0x0,
 	NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT = 0x1,
@@ -473,6 +485,13 @@ enum nfp_flower_cmsg_port_vnic_type {
 #define NFP_FLOWER_CMSG_PORT_PCIE_Q GENMASK(5, 0)
 #define NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM GENMASK(7, 0)
 
+static inline u32 nfp_flower_internal_port_get_port_id(u8 internal_port)
+{
+	return FIELD_PREP(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, internal_port) |
+		FIELD_PREP(NFP_FLOWER_CMSG_PORT_TYPE,
+			   NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT);
+}
+
 static inline u32 nfp_flower_cmsg_phys_port(u8 phys_port)
 {
 	return FIELD_PREP(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, phys_port) |
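
nfp_flower_internal_port_get_port_id() above packs a port type and a small port number into one 32-bit cmsg port ID with FIELD_PREP(). The sketch below reimplements that packing in user-space C; only the mask names come from this header, while the literal mask values and the 0x3 'other port' type are assumptions for illustration, and field_prep() mimics the kernel macro using a GCC builtin.

/* User-space model of the port-id packing (bit positions assumed). */
#include <stdint.h>
#include <stdio.h>

#define PORT_TYPE_MASK     0xF0000000u /* ~ NFP_FLOWER_CMSG_PORT_TYPE (assumed) */
#define PORT_PHYS_NUM_MASK 0x000000FFu /* ~ NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM */
#define PORT_TYPE_OTHER    0x3u        /* assumed ..._PORT_TYPE_OTHER_PORT value */

/* FIELD_PREP(mask, val): shift val up to the mask's lowest set bit. */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

static uint32_t internal_port_get_port_id(uint8_t internal_port)
{
	return field_prep(PORT_PHYS_NUM_MASK, internal_port) |
	       field_prep(PORT_TYPE_MASK, PORT_TYPE_OTHER);
}

int main(void)
{
	/* Internal port 5 -> 0x30000005 with the masks assumed above. */
	printf("0x%08x\n", internal_port_get_port_id(5));
	return 0;
}

Because the type field distinguishes internal ports from physical and PCIe ports, the same 32-bit ID space can carry all of them through the existing cmsg match fields unchanged.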
@@ -22,6 +22,9 @@
 
 #define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL
 
+#define NFP_MIN_INT_PORT_ID 1
+#define NFP_MAX_INT_PORT_ID 256
+
 static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
 {
 	return "FLOWER";
@@ -32,6 +35,113 @@ static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
 	return DEVLINK_ESWITCH_MODE_SWITCHDEV;
 }
 
+static int
+nfp_flower_lookup_internal_port_id(struct nfp_flower_priv *priv,
+				   struct net_device *netdev)
+{
+	struct net_device *entry;
+	int i, id = 0;
+
+	rcu_read_lock();
+	idr_for_each_entry(&priv->internal_ports.port_ids, entry, i)
+		if (entry == netdev) {
+			id = i;
+			break;
+		}
+	rcu_read_unlock();
+
+	return id;
+}
+
+static int
+nfp_flower_get_internal_port_id(struct nfp_app *app, struct net_device *netdev)
+{
+	struct nfp_flower_priv *priv = app->priv;
+	int id;
+
+	id = nfp_flower_lookup_internal_port_id(priv, netdev);
+	if (id > 0)
+		return id;
+
+	idr_preload(GFP_ATOMIC);
+	spin_lock_bh(&priv->internal_ports.lock);
+	id = idr_alloc(&priv->internal_ports.port_ids, netdev,
+		       NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID, GFP_ATOMIC);
+	spin_unlock_bh(&priv->internal_ports.lock);
+	idr_preload_end();
+
+	return id;
+}
+
+u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
+				       struct net_device *netdev)
+{
+	int ext_port;
+
+	if (nfp_netdev_is_nfp_repr(netdev)) {
+		return nfp_repr_get_port_id(netdev);
+	} else if (nfp_flower_internal_port_can_offload(app, netdev)) {
+		ext_port = nfp_flower_get_internal_port_id(app, netdev);
+		if (ext_port < 0)
+			return 0;
+
+		return nfp_flower_internal_port_get_port_id(ext_port);
+	}
+
+	return 0;
+}
+
+static struct net_device *
+nfp_flower_get_netdev_from_internal_port_id(struct nfp_app *app, int port_id)
+{
+	struct nfp_flower_priv *priv = app->priv;
+	struct net_device *netdev;
+
+	rcu_read_lock();
+	netdev = idr_find(&priv->internal_ports.port_ids, port_id);
+	rcu_read_unlock();
+
+	return netdev;
+}
+
+static void
+nfp_flower_free_internal_port_id(struct nfp_app *app, struct net_device *netdev)
+{
+	struct nfp_flower_priv *priv = app->priv;
+	int id;
+
+	id = nfp_flower_lookup_internal_port_id(priv, netdev);
+	if (!id)
+		return;
+
+	spin_lock_bh(&priv->internal_ports.lock);
+	idr_remove(&priv->internal_ports.port_ids, id);
+	spin_unlock_bh(&priv->internal_ports.lock);
+}
+
+static int
+nfp_flower_internal_port_event_handler(struct nfp_app *app,
+				       struct net_device *netdev,
+				       unsigned long event)
+{
+	if (event == NETDEV_UNREGISTER &&
+	    nfp_flower_internal_port_can_offload(app, netdev))
+		nfp_flower_free_internal_port_id(app, netdev);
+
+	return NOTIFY_OK;
+}
+
+static void nfp_flower_internal_port_init(struct nfp_flower_priv *priv)
+{
+	spin_lock_init(&priv->internal_ports.lock);
+	idr_init(&priv->internal_ports.port_ids);
+}
+
+static void nfp_flower_internal_port_cleanup(struct nfp_flower_priv *priv)
+{
+	idr_destroy(&priv->internal_ports.port_ids);
+}
+
 static struct nfp_flower_non_repr_priv *
 nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev)
 {
@@ -119,12 +229,21 @@ nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
 }
 
 static struct net_device *
-nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
+nfp_flower_dev_get(struct nfp_app *app, u32 port_id, bool *redir_egress)
 {
 	enum nfp_repr_type repr_type;
 	struct nfp_reprs *reprs;
 	u8 port = 0;
 
+	/* Check if the port is internal. */
+	if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id) ==
+	    NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT) {
+		if (redir_egress)
+			*redir_egress = true;
+		port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, port_id);
+		return nfp_flower_get_netdev_from_internal_port_id(app, port);
+	}
+
 	repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
 	if (repr_type > NFP_REPR_TYPE_MAX)
 		return NULL;
@@ -641,11 +760,30 @@ static int nfp_flower_init(struct nfp_app *app)
 		goto err_cleanup_metadata;
 	}
 
+	if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) {
+		/* Tell the firmware that the driver supports flow merging. */
+		err = nfp_rtsym_write_le(app->pf->rtbl,
+					 "_abi_flower_merge_hint_enable", 1);
+		if (!err) {
+			app_priv->flower_ext_feats |= NFP_FL_FEATS_FLOW_MERGE;
+			nfp_flower_internal_port_init(app_priv);
+		} else if (err == -ENOENT) {
+			nfp_warn(app->cpp, "Flow merge not supported by FW.\n");
+		} else {
+			goto err_lag_clean;
+		}
+	} else {
+		nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n");
+	}
+
 	INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
 	INIT_LIST_HEAD(&app_priv->non_repr_priv);
 
 	return 0;
 
+err_lag_clean:
+	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
+		nfp_flower_lag_cleanup(&app_priv->nfp_lag);
 err_cleanup_metadata:
 	nfp_flower_metadata_cleanup(app);
 err_free_app_priv:
@@ -664,6 +802,9 @@ static void nfp_flower_clean(struct nfp_app *app)
 	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
 		nfp_flower_lag_cleanup(&app_priv->nfp_lag);
 
+	if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE)
+		nfp_flower_internal_port_cleanup(app_priv);
+
 	nfp_flower_metadata_cleanup(app);
 	vfree(app->priv);
 	app->priv = NULL;
@@ -762,6 +903,10 @@ nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
 	if (ret & NOTIFY_STOP_MASK)
 		return ret;
 
+	ret = nfp_flower_internal_port_event_handler(app, netdev, event);
+	if (ret & NOTIFY_STOP_MASK)
+		return ret;
+
 	return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
 }
 
@@ -800,7 +945,7 @@ const struct nfp_app_type app_flower = {
 	.sriov_disable = nfp_flower_sriov_disable,
 
 	.eswitch_mode_get = eswitch_mode_get,
-	.repr_get = nfp_flower_repr_get,
+	.dev_get = nfp_flower_dev_get,
 
 	.setup_tc = nfp_flower_setup_tc,
 };
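
The internal-port ID scheme above is a lookup-then-allocate pattern over an IDR: reuse the id already assigned to a netdev, otherwise take the lowest free id in [NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID). A toy user-space model of the same semantics, with a flat array in place of the IDR and no locking:

/* Toy model of lookup-then-allocate internal port ids (illustrative). */
#include <stdio.h>

#define MIN_INT_PORT_ID 1
#define MAX_INT_PORT_ID 256

static const void *port_ids[MAX_INT_PORT_ID]; /* id -> netdev (NULL = free) */

static int get_internal_port_id(const void *netdev)
{
	int id;

	for (id = MIN_INT_PORT_ID; id < MAX_INT_PORT_ID; id++)
		if (port_ids[id] == netdev)
			return id;              /* already assigned: reuse */
	for (id = MIN_INT_PORT_ID; id < MAX_INT_PORT_ID; id++)
		if (!port_ids[id]) {
			port_ids[id] = netdev;  /* allocate lowest free id */
			return id;
		}
	return -1;                              /* id space exhausted */
}

int main(void)
{
	int a = 1, b = 2;

	/* Prints "1 2 1": the second lookup of &a reuses its id. */
	printf("%d %d %d\n", get_internal_port_id(&a),
	       get_internal_port_id(&b), get_internal_port_id(&a));
	return 0;
}

The driver version wraps allocation in idr_preload()/GFP_ATOMIC and a spinlock, presumably because the path can be reached from contexts that must not sleep, and frees the id on NETDEV_UNREGISTER via the event handler above.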
@@ -39,6 +39,8 @@ struct nfp_app;
 #define NFP_FL_NBI_MTU_SETTING BIT(1)
 #define NFP_FL_FEATS_GENEVE_OPT BIT(2)
 #define NFP_FL_FEATS_VLAN_PCP BIT(3)
+#define NFP_FL_FEATS_FLOW_MOD BIT(5)
+#define NFP_FL_FEATS_FLOW_MERGE BIT(30)
 #define NFP_FL_FEATS_LAG BIT(31)
 
 struct nfp_fl_mask_id {
@@ -114,6 +116,16 @@ struct nfp_fl_lag {
 	struct sk_buff_head retrans_skbs;
 };
 
+/**
+ * struct nfp_fl_internal_ports - Flower APP priv data for additional ports
+ * @port_ids:	Assignment of ids to any additional ports
+ * @lock:	Lock for extra ports list
+ */
+struct nfp_fl_internal_ports {
+	struct idr port_ids;
+	spinlock_t lock;
+};
+
 /**
  * struct nfp_flower_priv - Flower APP per-vNIC priv data
  * @app:	Back pointer to app
@@ -128,6 +140,7 @@ struct nfp_fl_lag {
  * @flow_table:		Hash table used to store flower rules
  * @stats:		Stored stats updates for flower rules
  * @stats_lock:		Lock for flower rule stats updates
+ * @stats_ctx_table:	Hash table to map stats contexts to its flow rule
  * @cmsg_work:		Workqueue for control messages processing
  * @cmsg_skbs_high:	List of higher priority skbs for control message
  *			processing
@@ -143,6 +156,7 @@ struct nfp_fl_lag {
  * @non_repr_priv:	List of offloaded non-repr ports and their priv data
  * @active_mem_unit:	Current active memory unit for flower rules
  * @total_mem_units:	Total number of available memory units for flower rules
+ * @internal_ports:	Internal port ids used in offloaded rules
 */
 struct nfp_flower_priv {
 	struct nfp_app *app;
@@ -157,6 +171,7 @@ struct nfp_flower_priv {
 	struct rhashtable flow_table;
 	struct nfp_fl_stats *stats;
 	spinlock_t stats_lock; /* lock stats */
+	struct rhashtable stats_ctx_table;
 	struct work_struct cmsg_work;
 	struct sk_buff_head cmsg_skbs_high;
 	struct sk_buff_head cmsg_skbs_low;
@@ -169,6 +184,7 @@ struct nfp_flower_priv {
 	struct list_head non_repr_priv;
 	unsigned int active_mem_unit;
 	unsigned int total_mem_units;
+	struct nfp_fl_internal_ports internal_ports;
 };
 
 /**
@@ -236,6 +252,25 @@ struct nfp_fl_payload {
 	char *unmasked_data;
 	char *mask_data;
 	char *action_data;
+	struct list_head linked_flows;
+	bool in_hw;
+};
+
+struct nfp_fl_payload_link {
+	/* A link contains a pointer to a merge flow and an associated sub_flow.
+	 * Each merge flow will feature in 2 links to its underlying sub_flows.
+	 * A sub_flow will have at least 1 link to a merge flow or more if it
+	 * has been used to create multiple merge flows.
+	 *
+	 * For a merge flow, 'linked_flows' in its nfp_fl_payload struct lists
+	 * all links to sub_flows (sub_flow.flow) via merge.list.
+	 * For a sub_flow, 'linked_flows' gives all links to merge flows it has
+	 * formed (merge_flow.flow) via sub_flow.list.
+	 */
+	struct {
+		struct list_head list;
+		struct nfp_fl_payload *flow;
+	} merge_flow, sub_flow;
 };
 
 extern const struct rhashtable_params nfp_flower_table_params;
@@ -247,12 +282,40 @@ struct nfp_fl_stats_frame {
 	__be64 stats_cookie;
 };
 
+static inline bool
+nfp_flower_internal_port_can_offload(struct nfp_app *app,
+				     struct net_device *netdev)
+{
+	struct nfp_flower_priv *app_priv = app->priv;
+
+	if (!(app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE))
+		return false;
+	if (!netdev->rtnl_link_ops)
+		return false;
+	if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
+		return true;
+
+	return false;
+}
+
+/* The address of the merged flow acts as its cookie.
+ * Cookies supplied to us by TC flower are also addresses to allocated
+ * memory and thus this scheme should not generate any collisions.
+ */
+static inline bool nfp_flower_is_merge_flow(struct nfp_fl_payload *flow_pay)
+{
+	return flow_pay->tc_flower_cookie == (unsigned long)flow_pay;
+}
+
 int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
 			     unsigned int host_ctx_split);
 void nfp_flower_metadata_cleanup(struct nfp_app *app);
 
 int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
 			enum tc_setup_type type, void *type_data);
+int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
+				     struct nfp_fl_payload *sub_flow1,
+				     struct nfp_fl_payload *sub_flow2);
 int nfp_flower_compile_flow_match(struct nfp_app *app,
 				  struct tc_cls_flower_offload *flow,
 				  struct nfp_fl_key_ls *key_ls,
@@ -267,6 +330,8 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
 			      struct tc_cls_flower_offload *flow,
 			      struct nfp_fl_payload *nfp_flow,
 			      struct net_device *netdev);
+void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
+				struct nfp_fl_payload *nfp_flow);
 int nfp_modify_flow_metadata(struct nfp_app *app,
 			     struct nfp_fl_payload *nfp_flow);
@@ -274,6 +339,8 @@ struct nfp_fl_payload *
 nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
 			   struct net_device *netdev);
 struct nfp_fl_payload *
+nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id);
+struct nfp_fl_payload *
 nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
 
 void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb);
@@ -311,4 +378,6 @@ void
 __nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv);
 void
 nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev);
+u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
+				       struct net_device *netdev);
 
 #endif
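
The nfp_fl_payload_link comment above is easier to see in code: one link object carries two list anchors, so the same link sits on both the merge flow's and the sub-flow's linked_flows list. Here is a compact user-space model with minimal list_head/container_of stand-ins; the field usage follows the comment, everything else (names, the printing) is illustrative.

/* Two-anchor link between a merge flow and one of its sub-flows. */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct payload {
	const char *name;
	struct list_head linked_flows;
};

struct payload_link {
	struct {
		struct list_head list;
		struct payload *flow;
	} merge_flow, sub_flow;
};

static void link_flows(struct payload_link *l, struct payload *merge,
		       struct payload *sub)
{
	l->merge_flow.flow = merge;
	list_add(&l->merge_flow.list, &merge->linked_flows);
	l->sub_flow.flow = sub;
	list_add(&l->sub_flow.list, &sub->linked_flows);
}

int main(void)
{
	struct payload m1 = { .name = "merge1" }, m2 = { .name = "merge2" };
	struct payload s = { .name = "sub" };
	struct payload_link l1, l2;
	struct list_head *p;

	list_init(&m1.linked_flows);
	list_init(&m2.linked_flows);
	list_init(&s.linked_flows);
	link_flows(&l1, &m1, &s);
	link_flows(&l2, &m2, &s);

	/* A sub-flow can enumerate every merge flow it helped form. */
	for (p = s.linked_flows.next; p != &s.linked_flows; p = p->next)
		printf("%s -> %s\n", s.name,
		       container_of(p, struct payload_link,
				    sub_flow.list)->merge_flow.flow->name);
	return 0;
}

This bidirectional bookkeeping is what such links enable: when either side goes away, every merge flow built from it can be found and torn down without scanning the whole flow table.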
@@ -326,13 +326,12 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
 			  struct nfp_fl_payload *nfp_flow,
 			  enum nfp_flower_tun_type tun_type)
 {
-	u32 cmsg_port = 0;
+	u32 port_id;
 	int err;
 	u8 *ext;
 	u8 *msk;
 
-	if (nfp_netdev_is_nfp_repr(netdev))
-		cmsg_port = nfp_repr_get_port_id(netdev);
+	port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
 
 	memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
 	memset(nfp_flow->mask_data, 0, key_ls->key_size);
@@ -358,13 +357,13 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
 	/* Populate Exact Port data. */
 	err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
-				      cmsg_port, false, tun_type);
+				      port_id, false, tun_type);
 	if (err)
 		return err;
 
 	/* Populate Mask Port Data. */
 	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
-				      cmsg_port, true, tun_type);
+				      port_id, true, tun_type);
 	if (err)
 		return err;
@@ -24,6 +24,18 @@ struct nfp_fl_flow_table_cmp_arg {
 	unsigned long cookie;
 };
 
+struct nfp_fl_stats_ctx_to_flow {
+	struct rhash_head ht_node;
+	u32 stats_cxt;
+	struct nfp_fl_payload *flow;
+};
+
+static const struct rhashtable_params stats_ctx_table_params = {
+	.key_offset = offsetof(struct nfp_fl_stats_ctx_to_flow, stats_cxt),
+	.head_offset = offsetof(struct nfp_fl_stats_ctx_to_flow, ht_node),
+	.key_len = sizeof(u32),
+};
+
 static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
 {
 	struct nfp_flower_priv *priv = app->priv;
@@ -264,9 +276,6 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
 	if (!mask_entry)
 		return false;
 
-	if (meta_flags)
-		*meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
-
 	*mask_id = mask_entry->mask_id;
 	mask_entry->ref_cnt--;
 	if (!mask_entry->ref_cnt) {
@@ -285,25 +294,42 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
 			      struct nfp_fl_payload *nfp_flow,
 			      struct net_device *netdev)
 {
+	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
 	struct nfp_flower_priv *priv = app->priv;
 	struct nfp_fl_payload *check_entry;
 	u8 new_mask_id;
 	u32 stats_cxt;
+	int err;
 
-	if (nfp_get_stats_entry(app, &stats_cxt))
-		return -ENOENT;
+	err = nfp_get_stats_entry(app, &stats_cxt);
+	if (err)
+		return err;
 
 	nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
 	nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
 	nfp_flow->ingress_dev = netdev;
 
+	ctx_entry = kzalloc(sizeof(*ctx_entry), GFP_KERNEL);
+	if (!ctx_entry) {
+		err = -ENOMEM;
+		goto err_release_stats;
+	}
+
+	ctx_entry->stats_cxt = stats_cxt;
+	ctx_entry->flow = nfp_flow;
+
+	if (rhashtable_insert_fast(&priv->stats_ctx_table, &ctx_entry->ht_node,
+				   stats_ctx_table_params)) {
+		err = -ENOMEM;
+		goto err_free_ctx_entry;
+	}
+
 	new_mask_id = 0;
 	if (!nfp_check_mask_add(app, nfp_flow->mask_data,
 				nfp_flow->meta.mask_len,
 				&nfp_flow->meta.flags, &new_mask_id)) {
-		if (nfp_release_stats_entry(app, stats_cxt))
-			return -EINVAL;
-		return -ENOENT;
+		err = -ENOENT;
+		goto err_remove_rhash;
 	}
 
 	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
@@ -317,43 +343,82 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
 	check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
 	if (check_entry) {
-		if (nfp_release_stats_entry(app, stats_cxt))
-			return -EINVAL;
-
-		if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
-					   nfp_flow->meta.mask_len,
-					   NULL, &new_mask_id))
-			return -EINVAL;
-
-		return -EEXIST;
+		err = -EEXIST;
+		goto err_remove_mask;
 	}
 
 	return 0;
+
+err_remove_mask:
+	nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len,
+			      NULL, &new_mask_id);
+err_remove_rhash:
+	WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
+					    &ctx_entry->ht_node,
+					    stats_ctx_table_params));
+err_free_ctx_entry:
+	kfree(ctx_entry);
+err_release_stats:
+	nfp_release_stats_entry(app, stats_cxt);
+
+	return err;
+}
+
+void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
+				struct nfp_fl_payload *nfp_flow)
+{
+	nfp_flow->meta.flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
+	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
+	priv->flower_version++;
 }
 
 int nfp_modify_flow_metadata(struct nfp_app *app,
 			     struct nfp_fl_payload *nfp_flow)
 {
+	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
 	struct nfp_flower_priv *priv = app->priv;
 	u8 new_mask_id = 0;
 	u32 temp_ctx_id;
 
+	__nfp_modify_flow_metadata(priv, nfp_flow);
+
 	nfp_check_mask_remove(app, nfp_flow->mask_data,
 			      nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
 			      &new_mask_id);
 
-	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
-	priv->flower_version++;
-
 	/* Update flow payload with mask ids. */
 	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
 
-	/* Release the stats ctx id. */
+	/* Release the stats ctx id and ctx to flow table entry. */
 	temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
 
+	ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &temp_ctx_id,
+					   stats_ctx_table_params);
+	if (!ctx_entry)
+		return -ENOENT;
+
+	WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
+					    &ctx_entry->ht_node,
+					    stats_ctx_table_params));
+	kfree(ctx_entry);
+
 	return nfp_release_stats_entry(app, temp_ctx_id);
 }
 
+struct nfp_fl_payload *
+nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id)
+{
+	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
+	struct nfp_flower_priv *priv = app->priv;
+
+	ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &ctx_id,
+					   stats_ctx_table_params);
+	if (!ctx_entry)
+		return NULL;
+
+	return ctx_entry->flow;
+}
+
 static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
 			    const void *obj)
 {
@@ -403,6 +468,10 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
 	if (err)
 		return err;
 
+	err = rhashtable_init(&priv->stats_ctx_table, &stats_ctx_table_params);
+	if (err)
+		goto err_free_flow_table;
+
 	get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
 
 	/* Init ring buffer and unallocated mask_ids. */
@@ -410,7 +479,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
 		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
 			      NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
 	if (!priv->mask_ids.mask_id_free_list.buf)
-		goto err_free_flow_table;
+		goto err_free_stats_ctx_table;
 
 	priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
@@ -447,6 +516,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
 	kfree(priv->mask_ids.last_used);
 err_free_mask_id:
 	kfree(priv->mask_ids.mask_id_free_list.buf);
+err_free_stats_ctx_table:
+	rhashtable_destroy(&priv->stats_ctx_table);
 err_free_flow_table:
 	rhashtable_destroy(&priv->flow_table);
 	return -ENOMEM;
@@ -461,6 +532,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
 	rhashtable_free_and_destroy(&priv->flow_table,
 				    nfp_check_rhashtable_empty, NULL);
+	rhashtable_free_and_destroy(&priv->stats_ctx_table,
+				    nfp_check_rhashtable_empty, NULL);
 	kvfree(priv->stats);
 	kfree(priv->mask_ids.mask_id_free_list.buf);
 	kfree(priv->mask_ids.last_used);
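
The new stats_ctx_table above exists because merge hints identify flows by host (stats) context rather than by TC cookie, so the driver needs a u32-keyed map from stats context to flow payload alongside the existing cookie-keyed flow_table. A toy fixed-size stand-in for that mapping (the kernel uses a resizable rhashtable with the params shown above; open addressing here is only for illustration):

/* u32 stats context -> flow payload, fixed-size open addressing. */
#include <stdint.h>
#include <stdio.h>

#define TBL_SZ 64 /* power of two, illustrative */

struct ctx_to_flow { uint32_t stats_cxt; void *flow; int used; };

static struct ctx_to_flow tbl[TBL_SZ];

static int ctx_insert(uint32_t ctx, void *flow)
{
	for (uint32_t i = 0; i < TBL_SZ; i++) {
		struct ctx_to_flow *e = &tbl[(ctx + i) & (TBL_SZ - 1)];

		if (!e->used) {
			*e = (struct ctx_to_flow){ ctx, flow, 1 };
			return 0;
		}
	}
	return -1; /* table full */
}

static void *ctx_lookup(uint32_t ctx)
{
	for (uint32_t i = 0; i < TBL_SZ; i++) {
		struct ctx_to_flow *e = &tbl[(ctx + i) & (TBL_SZ - 1)];

		if (e->used && e->stats_cxt == ctx)
			return e->flow;
	}
	return NULL;
}

int main(void)
{
	int flow;

	ctx_insert(42, &flow);
	printf("ctx 42 -> %p, ctx 7 -> %p\n", ctx_lookup(42), ctx_lookup(7));
	return 0;
}

Entries are inserted when flow metadata is compiled and removed when it is modified or released, mirroring the kzalloc/rhashtable_insert_fast and rhashtable_remove_fast/kfree pairs in the hunks above.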
@@ -171,7 +171,7 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
 	for (i = 0; i < count; i++) {
 		ipv4_addr = payload->tun_info[i].ipv4;
 		port = be32_to_cpu(payload->tun_info[i].egress_port);
-		netdev = nfp_app_repr_get(app, port);
+		netdev = nfp_app_dev_get(app, port, NULL);
 		if (!netdev)
 			continue;
@@ -270,9 +270,10 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
 		    struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
 {
 	struct nfp_tun_neigh payload;
+	u32 port_id;
 
-	/* Only offload representor IPv4s for now. */
-	if (!nfp_netdev_is_nfp_repr(netdev))
+	port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
+	if (!port_id)
 		return;
 
 	memset(&payload, 0, sizeof(struct nfp_tun_neigh));
@@ -290,7 +291,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
 	payload.src_ipv4 = flow->saddr;
 	ether_addr_copy(payload.src_addr, netdev->dev_addr);
 	neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
-	payload.port_id = cpu_to_be32(nfp_repr_get_port_id(netdev));
+	payload.port_id = cpu_to_be32(port_id);
 
 	/* Add destination of new route to NFP cache. */
 	nfp_tun_add_route_to_cache(app, payload.dst_ipv4);
@@ -366,7 +367,7 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
 	payload = nfp_flower_cmsg_get_data(skb);
 
-	netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port));
+	netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
 	if (!netdev)
 		goto route_fail_warning;
@@ -79,7 +79,7 @@ extern const struct nfp_app_type app_abm;
 * @eswitch_mode_set:	set SR-IOV eswitch mode (under pf->lock)
 * @sriov_enable:	app-specific sriov initialisation
 * @sriov_disable:	app-specific sriov clean-up
- * @repr_get:	get representor netdev
+ * @dev_get:	get representor or internal port representing netdev
 */
 struct nfp_app_type {
 	enum nfp_app_id id;
@@ -143,7 +143,8 @@ struct nfp_app_type {
 	enum devlink_eswitch_mode (*eswitch_mode_get)(struct nfp_app *app);
 	int (*eswitch_mode_set)(struct nfp_app *app, u16 mode);
 
-	struct net_device *(*repr_get)(struct nfp_app *app, u32 id);
+	struct net_device *(*dev_get)(struct nfp_app *app, u32 id,
+				      bool *redir_egress);
 };
@@ -397,12 +398,14 @@ static inline void nfp_app_sriov_disable(struct nfp_app *app)
 		app->type->sriov_disable(app);
 }
 
-static inline struct net_device *nfp_app_repr_get(struct nfp_app *app, u32 id)
+static inline
+struct net_device *nfp_app_dev_get(struct nfp_app *app, u32 id,
+				   bool *redir_egress)
 {
-	if (unlikely(!app || !app->type->repr_get))
+	if (unlikely(!app || !app->type->dev_get))
 		return NULL;
 
-	return app->type->repr_get(app, id);
+	return app->type->dev_get(app, id, redir_egress);
 }
 
 struct nfp_app *nfp_app_from_netdev(struct net_device *netdev);
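
The repr_get -> dev_get rename above adds an optional out-parameter: callers that can act on "this packet should egress the returned device" pass a bool pointer, everyone else passes NULL. A self-contained model of the ops-table indirection; names mirror the driver, while demo_dev_get() and its id threshold are arbitrary stand-ins for the real port-type check.

/* Model of the dev_get callback with an optional redir_egress out-param. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct app;

struct app_type {
	void *(*dev_get)(struct app *app, unsigned int id, bool *redir_egress);
};

struct app { const struct app_type *type; };

static void *app_dev_get(struct app *app, unsigned int id, bool *redir_egress)
{
	if (!app || !app->type->dev_get)
		return NULL;
	return app->type->dev_get(app, id, redir_egress);
}

/* Illustrative implementation: ids >= 0x30000000 are "internal" ports. */
static void *demo_dev_get(struct app *app, unsigned int id, bool *redir_egress)
{
	static int fake_netdev;

	(void)app;
	if (id >= 0x30000000 && redir_egress)
		*redir_egress = true;
	return &fake_netdev;
}

int main(void)
{
	struct app_type type = { .dev_get = demo_dev_get };
	struct app app = { .type = &type };
	bool redir = false;

	app_dev_get(&app, 0x30000005, &redir); /* RX datapath caller */
	app_dev_get(&app, 42, NULL);           /* cmsg caller: no out-param */
	printf("redirect to egress: %s\n", redir ? "yes" : "no");
	return 0;
}

Keeping the flag optional means the many control-message lookups stay one-line changes (append NULL), and only the RX fast path below has to care about the new behaviour.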
@@ -1683,6 +1683,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 		struct nfp_net_rx_buf *rxbuf;
 		struct nfp_net_rx_desc *rxd;
 		struct nfp_meta_parsed meta;
+		bool redir_egress = false;
 		struct net_device *netdev;
 		dma_addr_t new_dma_addr;
 		u32 meta_len_xdp = 0;
@@ -1818,13 +1819,16 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 			struct nfp_net *nn;
 
 			nn = netdev_priv(dp->netdev);
-			netdev = nfp_app_repr_get(nn->app, meta.portid);
+			netdev = nfp_app_dev_get(nn->app, meta.portid,
+						 &redir_egress);
 			if (unlikely(!netdev)) {
 				nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
 						NULL);
 				continue;
 			}
-			nfp_repr_inc_rx_stats(netdev, pkt_len);
+
+			if (nfp_netdev_is_nfp_repr(netdev))
+				nfp_repr_inc_rx_stats(netdev, pkt_len);
 		}
 
 		skb = build_skb(rxbuf->frag, true_bufsz);
@@ -1859,7 +1863,13 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 		if (meta_len_xdp)
 			skb_metadata_set(skb, meta_len_xdp);
 
-		napi_gro_receive(&rx_ring->r_vec->napi, skb);
+		if (likely(!redir_egress)) {
+			napi_gro_receive(&rx_ring->r_vec->napi, skb);
+		} else {
+			skb->dev = netdev;
+			__skb_push(skb, ETH_HLEN);
+			dev_queue_xmit(skb);
+		}
 	}
 
 	if (xdp_prog) {
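
Finally, the datapath change above in condensed form: fallback packets whose port ID resolves to an internal port are not delivered up the stack but are instead transmitted out of that device, after restoring the Ethernet header. A user-space model of just that branch; deliver() and xmit() stand in for napi_gro_receive() and dev_queue_xmit().

/* Condensed model of the RX fallback decision (illustrative only). */
#include <stdbool.h>
#include <stdio.h>

struct skb { const char *dev; };

static void deliver(struct skb *skb) { (void)skb; printf("to stack\n"); }
static void xmit(struct skb *skb) { printf("tx on %s\n", skb->dev); }

static void rx_finish(struct skb *skb, const char *netdev, bool redir_egress)
{
	if (!redir_egress) {
		deliver(skb);           /* normal fallback path */
	} else {
		skb->dev = netdev;      /* egress the internal port instead */
		/* the real code also re-pushes the Ethernet header here */
		xmit(skb);
	}
}

int main(void)
{
	struct skb pkt = { NULL };

	rx_finish(&pkt, "ovs-internal0", true);
	rx_finish(&pkt, NULL, false);
	return 0;
}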