Commit 837708a8 authored by David S. Miller

Merge branch 'nfp-improve-signal-handing-on-FW-waits-and-flower-control-message-processing'

Jakub Kicinski says:

====================
nfp: improve signal handing on FW waits and flower control message processing

The first part of this set aims to improve handling of interrupted
waits.  Patch 1 makes waiting for management FW responses
uninterruptible, while patch 2 adds a message when a signal arrives
while waiting for an NFP mutex.  We can't interrupt execution of
FW commands, so uninterruptible sleep seems reasonable there (see
the polling sketch below).  Exiting a wait for a mutex should be
clean and have no side effects, so we allow aborting it.  Note that
both waits have rather large timeouts (tens of seconds).

Patches 3 and 4 improve flower offload operation under heavy load.
Currently there is no cap on the number of queued FW notifications.
Some of the notifications have to be processed from a workqueue,
which may lead to a very large number of messages getting queued
if the workqueue never gets a chance to run.  Pieter puts a limit
on the number of queued messages, drops some messages we ignore
without queuing them, and processes more important messages first
(a simplified sketch of the scheme follows below).
====================
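The uninterruptible waits come down to a simple pattern: poll, sleep without checking for signals, and let a wall-clock timeout bound the wait. The sketch below is illustrative only; the helper name and parameters are hypothetical, not driver API, and it merely mirrors the shape that nfp_nsp_wait_reg() takes after this series.

/*
 * Illustrative sketch only: a generic uninterruptible poll with a bounded
 * timeout.  The helper name and parameters are hypothetical.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/jiffies.h>

static int example_poll_until(bool (*done)(void *arg), void *arg,
                              unsigned int timeout_ms)
{
        unsigned long wait_until = jiffies + msecs_to_jiffies(timeout_ms);

        for (;;) {
                if (done(arg))
                        return 0;

                /* Uninterruptible: a pending signal cannot abort the FW
                 * command mid-flight, so sleep without checking for one.
                 */
                msleep(25);

                if (time_after(jiffies, wait_until))
                        return -ETIMEDOUT;
        }
}

With the loop uninterruptible, the only way out is success or -ETIMEDOUT, which matches FW commands that cannot safely be abandoned mid-flight.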
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 335b929b cf2cbadc
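For the flower changes, the core of the new queueing scheme can be reduced to the sketch below. It assumes kernel context; the example_* names and the standalone structure are hypothetical, while the skb list APIs and the cap mirror what the diff that follows adds.

/*
 * Illustrative sketch only: bounded, two-priority control message queueing.
 * The example_* names are hypothetical; the real code is in the diff below.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#define EXAMPLE_WORKQ_MAX_SKBS  30000

struct example_priv {
        struct work_struct cmsg_work;
        struct sk_buff_head cmsg_skbs_high;     /* e.g. PORT_REIFY / PORT_MOD */
        struct sk_buff_head cmsg_skbs_low;      /* everything else */
};

/* Enqueue with a hard cap so a starved worker cannot pile up messages. */
static void example_queue(struct example_priv *priv, struct sk_buff *skb,
                          bool high_prio)
{
        struct sk_buff_head *q = high_prio ? &priv->cmsg_skbs_high
                                           : &priv->cmsg_skbs_low;

        if (skb_queue_len(q) >= EXAMPLE_WORKQ_MAX_SKBS) {
                dev_kfree_skb_any(skb); /* drop rather than grow unbounded */
                return;
        }

        skb_queue_tail(q, skb);
        schedule_work(&priv->cmsg_work);
}

/* Worker: splice both lists into one, high-priority messages first. */
static void example_work(struct work_struct *work)
{
        struct example_priv *priv =
                container_of(work, struct example_priv, cmsg_work);
        struct sk_buff_head joined;
        struct sk_buff *skb;

        skb_queue_head_init(&joined);

        spin_lock_bh(&priv->cmsg_skbs_high.lock);
        skb_queue_splice_tail_init(&priv->cmsg_skbs_high, &joined);
        spin_unlock_bh(&priv->cmsg_skbs_high.lock);

        spin_lock_bh(&priv->cmsg_skbs_low.lock);
        skb_queue_splice_tail_init(&priv->cmsg_skbs_low, &joined);
        spin_unlock_bh(&priv->cmsg_skbs_low.lock);

        while ((skb = __skb_dequeue(&joined)))
                dev_consume_skb_any(skb);       /* stands in for real processing */
}

Splicing the high-priority list first means port reify/mod messages are handled before the backlog of lower-priority notifications, and the cap bounds memory use when the worker is starved.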
@@ -258,9 +258,6 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
 	case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
 		nfp_tunnel_keep_alive(app, skb);
 		break;
-	case NFP_FLOWER_CMSG_TYPE_TUN_NEIGH:
-		/* Acks from the NFP that the route is added - ignore. */
-		break;
 	default:
 		nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
 				     type);
@@ -275,18 +272,49 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
 void nfp_flower_cmsg_process_rx(struct work_struct *work)
 {
+	struct sk_buff_head cmsg_joined;
 	struct nfp_flower_priv *priv;
 	struct sk_buff *skb;
 
 	priv = container_of(work, struct nfp_flower_priv, cmsg_work);
+	skb_queue_head_init(&cmsg_joined);
 
-	while ((skb = skb_dequeue(&priv->cmsg_skbs)))
+	spin_lock_bh(&priv->cmsg_skbs_high.lock);
+	skb_queue_splice_tail_init(&priv->cmsg_skbs_high, &cmsg_joined);
+	spin_unlock_bh(&priv->cmsg_skbs_high.lock);
+
+	spin_lock_bh(&priv->cmsg_skbs_low.lock);
+	skb_queue_splice_tail_init(&priv->cmsg_skbs_low, &cmsg_joined);
+	spin_unlock_bh(&priv->cmsg_skbs_low.lock);
+
+	while ((skb = __skb_dequeue(&cmsg_joined)))
 		nfp_flower_cmsg_process_one_rx(priv->app, skb);
 }
 
-void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
+static void
+nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type)
 {
 	struct nfp_flower_priv *priv = app->priv;
+	struct sk_buff_head *skb_head;
+
+	if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY ||
+	    type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
+		skb_head = &priv->cmsg_skbs_high;
+	else
+		skb_head = &priv->cmsg_skbs_low;
+
+	if (skb_queue_len(skb_head) >= NFP_FLOWER_WORKQ_MAX_SKBS) {
+		nfp_flower_cmsg_warn(app, "Dropping queued control messages\n");
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	skb_queue_tail(skb_head, skb);
+	schedule_work(&priv->cmsg_work);
+}
+
+void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
+{
 	struct nfp_flower_cmsg_hdr *cmsg_hdr;
 
 	cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
@@ -306,8 +334,10 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
 		   nfp_flower_process_mtu_ack(app, skb)) {
 		/* Handle MTU acks outside wq to prevent RTNL conflict. */
 		dev_consume_skb_any(skb);
+	} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
+		/* Acks from the NFP that the route is added - ignore. */
+		dev_consume_skb_any(skb);
 	} else {
-		skb_queue_tail(&priv->cmsg_skbs, skb);
-		schedule_work(&priv->cmsg_work);
+		nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type);
 	}
 }
@@ -108,6 +108,8 @@
 #define NFP_FL_IPV4_TUNNEL_TYPE		GENMASK(7, 4)
 #define NFP_FL_IPV4_PRE_TUN_INDEX	GENMASK(2, 0)
 
+#define NFP_FLOWER_WORKQ_MAX_SKBS	30000
+
 #define nfp_flower_cmsg_warn(app, fmt, args...) \
 	do { \
 		if (net_ratelimit()) \
@@ -519,7 +519,8 @@ static int nfp_flower_init(struct nfp_app *app)
 	app->priv = app_priv;
 	app_priv->app = app;
-	skb_queue_head_init(&app_priv->cmsg_skbs);
+	skb_queue_head_init(&app_priv->cmsg_skbs_high);
+	skb_queue_head_init(&app_priv->cmsg_skbs_low);
 	INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
 	init_waitqueue_head(&app_priv->reify_wait_queue);
@@ -549,7 +550,8 @@ static void nfp_flower_clean(struct nfp_app *app)
 {
 	struct nfp_flower_priv *app_priv = app->priv;
 
-	skb_queue_purge(&app_priv->cmsg_skbs);
+	skb_queue_purge(&app_priv->cmsg_skbs_high);
+	skb_queue_purge(&app_priv->cmsg_skbs_low);
 	flush_work(&app_priv->cmsg_work);
 
 	nfp_flower_metadata_cleanup(app);
@@ -107,7 +107,10 @@ struct nfp_mtu_conf {
  * @mask_table:		Hash table used to store masks
  * @flow_table:		Hash table used to store flower rules
  * @cmsg_work:		Workqueue for control messages processing
- * @cmsg_skbs:		List of skbs for control message processing
+ * @cmsg_skbs_high:	List of higher priority skbs for control message
+ *			processing
+ * @cmsg_skbs_low:	List of lower priority skbs for control message
+ *			processing
  * @nfp_mac_off_list:	List of MAC addresses to offload
  * @nfp_mac_index_list:	List of unique 8-bit indexes for non NFP netdevs
  * @nfp_ipv4_off_list:	List of IPv4 addresses to offload
@@ -136,7 +139,8 @@ struct nfp_flower_priv {
 	DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
 	DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS);
 	struct work_struct cmsg_work;
-	struct sk_buff_head cmsg_skbs;
+	struct sk_buff_head cmsg_skbs_high;
+	struct sk_buff_head cmsg_skbs_low;
 	struct list_head nfp_mac_off_list;
 	struct list_head nfp_mac_index_list;
 	struct list_head nfp_ipv4_off_list;
@@ -211,8 +211,11 @@ int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
 			break;
 
 		err = msleep_interruptible(timeout_ms);
-		if (err != 0)
+		if (err != 0) {
+			nfp_info(mutex->cpp,
+				 "interrupted waiting for NFP mutex\n");
 			return -ERESTARTSYS;
+		}
 
 		if (time_is_before_eq_jiffies(warn_at)) {
 			warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ;
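Aborting the mutex wait is safe because a caller that sees -ERESTARTSYS can simply unwind without having touched the FW. A hypothetical caller (illustrative only; it assumes the driver-internal nfp_cpp.h declarations of nfp_cpp_mutex_lock()/nfp_cpp_mutex_unlock()) might look like:

/* Hypothetical caller of nfp_cpp_mutex_lock(); illustrative only. */
static int example_locked_op(struct nfp_cpp_mutex *mutex)
{
        int err;

        err = nfp_cpp_mutex_lock(mutex);
        if (err)
                return err;     /* -ERESTARTSYS if a signal interrupted the wait */

        /* ... access the resources the NFP mutex protects ... */

        nfp_cpp_mutex_unlock(mutex);
        return 0;
}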
@@ -281,8 +281,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr,
 		if ((*reg & mask) == val)
 			return 0;
 
-		if (msleep_interruptible(25))
-			return -ERESTARTSYS;
+		msleep(25);
 
 		if (time_after(start_time, wait_until))
 			return -ETIMEDOUT;