Commit 2e1cc522 authored by John Hurley, committed by David S. Miller

nfp: flower: implement host cmsg handler for LAG

Adds the control message handler to synchronize offloaded group config
with that of the kernel. Such messages are sent from fw to driver and
feature the following 3 flags (a condensed sketch of the handling follows the list):

- Data: an attached cmsg could not be processed - store for retransmission
- Xon: FW can accept new messages - retransmit any stored cmsgs
- Sync: full sync requested so retransmit all kernel LAG group info
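
For orientation, the sketch below condenses the dispatch the new handler performs for these flags. It is only an illustration: struct lag_ctx and the lag_* helpers are hypothetical stand-ins for the driver's nfp_fl_lag state, bounded retransmission queue and locking, which the real nfp_flower_lag_unprocessed_msg() in the diff below implements in full.

#include <linux/bits.h>
#include <linux/skbuff.h>

/* Hypothetical context and helpers standing in for the driver's
 * nfp_fl_lag state, retransmission queue and group handling.
 */
struct lag_ctx;
bool lag_store_for_retrans(struct lag_ctx *ctx, struct sk_buff *skb);
void lag_send_stored(struct lag_ctx *ctx);
void lag_resync_all_groups(struct lag_ctx *ctx);

#define NFP_FL_LAG_DATA         BIT(3)  /* attached cmsg was not processed */
#define NFP_FL_LAG_XON          BIT(4)  /* fw can accept new messages again */
#define NFP_FL_LAG_SYNC         BIT(5)  /* fw requests a full config resync */

/* Returns true if the skb was queued for retransmission and therefore
 * must not be freed by the caller (cf. skb_stored in the rx path below).
 */
static bool lag_handle_cmsg_flags(struct lag_ctx *ctx, struct sk_buff *skb,
                                  unsigned long flags)
{
        bool store_skb = false;

        /* Data: fw could not process the attached cmsg - store it */
        if ((flags & NFP_FL_LAG_DATA) && lag_store_for_retrans(ctx, skb))
                store_skb = true;

        /* Xon: fw is ready again - retransmit anything stored */
        if (flags & NFP_FL_LAG_XON)
                lag_send_stored(ctx);

        /* Sync: fw requests a full resync of kernel LAG group config */
        if (flags & NFP_FL_LAG_SYNC)
                lag_resync_all_groups(ctx);

        return store_skb;
}

The checks are deliberately not exclusive: a message carrying both Data and Xon is stored first and then retransmitted along with the rest of the unprocessed queue.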
Signed-off-by: John Hurley <john.hurley@netronome.com>
Reviewed-by: Pieter Jansen van Vuuren <pieter.jansenvanvuuren@netronome.com>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent bb9a8d03
@@ -242,6 +242,7 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
 	struct nfp_flower_priv *app_priv = app->priv;
 	struct nfp_flower_cmsg_hdr *cmsg_hdr;
 	enum nfp_flower_cmsg_type_port type;
+	bool skb_stored = false;
 
 	cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
@@ -260,8 +261,10 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
 		nfp_tunnel_keep_alive(app, skb);
 		break;
 	case NFP_FLOWER_CMSG_TYPE_LAG_CONFIG:
-		if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
+		if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
+			skb_stored = nfp_flower_lag_unprocessed_msg(app, skb);
 			break;
+		}
 		/* fall through */
 	default:
 		nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
@@ -269,6 +272,7 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
 		goto out;
 	}
 
-	dev_consume_skb_any(skb);
+	if (!skb_stored)
+		dev_consume_skb_any(skb);
 	return;
 out:
@@ -36,6 +36,9 @@
 /* LAG group config flags. */
 #define NFP_FL_LAG_LAST			BIT(1)
 #define NFP_FL_LAG_FIRST		BIT(2)
+#define NFP_FL_LAG_DATA			BIT(3)
+#define NFP_FL_LAG_XON			BIT(4)
+#define NFP_FL_LAG_SYNC			BIT(5)
 #define NFP_FL_LAG_SWITCH		BIT(6)
 #define NFP_FL_LAG_RESET		BIT(7)
 
@@ -108,6 +111,8 @@ struct nfp_fl_lag_group {
 /* wait for more config */
 #define NFP_FL_LAG_DELAY		(msecs_to_jiffies(2))
 
+#define NFP_FL_LAG_RETRANS_LIMIT	100 /* max retrans cmsgs to store */
+
 static unsigned int nfp_fl_get_next_pkt_number(struct nfp_fl_lag *lag)
 {
 	lag->pkt_num++;
@@ -360,6 +365,92 @@ static void nfp_fl_lag_do_work(struct work_struct *work)
 	mutex_unlock(&lag->lock);
 }
 
+static int
+nfp_fl_lag_put_unprocessed(struct nfp_fl_lag *lag, struct sk_buff *skb)
+{
+	struct nfp_flower_cmsg_lag_config *cmsg_payload;
+
+	cmsg_payload = nfp_flower_cmsg_get_data(skb);
+	if (be32_to_cpu(cmsg_payload->group_id) >= NFP_FL_LAG_GROUP_MAX)
+		return -EINVAL;
+
+	/* Drop cmsg retrans if storage limit is exceeded to prevent
+	 * overloading. If the fw notices that expected messages have not been
+	 * received in a given time block, it will request a full resync.
+	 */
+	if (skb_queue_len(&lag->retrans_skbs) >= NFP_FL_LAG_RETRANS_LIMIT)
+		return -ENOSPC;
+
+	__skb_queue_tail(&lag->retrans_skbs, skb);
+
+	return 0;
+}
+
+static void nfp_fl_send_unprocessed(struct nfp_fl_lag *lag)
+{
+	struct nfp_flower_priv *priv;
+	struct sk_buff *skb;
+
+	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
+
+	while ((skb = __skb_dequeue(&lag->retrans_skbs)))
+		nfp_ctrl_tx(priv->app->ctrl, skb);
+}
+
+bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb)
+{
+	struct nfp_flower_cmsg_lag_config *cmsg_payload;
+	struct nfp_flower_priv *priv = app->priv;
+	struct nfp_fl_lag_group *group_entry;
+	unsigned long int flags;
+	bool store_skb = false;
+	int err;
+
+	cmsg_payload = nfp_flower_cmsg_get_data(skb);
+	flags = cmsg_payload->ctrl_flags;
+
+	/* Note the intentional fall through below. If DATA and XON are both
+	 * set, the message will be stored and sent again with the rest of the
+	 * unprocessed messages list.
+	 */
+
+	/* Store */
+	if (flags & NFP_FL_LAG_DATA)
+		if (!nfp_fl_lag_put_unprocessed(&priv->nfp_lag, skb))
+			store_skb = true;
+
+	/* Send stored */
+	if (flags & NFP_FL_LAG_XON)
+		nfp_fl_send_unprocessed(&priv->nfp_lag);
+
+	/* Resend all */
+	if (flags & NFP_FL_LAG_SYNC) {
+		/* To resend all config:
+		 * 1) Clear all unprocessed messages
+		 * 2) Mark all groups dirty
+		 * 3) Reset NFP group config
+		 * 4) Schedule a LAG config update
+		 */
+		__skb_queue_purge(&priv->nfp_lag.retrans_skbs);
+
+		mutex_lock(&priv->nfp_lag.lock);
+		list_for_each_entry(group_entry, &priv->nfp_lag.group_list,
+				    list)
+			group_entry->dirty = true;
+
+		err = nfp_flower_lag_reset(&priv->nfp_lag);
+		if (err)
+			nfp_flower_cmsg_warn(priv->app,
+					     "mem err in group reset msg\n");
+		mutex_unlock(&priv->nfp_lag.lock);
+
+		schedule_delayed_work(&priv->nfp_lag.work, 0);
+	}
+
+	return store_skb;
+}
+
 static void
 nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag,
 				 struct nfp_fl_lag_group *group)
@@ -565,6 +656,8 @@ void nfp_flower_lag_init(struct nfp_fl_lag *lag)
 	mutex_init(&lag->lock);
 	ida_init(&lag->ida_handle);
 
+	__skb_queue_head_init(&lag->retrans_skbs);
+
 	/* 0 is a reserved batch version so increment to first valid value. */
 	nfp_fl_increment_version(lag);
 
@@ -577,6 +670,8 @@ void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag)
 
 	cancel_delayed_work_sync(&lag->work);
 
+	__skb_queue_purge(&lag->retrans_skbs);
+
 	/* Remove all groups. */
 	mutex_lock(&lag->lock);
 	list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
@@ -109,6 +109,8 @@ struct nfp_mtu_conf {
  * @batch_ver:		Incremented for each batch of config packets
  * @global_inst:	Instance allocator for groups
  * @rst_cfg:		Marker to reset HW LAG config
+ * @retrans_skbs:	Cmsgs that could not be processed by HW and require
+ *			retransmission
  */
 struct nfp_fl_lag {
 	struct notifier_block lag_nb;
@@ -120,6 +122,7 @@ struct nfp_fl_lag {
 	unsigned int batch_ver;
 	u8 global_inst;
 	bool rst_cfg;
+	struct sk_buff_head retrans_skbs;
 };
 
 /**
@@ -280,5 +283,6 @@ int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
 void nfp_flower_lag_init(struct nfp_fl_lag *lag);
 void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag);
 int nfp_flower_lag_reset(struct nfp_fl_lag *lag);
+bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb);
 
 #endif