Commit 7fade107 authored by Pieter Jansen van Vuuren, committed by David S. Miller

nfp: flower: use stats array instead of storing stats per flow

Make use of a stats array instead of storing stats per flow, which
would require a hash lookup at critical times.
Signed-off-by: Pieter Jansen van Vuuren <pieter.jansenvanvuuren@netronome.com>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c01d0efa
...@@ -139,6 +139,8 @@ struct nfp_fl_lag { ...@@ -139,6 +139,8 @@ struct nfp_fl_lag {
* @mask_ids: List of free mask ids * @mask_ids: List of free mask ids
* @mask_table: Hash table used to store masks * @mask_table: Hash table used to store masks
* @flow_table: Hash table used to store flower rules * @flow_table: Hash table used to store flower rules
* @stats: Stored stats updates for flower rules
* @stats_lock: Lock for flower rule stats updates
* @cmsg_work: Workqueue for control messages processing * @cmsg_work: Workqueue for control messages processing
* @cmsg_skbs_high: List of higher priority skbs for control message * @cmsg_skbs_high: List of higher priority skbs for control message
* processing * processing
...@@ -172,6 +174,8 @@ struct nfp_flower_priv { ...@@ -172,6 +174,8 @@ struct nfp_flower_priv {
struct nfp_fl_mask_id mask_ids; struct nfp_fl_mask_id mask_ids;
DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS); DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
struct rhashtable flow_table; struct rhashtable flow_table;
struct nfp_fl_stats *stats;
spinlock_t stats_lock; /* lock stats */
struct work_struct cmsg_work; struct work_struct cmsg_work;
struct sk_buff_head cmsg_skbs_high; struct sk_buff_head cmsg_skbs_high;
struct sk_buff_head cmsg_skbs_low; struct sk_buff_head cmsg_skbs_low;
...@@ -229,8 +233,6 @@ struct nfp_fl_payload { ...@@ -229,8 +233,6 @@ struct nfp_fl_payload {
unsigned long tc_flower_cookie; unsigned long tc_flower_cookie;
struct rhash_head fl_node; struct rhash_head fl_node;
struct rcu_head rcu; struct rcu_head rcu;
spinlock_t lock; /* lock stats */
struct nfp_fl_stats stats;
__be32 nfp_tun_ipv4_addr; __be32 nfp_tun_ipv4_addr;
struct net_device *ingress_dev; struct net_device *ingress_dev;
char *unmasked_data; char *unmasked_data;
......
...@@ -119,42 +119,26 @@ nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie, ...@@ -119,42 +119,26 @@ nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
nfp_flower_table_params); nfp_flower_table_params);
} }
/* Fold one firmware stats frame into the matching flower rule's stats.
 *
 * The owning flow is located in the flow table by its cookie plus the
 * stats context id carried in the frame; frames for unknown flows are
 * silently dropped. Multi-byte fields arrive big-endian from firmware.
 */
static void
nfp_flower_update_stats(struct nfp_app *app, struct nfp_fl_stats_frame *stats)
{
struct nfp_fl_payload *nfp_flow;
unsigned long flower_cookie;
flower_cookie = be64_to_cpu(stats->stats_cookie);
/* RCU protects the table lookup; the per-flow spinlock guards the
 * stats fields themselves against concurrent readers.
 */
rcu_read_lock();
nfp_flow = nfp_flower_search_fl_table(app, flower_cookie, NULL,
stats->stats_con_id);
if (!nfp_flow)
goto exit_rcu_unlock;
spin_lock(&nfp_flow->lock);
nfp_flow->stats.pkts += be32_to_cpu(stats->pkt_count);
nfp_flow->stats.bytes += be64_to_cpu(stats->byte_count);
/* Record when stats last changed (presumably read back as the rule's
 * last-used time — confirm against the stats consumer).
 */
nfp_flow->stats.used = jiffies;
spin_unlock(&nfp_flow->lock);
exit_rcu_unlock:
rcu_read_unlock();
}
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb) void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
{ {
unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb); unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
struct nfp_fl_stats_frame *stats_frame; struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_stats_frame *stats;
unsigned char *msg; unsigned char *msg;
u32 ctx_id;
int i; int i;
msg = nfp_flower_cmsg_get_data(skb); msg = nfp_flower_cmsg_get_data(skb);
stats_frame = (struct nfp_fl_stats_frame *)msg; spin_lock(&priv->stats_lock);
for (i = 0; i < msg_len / sizeof(*stats_frame); i++) for (i = 0; i < msg_len / sizeof(*stats); i++) {
nfp_flower_update_stats(app, stats_frame + i); stats = (struct nfp_fl_stats_frame *)msg + i;
ctx_id = be32_to_cpu(stats->stats_con_id);
priv->stats[ctx_id].pkts += be32_to_cpu(stats->pkt_count);
priv->stats[ctx_id].bytes += be64_to_cpu(stats->byte_count);
priv->stats[ctx_id].used = jiffies;
}
spin_unlock(&priv->stats_lock);
} }
static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id) static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
...@@ -348,9 +332,9 @@ int nfp_compile_flow_metadata(struct nfp_app *app, ...@@ -348,9 +332,9 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
/* Update flow payload with mask ids. */ /* Update flow payload with mask ids. */
nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id; nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
nfp_flow->stats.pkts = 0; priv->stats[stats_cxt].pkts = 0;
nfp_flow->stats.bytes = 0; priv->stats[stats_cxt].bytes = 0;
nfp_flow->stats.used = jiffies; priv->stats[stats_cxt].used = jiffies;
check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev, check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev,
NFP_FL_STATS_CTX_DONT_CARE); NFP_FL_STATS_CTX_DONT_CARE);
...@@ -469,8 +453,17 @@ int nfp_flower_metadata_init(struct nfp_app *app) ...@@ -469,8 +453,17 @@ int nfp_flower_metadata_init(struct nfp_app *app)
priv->stats_ids.init_unalloc = NFP_FL_REPEATED_HASH_MAX; priv->stats_ids.init_unalloc = NFP_FL_REPEATED_HASH_MAX;
priv->stats = kvmalloc_array(NFP_FL_STATS_ENTRY_RS,
sizeof(struct nfp_fl_stats), GFP_KERNEL);
if (!priv->stats)
goto err_free_ring_buf;
spin_lock_init(&priv->stats_lock);
return 0; return 0;
err_free_ring_buf:
vfree(priv->stats_ids.free_list.buf);
err_free_last_used: err_free_last_used:
kfree(priv->mask_ids.last_used); kfree(priv->mask_ids.last_used);
err_free_mask_id: err_free_mask_id:
...@@ -489,6 +482,7 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app) ...@@ -489,6 +482,7 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
rhashtable_free_and_destroy(&priv->flow_table, rhashtable_free_and_destroy(&priv->flow_table,
nfp_check_rhashtable_empty, NULL); nfp_check_rhashtable_empty, NULL);
kvfree(priv->stats);
kfree(priv->mask_ids.mask_id_free_list.buf); kfree(priv->mask_ids.mask_id_free_list.buf);
kfree(priv->mask_ids.last_used); kfree(priv->mask_ids.last_used);
vfree(priv->stats_ids.free_list.buf); vfree(priv->stats_ids.free_list.buf);
......
...@@ -428,8 +428,6 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress) ...@@ -428,8 +428,6 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
flow_pay->nfp_tun_ipv4_addr = 0; flow_pay->nfp_tun_ipv4_addr = 0;
flow_pay->meta.flags = 0; flow_pay->meta.flags = 0;
spin_lock_init(&flow_pay->lock);
flow_pay->ingress_offload = !egress; flow_pay->ingress_offload = !egress;
return flow_pay; return flow_pay;
...@@ -604,8 +602,10 @@ static int ...@@ -604,8 +602,10 @@ static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_flower_offload *flow, bool egress) struct tc_cls_flower_offload *flow, bool egress)
{ {
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *nfp_flow; struct nfp_fl_payload *nfp_flow;
struct net_device *ingr_dev; struct net_device *ingr_dev;
u32 ctx_id;
ingr_dev = egress ? NULL : netdev; ingr_dev = egress ? NULL : netdev;
nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
...@@ -616,13 +616,16 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, ...@@ -616,13 +616,16 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
if (nfp_flow->ingress_offload && egress) if (nfp_flow->ingress_offload && egress)
return 0; return 0;
spin_lock_bh(&nfp_flow->lock); ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes,
nfp_flow->stats.pkts, nfp_flow->stats.used); spin_lock_bh(&priv->stats_lock);
tcf_exts_stats_update(flow->exts, priv->stats[ctx_id].bytes,
priv->stats[ctx_id].pkts,
priv->stats[ctx_id].used);
nfp_flow->stats.pkts = 0; priv->stats[ctx_id].pkts = 0;
nfp_flow->stats.bytes = 0; priv->stats[ctx_id].bytes = 0;
spin_unlock_bh(&nfp_flow->lock); spin_unlock_bh(&priv->stats_lock);
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment