Commit abfcdc1d authored by Pieter Jansen van Vuuren, committed by David S. Miller

nfp: add a stats handler for flower offloads

Previously there was no way of updating flow rule stats after they
had been offloaded to hardware. This is solved by keeping track of
stats received from hardware and providing them to the TC handler
on request.
Signed-off-by: Pieter Jansen van Vuuren <pieter.jansenvanvuuren@netronome.com>
Signed-off-by: Simon Horman <simon.horman@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 43f84b72
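In outline: the firmware pushes per-flow stats frames to the driver, the driver accumulates them per flow under a lock, and the TC get-stats callback reports the accumulated deltas and clears the counters. A rough userspace sketch of that accumulate-then-read-and-clear pattern (illustrative only; the names below are hypothetical and not part of the patch):

/* Minimal sketch of the accumulate-then-read-and-clear stats pattern. */
#include <stdint.h>
#include <stdio.h>

struct flow_stats {
	uint64_t pkts;
	uint64_t bytes;
};

/* Called for every stats frame received from hardware. */
static void flow_stats_update(struct flow_stats *s, uint32_t pkts, uint64_t bytes)
{
	s->pkts += pkts;
	s->bytes += bytes;
}

/* Called when TC requests stats: report the deltas accumulated since the
 * previous request, then zero the counters.
 */
static void flow_stats_report(struct flow_stats *s, uint64_t *pkts, uint64_t *bytes)
{
	*pkts = s->pkts;
	*bytes = s->bytes;
	s->pkts = 0;
	s->bytes = 0;
}

int main(void)
{
	struct flow_stats s = { 0, 0 };
	uint64_t pkts, bytes;

	flow_stats_update(&s, 10, 1500);	/* two stats frames from hardware */
	flow_stats_update(&s, 5, 700);
	flow_stats_report(&s, &pkts, &bytes);
	printf("%llu packets / %llu bytes since last dump\n",
	       (unsigned long long)pkts, (unsigned long long)bytes);
	return 0;
}

In the patch itself the accumulation happens in nfp_flower_update_stats() under nfp_flow->lock, and the read-and-clear in nfp_flower_get_stats() via tcf_exts_stats_update().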
@@ -52,11 +52,6 @@ nfp_flower_cmsg_get_hdr(struct sk_buff *skb)
 	return (struct nfp_flower_cmsg_hdr *)skb->data;
 }
 
-static void *nfp_flower_cmsg_get_data(struct sk_buff *skb)
-{
-	return (unsigned char *)skb->data + NFP_FLOWER_CMSG_HLEN;
-}
-
 static struct sk_buff *
 nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
 		      enum nfp_flower_cmsg_type_port type)
...
@@ -300,6 +300,11 @@ nfp_flower_cmsg_pcie_port(u8 nfp_pcie, enum nfp_flower_cmsg_port_vnic_type type,
 				      NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT);
 }
 
+static inline void *nfp_flower_cmsg_get_data(struct sk_buff *skb)
+{
+	return (unsigned char *)skb->data + NFP_FLOWER_CMSG_HLEN;
+}
+
 int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok);
 void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb);
...
@@ -43,6 +43,9 @@ struct tc_to_netdev;
 struct net_device;
 struct nfp_app;
 
+#define NFP_FL_STATS_ENTRY_RS		BIT(20)
+#define NFP_FL_STATS_ELEM_RS		4
+#define NFP_FL_REPEATED_HASH_MAX	BIT(17)
 #define NFP_FLOWER_HASH_BITS		19
 #define NFP_FLOWER_MASK_ENTRY_RS	256
 #define NFP_FLOWER_MASK_ELEMENT_RS	1
@@ -60,11 +63,18 @@ struct nfp_fl_mask_id {
 	u8 init_unallocated;
 };
 
+struct nfp_fl_stats_id {
+	struct circ_buf free_list;
+	u32 init_unalloc;
+	u8 repeated_em_count;
+};
+
 /**
  * struct nfp_flower_priv - Flower APP per-vNIC priv data
  * @nn:		Pointer to vNIC
  * @mask_id_seed:	Seed used for mask hash table
  * @flower_version:	HW version of flower
+ * @stats_ids:		List of free stats ids
  * @mask_ids:		List of free mask ids
  * @mask_table:		Hash table used to store masks
  * @flow_table:		Hash table used to store flower rules
@@ -73,6 +83,7 @@ struct nfp_flower_priv {
 	struct nfp_net *nn;
 	u32 mask_id_seed;
 	u64 flower_version;
+	struct nfp_fl_stats_id stats_ids;
 	struct nfp_fl_mask_id mask_ids;
 	DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
 	DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS);
@@ -95,16 +106,31 @@ struct nfp_fl_rule_metadata {
 	__be32 shortcut;
 };
 
+struct nfp_fl_stats {
+	u64 pkts;
+	u64 bytes;
+	u64 used;
+};
+
 struct nfp_fl_payload {
 	struct nfp_fl_rule_metadata meta;
 	unsigned long tc_flower_cookie;
 	struct hlist_node link;
 	struct rcu_head rcu;
+	spinlock_t lock; /* lock stats */
+	struct nfp_fl_stats stats;
 	char *unmasked_data;
 	char *mask_data;
 	char *action_data;
 };
 
+struct nfp_fl_stats_frame {
+	__be32 stats_con_id;
+	__be32 pkt_count;
+	__be64 byte_count;
+	__be64 stats_cookie;
+};
+
 int nfp_flower_metadata_init(struct nfp_app *app);
 void nfp_flower_metadata_cleanup(struct nfp_app *app);
@@ -128,4 +154,6 @@ nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
 struct nfp_fl_payload *
 nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
 
+void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb);
+
 #endif
@@ -48,6 +48,55 @@ struct nfp_mask_id_table {
 	u8 mask_id;
 };
 
+static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
+{
+	struct nfp_flower_priv *priv = app->priv;
+	struct circ_buf *ring;
+
+	ring = &priv->stats_ids.free_list;
+	/* Check if buffer is full. */
+	if (!CIRC_SPACE(ring->head, ring->tail, NFP_FL_STATS_ENTRY_RS *
+			NFP_FL_STATS_ELEM_RS -
+			NFP_FL_STATS_ELEM_RS + 1))
+		return -ENOBUFS;
+
+	memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS);
+	ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) %
+		     (NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
+
+	return 0;
+}
+
+static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
+{
+	struct nfp_flower_priv *priv = app->priv;
+	u32 freed_stats_id, temp_stats_id;
+	struct circ_buf *ring;
+
+	ring = &priv->stats_ids.free_list;
+	freed_stats_id = NFP_FL_STATS_ENTRY_RS;
+	/* Check for unallocated entries first. */
+	if (priv->stats_ids.init_unalloc > 0) {
+		*stats_context_id = priv->stats_ids.init_unalloc - 1;
+		priv->stats_ids.init_unalloc--;
+		return 0;
+	}
+
+	/* Check if buffer is empty. */
+	if (ring->head == ring->tail) {
+		*stats_context_id = freed_stats_id;
+		return -ENOENT;
+	}
+
+	memcpy(&temp_stats_id, &ring->buf[ring->tail], NFP_FL_STATS_ELEM_RS);
+	*stats_context_id = temp_stats_id;
+	memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS);
+	ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) %
+		     (NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
+
+	return 0;
+}
+
 /* Must be called with either RTNL or rcu_read_lock */
 struct nfp_fl_payload *
 nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie)
@@ -63,6 +112,46 @@ nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie)
 	return NULL;
 }
 
+static void
+nfp_flower_update_stats(struct nfp_app *app, struct nfp_fl_stats_frame *stats)
+{
+	struct nfp_fl_payload *nfp_flow;
+	unsigned long flower_cookie;
+
+	flower_cookie = be64_to_cpu(stats->stats_cookie);
+
+	rcu_read_lock();
+	nfp_flow = nfp_flower_search_fl_table(app, flower_cookie);
+	if (!nfp_flow)
+		goto exit_rcu_unlock;
+
+	if (nfp_flow->meta.host_ctx_id != stats->stats_con_id)
+		goto exit_rcu_unlock;
+
+	spin_lock(&nfp_flow->lock);
+	nfp_flow->stats.pkts += be32_to_cpu(stats->pkt_count);
+	nfp_flow->stats.bytes += be64_to_cpu(stats->byte_count);
+	nfp_flow->stats.used = jiffies;
+	spin_unlock(&nfp_flow->lock);
+
+exit_rcu_unlock:
+	rcu_read_unlock();
+}
+
+void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
+{
+	unsigned int msg_len = skb->len - NFP_FLOWER_CMSG_HLEN;
+	struct nfp_fl_stats_frame *stats_frame;
+	unsigned char *msg;
+	int i;
+
+	msg = nfp_flower_cmsg_get_data(skb);
+	stats_frame = (struct nfp_fl_stats_frame *)msg;
+
+	for (i = 0; i < msg_len / sizeof(*stats_frame); i++)
+		nfp_flower_update_stats(app, stats_frame + i);
+}
+
 static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
 {
 	struct nfp_flower_priv *priv = app->priv;
@@ -230,21 +319,37 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
 	struct nfp_flower_priv *priv = app->priv;
 	struct nfp_fl_payload *check_entry;
 	u8 new_mask_id;
+	u32 stats_cxt;
+
+	if (nfp_get_stats_entry(app, &stats_cxt))
+		return -ENOENT;
+
+	nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
+	nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
 
 	new_mask_id = 0;
 	if (!nfp_check_mask_add(app, nfp_flow->mask_data,
 				nfp_flow->meta.mask_len,
-				&nfp_flow->meta.flags, &new_mask_id))
+				&nfp_flow->meta.flags, &new_mask_id)) {
+		if (nfp_release_stats_entry(app, stats_cxt))
+			return -EINVAL;
 		return -ENOENT;
+	}
 
 	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
 	priv->flower_version++;
 
 	/* Update flow payload with mask ids. */
 	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
+	nfp_flow->stats.pkts = 0;
+	nfp_flow->stats.bytes = 0;
+	nfp_flow->stats.used = jiffies;
 
 	check_entry = nfp_flower_search_fl_table(app, flow->cookie);
 	if (check_entry) {
+		if (nfp_release_stats_entry(app, stats_cxt))
+			return -EINVAL;
+
 		if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
 					   nfp_flow->meta.mask_len,
 					   NULL, &new_mask_id))
@@ -261,6 +366,7 @@ int nfp_modify_flow_metadata(struct nfp_app *app,
 {
 	struct nfp_flower_priv *priv = app->priv;
 	u8 new_mask_id = 0;
+	u32 temp_ctx_id;
 
 	nfp_check_mask_remove(app, nfp_flow->mask_data,
 			      nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
@@ -272,7 +378,10 @@ int nfp_modify_flow_metadata(struct nfp_app *app,
 	/* Update flow payload with mask ids. */
 	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
 
-	return 0;
+	/* Release the stats ctx id. */
+	temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
+
+	return nfp_release_stats_entry(app, temp_ctx_id);
 }
 
 int nfp_flower_metadata_init(struct nfp_app *app)
@@ -299,8 +408,18 @@ int nfp_flower_metadata_init(struct nfp_app *app)
 	if (!priv->mask_ids.last_used)
 		goto err_free_mask_id;
 
+	/* Init ring buffer and unallocated stats_ids. */
+	priv->stats_ids.free_list.buf =
+		vmalloc(NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
+	if (!priv->stats_ids.free_list.buf)
+		goto err_free_last_used;
+
+	priv->stats_ids.init_unalloc = NFP_FL_REPEATED_HASH_MAX;
+
 	return 0;
 
+err_free_last_used:
+	kfree(priv->stats_ids.free_list.buf);
 err_free_mask_id:
 	kfree(priv->mask_ids.mask_id_free_list.buf);
 	return -ENOMEM;
@@ -315,4 +434,5 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
 	kfree(priv->mask_ids.mask_id_free_list.buf);
 	kfree(priv->mask_ids.last_used);
+	vfree(priv->stats_ids.free_list.buf);
 }
@@ -170,6 +170,7 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
 		goto err_free_mask;
 
 	flow_pay->meta.flags = 0;
+	spin_lock_init(&flow_pay->lock);
 
 	return flow_pay;
@@ -291,7 +292,21 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
 static int
 nfp_flower_get_stats(struct nfp_app *app, struct tc_cls_flower_offload *flow)
 {
-	return -EOPNOTSUPP;
+	struct nfp_fl_payload *nfp_flow;
+
+	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
+	if (!nfp_flow)
+		return -EINVAL;
+
+	spin_lock_bh(&nfp_flow->lock);
+	tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes,
+			      nfp_flow->stats.pkts, nfp_flow->stats.used);
+
+	nfp_flow->stats.pkts = 0;
+	nfp_flow->stats.bytes = 0;
+	spin_unlock_bh(&nfp_flow->lock);
+
+	return 0;
 }
 
 static int
...