Commit 12ecf615 authored by Pieter Jansen van Vuuren, committed by David S. Miller

nfp: flower: use host context count provided by firmware

Read the host context count symbol provided by firmware and use it to
determine the number of allocated stats ids. Previously it was not
possible to offload more than 2^17 filters, even if the firmware was
able to do so.
Signed-off-by: Pieter Jansen van Vuuren <pieter.jansenvanvuuren@netronome.com>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7fade107
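
For orientation, here is a minimal userspace sketch of the logic the patch adds to nfp_flower_init(): read the host context count, fall back to the legacy BIT(17) value when the symbol read fails, and round the result up to a power of two to size the stats ring. read_host_ctx_count() and round_up_pow2() are hypothetical stand-ins for nfp_rtsym_read_le() and the kernel's roundup_pow_of_two(); the real code is in the diff below.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1UL << (n))

/* Simplified stand-in for the kernel's roundup_pow_of_two();
 * assumes x >= 1. */
static uint64_t round_up_pow2(uint64_t x)
{
	uint64_t p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

/* Hypothetical stand-in for nfp_rtsym_read_le(): pretend the
 * CONFIG_FC_HOST_CTX_COUNT symbol is missing so the error path runs. */
static uint64_t read_host_ctx_count(int *err)
{
	*err = -ENOENT;
	return 0;
}

int main(void)
{
	uint64_t ctx_count;
	int err;

	ctx_count = read_host_ctx_count(&err);
	if (err) {
		fprintf(stderr, "unsupported host context count: %d\n", err);
		ctx_count = BIT(17);	/* legacy default: 131072 stats ids */
	}

	/* Mirrors app_priv->stats_ring_size in the patch below. */
	printf("stats_ring_size = %llu\n",
	       (unsigned long long)round_up_pow2(ctx_count));
	return 0;
}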
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -518,8 +518,8 @@ static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn)
 static int nfp_flower_init(struct nfp_app *app)
 {
 	const struct nfp_pf *pf = app->pf;
+	u64 version, features, ctx_count;
 	struct nfp_flower_priv *app_priv;
-	u64 version, features;
 	int err;
 
 	if (!pf->eth_tbl) {
@@ -543,6 +543,16 @@ static int nfp_flower_init(struct nfp_app *app)
 		return err;
 	}
 
+	ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT",
+				      &err);
+	if (err) {
+		nfp_warn(app->cpp,
+			 "FlowerNIC: unsupported host context count: %d\n",
+			 err);
+		err = 0;
+		ctx_count = BIT(17);
+	}
+
 	/* We need to ensure hardware has enough flower capabilities. */
 	if (version != NFP_FLOWER_ALLOWED_VER) {
 		nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n");
@@ -553,6 +563,7 @@ static int nfp_flower_init(struct nfp_app *app)
 	if (!app_priv)
 		return -ENOMEM;
 
+	app_priv->stats_ring_size = roundup_pow_of_two(ctx_count);
 	app->priv = app_priv;
 	app_priv->app = app;
 	skb_queue_head_init(&app_priv->cmsg_skbs_high);
@@ -563,7 +574,7 @@ static int nfp_flower_init(struct nfp_app *app)
 	init_waitqueue_head(&app_priv->mtu_conf.wait_q);
 	spin_lock_init(&app_priv->mtu_conf.lock);
 
-	err = nfp_flower_metadata_init(app);
+	err = nfp_flower_metadata_init(app, ctx_count);
 	if (err)
 		goto err_free_app_priv;
 
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -51,9 +51,8 @@ struct net_device;
 struct nfp_app;
 
 #define NFP_FL_STATS_CTX_DONT_CARE	cpu_to_be32(0xffffffff)
-#define NFP_FL_STATS_ENTRY_RS		BIT(20)
-#define NFP_FL_STATS_ELEM_RS		4
-#define NFP_FL_REPEATED_HASH_MAX	BIT(17)
+#define NFP_FL_STATS_ELEM_RS		FIELD_SIZEOF(struct nfp_fl_stats_id, \
+						     init_unalloc)
 #define NFP_FLOWER_MASK_ENTRY_RS	256
 #define NFP_FLOWER_MASK_ELEMENT_RS	1
 #define NFP_FLOWER_MASK_HASH_BITS	10
@@ -138,6 +137,7 @@ struct nfp_fl_lag {
  * @stats_ids:		List of free stats ids
  * @mask_ids:		List of free mask ids
  * @mask_table:		Hash table used to store masks
+ * @stats_ring_size:	Maximum number of allowed stats ids
  * @flow_table:		Hash table used to store flower rules
  * @stats:		Stored stats updates for flower rules
  * @stats_lock:		Lock for flower rule stats updates
@@ -173,6 +173,7 @@ struct nfp_flower_priv {
 	struct nfp_fl_stats_id stats_ids;
 	struct nfp_fl_mask_id mask_ids;
 	DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
+	u32 stats_ring_size;
 	struct rhashtable flow_table;
 	struct nfp_fl_stats *stats;
 	spinlock_t stats_lock; /* lock stats */
@@ -250,7 +251,7 @@ struct nfp_fl_stats_frame {
 	__be64 stats_cookie;
 };
 
-int nfp_flower_metadata_init(struct nfp_app *app);
+int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count);
 void nfp_flower_metadata_cleanup(struct nfp_app *app);
 
 int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -61,14 +61,14 @@ static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
 
 	ring = &priv->stats_ids.free_list;
 	/* Check if buffer is full. */
-	if (!CIRC_SPACE(ring->head, ring->tail, NFP_FL_STATS_ENTRY_RS *
-			NFP_FL_STATS_ELEM_RS -
+	if (!CIRC_SPACE(ring->head, ring->tail,
+			priv->stats_ring_size * NFP_FL_STATS_ELEM_RS -
 			NFP_FL_STATS_ELEM_RS + 1))
 		return -ENOBUFS;
 
 	memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS);
 	ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) %
-		     (NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
+		     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);
 
 	return 0;
 }
@@ -80,7 +80,7 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
 	struct circ_buf *ring;
 
 	ring = &priv->stats_ids.free_list;
-	freed_stats_id = NFP_FL_STATS_ENTRY_RS;
+	freed_stats_id = priv->stats_ring_size;
 	/* Check for unallocated entries first. */
 	if (priv->stats_ids.init_unalloc > 0) {
 		*stats_context_id = priv->stats_ids.init_unalloc - 1;
@@ -98,7 +98,7 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
 	*stats_context_id = temp_stats_id;
 	memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS);
 	ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) %
-		     (NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
+		     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);
 
 	return 0;
 }
@@ -415,7 +415,7 @@ const struct rhashtable_params nfp_flower_table_params = {
 	.automatic_shrinking	= true,
 };
 
-int nfp_flower_metadata_init(struct nfp_app *app)
+int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count)
 {
 	struct nfp_flower_priv *priv = app->priv;
 	int err;
@@ -447,13 +447,13 @@ int nfp_flower_metadata_init(struct nfp_app *app)
 	/* Init ring buffer and unallocated stats_ids. */
 	priv->stats_ids.free_list.buf =
 		vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
-				   NFP_FL_STATS_ENTRY_RS));
+				   priv->stats_ring_size));
 	if (!priv->stats_ids.free_list.buf)
 		goto err_free_last_used;
 
-	priv->stats_ids.init_unalloc = NFP_FL_REPEATED_HASH_MAX;
+	priv->stats_ids.init_unalloc = host_ctx_count;
 
-	priv->stats = kvmalloc_array(NFP_FL_STATS_ENTRY_RS,
+	priv->stats = kvmalloc_array(priv->stats_ring_size,
 				     sizeof(struct nfp_fl_stats), GFP_KERNEL);
 	if (!priv->stats)
 		goto err_free_ring_buf;
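
A note on the modulo arithmetic in metadata.c above: the free list is a byte ring holding 4-byte stats ids (NFP_FL_STATS_ELEM_RS resolves to the size of the u32 init_unalloc field), so head and tail advance one element at a time modulo stats_ring_size * NFP_FL_STATS_ELEM_RS. The toy model below demonstrates that wrap-around with a plain full/empty check in place of the driver's CIRC_SPACE(); RING_IDS is a hypothetical stand-in for stats_ring_size.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ELEM		4		/* bytes per stats id (u32) */
#define RING_IDS	8		/* hypothetical stats_ring_size */
#define RING_BYTES	(RING_IDS * ELEM)

static uint8_t buf[RING_BYTES];
static int head, tail;

static int push_id(uint32_t id)
{
	/* One element slot is kept empty to tell "full" from "empty". */
	if ((head + ELEM) % RING_BYTES == tail)
		return -1;		/* ring full */
	memcpy(&buf[head], &id, ELEM);
	head = (head + ELEM) % RING_BYTES;
	return 0;
}

static int pop_id(uint32_t *id)
{
	if (head == tail)
		return -1;		/* ring empty */
	memcpy(id, &buf[tail], ELEM);
	tail = (tail + ELEM) % RING_BYTES;
	return 0;
}

int main(void)
{
	uint32_t id;
	int i;

	for (i = 0; i < RING_IDS; i++)
		if (push_id(i))
			printf("full after %d ids\n", i);	/* one slot spare */
	while (!pop_id(&id))
		printf("freed id %u\n", (unsigned)id);
	return 0;
}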