Commit a0d163f4 authored by Jakub Kicinski's avatar Jakub Kicinski Committed by David S. Miller

nfp: add shared buffer configuration

Allow app FW to advertise its shared buffer pool information.
Use the per-PF mailbox to configure them from devlink.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0c693323
...@@ -30,6 +30,7 @@ nfp-objs := \ ...@@ -30,6 +30,7 @@ nfp-objs := \
nfp_net_sriov.o \ nfp_net_sriov.o \
nfp_netvf_main.o \ nfp_netvf_main.o \
nfp_port.o \ nfp_port.o \
nfp_shared_buf.o \
nic/main.o nic/main.o
ifeq ($(CONFIG_NFP_APP_FLOWER),y) ifeq ($(CONFIG_NFP_APP_FLOWER),y)
......
...@@ -51,9 +51,79 @@ ...@@ -51,9 +51,79 @@
* *
* @NFP_MBOX_NO_CMD: null command * @NFP_MBOX_NO_CMD: null command
* Used to indicate previous command has finished. * Used to indicate previous command has finished.
*
* @NFP_MBOX_POOL_GET: get shared buffer pool info/config
* Input - struct nfp_shared_buf_pool_id
* Output - struct nfp_shared_buf_pool_info_get
*
* @NFP_MBOX_POOL_SET: set shared buffer pool info/config
* Input - struct nfp_shared_buf_pool_info_set
* Output - None
*/ */
enum nfp_mbox_cmd { enum nfp_mbox_cmd {
NFP_MBOX_NO_CMD = 0x00, NFP_MBOX_NO_CMD = 0x00,
NFP_MBOX_POOL_GET = 0x01,
NFP_MBOX_POOL_SET = 0x02,
};
#define NFP_SHARED_BUF_COUNT_SYM_NAME "_abi_nfd_pf%u_sb_cnt"
#define NFP_SHARED_BUF_TABLE_SYM_NAME "_abi_nfd_pf%u_sb_tbl"
/**
 * struct nfp_shared_buf - NFP shared buffer description
 * @id: numerical user-visible id of the shared buffer
 * @size: size in bytes of the buffer
 * @ingress_pools_count: number of ingress pools
 * @egress_pools_count: number of egress pools
 * @ingress_tc_count: number of ingress traffic classes
 * @egress_tc_count: number of egress traffic classes
 * @pool_size_unit: pool size may be in credits, each credit is
 * @pool_size_unit bytes
 */
struct nfp_shared_buf {
__le32 id;
__le32 size;
__le16 ingress_pools_count;
__le16 egress_pools_count;
__le16 ingress_tc_count;
__le16 egress_tc_count;
__le32 pool_size_unit;
};
/**
 * struct nfp_shared_buf_pool_id - shared buffer pool identification
 * @shared_buf: shared buffer id
 * @pool: pool index
 */
struct nfp_shared_buf_pool_id {
	__le32 shared_buf;
	__le32 pool;
};

/**
 * struct nfp_shared_buf_pool_info_get - struct devlink_sb_pool_info mirror
 * @pool_type: one of enum devlink_sb_pool_type
 * @size: pool size in units of SB's @pool_size_unit
 * @threshold_type: one of enum devlink_sb_threshold_type
 */
struct nfp_shared_buf_pool_info_get {
	__le32 pool_type;
	__le32 size;
	__le32 threshold_type;
};

/**
 * struct nfp_shared_buf_pool_info_set - packed args of sb_pool_set
 * @id: pool identification info
 * @size: pool size in units of SB's @pool_size_unit
 * @threshold_type: one of enum devlink_sb_threshold_type
 */
struct nfp_shared_buf_pool_info_set {
	struct nfp_shared_buf_pool_id id;
	__le32 size;
	__le32 threshold_type;
};
#endif #endif
...@@ -149,6 +149,26 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index) ...@@ -149,6 +149,26 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index)
return ret; return ret;
} }
static int
nfp_devlink_sb_pool_get(struct devlink *devlink, unsigned int sb_index,
			u16 pool_index, struct devlink_sb_pool_info *pool_info)
{
	/* Thin devlink callback: delegate to the PF shared-buffer code. */
	return nfp_shared_buf_pool_get(devlink_priv(devlink), sb_index,
				       pool_index, pool_info);
}
static int
nfp_devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index,
			u16 pool_index,
			u32 size, enum devlink_sb_threshold_type threshold_type)
{
	/* Thin devlink callback: delegate to the PF shared-buffer code. */
	return nfp_shared_buf_pool_set(devlink_priv(devlink), sb_index,
				       pool_index, size, threshold_type);
}
static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{ {
struct nfp_pf *pf = devlink_priv(devlink); struct nfp_pf *pf = devlink_priv(devlink);
...@@ -159,6 +179,8 @@ static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) ...@@ -159,6 +179,8 @@ static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
const struct devlink_ops nfp_devlink_ops = { const struct devlink_ops nfp_devlink_ops = {
.port_split = nfp_devlink_port_split, .port_split = nfp_devlink_port_split,
.port_unsplit = nfp_devlink_port_unsplit, .port_unsplit = nfp_devlink_port_unsplit,
.sb_pool_get = nfp_devlink_sb_pool_get,
.sb_pool_set = nfp_devlink_sb_pool_set,
.eswitch_mode_get = nfp_devlink_eswitch_mode_get, .eswitch_mode_get = nfp_devlink_eswitch_mode_get,
}; };
......
...@@ -46,10 +46,10 @@ ...@@ -46,10 +46,10 @@
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <net/devlink.h>
struct dentry; struct dentry;
struct device; struct device;
struct devlink_ops;
struct pci_dev; struct pci_dev;
struct nfp_cpp; struct nfp_cpp;
...@@ -62,6 +62,7 @@ struct nfp_nsp_identify; ...@@ -62,6 +62,7 @@ struct nfp_nsp_identify;
struct nfp_port; struct nfp_port;
struct nfp_rtsym; struct nfp_rtsym;
struct nfp_rtsym_table; struct nfp_rtsym_table;
struct nfp_shared_buf;
/** /**
* struct nfp_dumpspec - NFP FW dump specification structure * struct nfp_dumpspec - NFP FW dump specification structure
...@@ -110,6 +111,8 @@ struct nfp_dumpspec { ...@@ -110,6 +111,8 @@ struct nfp_dumpspec {
* @ports: Linked list of port structures (struct nfp_port) * @ports: Linked list of port structures (struct nfp_port)
* @wq: Workqueue for running works which need to grab @lock * @wq: Workqueue for running works which need to grab @lock
* @port_refresh_work: Work entry for taking netdevs out * @port_refresh_work: Work entry for taking netdevs out
* @shared_bufs: Array of shared buffer structures if FW has any SBs
* @num_shared_bufs: Number of elements in @shared_bufs
* @lock: Protects all fields which may change after probe * @lock: Protects all fields which may change after probe
*/ */
struct nfp_pf { struct nfp_pf {
...@@ -162,6 +165,9 @@ struct nfp_pf { ...@@ -162,6 +165,9 @@ struct nfp_pf {
struct workqueue_struct *wq; struct workqueue_struct *wq;
struct work_struct port_refresh_work; struct work_struct port_refresh_work;
struct nfp_shared_buf *shared_bufs;
unsigned int num_shared_bufs;
struct mutex lock; struct mutex lock;
}; };
...@@ -200,4 +206,11 @@ s64 nfp_net_dump_calculate_size(struct nfp_pf *pf, struct nfp_dumpspec *spec, ...@@ -200,4 +206,11 @@ s64 nfp_net_dump_calculate_size(struct nfp_pf *pf, struct nfp_dumpspec *spec,
int nfp_net_dump_populate_buffer(struct nfp_pf *pf, struct nfp_dumpspec *spec, int nfp_net_dump_populate_buffer(struct nfp_pf *pf, struct nfp_dumpspec *spec,
struct ethtool_dump *dump_param, void *dest); struct ethtool_dump *dump_param, void *dest);
int nfp_shared_buf_register(struct nfp_pf *pf);
void nfp_shared_buf_unregister(struct nfp_pf *pf);
int nfp_shared_buf_pool_get(struct nfp_pf *pf, unsigned int sb, u16 pool_index,
struct devlink_sb_pool_info *pool_info);
int nfp_shared_buf_pool_set(struct nfp_pf *pf, unsigned int sb,
u16 pool_index, u32 size,
enum devlink_sb_threshold_type threshold_type);
#endif /* NFP_MAIN_H */ #endif /* NFP_MAIN_H */
...@@ -728,6 +728,10 @@ int nfp_net_pci_probe(struct nfp_pf *pf) ...@@ -728,6 +728,10 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err) if (err)
goto err_app_clean; goto err_app_clean;
err = nfp_shared_buf_register(pf);
if (err)
goto err_devlink_unreg;
mutex_lock(&pf->lock); mutex_lock(&pf->lock);
pf->ddir = nfp_net_debugfs_device_add(pf->pdev); pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
...@@ -761,6 +765,8 @@ int nfp_net_pci_probe(struct nfp_pf *pf) ...@@ -761,6 +765,8 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
err_clean_ddir: err_clean_ddir:
nfp_net_debugfs_dir_clean(&pf->ddir); nfp_net_debugfs_dir_clean(&pf->ddir);
mutex_unlock(&pf->lock); mutex_unlock(&pf->lock);
nfp_shared_buf_unregister(pf);
err_devlink_unreg:
cancel_work_sync(&pf->port_refresh_work); cancel_work_sync(&pf->port_refresh_work);
devlink_unregister(devlink); devlink_unregister(devlink);
err_app_clean: err_app_clean:
...@@ -788,6 +794,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf) ...@@ -788,6 +794,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
mutex_unlock(&pf->lock); mutex_unlock(&pf->lock);
nfp_shared_buf_unregister(pf);
devlink_unregister(priv_to_devlink(pf)); devlink_unregister(priv_to_devlink(pf));
nfp_net_pf_free_irqs(pf); nfp_net_pf_free_irqs(pf);
......
// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
/*
* Copyright (C) 2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <net/devlink.h>
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nffw.h"
#include "nfp_abi.h"
#include "nfp_app.h"
#include "nfp_main.h"
static u32 nfp_shared_buf_pool_unit(struct nfp_pf *pf, unsigned int sb)
{
__le32 sb_id = cpu_to_le32(sb);
unsigned int i;
for (i = 0; i < pf->num_shared_bufs; i++)
if (pf->shared_bufs[i].id == sb_id)
return le32_to_cpu(pf->shared_bufs[i].pool_size_unit);
WARN_ON_ONCE(1);
return 0;
}
/**
 * nfp_shared_buf_pool_get() - read pool config/info from app FW
 * @pf:		NFP PF handle
 * @sb:		shared buffer id
 * @pool_index:	index of the pool within the SB
 * @pool_info:	devlink info structure to fill in
 *
 * Queries the app FW over the per-PF mailbox and translates the reply
 * into struct devlink_sb_pool_info.  The FW reports size in SB units;
 * it is scaled by @pool_size_unit so devlink always sees bytes.
 *
 * Return: 0 on success, -EINVAL if the SB id is unknown, -EIO on a
 * short mailbox reply, or a negative error from nfp_mbox_cmd().
 */
int nfp_shared_buf_pool_get(struct nfp_pf *pf, unsigned int sb, u16 pool_index,
			    struct devlink_sb_pool_info *pool_info)
{
	struct nfp_shared_buf_pool_info_get get_data;
	struct nfp_shared_buf_pool_id id = {
		.shared_buf = cpu_to_le32(sb),
		.pool = cpu_to_le32(pool_index),
	};
	unsigned int unit_size;
	int n;

	unit_size = nfp_shared_buf_pool_unit(pf, sb);
	if (!unit_size)
		return -EINVAL;

	n = nfp_mbox_cmd(pf, NFP_MBOX_POOL_GET, &id, sizeof(id),
			 &get_data, sizeof(get_data));
	if (n < 0)
		return n;
	/* n is non-negative here; the cast makes the completeness check an
	 * unsigned comparison instead of a signed/unsigned mismatch.
	 */
	if ((size_t)n < sizeof(get_data))
		return -EIO;

	pool_info->pool_type = le32_to_cpu(get_data.pool_type);
	pool_info->threshold_type = le32_to_cpu(get_data.threshold_type);
	pool_info->size = le32_to_cpu(get_data.size) * unit_size;

	return 0;
}
/* Push a new pool configuration to the app FW via the PF mailbox.
 * @size is in bytes and must be a whole multiple of the SB's unit.
 * Returns 0 on success, -EINVAL on unknown SB or misaligned size,
 * or a negative error from nfp_mbox_cmd().
 */
int nfp_shared_buf_pool_set(struct nfp_pf *pf, unsigned int sb,
			    u16 pool_index, u32 size,
			    enum devlink_sb_threshold_type threshold_type)
{
	struct nfp_shared_buf_pool_info_set args;
	unsigned int unit;

	unit = nfp_shared_buf_pool_unit(pf, sb);
	/* FW takes the size in SB units - reject sizes which don't divide */
	if (!unit || size % unit)
		return -EINVAL;

	args.id.shared_buf = cpu_to_le32(sb);
	args.id.pool = cpu_to_le32(pool_index);
	args.size = cpu_to_le32(size / unit);
	args.threshold_type = cpu_to_le32(threshold_type);

	return nfp_mbox_cmd(pf, NFP_MBOX_POOL_SET, &args, sizeof(args),
			    NULL, 0);
}
/**
 * nfp_shared_buf_register() - discover FW shared buffers and register them
 * @pf:	NFP PF handle
 *
 * Reads the SB count symbol, maps the SB descriptor table from the FW,
 * copies each entry into a host-side array and registers every SB with
 * devlink.  On any failure all previously registered SBs are unregistered
 * and resources are released.
 *
 * Return: 0 on success (including FW with no mailbox / no SBs),
 * or a negative errno.
 */
int nfp_shared_buf_register(struct nfp_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	unsigned int i, num_entries, entry_sz;
	struct nfp_cpp_area *sb_desc_area;
	u8 __iomem *sb_desc;
	int n, err;

	/* SB configuration is driven over the mailbox; without one there
	 * is nothing to register.
	 */
	if (!pf->mbox)
		return 0;

	/* Optional symbol: n == 0 means FW advertises no shared buffers. */
	n = nfp_pf_rtsym_read_optional(pf, NFP_SHARED_BUF_COUNT_SYM_NAME, 0);
	if (n <= 0)
		return n;
	num_entries = n;

	sb_desc = nfp_pf_map_rtsym(pf, "sb_tbl", NFP_SHARED_BUF_TABLE_SYM_NAME,
				   num_entries * sizeof(pf->shared_bufs[0]),
				   &sb_desc_area);
	if (IS_ERR(sb_desc))
		return PTR_ERR(sb_desc);

	/* Per-entry stride comes from the mapped area, not sizeof(), so
	 * future FW may grow the entries without breaking this driver.
	 */
	entry_sz = nfp_cpp_area_size(sb_desc_area) / num_entries;

	pf->shared_bufs = kmalloc_array(num_entries, sizeof(pf->shared_bufs[0]),
					GFP_KERNEL);
	if (!pf->shared_bufs) {
		err = -ENOMEM;
		goto err_release_area;
	}

	for (i = 0; i < num_entries; i++) {
		struct nfp_shared_buf *sb = &pf->shared_bufs[i];

		/* Entries may be larger in future FW */
		memcpy_fromio(sb, sb_desc + i * entry_sz, sizeof(*sb));

		err = devlink_sb_register(devlink,
					  le32_to_cpu(sb->id),
					  le32_to_cpu(sb->size),
					  le16_to_cpu(sb->ingress_pools_count),
					  le16_to_cpu(sb->egress_pools_count),
					  le16_to_cpu(sb->ingress_tc_count),
					  le16_to_cpu(sb->egress_tc_count));
		if (err)
			goto err_unreg_prev;
	}
	pf->num_shared_bufs = num_entries;

	/* Table contents are cached in pf->shared_bufs; drop the mapping. */
	nfp_cpp_area_release_free(sb_desc_area);

	return 0;

err_unreg_prev:
	/* Roll back only the SBs registered before the failure (< i). */
	while (i--)
		devlink_sb_unregister(devlink,
				      le32_to_cpu(pf->shared_bufs[i].id));
	kfree(pf->shared_bufs);
err_release_area:
	nfp_cpp_area_release_free(sb_desc_area);
	return err;
}
void nfp_shared_buf_unregister(struct nfp_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
unsigned int i;
for (i = 0; i < pf->num_shared_bufs; i++)
devlink_sb_unregister(devlink,
le32_to_cpu(pf->shared_bufs[i].id));
kfree(pf->shared_bufs);
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment