Commit 47de868b authored by David S. Miller

Merge branch 'nfp-abm-add-basic-support-for-advanced-buffering-NIC'

Jakub Kicinski says:

====================
nfp: abm: add basic support for advanced buffering NIC

This series lays the groundwork for the advanced buffer management NIC feature.
It makes the necessary NFP core changes, spawns representors and adds devlink
glue.  A following series will add the actual buffering configuration (patch
series size limit).

First three patches add support for configuring NFP buffer pools via a
mailbox.  The existing devlink APIs are used for the purpose.

Third patch allows us to perform small reads from the NFP memory.

The rest of the patch set adds eswitch mode change support and makes
the driver spawn appropriate representors.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e95a5f54 51c1df83
@@ -36,6 +36,19 @@ config NFP_APP_FLOWER
	  either directly, with Open vSwitch, or any other way. Note that
	  TC Flower offload requires specific FW to work.

config NFP_APP_ABM_NIC
	bool "NFP4000/NFP6000 Advanced buffer management NIC support"
	depends on NFP
	depends on NET_SWITCHDEV
	default y
	help
	  Enable driver support for Advanced buffer management NIC on NFP.
	  ABM NIC allows advanced configuration of queuing and scheduling
	  of packets, including ECN marking. Say Y, if you are planning to
	  use one of the NFP4000 and NFP6000 platforms which support this
	  functionality.
	  Code will be built into the nfp.ko driver.

config NFP_DEBUG
	bool "Debug support for Netronome(R) NFP4000/NFP6000 NIC drivers"
	depends on NFP
@@ -30,6 +30,7 @@ nfp-objs := \
	    nfp_net_sriov.o \
	    nfp_netvf_main.o \
	    nfp_port.o \
	    nfp_shared_buf.o \
	    nic/main.o

ifeq ($(CONFIG_NFP_APP_FLOWER),y)
@@ -52,4 +53,10 @@ nfp-objs += \
	    bpf/jit.o
endif

ifeq ($(CONFIG_NFP_APP_ABM_NIC),y)
nfp-objs += \
	    abm/ctrl.o \
	    abm/main.o
endif

nfp-$(CONFIG_NFP_DEBUG) += nfp_net_debugfs.o
// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
/*
* Copyright (C) 2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include "../nfpcore/nfp_cpp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "main.h"

void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink)
{
	alink->queue_base = nn_readl(alink->vnic, NFP_NET_CFG_START_RXQ);
	alink->queue_base /= alink->vnic->stride_rx;
}

int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm)
{
	struct nfp_pf *pf = abm->app->pf;
	unsigned int pf_id;

	pf_id = nfp_cppcore_pcie_unit(pf->cpp);
	abm->pf_id = pf_id;

	return 0;
}
This diff is collapsed.
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
/*
* Copyright (C) 2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __NFP_ABM_H__
#define __NFP_ABM_H__ 1

#include <net/devlink.h>

struct nfp_app;
struct nfp_net;

#define NFP_ABM_PORTID_TYPE	GENMASK(23, 16)
#define NFP_ABM_PORTID_ID	GENMASK(7, 0)
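
These two masks split a representor port ID into a type field (bits 23:16) and an index field (bits 7:0). Purely as an illustration of how such an ID can be packed and unpacked with the standard bitfield helpers; the function names and the port-type value are hypothetical, not taken from the collapsed abm/main.c:

#include <linux/bitfield.h>

/* Hypothetical helpers: compose and parse an ABM representor port ID. */
static u32 example_abm_portid(unsigned int type, unsigned int id)
{
	return FIELD_PREP(NFP_ABM_PORTID_TYPE, type) |
	       FIELD_PREP(NFP_ABM_PORTID_ID, id);
}

static unsigned int example_abm_portid_to_id(u32 port_id)
{
	/* Recover the low 8-bit index from a FW-reported port ID. */
	return FIELD_GET(NFP_ABM_PORTID_ID, port_id);
}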
/**
 * struct nfp_abm - ABM NIC app structure
 * @app:	back pointer to nfp_app
 * @pf_id:	ID of our PF link
 * @eswitch_mode:	devlink eswitch mode, advanced functions only visible
 *			in switchdev mode
 */
struct nfp_abm {
	struct nfp_app *app;
	unsigned int pf_id;
	enum devlink_eswitch_mode eswitch_mode;
};

/**
 * struct nfp_abm_link - port tuple of an ABM NIC
 * @abm:	back pointer to nfp_abm
 * @vnic:	data vNIC
 * @id:		id of the data vNIC
 * @queue_base:	id of base to host queue within PCIe (not QC idx)
 */
struct nfp_abm_link {
	struct nfp_abm *abm;
	struct nfp_net *vnic;
	unsigned int id;
	unsigned int queue_base;
};
void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink);
int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm);
#endif
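
The eswitch_mode field above is what the ABM app updates when devlink moves the PF into switchdev mode; the cover letter notes the advanced functions only become visible there. The actual callback lives in the collapsed abm/main.c diff, so the following is only a hedged sketch of the shape such an nfp_app_type hook takes, assuming the app's private pointer holds the struct nfp_abm; the real callback also takes care of the representors the cover letter mentions:

#include <linux/errno.h>
#include <net/devlink.h>

/* Sketch only, not the collapsed abm/main.c: record the requested mode. */
static int example_abm_eswitch_mode_set(struct nfp_app *app, u16 mode)
{
	struct nfp_abm *abm = app->priv;

	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		abm->eswitch_mode = mode;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}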
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
/*
* Copyright (C) 2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __NFP_ABI__
#define __NFP_ABI__ 1

#include <linux/types.h>

#define NFP_MBOX_SYM_NAME		"_abi_nfd_pf%u_mbox"
#define NFP_MBOX_SYM_MIN_SIZE		16 /* When no data needed */

#define NFP_MBOX_CMD		0x00
#define NFP_MBOX_RET		0x04
#define NFP_MBOX_DATA_LEN	0x08
#define NFP_MBOX_RESERVED	0x0c
#define NFP_MBOX_DATA		0x10
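
Laid out end to end, the offsets above give the mailbox symbol the following shape; the 16-byte NFP_MBOX_SYM_MIN_SIZE covers just this header, with any command payload following at NFP_MBOX_DATA. This is only a summary of the defines and of how nfp_mbox_cmd() later in this series uses them:

/*
 * PF mailbox layout (byte offsets):
 *
 *   0x00  NFP_MBOX_CMD       command code; 0 (NFP_MBOX_NO_CMD) when idle
 *   0x04  NFP_MBOX_RET       return value, written by the FW on completion
 *   0x08  NFP_MBOX_DATA_LEN  input length on submit, output length on return
 *   0x0c  NFP_MBOX_RESERVED  reserved, wiped together with DATA_LEN
 *   0x10  NFP_MBOX_DATA      start of command input/output data
 */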
/**
* enum nfp_mbox_cmd - PF mailbox commands
*
* @NFP_MBOX_NO_CMD: null command
* Used to indicate previous command has finished.
*
* @NFP_MBOX_POOL_GET: get shared buffer pool info/config
* Input - struct nfp_shared_buf_pool_id
* Output - struct nfp_shared_buf_pool_info_get
*
* @NFP_MBOX_POOL_SET: set shared buffer pool info/config
* Input - struct nfp_shared_buf_pool_info_set
* Output - None
*/
enum nfp_mbox_cmd {
	NFP_MBOX_NO_CMD		= 0x00,
	NFP_MBOX_POOL_GET	= 0x01,
	NFP_MBOX_POOL_SET	= 0x02,
};
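
A POOL_GET exchange, as described in the comment above, places a struct nfp_shared_buf_pool_id in the mailbox data area and expects a struct nfp_shared_buf_pool_info_get back. A condensed sketch of a caller driving that through the nfp_mbox_cmd() helper added later in this series (the function name here is illustrative and error handling is trimmed):

#include <linux/errno.h>

#include "nfp_abi.h"
#include "nfp_main.h"

/* Illustrative caller: query pool 0 of shared buffer 0 via the PF mailbox. */
static int example_query_pool_size(struct nfp_pf *pf)
{
	struct nfp_shared_buf_pool_info_get get_data;
	struct nfp_shared_buf_pool_id id = {
		.shared_buf	= cpu_to_le32(0),
		.pool		= cpu_to_le32(0),
	};
	int n;

	n = nfp_mbox_cmd(pf, NFP_MBOX_POOL_GET, &id, sizeof(id),
			 &get_data, sizeof(get_data));
	if (n < 0)
		return n;		/* mailbox or FW error */
	if (n < sizeof(get_data))
		return -EIO;		/* short reply from the FW */

	return le32_to_cpu(get_data.size);	/* size in pool_size_unit credits */
}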
#define NFP_SHARED_BUF_COUNT_SYM_NAME "_abi_nfd_pf%u_sb_cnt"
#define NFP_SHARED_BUF_TABLE_SYM_NAME "_abi_nfd_pf%u_sb_tbl"
/**
* struct nfp_shared_buf - NFP shared buffer description
* @id: numerical user-visible id of the shared buffer
* @size: size in bytes of the buffer
* @ingress_pools_count: number of ingress pools
* @egress_pools_count: number of egress pools
 * @ingress_tc_count: number of ingress traffic classes
 * @egress_tc_count: number of egress traffic classes
* @pool_size_unit: pool size may be in credits, each credit is
* @pool_size_unit bytes
*/
struct nfp_shared_buf {
	__le32 id;
	__le32 size;
	__le16 ingress_pools_count;
	__le16 egress_pools_count;
	__le16 ingress_tc_count;
	__le16 egress_tc_count;
	__le32 pool_size_unit;
};
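
Pool sizes cross this interface in credits of pool_size_unit bytes while devlink works in plain bytes, so the glue code converts in both directions and rejects sizes that are not a whole number of credits. A small worked example, assuming a unit of 2048 bytes (the real unit is read from this descriptor at probe time):

/* Illustrative numbers only; pool_size_unit comes from struct nfp_shared_buf. */
static u32 example_bytes_to_credits(u32 size_bytes, u32 unit)
{
	return size_bytes / unit;	/* 1 MiB / 2048 = 512 credits */
}

static u32 example_credits_to_bytes(u32 credits, u32 unit)
{
	return credits * unit;		/* 512 credits * 2048 = 1 MiB */
}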
/**
* struct nfp_shared_buf_pool_id - shared buffer pool identification
* @shared_buf: shared buffer id
* @pool: pool index
*/
struct nfp_shared_buf_pool_id {
	__le32 shared_buf;
	__le32 pool;
};
/**
* struct nfp_shared_buf_pool_info_get - struct devlink_sb_pool_info mirror
* @pool_type: one of enum devlink_sb_pool_type
* @size: pool size in units of SB's @pool_size_unit
* @threshold_type: one of enum devlink_sb_threshold_type
*/
struct nfp_shared_buf_pool_info_get {
	__le32 pool_type;
	__le32 size;
	__le32 threshold_type;
};
/**
* struct nfp_shared_buf_pool_info_set - packed args of sb_pool_set
* @id: pool identification info
* @size: pool size in units of SB's @pool_size_unit
* @threshold_type: one of enum devlink_sb_threshold_type
*/
struct nfp_shared_buf_pool_info_set {
	struct nfp_shared_buf_pool_id id;
	__le32 size;
	__le32 threshold_type;
};

#endif
@@ -54,6 +54,9 @@ static const struct nfp_app_type *apps[] = {
#ifdef CONFIG_NFP_APP_FLOWER
	[NFP_APP_FLOWER_NIC]	= &app_flower,
#endif
#ifdef CONFIG_NFP_APP_ABM_NIC
	[NFP_APP_ACTIVE_BUFFER_MGMT_NIC] = &app_abm,
#endif
};

struct nfp_app *nfp_app_from_netdev(struct net_device *netdev)
@@ -57,11 +57,13 @@ enum nfp_app_id {
	NFP_APP_CORE_NIC	= 0x1,
	NFP_APP_BPF_NIC		= 0x2,
	NFP_APP_FLOWER_NIC	= 0x3,
	NFP_APP_ACTIVE_BUFFER_MGMT_NIC = 0x4,
};

extern const struct nfp_app_type app_nic;
extern const struct nfp_app_type app_bpf;
extern const struct nfp_app_type app_flower;
extern const struct nfp_app_type app_abm;

/**
 * struct nfp_app_type - application definition
@@ -95,6 +97,7 @@ extern const struct nfp_app_type app_flower;
 * @bpf:	BPF ndo offload-related calls
 * @xdp_offload:	offload an XDP program
 * @eswitch_mode_get:	get SR-IOV eswitch mode
 * @eswitch_mode_set:	set SR-IOV eswitch mode (under pf->lock)
 * @sriov_enable:	app-specific sriov initialisation
 * @sriov_disable:	app-specific sriov clean-up
 * @repr_get:	get representor netdev
@@ -146,6 +149,7 @@ struct nfp_app_type {
	void (*sriov_disable)(struct nfp_app *app);

	enum devlink_eswitch_mode (*eswitch_mode_get)(struct nfp_app *app);
	int (*eswitch_mode_set)(struct nfp_app *app, u16 mode);
	struct net_device *(*repr_get)(struct nfp_app *app, u32 id);
};

@@ -370,6 +374,13 @@ static inline int nfp_app_eswitch_mode_get(struct nfp_app *app, u16 *mode)
	return 0;
}

static inline int nfp_app_eswitch_mode_set(struct nfp_app *app, u16 mode)
{
	if (!app->type->eswitch_mode_set)
		return -EOPNOTSUPP;
	return app->type->eswitch_mode_set(app, mode);
}

static inline int nfp_app_sriov_enable(struct nfp_app *app, int num_vfs)
{
	if (!app || !app->type->sriov_enable)
@@ -410,5 +421,7 @@ void nfp_app_free(struct nfp_app *app);

int nfp_app_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
			   unsigned int id);
int nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
				   struct nfp_net *nn, unsigned int id);

#endif
@@ -38,9 +38,8 @@
#include "nfp_net.h"
#include "nfp_port.h"

int nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
				   struct nfp_net *nn, unsigned int id)
{
	int err;
@@ -149,6 +149,26 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index)
	return ret;
}

static int
nfp_devlink_sb_pool_get(struct devlink *devlink, unsigned int sb_index,
			u16 pool_index, struct devlink_sb_pool_info *pool_info)
{
	struct nfp_pf *pf = devlink_priv(devlink);

	return nfp_shared_buf_pool_get(pf, sb_index, pool_index, pool_info);
}

static int
nfp_devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index,
			u16 pool_index,
			u32 size, enum devlink_sb_threshold_type threshold_type)
{
	struct nfp_pf *pf = devlink_priv(devlink);

	return nfp_shared_buf_pool_set(pf, sb_index, pool_index,
				       size, threshold_type);
}

static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct nfp_pf *pf = devlink_priv(devlink);
@@ -156,10 +176,25 @@ static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
	return nfp_app_eswitch_mode_get(pf->app, mode);
}

static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
	struct nfp_pf *pf = devlink_priv(devlink);
	int ret;

	mutex_lock(&pf->lock);
	ret = nfp_app_eswitch_mode_set(pf->app, mode);
	mutex_unlock(&pf->lock);

	return ret;
}

const struct devlink_ops nfp_devlink_ops = {
	.port_split		= nfp_devlink_port_split,
	.port_unsplit		= nfp_devlink_port_unsplit,
	.sb_pool_get		= nfp_devlink_sb_pool_get,
	.sb_pool_set		= nfp_devlink_sb_pool_set,
	.eswitch_mode_get	= nfp_devlink_eswitch_mode_get,
	.eswitch_mode_set	= nfp_devlink_eswitch_mode_set,
};

int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
@@ -55,6 +55,7 @@
#include "nfpcore/nfp6000_pcie.h"
#include "nfp_abi.h"
#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net.h"
@@ -75,6 +76,122 @@ static const struct pci_device_id nfp_pci_device_ids[] = {
};
MODULE_DEVICE_TABLE(pci, nfp_pci_device_ids);
int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format,
unsigned int default_val)
{
char name[256];
int err = 0;
u64 val;
snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp));
val = nfp_rtsym_read_le(pf->rtbl, name, &err);
if (err) {
if (err == -ENOENT)
return default_val;
nfp_err(pf->cpp, "Unable to read symbol %s\n", name);
return err;
}
return val;
}
u8 __iomem *
nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
unsigned int min_size, struct nfp_cpp_area **area)
{
char pf_symbol[256];
snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt,
nfp_cppcore_pcie_unit(pf->cpp));
return nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area);
}
/* Callers should hold the devlink instance lock */
int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length,
void *out_data, u64 out_length)
{
unsigned long long addr;
unsigned long err_at;
u64 max_data_sz;
u32 val = 0;
u32 cpp_id;
int n, err;
if (!pf->mbox)
return -EOPNOTSUPP;
cpp_id = NFP_CPP_ISLAND_ID(pf->mbox->target, NFP_CPP_ACTION_RW, 0,
pf->mbox->domain);
addr = pf->mbox->addr;
max_data_sz = pf->mbox->size - NFP_MBOX_SYM_MIN_SIZE;
/* Check if cmd field is clear */
err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_CMD, &val);
if (err || val) {
nfp_warn(pf->cpp, "failed to issue command (%u): %u, err: %d\n",
cmd, val, err);
return err ?: -EBUSY;
}
in_length = min(in_length, max_data_sz);
n = nfp_cpp_write(pf->cpp, cpp_id, addr + NFP_MBOX_DATA,
in_data, in_length);
if (n != in_length)
return -EIO;
/* Write data_len and wipe reserved */
err = nfp_cpp_writeq(pf->cpp, cpp_id, addr + NFP_MBOX_DATA_LEN,
in_length);
if (err)
return err;
/* Read back for ordering */
err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_DATA_LEN, &val);
if (err)
return err;
/* Write cmd and wipe return value */
err = nfp_cpp_writeq(pf->cpp, cpp_id, addr + NFP_MBOX_CMD, cmd);
if (err)
return err;
err_at = jiffies + 5 * HZ;
while (true) {
/* Wait for command to go to 0 (NFP_MBOX_NO_CMD) */
err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_CMD, &val);
if (err)
return err;
if (!val)
break;
if (time_is_before_eq_jiffies(err_at))
return -ETIMEDOUT;
msleep(5);
}
/* Copy output if any (could be error info, do it before reading ret) */
err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_DATA_LEN, &val);
if (err)
return err;
out_length = min_t(u32, val, min(out_length, max_data_sz));
n = nfp_cpp_read(pf->cpp, cpp_id, addr + NFP_MBOX_DATA,
out_data, out_length);
if (n != out_length)
return -EIO;
/* Check if there is an error */
err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_RET, &val);
if (err)
return err;
if (val)
return -val;
return out_length;
}
static bool nfp_board_ready(struct nfp_pf *pf)
{
	const char *cp;
@@ -436,6 +553,25 @@ static void nfp_fw_unload(struct nfp_pf *pf)
	nfp_nsp_close(nsp);
}
static int nfp_pf_find_rtsyms(struct nfp_pf *pf)
{
char pf_symbol[256];
unsigned int pf_id;
pf_id = nfp_cppcore_pcie_unit(pf->cpp);
/* Optional per-PCI PF mailbox */
snprintf(pf_symbol, sizeof(pf_symbol), NFP_MBOX_SYM_NAME, pf_id);
pf->mbox = nfp_rtsym_lookup(pf->rtbl, pf_symbol);
if (pf->mbox && pf->mbox->size < NFP_MBOX_SYM_MIN_SIZE) {
nfp_err(pf->cpp, "PF mailbox symbol too small: %llu < %d\n",
pf->mbox->size, NFP_MBOX_SYM_MIN_SIZE);
return -EINVAL;
}
return 0;
}
static int nfp_pci_probe(struct pci_dev *pdev,
			 const struct pci_device_id *pci_id)
{
@@ -510,6 +646,10 @@ static int nfp_pci_probe(struct pci_dev *pdev,
	pf->mip = nfp_mip_open(pf->cpp);
	pf->rtbl = __nfp_rtsym_table_read(pf->cpp, pf->mip);

	err = nfp_pf_find_rtsyms(pf);
	if (err)
		goto err_fw_unload;

	pf->dump_flag = NFP_DUMP_NSP_DIAG;
	pf->dumpspec = nfp_net_dump_load_dumpspec(pf->cpp, pf->rtbl);
@@ -46,10 +46,10 @@
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <net/devlink.h>

struct dentry;
struct device;
struct devlink_ops;
struct pci_dev;

struct nfp_cpp;
@@ -60,7 +60,9 @@ struct nfp_mip;
struct nfp_net;
struct nfp_nsp_identify;
struct nfp_port;
struct nfp_rtsym;
struct nfp_rtsym_table;
struct nfp_shared_buf;

/**
 * struct nfp_dumpspec - NFP FW dump specification structure
@@ -87,6 +89,7 @@ struct nfp_dumpspec {
 * @vf_cfg_mem:		Pointer to mapped VF configuration area
 * @vfcfg_tbl2_area:	Pointer to the CPP area for the VF config table
 * @vfcfg_tbl2:		Pointer to mapped VF config table
 * @mbox:		RTSym of per-PCI PF mailbox (under devlink lock)
 * @irq_entries:	Array of MSI-X entries for all vNICs
 * @limit_vfs:		Number of VFs supported by firmware (~0 for PCI limit)
 * @num_vfs:		Number of SR-IOV VFs enabled
@@ -108,6 +111,8 @@ struct nfp_dumpspec {
 * @ports:		Linked list of port structures (struct nfp_port)
 * @wq:			Workqueue for running works which need to grab @lock
 * @port_refresh_work:	Work entry for taking netdevs out
 * @shared_bufs:	Array of shared buffer structures if FW has any SBs
 * @num_shared_bufs:	Number of elements in @shared_bufs
 * @lock:		Protects all fields which may change after probe
 */
struct nfp_pf {
@@ -127,6 +132,8 @@ struct nfp_pf {
	struct nfp_cpp_area *vfcfg_tbl2_area;
	u8 __iomem *vfcfg_tbl2;

	const struct nfp_rtsym *mbox;

	struct msix_entry *irq_entries;
	unsigned int limit_vfs;
@@ -158,6 +165,9 @@ struct nfp_pf {
	struct workqueue_struct *wq;
	struct work_struct port_refresh_work;

	struct nfp_shared_buf *shared_bufs;
	unsigned int num_shared_bufs;

	struct mutex lock;
};

@@ -177,6 +187,14 @@ nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev,

bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);

int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format,
			       unsigned int default_val);
u8 __iomem *
nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
		 unsigned int min_size, struct nfp_cpp_area **area);
int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length,
		 void *out_data, u64 out_length);

enum nfp_dump_diag {
	NFP_DUMP_NSP_DIAG = 0,
};
@@ -188,4 +206,11 @@ s64 nfp_net_dump_calculate_size(struct nfp_pf *pf, struct nfp_dumpspec *spec,
int nfp_net_dump_populate_buffer(struct nfp_pf *pf, struct nfp_dumpspec *spec,
				 struct ethtool_dump *dump_param, void *dest);

int nfp_shared_buf_register(struct nfp_pf *pf);
void nfp_shared_buf_unregister(struct nfp_pf *pf);
int nfp_shared_buf_pool_get(struct nfp_pf *pf, unsigned int sb, u16 pool_index,
			    struct devlink_sb_pool_info *pool_info);
int nfp_shared_buf_pool_set(struct nfp_pf *pf, unsigned int sb,
			    u16 pool_index, u32 size,
			    enum devlink_sb_threshold_type threshold_type);

#endif /* NFP_MAIN_H */
@@ -545,6 +545,7 @@ struct nfp_net_dp {
/**
 * struct nfp_net - NFP network device structure
 * @dp:			Datapath structure
 * @id:			vNIC id within the PF (0 for VFs)
 * @fw_ver:		Firmware version
 * @cap:		Capabilities advertised by the Firmware
 * @max_mtu:		Maximum support MTU advertised by the Firmware
@@ -597,6 +598,8 @@ struct nfp_net {

	struct nfp_net_fw_version fw_ver;

	u32 id;

	u32 cap;
	u32 max_mtu;
@@ -909,7 +912,7 @@ int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
void nfp_net_debugfs_create(void);
void nfp_net_debugfs_destroy(void);
struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev);
void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir);
void nfp_net_debugfs_dir_clean(struct dentry **dir);
#else
static inline void nfp_net_debugfs_create(void)
@@ -926,7 +929,7 @@ static inline struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev)
}

static inline void
nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir)
{
}
@@ -3277,6 +3277,24 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
	return features;
}

static int
nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
{
struct nfp_net *nn = netdev_priv(netdev);
int n;
if (nn->port)
return nfp_port_get_phys_port_name(netdev, name, len);
if (!nn->dp.is_vf) {
n = snprintf(name, len, "%d", nn->id);
if (n >= len)
return -EINVAL;
}
return 0;
}

/**
 * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
 * @nn:   NFP Net device to reconfigure
@@ -3475,7 +3493,7 @@ const struct net_device_ops nfp_net_netdev_ops = {
	.ndo_set_mac_address	= nfp_net_set_mac_address,
	.ndo_set_features	= nfp_net_set_features,
	.ndo_features_check	= nfp_net_features_check,
	.ndo_get_phys_port_name	= nfp_net_get_phys_port_name,
	.ndo_udp_tunnel_add	= nfp_net_add_vxlan_port,
	.ndo_udp_tunnel_del	= nfp_net_del_vxlan_port,
	.ndo_bpf		= nfp_net_xdp,
@@ -201,7 +201,7 @@ static const struct file_operations nfp_xdp_q_fops = {
	.llseek = seq_lseek
};

void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir)
{
	struct dentry *queues, *tx, *rx, *xdp;
	char name[20];
@@ -211,7 +211,7 @@ void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id)
		return;

	if (nfp_net_is_data_vnic(nn))
		sprintf(name, "vnic%d", nn->id);
	else
		strcpy(name, "ctrl-vnic");
	nn->debugfs_dir = debugfs_create_dir(name, ddir);
@@ -101,48 +101,15 @@ nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int index)
	return NULL;
}

static int
nfp_net_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format,
unsigned int default_val)
{
char name[256];
int err = 0;
u64 val;
snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp));
val = nfp_rtsym_read_le(pf->rtbl, name, &err);
if (err) {
if (err == -ENOENT)
return default_val;
nfp_err(pf->cpp, "Unable to read symbol %s\n", name);
return err;
}
return val;
}
static int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
{
	return nfp_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1);
}

static int nfp_net_pf_get_app_id(struct nfp_pf *pf)
{
	return nfp_pf_rtsym_read_optional(pf, "_pf%u_net_app_id",
					  NFP_APP_CORE_NIC);
}

static u8 __iomem *
nfp_net_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
unsigned int min_size, struct nfp_cpp_area **area)
{
char pf_symbol[256];
snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt,
nfp_cppcore_pcie_unit(pf->cpp));
return nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area);
} }

static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
@@ -211,11 +178,13 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
{
	int err;

	nn->id = id;

	err = nfp_net_init(nn);
	if (err)
		return err;

	nfp_net_debugfs_vnic_add(nn, pf->ddir);

	if (nn->port) {
		err = nfp_devlink_port_register(pf->app, nn->port);
@@ -379,9 +348,8 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
	if (!nfp_app_needs_ctrl_vnic(pf->app))
		return 0;

	ctrl_bar = nfp_pf_map_rtsym(pf, "net.ctrl", "_pf%u_net_ctrl_bar",
				    NFP_PF_CSR_SLICE_SIZE, &pf->ctrl_vnic_bar);
	if (IS_ERR(ctrl_bar)) {
		nfp_err(pf->cpp, "Failed to find ctrl vNIC memory symbol\n");
		err = PTR_ERR(ctrl_bar);
@@ -507,8 +475,8 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf)
	int err;

	min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
	mem = nfp_pf_map_rtsym(pf, "net.bar0", "_pf%d_net_bar0",
			       min_size, &pf->data_vnic_bar);
	if (IS_ERR(mem)) {
		nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");
		return PTR_ERR(mem);
@@ -528,10 +496,9 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf)
		}
	}

	pf->vf_cfg_mem = nfp_pf_map_rtsym(pf, "net.vfcfg", "_pf%d_net_vf_bar",
					  NFP_NET_CFG_BAR_SZ * pf->limit_vfs,
					  &pf->vf_cfg_bar);
	if (IS_ERR(pf->vf_cfg_mem)) {
		if (PTR_ERR(pf->vf_cfg_mem) != -ENOENT) {
			err = PTR_ERR(pf->vf_cfg_mem);
@@ -541,9 +508,9 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf)
	}

	min_size = NFP_NET_VF_CFG_SZ * pf->limit_vfs + NFP_NET_VF_CFG_MB_SZ;
	pf->vfcfg_tbl2 = nfp_pf_map_rtsym(pf, "net.vfcfg_tbl2",
					  "_pf%d_net_vf_cfg2",
					  min_size, &pf->vfcfg_tbl2_area);
	if (IS_ERR(pf->vfcfg_tbl2)) {
		if (PTR_ERR(pf->vfcfg_tbl2) != -ENOENT) {
			err = PTR_ERR(pf->vfcfg_tbl2);
@@ -763,6 +730,10 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
	if (err)
		goto err_app_clean;

	err = nfp_shared_buf_register(pf);
	if (err)
		goto err_devlink_unreg;

	mutex_lock(&pf->lock);
	pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
@@ -796,6 +767,8 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
err_clean_ddir:
	nfp_net_debugfs_dir_clean(&pf->ddir);
	mutex_unlock(&pf->lock);
	nfp_shared_buf_unregister(pf);
err_devlink_unreg:
	cancel_work_sync(&pf->port_refresh_work);
	devlink_unregister(devlink);
err_app_clean:
@@ -823,6 +796,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
	mutex_unlock(&pf->lock);

	nfp_shared_buf_unregister(pf);
	devlink_unregister(priv_to_devlink(pf));

	nfp_net_pf_free_irqs(pf);
@@ -385,7 +385,7 @@ struct net_device *nfp_repr_alloc(struct nfp_app *app)
	return NULL;
}

void nfp_repr_clean_and_free(struct nfp_repr *repr)
{
	nfp_info(repr->app->cpp, "Destroying Representor(%s)\n",
		 repr->netdev->name);
@@ -76,6 +76,7 @@ struct nfp_repr_pcpu_stats {
 * @port:	Port of representor
 * @app:	APP handle
 * @stats:	Statistic of packets hitting CPU
 * @app_priv:	Pointer for APP data
 */
struct nfp_repr {
	struct net_device *netdev;
@@ -83,6 +84,7 @@ struct nfp_repr {
	struct nfp_port *port;
	struct nfp_app *app;
	struct nfp_repr_pcpu_stats __percpu *stats;
	void *app_priv;
};

/**
@@ -125,6 +127,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
		  struct net_device *pf_netdev);
void nfp_repr_free(struct net_device *netdev);
struct net_device *nfp_repr_alloc(struct nfp_app *app);
void nfp_repr_clean_and_free(struct nfp_repr *repr);
void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs);
void nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
				      enum nfp_repr_type type);
@@ -283,7 +283,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
	nfp_net_info(nn);
	vf->ddir = nfp_net_debugfs_device_add(pdev);
	nfp_net_debugfs_vnic_add(nn, vf->ddir);

	return 0;
@@ -181,7 +181,11 @@ nfp_port_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
				     eth_port->label_subport);
		break;
	case NFP_PORT_PF_PORT:
		if (!port->pf_split)
			n = snprintf(name, len, "pf%d", port->pf_id);
		else
			n = snprintf(name, len, "pf%ds%d", port->pf_id,
				     port->pf_split_id);
		break;
	case NFP_PORT_VF_PORT:
		n = snprintf(name, len, "pf%dvf%d", port->pf_id, port->vf_id);
@@ -218,6 +222,8 @@ int nfp_port_configure(struct net_device *netdev, bool configed)
	eth_port = __nfp_port_get_eth_port(port);
	if (!eth_port)
		return 0;
	if (port->eth_forced)
		return 0;

	err = nfp_eth_set_configured(port->app->cpp, eth_port->index, configed);
	return err < 0 && err != -EOPNOTSUPP ? err : 0;
@@ -77,10 +77,13 @@ enum nfp_port_flags {
 * @app:	backpointer to the app structure
 * @dl_port:	devlink port structure
 * @eth_id:	for %NFP_PORT_PHYS_PORT port ID in NFP enumeration scheme
 * @eth_forced:	for %NFP_PORT_PHYS_PORT port is forced UP or DOWN, don't change
 * @eth_port:	for %NFP_PORT_PHYS_PORT translated ETH Table port entry
 * @eth_stats:	for %NFP_PORT_PHYS_PORT MAC stats if available
 * @pf_id:	for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT ID of the PCI PF (0-3)
 * @vf_id:	for %NFP_PORT_VF_PORT ID of the PCI VF within @pf_id
 * @pf_split:	for %NFP_PORT_PF_PORT %true if PCI PF has more than one vNIC
 * @pf_split_id:for %NFP_PORT_PF_PORT ID of PCI PF vNIC (valid if @pf_split)
 * @vnic:	for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT vNIC ctrl memory
 * @port_list:	entry on pf's list of ports
 */
@@ -99,6 +102,7 @@ struct nfp_port {
		/* NFP_PORT_PHYS_PORT */
		struct {
			unsigned int eth_id;
			bool eth_forced;
			struct nfp_eth_table_port *eth_port;
			u8 __iomem *eth_stats;
		};
@@ -106,6 +110,8 @@ struct nfp_port {
		struct {
			unsigned int pf_id;
			unsigned int vf_id;
			bool pf_split;
			unsigned int pf_split_id;
			u8 __iomem *vnic;
		};
	};
// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
/*
* Copyright (C) 2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <net/devlink.h>
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nffw.h"
#include "nfp_abi.h"
#include "nfp_app.h"
#include "nfp_main.h"
static u32 nfp_shared_buf_pool_unit(struct nfp_pf *pf, unsigned int sb)
{
__le32 sb_id = cpu_to_le32(sb);
unsigned int i;
for (i = 0; i < pf->num_shared_bufs; i++)
if (pf->shared_bufs[i].id == sb_id)
return le32_to_cpu(pf->shared_bufs[i].pool_size_unit);
WARN_ON_ONCE(1);
return 0;
}
int nfp_shared_buf_pool_get(struct nfp_pf *pf, unsigned int sb, u16 pool_index,
struct devlink_sb_pool_info *pool_info)
{
struct nfp_shared_buf_pool_info_get get_data;
struct nfp_shared_buf_pool_id id = {
.shared_buf = cpu_to_le32(sb),
.pool = cpu_to_le32(pool_index),
};
unsigned int unit_size;
int n;
unit_size = nfp_shared_buf_pool_unit(pf, sb);
if (!unit_size)
return -EINVAL;
n = nfp_mbox_cmd(pf, NFP_MBOX_POOL_GET, &id, sizeof(id),
&get_data, sizeof(get_data));
if (n < 0)
return n;
if (n < sizeof(get_data))
return -EIO;
pool_info->pool_type = le32_to_cpu(get_data.pool_type);
pool_info->threshold_type = le32_to_cpu(get_data.threshold_type);
pool_info->size = le32_to_cpu(get_data.size) * unit_size;
return 0;
}
int nfp_shared_buf_pool_set(struct nfp_pf *pf, unsigned int sb,
u16 pool_index, u32 size,
enum devlink_sb_threshold_type threshold_type)
{
struct nfp_shared_buf_pool_info_set set_data = {
.id = {
.shared_buf = cpu_to_le32(sb),
.pool = cpu_to_le32(pool_index),
},
.threshold_type = cpu_to_le32(threshold_type),
};
unsigned int unit_size;
unit_size = nfp_shared_buf_pool_unit(pf, sb);
if (!unit_size || size % unit_size)
return -EINVAL;
set_data.size = cpu_to_le32(size / unit_size);
return nfp_mbox_cmd(pf, NFP_MBOX_POOL_SET, &set_data, sizeof(set_data),
NULL, 0);
}
int nfp_shared_buf_register(struct nfp_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
unsigned int i, num_entries, entry_sz;
struct nfp_cpp_area *sb_desc_area;
u8 __iomem *sb_desc;
int n, err;
if (!pf->mbox)
return 0;
n = nfp_pf_rtsym_read_optional(pf, NFP_SHARED_BUF_COUNT_SYM_NAME, 0);
if (n <= 0)
return n;
num_entries = n;
sb_desc = nfp_pf_map_rtsym(pf, "sb_tbl", NFP_SHARED_BUF_TABLE_SYM_NAME,
num_entries * sizeof(pf->shared_bufs[0]),
&sb_desc_area);
if (IS_ERR(sb_desc))
return PTR_ERR(sb_desc);
entry_sz = nfp_cpp_area_size(sb_desc_area) / num_entries;
pf->shared_bufs = kmalloc_array(num_entries, sizeof(pf->shared_bufs[0]),
GFP_KERNEL);
if (!pf->shared_bufs) {
err = -ENOMEM;
goto err_release_area;
}
for (i = 0; i < num_entries; i++) {
struct nfp_shared_buf *sb = &pf->shared_bufs[i];
/* Entries may be larger in future FW */
memcpy_fromio(sb, sb_desc + i * entry_sz, sizeof(*sb));
err = devlink_sb_register(devlink,
le32_to_cpu(sb->id),
le32_to_cpu(sb->size),
le16_to_cpu(sb->ingress_pools_count),
le16_to_cpu(sb->egress_pools_count),
le16_to_cpu(sb->ingress_tc_count),
le16_to_cpu(sb->egress_tc_count));
if (err)
goto err_unreg_prev;
}
pf->num_shared_bufs = num_entries;
nfp_cpp_area_release_free(sb_desc_area);
return 0;
err_unreg_prev:
while (i--)
devlink_sb_unregister(devlink,
le32_to_cpu(pf->shared_bufs[i].id));
kfree(pf->shared_bufs);
err_release_area:
nfp_cpp_area_release_free(sb_desc_area);
return err;
}
void nfp_shared_buf_unregister(struct nfp_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
unsigned int i;
for (i = 0; i < pf->num_shared_bufs; i++)
devlink_sb_unregister(devlink,
le32_to_cpu(pf->shared_bufs[i].id));
kfree(pf->shared_bufs);
}
@@ -933,7 +933,6 @@ static int nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr,
	u32 *wrptr32 = kernel_vaddr;
	const u32 __iomem *rdptr32;
	int n, width;

	priv = nfp_cpp_area_priv(area);
	rdptr64 = priv->iomem + offset;
@@ -943,10 +942,15 @@ static int nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr,
		return -EFAULT;

	width = priv->width.read;
	if (width <= 0)
		return -EINVAL;

	/* MU reads via a PCIe2CPP BAR support 32bit (and other) lengths */
	if (priv->target == (NFP_CPP_TARGET_MU & NFP_CPP_TARGET_ID_MASK) &&
	    priv->action == NFP_CPP_ACTION_RW &&
	    (offset % sizeof(u64) == 4 || length % sizeof(u64) == 4))
		width = TARGET_WIDTH_32;

	/* Unaligned? Translate to an explicit access */
	if ((priv->offset + offset) & (width - 1))
		return nfp_cpp_explicit_read(nfp_cpp_area_cpp(area),
@@ -956,36 +960,29 @@ static int nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr,
					     priv->offset + offset,
					     kernel_vaddr, length, width);

	if (WARN_ON(!priv->bar))
		return -EFAULT;

	switch (width) {
	case TARGET_WIDTH_32:
		if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0)
			return -EINVAL;

		for (n = 0; n < length; n += sizeof(u32))
			*wrptr32++ = __raw_readl(rdptr32++);
		return n;
#ifdef __raw_readq
	case TARGET_WIDTH_64:
		if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0)
			return -EINVAL;

		for (n = 0; n < length; n += sizeof(u64))
			*wrptr64++ = __raw_readq(rdptr64++);
		return n;
#endif
	default:
		return -EINVAL;
	}
}

static int
@@ -999,7 +996,6 @@ nfp6000_area_write(struct nfp_cpp_area *area,
	struct nfp6000_area_priv *priv;
	u32 __iomem *wrptr32;
	int n, width;

	priv = nfp_cpp_area_priv(area);
	wrptr64 = priv->iomem + offset;
@@ -1009,10 +1005,15 @@ nfp6000_area_write(struct nfp_cpp_area *area,
		return -EFAULT;

	width = priv->width.write;
	if (width <= 0)
		return -EINVAL;

	/* MU writes via a PCIe2CPP BAR support 32bit (and other) lengths */
	if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
	    priv->action == NFP_CPP_ACTION_RW &&
	    (offset % sizeof(u64) == 4 || length % sizeof(u64) == 4))
		width = TARGET_WIDTH_32;

	/* Unaligned? Translate to an explicit access */
	if ((priv->offset + offset) & (width - 1))
		return nfp_cpp_explicit_write(nfp_cpp_area_cpp(area),
@@ -1022,40 +1023,33 @@ nfp6000_area_write(struct nfp_cpp_area *area,
					      priv->offset + offset,
					      kernel_vaddr, length, width);

	if (WARN_ON(!priv->bar))
		return -EFAULT;

	switch (width) {
	case TARGET_WIDTH_32:
		if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0)
			return -EINVAL;

		for (n = 0; n < length; n += sizeof(u32)) {
			__raw_writel(*rdptr32++, wrptr32++);
			wmb();
		}
		return n;
#ifdef __raw_writeq
	case TARGET_WIDTH_64:
		if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0)
			return -EINVAL;

		for (n = 0; n < length; n += sizeof(u64)) {
			__raw_writeq(*rdptr64++, wrptr64++);
			wmb();
		}
		return n;
#endif
	default:
		return -EINVAL;
	}
}

struct nfp6000_explicit_priv {
@@ -2756,7 +2756,8 @@ static const struct genl_ops devlink_nl_ops[] = {
		.doit = devlink_nl_cmd_eswitch_set_doit,
		.policy = devlink_nl_policy,
		.flags = GENL_ADMIN_PERM,
		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
				  DEVLINK_NL_FLAG_NO_LOCK,
	},
	{
		.cmd = DEVLINK_CMD_DPIPE_TABLE_GET,