Commit d4e7f092 authored by Jakub Kicinski, committed by David S. Miller

nfp: rename netdev/port to vNIC

vNIC is a PCIe-side abstraction used by the NFP firmwares supported
by this driver.  It was initially meant to represent a device port,
and therefore a netdev, but today it should be thought of as a way
of grouping descriptor rings and associated state.  Advanced apps
will have vNICs without netdevs and ports without a vNIC (using
representors instead).

Make sure code refers to vNICs as vNICs and not ports or netdevs.
No functional changes.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Simon Horman <simon.horman@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent beba69ca
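
For readers new to the driver, the sketch below (not part of the commit) condenses how the renamed objects relate: the PF keeps a list of vNICs, and each vNIC is a struct nfp_net linked into that list. The struct and field names are taken from the diff that follows; everything else, including the example_for_each_vnic() helper, is purely illustrative.

	#include <linux/list.h>
	#include <linux/mutex.h>

	/* Sketch only: condensed view of the renamed structures. */
	struct nfp_pf {
		struct list_head vnics;		/* list of struct nfp_net (vNICs) */
		unsigned int num_vnics;		/* vNICs actually spawned */
		unsigned int max_data_vnics;	/* data vNICs the app firmware supports */
		struct mutex lock;		/* protects fields that change after probe */
	};

	struct nfp_net {
		struct list_head vnic_list;	/* entry on nfp_pf::vnics */
		/* descriptor rings, IRQ vectors and other per-vNIC state elided */
	};

	/* Illustrative helper: how PF-level code walks its vNICs. */
	static void example_for_each_vnic(struct nfp_pf *pf)
	{
		struct nfp_net *nn;

		mutex_lock(&pf->lock);
		list_for_each_entry(nn, &pf->vnics, vnic_list)
			; /* per-vNIC work goes here */
		mutex_unlock(&pf->lock);
	}
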
@@ -340,7 +340,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
 		err = -ENOMEM;
 		goto err_rel_regions;
 	}
-	INIT_LIST_HEAD(&pf->ports);
+	INIT_LIST_HEAD(&pf->vnics);
 	pci_set_drvdata(pdev, pf);
 	pf->pdev = pdev;
...
@@ -57,27 +57,27 @@ struct nfp_eth_table;
  * struct nfp_pf - NFP PF-specific device structure
  * @pdev:		Backpointer to PCI device
  * @cpp:		Pointer to the CPP handle
- * @ctrl_area:		Pointer to the CPP area for the control BAR
+ * @data_vnic_bar:	Pointer to the CPP area for the data vNICs' BARs
  * @tx_area:		Pointer to the CPP area for the TX queues
  * @rx_area:		Pointer to the CPP area for the FL/RX queues
- * @irq_entries:	Array of MSI-X entries for all ports
+ * @irq_entries:	Array of MSI-X entries for all vNICs
  * @limit_vfs:		Number of VFs supported by firmware (~0 for PCI limit)
  * @num_vfs:		Number of SR-IOV VFs enabled
  * @fw_loaded:		Is the firmware loaded?
  * @eth_tbl:		NSP ETH table
  * @ddir:		Per-device debugfs directory
- * @num_ports:		Number of adapter ports app firmware supports
- * @num_netdevs:	Number of netdevs spawned
- * @ports:		Linked list of port structures (struct nfp_net)
- * @port_lock:		Protects @ports, @num_ports, @num_netdevs
+ * @max_data_vnics:	Number of data vNICs app firmware supports
+ * @num_vnics:		Number of vNICs spawned
+ * @vnics:		Linked list of vNIC structures (struct nfp_net)
  * @port_refresh_work:	Work entry for taking netdevs out
+ * @lock:		Protects all fields which may change after probe
  */
 struct nfp_pf {
 	struct pci_dev *pdev;
 	struct nfp_cpp *cpp;
-	struct nfp_cpp_area *ctrl_area;
+	struct nfp_cpp_area *data_vnic_bar;
 	struct nfp_cpp_area *tx_area;
 	struct nfp_cpp_area *rx_area;
@@ -92,12 +92,12 @@ struct nfp_pf {
 	struct dentry *ddir;
-	unsigned int num_ports;
-	unsigned int num_netdevs;
-	struct list_head ports;
+	unsigned int max_data_vnics;
+	unsigned int num_vnics;
+	struct list_head vnics;
 	struct work_struct port_refresh_work;
-	struct mutex port_lock;
+	struct mutex lock;
 };
 extern struct pci_driver nfp_netvf_pci_driver;
...
@@ -84,7 +84,7 @@
 #define NFP_NET_NON_Q_VECTORS		2
 #define NFP_NET_IRQ_LSC_IDX		0
 #define NFP_NET_IRQ_EXN_IDX		1
-#define NFP_NET_MIN_PORT_IRQS		(NFP_NET_NON_Q_VECTORS + 1)
+#define NFP_NET_MIN_VNIC_IRQS		(NFP_NET_NON_Q_VECTORS + 1)
 /* Queue/Ring definitions */
 #define NFP_NET_MAX_TX_RINGS	64	/* Max. # of Tx rings per device */
@@ -555,7 +555,7 @@ struct nfp_net_dp {
  * @rx_bar:             Pointer to mapped FL/RX queues
  * @debugfs_dir:        Device directory in debugfs
  * @ethtool_dump_flag:  Ethtool dump flag
- * @port_list:          Entry on device port list
+ * @vnic_list:          Entry on device vNIC list
  * @pdev:               Backpointer to PCI device
  * @cpp:                CPP device handle if available
  * @eth_port:           Translated ETH Table port entry
@@ -625,7 +625,7 @@ struct nfp_net {
 	struct dentry *debugfs_dir;
 	u32 ethtool_dump_flag;
-	struct list_head port_list;
+	struct list_head vnic_list;
 	struct pci_dev *pdev;
 	struct nfp_cpp *cpp;
@@ -842,7 +842,7 @@ void nfp_net_refresh_port_table(struct nfp_net *nn);
 void nfp_net_debugfs_create(void);
 void nfp_net_debugfs_destroy(void);
 struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev);
-void nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id);
+void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id);
 void nfp_net_debugfs_dir_clean(struct dentry **dir);
 #else
 static inline void nfp_net_debugfs_create(void)
@@ -859,7 +859,7 @@ static inline struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev)
 }
 static inline void
-nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id)
+nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id)
 {
 }
...
@@ -200,7 +200,7 @@ static const struct file_operations nfp_xdp_q_fops = {
 	.llseek = seq_lseek
 };
-void nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id)
+void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id)
 {
 	struct dentry *queues, *tx, *rx, *xdp;
 	char name[20];
@@ -209,7 +209,7 @@ void nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id)
 	if (IS_ERR_OR_NULL(nfp_dir))
 		return;
-	sprintf(name, "port%d", id);
+	sprintf(name, "vnic%d", id);
 	nn->debugfs_dir = debugfs_create_dir(name, ddir);
 	if (IS_ERR_OR_NULL(nn->debugfs_dir))
 		return;
...
@@ -197,10 +197,10 @@ static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
 		 nfp_cppcore_pcie_unit(pf->cpp));
 	val = nfp_rtsym_read_le(pf->cpp, name, &err);
-	/* Default to one port */
+	/* Default to one port/vNIC */
 	if (err) {
 		if (err != -ENOENT)
-			nfp_err(pf->cpp, "Unable to read adapter port count\n");
+			nfp_err(pf->cpp, "Unable to read adapter vNIC count\n");
 		val = 1;
 	}
@@ -216,7 +216,7 @@ nfp_net_pf_total_qcs(struct nfp_pf *pf, void __iomem *ctrl_bar,
 	min_qc = readl(ctrl_bar + start_off);
 	max_qc = min_qc;
-	for (i = 0; i < pf->num_ports; i++) {
+	for (i = 0; i < pf->max_data_vnics; i++) {
 		/* To make our lives simpler only accept configuration where
 		 * queues are allocated to PFs in order (queues of PFn all have
 		 * indexes lower than PFn+1).
@@ -248,17 +248,17 @@ static u8 __iomem *nfp_net_pf_map_ctrl_bar(struct nfp_pf *pf)
 		return NULL;
 	}
-	if (ctrl_sym->size < pf->num_ports * NFP_PF_CSR_SLICE_SIZE) {
+	if (ctrl_sym->size < pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE) {
 		dev_err(&pf->pdev->dev,
-			"PF BAR0 too small to contain %d ports\n",
-			pf->num_ports);
+			"PF BAR0 too small to contain %d vNICs\n",
+			pf->max_data_vnics);
 		return NULL;
 	}
 	ctrl_bar = nfp_net_map_area(pf->cpp, "net.ctrl",
 				    ctrl_sym->domain, ctrl_sym->target,
 				    ctrl_sym->addr, ctrl_sym->size,
-				    &pf->ctrl_area);
+				    &pf->data_vnic_bar);
 	if (IS_ERR(ctrl_bar)) {
 		dev_err(&pf->pdev->dev, "Failed to map PF BAR0: %ld\n",
 			PTR_ERR(ctrl_bar));
@@ -268,24 +268,24 @@ static u8 __iomem *nfp_net_pf_map_ctrl_bar(struct nfp_pf *pf)
 	return ctrl_bar;
 }
-static void nfp_net_pf_free_netdevs(struct nfp_pf *pf)
+static void nfp_net_pf_free_vnics(struct nfp_pf *pf)
 {
 	struct nfp_net *nn;
-	while (!list_empty(&pf->ports)) {
-		nn = list_first_entry(&pf->ports, struct nfp_net, port_list);
-		list_del(&nn->port_list);
-		pf->num_netdevs--;
+	while (!list_empty(&pf->vnics)) {
+		nn = list_first_entry(&pf->vnics, struct nfp_net, vnic_list);
+		list_del(&nn->vnic_list);
+		pf->num_vnics--;
 		nfp_net_free(nn);
 	}
 }
 static struct nfp_net *
-nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
+nfp_net_pf_alloc_vnic(struct nfp_pf *pf, void __iomem *ctrl_bar,
		      void __iomem *tx_bar, void __iomem *rx_bar,
		      int stride, struct nfp_net_fw_version *fw_ver,
		      struct nfp_eth_table_port *eth_port)
 {
 	u32 n_tx_rings, n_rx_rings;
 	struct nfp_net *nn;
@@ -293,7 +293,7 @@ nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
 	n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
 	n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
-	/* Allocate and initialise the netdev */
+	/* Allocate and initialise the vNIC */
 	nn = nfp_net_alloc(pf->pdev, n_tx_rings, n_rx_rings);
 	if (IS_ERR(nn))
 		return nn;
@@ -312,8 +312,7 @@ nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
 }
 static int
-nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
-			    unsigned int id)
+nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
 {
 	int err;
@@ -330,7 +329,7 @@ nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
 	if (err)
 		return err;
-	nfp_net_debugfs_port_add(nn, pf->ddir, id);
+	nfp_net_debugfs_vnic_add(nn, pf->ddir, id);
 	nfp_net_info(nn);
@@ -338,9 +337,9 @@ nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
 }
 static int
-nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar,
+nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
			void __iomem *tx_bar, void __iomem *rx_bar,
			int stride, struct nfp_net_fw_version *fw_ver)
 {
 	u32 prev_tx_base, prev_rx_base, tgt_tx_base, tgt_rx_base;
 	struct nfp_eth_table_port *eth_port;
@@ -351,7 +350,7 @@ nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar,
 	prev_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
 	prev_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
-	for (i = 0; i < pf->num_ports; i++) {
+	for (i = 0; i < pf->max_data_vnics; i++) {
 		tgt_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
 		tgt_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
 		tx_bar += (tgt_tx_base - prev_tx_base) * NFP_QCP_QUEUE_ADDR_SZ;
@@ -363,49 +362,48 @@ nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar,
 		if (eth_port && eth_port->override_changed) {
 			nfp_warn(pf->cpp, "Config changed for port #%d, reboot required before port will be operational\n", i);
 		} else {
-			nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar,
-							  rx_bar, stride,
-							  fw_ver, eth_port);
+			nn = nfp_net_pf_alloc_vnic(pf, ctrl_bar, tx_bar, rx_bar,
+						   stride, fw_ver, eth_port);
 			if (IS_ERR(nn)) {
 				err = PTR_ERR(nn);
 				goto err_free_prev;
 			}
-			list_add_tail(&nn->port_list, &pf->ports);
-			pf->num_netdevs++;
+			list_add_tail(&nn->vnic_list, &pf->vnics);
+			pf->num_vnics++;
 		}
 		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
 	}
-	if (list_empty(&pf->ports))
+	if (list_empty(&pf->vnics))
 		return -ENODEV;
 	return 0;
 err_free_prev:
-	nfp_net_pf_free_netdevs(pf);
+	nfp_net_pf_free_vnics(pf);
 	return err;
 }
 static int
-nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
+nfp_net_pf_spawn_vnics(struct nfp_pf *pf,
		       void __iomem *ctrl_bar, void __iomem *tx_bar,
		       void __iomem *rx_bar, int stride,
		       struct nfp_net_fw_version *fw_ver)
 {
-	unsigned int id, wanted_irqs, num_irqs, ports_left, irqs_left;
+	unsigned int id, wanted_irqs, num_irqs, vnics_left, irqs_left;
 	struct nfp_net *nn;
 	int err;
-	/* Allocate the netdevs and do basic init */
-	err = nfp_net_pf_alloc_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
+	/* Allocate the vnics and do basic init */
+	err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, tx_bar, rx_bar,
				     stride, fw_ver);
 	if (err)
 		return err;
 	/* Get MSI-X vectors */
 	wanted_irqs = 0;
-	list_for_each_entry(nn, &pf->ports, port_list)
+	list_for_each_entry(nn, &pf->vnics, vnic_list)
 		wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
 	pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
 				  GFP_KERNEL);
@@ -415,7 +413,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
 	}
 	num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
-				      NFP_NET_MIN_PORT_IRQS * pf->num_netdevs,
+				      NFP_NET_MIN_VNIC_IRQS * pf->num_vnics,
 				      wanted_irqs);
 	if (!num_irqs) {
 		nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
@@ -423,23 +421,23 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
 		goto err_vec_free;
 	}
-	/* Distribute IRQs to ports */
+	/* Distribute IRQs to vNICs */
 	irqs_left = num_irqs;
-	ports_left = pf->num_netdevs;
-	list_for_each_entry(nn, &pf->ports, port_list) {
+	vnics_left = pf->num_vnics;
+	list_for_each_entry(nn, &pf->vnics, vnic_list) {
 		unsigned int n;
-		n = DIV_ROUND_UP(irqs_left, ports_left);
+		n = DIV_ROUND_UP(irqs_left, vnics_left);
 		nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
 				    n);
 		irqs_left -= n;
-		ports_left--;
+		vnics_left--;
 	}
-	/* Finish netdev init and register */
+	/* Finish vNIC init and register */
 	id = 0;
-	list_for_each_entry(nn, &pf->ports, port_list) {
-		err = nfp_net_pf_init_port_netdev(pf, nn, id);
+	list_for_each_entry(nn, &pf->vnics, vnic_list) {
+		err = nfp_net_pf_init_vnic(pf, nn, id);
 		if (err)
 			goto err_prev_deinit;
@@ -449,7 +447,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
 	return 0;
 err_prev_deinit:
-	list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) {
+	list_for_each_entry_continue_reverse(nn, &pf->vnics, vnic_list) {
 		nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
 		nfp_net_clean(nn);
 	}
@@ -457,7 +455,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
 err_vec_free:
 	kfree(pf->irq_entries);
 err_nn_free:
-	nfp_net_pf_free_netdevs(pf);
+	nfp_net_pf_free_vnics(pf);
 	return err;
 }
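
The "Distribute IRQs to vNICs" loop above hands out the MSI-X vectors obtained from nfp_net_irqs_alloc() as evenly as possible, with earlier vNICs absorbing any remainder. A minimal user-space sketch of the same arithmetic (the example_split_irqs() name is made up, and DIV_ROUND_UP is redefined locally to keep the snippet self-contained):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	/* Sketch only: mirror the even-split logic from the loop above. */
	static void example_split_irqs(unsigned int num_irqs, unsigned int num_vnics)
	{
		unsigned int irqs_left = num_irqs, vnics_left = num_vnics;

		while (vnics_left) {
			unsigned int n = DIV_ROUND_UP(irqs_left, vnics_left);

			printf("vNIC %u gets %u IRQs\n", num_vnics - vnics_left, n);
			irqs_left -= n;
			vnics_left--;
		}
	}

	int main(void)
	{
		/* 10 vectors over 3 vNICs prints 4, 3, 3: the first vNIC
		 * takes the remainder, nobody drops below the even share.
		 */
		example_split_irqs(10, 3);
		return 0;
	}
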
@@ -470,23 +468,23 @@ static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
 	nfp_cpp_area_release_free(pf->rx_area);
 	nfp_cpp_area_release_free(pf->tx_area);
-	nfp_cpp_area_release_free(pf->ctrl_area);
+	nfp_cpp_area_release_free(pf->data_vnic_bar);
 }
-static void nfp_net_refresh_netdevs(struct work_struct *work)
+static void nfp_net_refresh_vnics(struct work_struct *work)
 {
 	struct nfp_pf *pf = container_of(work, struct nfp_pf,
 					 port_refresh_work);
 	struct nfp_eth_table *eth_table;
 	struct nfp_net *nn, *next;
-	mutex_lock(&pf->port_lock);
+	mutex_lock(&pf->lock);
 	/* Check for nfp_net_pci_remove() racing against us */
-	if (list_empty(&pf->ports))
+	if (list_empty(&pf->vnics))
 		goto out;
-	list_for_each_entry(nn, &pf->ports, port_list)
+	list_for_each_entry(nn, &pf->vnics, vnic_list)
 		nfp_net_link_changed_read_clear(nn);
 	eth_table = nfp_eth_read_ports(pf->cpp);
@@ -496,7 +494,7 @@ static void nfp_net_refresh_netdevs(struct work_struct *work)
 	}
 	rtnl_lock();
-	list_for_each_entry(nn, &pf->ports, port_list) {
+	list_for_each_entry(nn, &pf->vnics, vnic_list) {
 		if (!nn->eth_port)
 			continue;
 		nn->eth_port = nfp_net_find_port(eth_table,
@@ -507,7 +505,7 @@ static void nfp_net_refresh_netdevs(struct work_struct *work)
 	kfree(pf->eth_tbl);
 	pf->eth_tbl = eth_table;
-	list_for_each_entry_safe(nn, next, &pf->ports, port_list) {
+	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
 		if (!nn->eth_port) {
 			nfp_warn(pf->cpp, "Warning: port not present after reconfig\n");
 			continue;
@@ -520,15 +518,15 @@ static void nfp_net_refresh_netdevs(struct work_struct *work)
 		nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
 		nfp_net_clean(nn);
-		list_del(&nn->port_list);
-		pf->num_netdevs--;
+		list_del(&nn->vnic_list);
+		pf->num_vnics--;
 		nfp_net_free(nn);
 	}
-	if (list_empty(&pf->ports))
+	if (list_empty(&pf->vnics))
 		nfp_net_pci_remove_finish(pf);
 out:
-	mutex_unlock(&pf->port_lock);
+	mutex_unlock(&pf->lock);
 }
 void nfp_net_refresh_port_table(struct nfp_net *nn)
@@ -576,8 +574,8 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
 	int stride;
 	int err;
-	INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_netdevs);
-	mutex_init(&pf->port_lock);
+	INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics);
+	mutex_init(&pf->lock);
 	/* Verify that the board has completed initialization */
 	if (!nfp_is_ready(pf->cpp)) {
@@ -585,8 +583,8 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
 		return -EINVAL;
 	}
-	mutex_lock(&pf->port_lock);
-	pf->num_ports = nfp_net_pf_get_num_ports(pf);
+	mutex_lock(&pf->lock);
+	pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
 	ctrl_bar = nfp_net_pf_map_ctrl_bar(pf);
 	if (!ctrl_bar) {
@@ -661,12 +659,12 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
 	pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
-	err = nfp_net_pf_spawn_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
+	err = nfp_net_pf_spawn_vnics(pf, ctrl_bar, tx_bar, rx_bar,
				     stride, &fw_ver);
 	if (err)
 		goto err_clean_ddir;
-	mutex_unlock(&pf->port_lock);
+	mutex_unlock(&pf->lock);
 	return 0;
@@ -676,9 +674,9 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
 err_unmap_tx:
 	nfp_cpp_area_release_free(pf->tx_area);
 err_ctrl_unmap:
-	nfp_cpp_area_release_free(pf->ctrl_area);
+	nfp_cpp_area_release_free(pf->data_vnic_bar);
 err_unlock:
-	mutex_unlock(&pf->port_lock);
+	mutex_unlock(&pf->lock);
 	return err;
 }
@@ -686,21 +684,21 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
 {
 	struct nfp_net *nn;
-	mutex_lock(&pf->port_lock);
+	mutex_lock(&pf->lock);
-	if (list_empty(&pf->ports))
+	if (list_empty(&pf->vnics))
 		goto out;
-	list_for_each_entry(nn, &pf->ports, port_list) {
+	list_for_each_entry(nn, &pf->vnics, vnic_list) {
 		nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
 		nfp_net_clean(nn);
 	}
-	nfp_net_pf_free_netdevs(pf);
+	nfp_net_pf_free_vnics(pf);
 	nfp_net_pci_remove_finish(pf);
 out:
-	mutex_unlock(&pf->port_lock);
+	mutex_unlock(&pf->lock);
 	cancel_work_sync(&pf->port_refresh_work);
 }
@@ -267,7 +267,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
 	nfp_netvf_get_mac_addr(nn);
 	num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries,
-				      NFP_NET_MIN_PORT_IRQS,
+				      NFP_NET_MIN_VNIC_IRQS,
 				      NFP_NET_NON_Q_VECTORS +
 				      nn->dp.num_r_vecs);
 	if (!num_irqs) {
@@ -289,7 +289,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
 	nfp_net_info(nn);
 	vf->ddir = nfp_net_debugfs_device_add(pdev);
-	nfp_net_debugfs_port_add(nn, vf->ddir, 0);
+	nfp_net_debugfs_vnic_add(nn, vf->ddir, 0);
 	return 0;
...