Commit 73e253f0 authored by Jakub Kicinski, committed by David S. Miller

nfp: map all queue controllers at once

RX and TX queue controllers are interleaved.  Instead of creating
two mappings which map the same area at slightly different offset,
create only one mapping.  Always map all queue controllers to simplify
the code and allow reusing the mapping for non-data vNICs.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c24ca95f
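
The addressing this change relies on, sketched in C for context (illustrative only, not code from the commit; the helper name is made up, the constants and the __iomem annotations are the driver's own):

/* TX and RX queue controllers sit interleaved in one contiguous device
 * window, one controller every NFP_QCP_QUEUE_ADDR_SZ (0x800) bytes.
 * Once the whole window is mapped, any queue's control area is plain
 * offset arithmetic off the single mapping, with no per-direction BARs.
 */
static inline void __iomem *
nfp_qc_queue_ptr(void __iomem *qc_bar, unsigned int queue_idx)
{
        return qc_bar + queue_idx * NFP_QCP_QUEUE_ADDR_SZ;
}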
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -63,8 +63,7 @@ struct nfp_nsp_identify;
  * @cpp:		Pointer to the CPP handle
  * @app:		Pointer to the APP handle
  * @data_vnic_bar:	Pointer to the CPP area for the data vNICs' BARs
- * @tx_area:		Pointer to the CPP area for the TX queues
- * @rx_area:		Pointer to the CPP area for the FL/RX queues
+ * @qc_area:		Pointer to the CPP area for the queues
  * @irq_entries:	Array of MSI-X entries for all vNICs
  * @limit_vfs:		Number of VFs supported by firmware (~0 for PCI limit)
  * @num_vfs:		Number of SR-IOV VFs enabled
@@ -88,8 +87,7 @@ struct nfp_pf {
 	struct nfp_app *app;
 
 	struct nfp_cpp_area *data_vnic_bar;
-	struct nfp_cpp_area *tx_area;
-	struct nfp_cpp_area *rx_area;
+	struct nfp_cpp_area *qc_area;
 
 	struct msix_entry *irq_entries;
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -705,6 +705,7 @@ static inline void nn_pci_flush(struct nfp_net *nn)
  * either add to a pointer or to read the pointer value.
  */
 #define NFP_QCP_QUEUE_ADDR_SZ		0x800
+#define NFP_QCP_QUEUE_AREA_SZ		0x80000
 #define NFP_QCP_QUEUE_OFF(_x)		((_x) * NFP_QCP_QUEUE_ADDR_SZ)
 #define NFP_QCP_QUEUE_ADD_RPTR		0x0000
 #define NFP_QCP_QUEUE_ADD_WPTR		0x0004
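
At 0x800 bytes per queue controller, the new 0x80000-byte window spans 0x80000 / 0x800 = 256 queue controllers, i.e. the device's whole queue index space, so the single mapping fits any queue-to-vNIC assignment firmware may choose. A hypothetical compile-time check of that arithmetic (not part of the commit) would be:

#if (NFP_QCP_QUEUE_AREA_SZ / NFP_QCP_QUEUE_ADDR_SZ) != 256
#error "QC window must cover all 256 queue controllers"
#endif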
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -223,31 +223,6 @@ static int nfp_net_pf_get_app_id(struct nfp_pf *pf)
 			  NFP_APP_CORE_NIC);
 }
 
-static unsigned int
-nfp_net_pf_total_qcs(struct nfp_pf *pf, void __iomem *ctrl_bar,
-		     unsigned int stride, u32 start_off, u32 num_off)
-{
-	unsigned int i, min_qc, max_qc;
-
-	min_qc = readl(ctrl_bar + start_off);
-	max_qc = min_qc;
-
-	for (i = 0; i < pf->max_data_vnics; i++) {
-		/* To make our lives simpler only accept configuration where
-		 * queues are allocated to PFs in order (queues of PFn all have
-		 * indexes lower than PFn+1).
-		 */
-		if (max_qc > readl(ctrl_bar + start_off))
-			return 0;
-
-		max_qc = readl(ctrl_bar + start_off);
-		max_qc += readl(ctrl_bar + num_off) * stride;
-
-		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
-	}
-
-	return max_qc - min_qc;
-}
-
 static u8 __iomem *
 nfp_net_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
 		     unsigned int min_size, struct nfp_cpp_area **area)
......@@ -301,15 +276,16 @@ static void nfp_net_pf_free_vnics(struct nfp_pf *pf)
static struct nfp_net *
nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
void __iomem *ctrl_bar,
void __iomem *tx_bar, void __iomem *rx_bar,
void __iomem *ctrl_bar, void __iomem *qc_bar,
int stride, struct nfp_net_fw_version *fw_ver,
unsigned int eth_id)
{
u32 n_tx_rings, n_rx_rings;
u32 tx_base, rx_base, n_tx_rings, n_rx_rings;
struct nfp_net *nn;
int err;
tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
......@@ -321,8 +297,8 @@ nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
nn->app = pf->app;
nn->fw_ver = *fw_ver;
nn->dp.ctrl_bar = ctrl_bar;
nn->tx_bar = tx_bar;
nn->rx_bar = rx_bar;
nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
nn->dp.is_vf = 0;
nn->stride_rx = stride;
nn->stride_tx = stride;
......@@ -374,26 +350,15 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
static int
nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
void __iomem *tx_bar, void __iomem *rx_bar,
int stride, struct nfp_net_fw_version *fw_ver)
void __iomem *qc_bar, int stride,
struct nfp_net_fw_version *fw_ver)
{
u32 prev_tx_base, prev_rx_base, tgt_tx_base, tgt_rx_base;
struct nfp_net *nn;
unsigned int i;
int err;
prev_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
prev_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
for (i = 0; i < pf->max_data_vnics; i++) {
tgt_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
tgt_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
tx_bar += (tgt_tx_base - prev_tx_base) * NFP_QCP_QUEUE_ADDR_SZ;
rx_bar += (tgt_rx_base - prev_rx_base) * NFP_QCP_QUEUE_ADDR_SZ;
prev_tx_base = tgt_tx_base;
prev_rx_base = tgt_rx_base;
nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, tx_bar, rx_bar,
nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, qc_bar,
stride, fw_ver, i);
if (IS_ERR(nn)) {
err = PTR_ERR(nn);
......@@ -430,8 +395,7 @@ static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
static int
nfp_net_pf_spawn_vnics(struct nfp_pf *pf,
void __iomem *ctrl_bar, void __iomem *tx_bar,
void __iomem *rx_bar, int stride,
void __iomem *ctrl_bar, void __iomem *qc_bar, int stride,
struct nfp_net_fw_version *fw_ver)
{
unsigned int id, wanted_irqs, num_irqs, vnics_left, irqs_left;
......@@ -439,8 +403,7 @@ nfp_net_pf_spawn_vnics(struct nfp_pf *pf,
int err;
/* Allocate the vnics and do basic init */
err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, tx_bar, rx_bar,
stride, fw_ver);
err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, qc_bar, stride, fw_ver);
if (err)
return err;
......@@ -534,8 +497,7 @@ static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
nfp_net_pf_app_clean(pf);
nfp_cpp_area_release_free(pf->rx_area);
nfp_cpp_area_release_free(pf->tx_area);
nfp_cpp_area_release_free(pf->qc_area);
nfp_cpp_area_release_free(pf->data_vnic_bar);
}
......@@ -659,11 +621,9 @@ int nfp_net_refresh_eth_port(struct nfp_port *port)
*/
int nfp_net_pci_probe(struct nfp_pf *pf)
{
u32 ctrl_bar_sz, tx_area_sz, rx_area_sz;
u8 __iomem *ctrl_bar, *tx_bar, *rx_bar;
u32 total_tx_qcs, total_rx_qcs;
struct nfp_net_fw_version fw_ver;
u32 start_q;
u8 __iomem *ctrl_bar, *qc_bar;
u32 ctrl_bar_sz;
int stride;
int err;
......@@ -718,53 +678,23 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
}
}
/* Find how many QC structs need to be mapped */
total_tx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
NFP_NET_CFG_START_TXQ,
NFP_NET_CFG_MAX_TXRINGS);
total_rx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
NFP_NET_CFG_START_RXQ,
NFP_NET_CFG_MAX_RXRINGS);
if (!total_tx_qcs || !total_rx_qcs) {
nfp_err(pf->cpp, "Invalid PF QC configuration [%d,%d]\n",
total_tx_qcs, total_rx_qcs);
err = -EINVAL;
goto err_ctrl_unmap;
}
tx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_tx_qcs;
rx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_rx_qcs;
/* Map TX queues */
start_q = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
tx_bar = nfp_net_map_area(pf->cpp, "net.tx", 0, 0,
NFP_PCIE_QUEUE(start_q),
tx_area_sz, &pf->tx_area);
if (IS_ERR(tx_bar)) {
nfp_err(pf->cpp, "Failed to map TX area.\n");
err = PTR_ERR(tx_bar);
/* Map queues */
qc_bar = nfp_net_map_area(pf->cpp, "net.qc", 0, 0,
NFP_PCIE_QUEUE(0), NFP_QCP_QUEUE_AREA_SZ,
&pf->qc_area);
if (IS_ERR(qc_bar)) {
nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
err = PTR_ERR(qc_bar);
goto err_ctrl_unmap;
}
/* Map RX queues */
start_q = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
rx_bar = nfp_net_map_area(pf->cpp, "net.rx", 0, 0,
NFP_PCIE_QUEUE(start_q),
rx_area_sz, &pf->rx_area);
if (IS_ERR(rx_bar)) {
nfp_err(pf->cpp, "Failed to map RX area.\n");
err = PTR_ERR(rx_bar);
goto err_unmap_tx;
}
err = nfp_net_pf_app_init(pf);
if (err)
goto err_unmap_rx;
goto err_unmap_qc;
pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
err = nfp_net_pf_spawn_vnics(pf, ctrl_bar, tx_bar, rx_bar,
stride, &fw_ver);
err = nfp_net_pf_spawn_vnics(pf, ctrl_bar, qc_bar, stride, &fw_ver);
if (err)
goto err_clean_ddir;
......@@ -775,10 +705,8 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
err_clean_ddir:
nfp_net_debugfs_dir_clean(&pf->ddir);
nfp_net_pf_app_clean(pf);
err_unmap_rx:
nfp_cpp_area_release_free(pf->rx_area);
err_unmap_tx:
nfp_cpp_area_release_free(pf->tx_area);
err_unmap_qc:
nfp_cpp_area_release_free(pf->qc_area);
err_ctrl_unmap:
nfp_cpp_area_release_free(pf->data_vnic_bar);
err_unlock:
......
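
To see the new addressing end to end, a worked example (the queue base values are hypothetical; the two assignments are the ones from nfp_net_pf_alloc_vnic() above):

/* Say firmware reports these bases in one vNIC's control BAR: */
u32 tx_base = 8;    /* readl(ctrl_bar + NFP_NET_CFG_START_TXQ) */
u32 rx_base = 136;  /* readl(ctrl_bar + NFP_NET_CFG_START_RXQ) */

nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ; /* qc_bar + 0x4000  */
nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ; /* qc_bar + 0x44000 */

Previously each of these pointers required its own CPP mapping whose base already included the first queue's offset; now both fall out of the single qc_area mapping.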