Commit e40d5d78 authored by David S. Miller's avatar David S. Miller

Merge branch 'netronome-NFP4000-and-NFP6000-PF-driver'

Jakub Kicinski says:

====================
Netronome NFP4000 and NFP6000 PF driver

This is a base PF driver for Netronome NFP4000 and NFP6000 chips.  This
series doesn't add any exciting new features, it provides a foundation
for supporting more advanced firmware applications.

Patch 1 moves a bitfield-related helper from our BPF code to the global
header.

Patch 2 renames the kernel module and adds a new main file.  We were
considering 3-module approach (pf, vf, common netdev library) but
ultimately settled on a single module to keep things simple.

Patch 3 adds support for accessing chip internals.  It provides a way of
configuring access windows to different parts of chip memory and issuing
pretty much any commands on chip's NoC.

Patches 4, 5, 6, 7, 8 provide support for accessing and interpreting
various hardware and firmware information structures.

Patch 9 introduces service processor (NSP) ABI.  This ABI gives us
access to PHY/SFP module configuration and information as well as
methods for unloading and loading application firmware.

Patches 10 and 11 modify the existing netdev code to make it possible
to support multi-port devices (sharing a PCI device).

Patch 12 adds a new driver probe path which will be used for the PF
PCI device IDs.  It utilizes the newly added infrastructure and is able
to load application FW and spawn netdevs for all card's ports.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 4f2bd6b3 63461a02
...@@ -15,21 +15,21 @@ config NET_VENDOR_NETRONOME ...@@ -15,21 +15,21 @@ config NET_VENDOR_NETRONOME
if NET_VENDOR_NETRONOME if NET_VENDOR_NETRONOME
config NFP_NETVF config NFP
tristate "Netronome(R) NFP4000/NFP6000 VF NIC driver" tristate "Netronome(R) NFP4000/NFP6000 NIC driver"
depends on PCI && PCI_MSI depends on PCI && PCI_MSI
depends on VXLAN || VXLAN=n depends on VXLAN || VXLAN=n
---help--- ---help---
This driver supports SR-IOV virtual functions of This driver supports the Netronome(R) NFP4000/NFP6000 based
the Netronome(R) NFP4000/NFP6000 cards working as cards working as a advanced Ethernet NIC. It works with both
a advanced Ethernet NIC. SR-IOV physical and virtual functions.
config NFP_NET_DEBUG config NFP_DEBUG
bool "Debug support for Netronome(R) NFP3200/NFP6000 NIC drivers" bool "Debug support for Netronome(R) NFP4000/NFP6000 NIC drivers"
depends on NFP_NET || NFP_NETVF depends on NFP
---help--- ---help---
Enable extra sanity checks and debugfs support in Enable extra sanity checks and debugfs support in
Netronome(R) NFP3200/NFP6000 NIC PF and VF drivers. Netronome(R) NFP4000/NFP6000 NIC drivers.
Note: selecting this option may adversely impact Note: selecting this option may adversely impact
performance. performance.
......
...@@ -2,4 +2,4 @@ ...@@ -2,4 +2,4 @@
# Makefile for the Netronome network device drivers # Makefile for the Netronome network device drivers
# #
obj-$(CONFIG_NFP_NETVF) += nfp/ obj-$(CONFIG_NFP) += nfp/
obj-$(CONFIG_NFP_NETVF) += nfp_netvf.o obj-$(CONFIG_NFP) += nfp.o
nfp_netvf-objs := \ nfp-objs := \
nfpcore/nfp6000_pcie.o \
nfpcore/nfp_cppcore.o \
nfpcore/nfp_cpplib.o \
nfpcore/nfp_hwinfo.o \
nfpcore/nfp_mip.o \
nfpcore/nfp_nffw.o \
nfpcore/nfp_nsp.o \
nfpcore/nfp_nsp_eth.o \
nfpcore/nfp_resource.o \
nfpcore/nfp_rtsym.o \
nfpcore/nfp_target.o \
nfp_main.o \
nfp_net_common.o \ nfp_net_common.o \
nfp_net_ethtool.o \ nfp_net_ethtool.o \
nfp_net_offload.o \ nfp_net_offload.o \
nfp_net_main.o \
nfp_netvf_main.o nfp_netvf_main.o
ifeq ($(CONFIG_BPF_SYSCALL),y) ifeq ($(CONFIG_BPF_SYSCALL),y)
nfp_netvf-objs += \ nfp-objs += \
nfp_bpf_verifier.o \ nfp_bpf_verifier.o \
nfp_bpf_jit.o nfp_bpf_jit.o
endif endif
nfp_netvf-$(CONFIG_NFP_NET_DEBUG) += nfp_net_debugfs.o nfp-$(CONFIG_NFP_DEBUG) += nfp_net_debugfs.o
...@@ -39,8 +39,6 @@ ...@@ -39,8 +39,6 @@
#include <linux/list.h> #include <linux/list.h>
#include <linux/types.h> #include <linux/types.h>
#define FIELD_FIT(mask, val) (!((((u64)val) << __bf_shf(mask)) & ~(mask)))
/* For branch fixup logic use up-most byte of branch instruction as scratch /* For branch fixup logic use up-most byte of branch instruction as scratch
* area. Remember to clear this before sending instructions to HW! * area. Remember to clear this before sending instructions to HW!
*/ */
......
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* nfp_main.c
* Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
* Alejandro Lucero <alejandro.lucero@netronome.com>
* Jason McMullan <jason.mcmullan@netronome.com>
* Rolf Neugebauer <rolf.neugebauer@netronome.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/vermagic.h>
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nsp_eth.h"
#include "nfpcore/nfp6000_pcie.h"
#include "nfp_main.h"
#include "nfp_net.h"
static const char nfp_driver_name[] = "nfp";
/* Exported for the netdev code; VERMAGIC ties the version to the kernel */
const char nfp_driver_version[] = VERMAGIC_STRING;

/* PCI IDs this driver binds to: NFP4000/NFP6000 PF devices whose
 * subsystem vendor is Netronome (any subsystem device ID).
 */
static const struct pci_device_id nfp_pci_device_ids[] = {
	{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP6000,
	  PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
	  PCI_ANY_ID, 0,
	},
	{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP4000,
	  PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
	  PCI_ANY_ID, 0,
	},
	{ 0, } /* Required last entry. */
};
MODULE_DEVICE_TABLE(pci, nfp_pci_device_ids);
/* Enable @num_vfs SR-IOV virtual functions on @pdev.
 *
 * Return: number of VFs enabled on success, negative errno on failure,
 *         0 when the kernel is built without CONFIG_PCI_IOV.
 *
 * Restructured with #else so the no-IOV fallback is not dead code when
 * CONFIG_PCI_IOV is set (the original unconditionally placed "return 0;"
 * after an unconditional "return num_vfs;").
 */
static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct nfp_pf *pf = pci_get_drvdata(pdev);
	int err;

	err = pci_enable_sriov(pdev, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable PCI sriov: %d\n", err);
		return err;
	}

	/* Remember the count so remove/configure paths can tear it down */
	pf->num_vfs = num_vfs;

	dev_dbg(&pdev->dev, "Created %d VFs.\n", pf->num_vfs);

	return num_vfs;
#else
	return 0;
#endif
}
/* Tear down SR-IOV on @pdev, unless VFs are currently assigned to guests.
 *
 * Return: 0 on success (or when built without CONFIG_PCI_IOV),
 *         -EPERM when VFs are assigned and cannot be removed safely.
 */
static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
{
#ifdef CONFIG_PCI_IOV
	struct nfp_pf *pf = pci_get_drvdata(pdev);

	/* Pulling SR-IOV out from under an assigned VF would break the
	 * guest using it, so leave the hardware enabled and refuse.
	 */
	if (pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Disabling while VFs assigned - VFs will not be deallocated\n");
		return -EPERM;
	}

	pci_disable_sriov(pdev);
	pf->num_vfs = 0;

	dev_dbg(&pdev->dev, "Removed VFs.\n");
#endif
	return 0;
}
/* sriov_configure callback: a request for zero VFs means "disable". */
static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	return num_vfs ? nfp_pcie_sriov_enable(pdev, num_vfs)
		       : nfp_pcie_sriov_disable(pdev);
}
/**
 * nfp_net_fw_find() - Find the correct firmware image for netdev mode
 * @pdev: PCI Device structure
 * @pf:	  NFP PF Device structure
 *
 * Builds a firmware file name of the form
 * "netronome/nic_<partno>_<n>x<speed>...nffw" from the assembly part
 * number (hwinfo) and the port speed layout (NSP ETH table), then asks
 * user space for that image.
 *
 * Return: firmware if found and requested successfully.
 */
static const struct firmware *
nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf)
{
	const struct firmware *fw = NULL;
	struct nfp_eth_table_port *port;
	const char *fw_model;
	char fw_name[256];
	int spc, err = 0;
	int i, j;

	/* Without the media config we can't tell which image fits */
	if (!pf->eth_tbl) {
		dev_err(&pdev->dev, "Error: can't identify media config\n");
		return NULL;
	}

	fw_model = nfp_hwinfo_lookup(pf->cpp, "assembly.partno");
	if (!fw_model) {
		dev_err(&pdev->dev, "Error: can't read part number\n");
		return NULL;
	}

	/* spc tracks the remaining space in fw_name.  snprintf() returns
	 * the length it would have written, so spc goes non-positive on
	 * truncation, which stops the loop and the checks below catch it.
	 */
	spc = ARRAY_SIZE(fw_name);
	spc -= snprintf(fw_name, spc, "netronome/nic_%s", fw_model);

	/* Append one "_<count>x<speed-in-Gbps>" group per run of
	 * consecutive ports sharing the same speed.
	 */
	for (i = 0; spc > 0 && i < pf->eth_tbl->count; i += j) {
		port = &pf->eth_tbl->ports[i];
		j = 1;
		while (i + j < pf->eth_tbl->count &&
		       port->speed == port[j].speed)
			j++;

		/* Write position is the already-used prefix of fw_name */
		spc -= snprintf(&fw_name[ARRAY_SIZE(fw_name) - spc], spc,
				"_%dx%d", j, port->speed / 1000);
	}

	if (spc <= 0)
		return NULL;

	spc -= snprintf(&fw_name[ARRAY_SIZE(fw_name) - spc], spc, ".nffw");
	if (spc <= 0)
		return NULL;

	err = request_firmware(&fw, fw_name, &pdev->dev);
	if (err)
		return NULL;

	dev_info(&pdev->dev, "Loading FW image: %s\n", fw_name);

	return fw;
}
/**
 * nfp_fw_load() - Load the firmware image
 * @pdev: PCI Device structure
 * @pf:	  NFP PF Device structure
 * @nsp:  NFP SP handle
 *
 * (Fixed kernel-doc: the header previously documented "nfp_net_fw_load()"
 * which does not match this function's name.)
 *
 * Return: -ERRNO, 0 for no firmware loaded, 1 for firmware loaded
 */
static int
nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp)
{
	const struct firmware *fw;
	u16 interface;
	int err;

	interface = nfp_cpp_interface(pf->cpp);
	if (NFP_CPP_INTERFACE_UNIT_of(interface) != 0) {
		/* Only Unit 0 should reset or load firmware */
		dev_info(&pdev->dev, "Firmware will be loaded by partner\n");
		return 0;
	}

	/* No matching image is not fatal - run with whatever FW is there */
	fw = nfp_net_fw_find(pdev, pf);
	if (!fw)
		return 0;

	dev_info(&pdev->dev, "Soft-reset, loading FW image\n");
	err = nfp_nsp_device_soft_reset(nsp);
	if (err < 0) {
		dev_err(&pdev->dev, "Failed to soft reset the NFP: %d\n",
			err);
		goto exit_release_fw;
	}

	err = nfp_nsp_load_fw(nsp, fw);
	if (err < 0) {
		dev_err(&pdev->dev, "FW loading failed: %d\n", err);
		goto exit_release_fw;
	}

	dev_info(&pdev->dev, "Finished loading FW image\n");

exit_release_fw:
	/* Firmware blob is no longer needed once handed to the NSP */
	release_firmware(fw);

	return err < 0 ? err : 1;
}
/* Soft-reset the NFP to unload the application firmware.  Failures are
 * only logged - there is nothing more the caller can do at this point.
 */
static void nfp_fw_unload(struct nfp_pf *pf)
{
	struct nfp_nsp *state;
	int ret;

	state = nfp_nsp_open(pf->cpp);
	if (IS_ERR(state)) {
		nfp_err(pf->cpp, "Reset failed, can't open NSP\n");
		return;
	}

	ret = nfp_nsp_device_soft_reset(state);
	if (ret < 0)
		dev_warn(&pf->pdev->dev, "Couldn't unload firmware: %d\n", ret);
	else
		dev_info(&pf->pdev->dev, "Firmware safely unloaded\n");

	nfp_nsp_close(state);
}
/* PF probe: enable the PCI device, acquire a CPP handle, wait for the
 * service processor, load application FW and spawn the port netdevs.
 * The error labels unwind in exact reverse order of acquisition.
 *
 * Fix: the label formerly named "err_disable_msix" disables no MSI-X -
 * it only clears drvdata and frees @pf - so it is renamed "err_free_pf"
 * to say what it actually does.
 */
static int nfp_pci_probe(struct pci_dev *pdev,
			 const struct pci_device_id *pci_id)
{
	struct nfp_nsp *nsp;
	struct nfp_pf *pf;
	int err;

	err = pci_enable_device(pdev);
	if (err < 0)
		return err;

	pci_set_master(pdev);

	err = dma_set_mask_and_coherent(&pdev->dev,
					DMA_BIT_MASK(NFP_NET_MAX_DMA_BITS));
	if (err)
		goto err_pci_disable;

	err = pci_request_regions(pdev, nfp_driver_name);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to reserve pci resources.\n");
		goto err_pci_disable;
	}

	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf) {
		err = -ENOMEM;
		goto err_rel_regions;
	}
	INIT_LIST_HEAD(&pf->ports);
	pci_set_drvdata(pdev, pf);
	pf->pdev = pdev;

	pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev);
	if (IS_ERR_OR_NULL(pf->cpp)) {
		/* PTR_ERR(NULL) is 0 - map the NULL case to -ENOMEM too */
		err = PTR_ERR(pf->cpp);
		if (err >= 0)
			err = -ENOMEM;
		goto err_free_pf;
	}

	nsp = nfp_nsp_open(pf->cpp);
	if (IS_ERR(nsp)) {
		err = PTR_ERR(nsp);
		goto err_cpp_free;
	}

	err = nfp_nsp_wait(nsp);
	if (err < 0) {
		nfp_nsp_close(nsp);
		goto err_cpp_free;
	}

	/* May be NULL; nfp_net_fw_find() copes with a missing table */
	pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);

	err = nfp_fw_load(pdev, pf, nsp);
	nfp_nsp_close(nsp);
	if (err < 0) {
		dev_err(&pdev->dev, "Failed to load FW\n");
		goto err_eth_tbl_free;
	}

	/* nfp_fw_load() returns 1 if FW was loaded, 0 if not */
	pf->fw_loaded = !!err;

	err = nfp_net_pci_probe(pf);
	if (err)
		goto err_fw_unload;

	return 0;

err_fw_unload:
	if (pf->fw_loaded)
		nfp_fw_unload(pf);
err_eth_tbl_free:
	kfree(pf->eth_tbl);
err_cpp_free:
	nfp_cpp_free(pf->cpp);
err_free_pf:
	pci_set_drvdata(pdev, NULL);
	kfree(pf);
err_rel_regions:
	pci_release_regions(pdev);
err_pci_disable:
	pci_disable_device(pdev);

	return err;
}
/* PF remove: mirror of nfp_pci_probe(), tearing resources down in
 * reverse order of acquisition.
 */
static void nfp_pci_remove(struct pci_dev *pdev)
{
	struct nfp_pf *pf = pci_get_drvdata(pdev);

	/* Netdevs go first - they use the CPP handle freed below */
	if (!list_empty(&pf->ports))
		nfp_net_pci_remove(pf);

	/* Best effort; may fail with -EPERM if VFs are assigned to guests */
	nfp_pcie_sriov_disable(pdev);

	if (pf->fw_loaded)
		nfp_fw_unload(pf);

	pci_set_drvdata(pdev, NULL);
	nfp_cpp_free(pf->cpp);

	kfree(pf->eth_tbl);
	kfree(pf);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
/* PF driver ops; the VF driver (nfp_netvf_pci_driver) is registered
 * separately in nfp_main_init().
 */
static struct pci_driver nfp_pci_driver = {
	.name			= nfp_driver_name,
	.id_table		= nfp_pci_device_ids,
	.probe			= nfp_pci_probe,
	.remove			= nfp_pci_remove,
	.sriov_configure	= nfp_pcie_sriov_configure,
};
/* Module init: create the debugfs root, then register the PF and VF
 * PCI drivers, unwinding in reverse on failure.
 */
static int __init nfp_main_init(void)
{
	int ret;

	pr_info("%s: NFP PCIe Driver, Copyright (C) 2014-2017 Netronome Systems\n",
		nfp_driver_name);

	nfp_net_debugfs_create();

	ret = pci_register_driver(&nfp_pci_driver);
	if (ret < 0)
		goto err_destroy_debugfs;

	ret = pci_register_driver(&nfp_netvf_pci_driver);
	if (ret)
		goto err_unreg_pf;

	return 0;

err_unreg_pf:
	pci_unregister_driver(&nfp_pci_driver);
err_destroy_debugfs:
	nfp_net_debugfs_destroy();
	return ret;
}
/* Module exit: undo nfp_main_init() in reverse order */
static void __exit nfp_main_exit(void)
{
	pci_unregister_driver(&nfp_netvf_pci_driver);
	pci_unregister_driver(&nfp_pci_driver);
	nfp_net_debugfs_destroy();
}
module_init(nfp_main_init);
module_exit(nfp_main_exit);

/* Firmware images nfp_net_fw_find() may request; listing them here lets
 * tooling (e.g. initramfs builders) pull them in automatically.
 */
MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_1x40.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_4x10.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0096-0001_2x10.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_2x40.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_4x10_1x40.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_8x10.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x10.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x25.nffw");

MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("The Netronome Flow Processor (NFP) driver.");
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* nfp_main.h
* Author: Jason McMullan <jason.mcmullan@netronome.com>
*/
#ifndef NFP_MAIN_H
#define NFP_MAIN_H
#include <linux/list.h>
#include <linux/types.h>
#include <linux/msi.h>
#include <linux/pci.h>
struct dentry;
struct pci_dev;
struct nfp_cpp;
struct nfp_cpp_area;
struct nfp_eth_table;
/**
 * struct nfp_pf - NFP PF-specific device structure
 * @pdev:	 Backpointer to PCI device
 * @cpp:	 Pointer to the CPP handle
 * @ctrl_area:	 Pointer to the CPP area for the control BAR
 * @tx_area:	 Pointer to the CPP area for the TX queues
 * @rx_area:	 Pointer to the CPP area for the FL/RX queues
 * @irq_entries: Array of MSI-X entries for all ports
 * @num_vfs:	 Number of SR-IOV VFs enabled
 * @fw_loaded:	 Is the firmware loaded?
 * @eth_tbl:	 NSP ETH table (may be NULL; owned and kfree()d by this
 *		 driver)
 * @ddir:	 Per-device debugfs directory
 * @num_ports:	 Number of adapter ports
 * @ports:	 Linked list of port structures (struct nfp_net)
 */
struct nfp_pf {
	struct pci_dev *pdev;

	struct nfp_cpp *cpp;

	struct nfp_cpp_area *ctrl_area;
	struct nfp_cpp_area *tx_area;
	struct nfp_cpp_area *rx_area;

	struct msix_entry *irq_entries;

	unsigned int num_vfs;

	bool fw_loaded;

	struct nfp_eth_table *eth_tbl;

	struct dentry *ddir;

	unsigned int num_ports;
	struct list_head ports;
};

/* VF driver registered alongside the PF driver in nfp_main_init() */
extern struct pci_driver nfp_netvf_pci_driver;

/* Netdev-level probe/remove for the PF - presumably implemented by the
 * shared netdev code (nfp_net_main); confirm against the Makefile.
 */
int nfp_net_pci_probe(struct nfp_pf *pf);
void nfp_net_pci_remove(struct nfp_pf *pf);
#endif /* NFP_MAIN_H */
/* /*
* Copyright (C) 2015 Netronome Systems, Inc. * Copyright (C) 2015-2017 Netronome Systems, Inc.
* *
* This software is dual licensed under the GNU General License Version 2, * This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this * June 1991 as shown in the file COPYING in the top-level directory of this
...@@ -43,6 +43,7 @@ ...@@ -43,6 +43,7 @@
#define _NFP_NET_H_ #define _NFP_NET_H_
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/io-64-nonatomic-hi-lo.h> #include <linux/io-64-nonatomic-hi-lo.h>
...@@ -83,6 +84,7 @@ ...@@ -83,6 +84,7 @@
#define NFP_NET_NON_Q_VECTORS 2 #define NFP_NET_NON_Q_VECTORS 2
#define NFP_NET_IRQ_LSC_IDX 0 #define NFP_NET_IRQ_LSC_IDX 0
#define NFP_NET_IRQ_EXN_IDX 1 #define NFP_NET_IRQ_EXN_IDX 1
#define NFP_NET_MIN_PORT_IRQS (NFP_NET_NON_Q_VECTORS + 1)
/* Queue/Ring definitions */ /* Queue/Ring definitions */
#define NFP_NET_MAX_TX_RINGS 64 /* Max. # of Tx rings per device */ #define NFP_NET_MAX_TX_RINGS 64 /* Max. # of Tx rings per device */
...@@ -345,7 +347,7 @@ struct nfp_net_rx_ring { ...@@ -345,7 +347,7 @@ struct nfp_net_rx_ring {
* @tx_ring: Pointer to TX ring * @tx_ring: Pointer to TX ring
* @rx_ring: Pointer to RX ring * @rx_ring: Pointer to RX ring
* @xdp_ring: Pointer to an extra TX ring for XDP * @xdp_ring: Pointer to an extra TX ring for XDP
* @irq_idx: Index into MSI-X table * @irq_entry: MSI-X table entry (use for talking to the device)
* @rx_sync: Seqlock for atomic updates of RX stats * @rx_sync: Seqlock for atomic updates of RX stats
* @rx_pkts: Number of received packets * @rx_pkts: Number of received packets
* @rx_bytes: Number of received bytes * @rx_bytes: Number of received bytes
...@@ -362,6 +364,7 @@ struct nfp_net_rx_ring { ...@@ -362,6 +364,7 @@ struct nfp_net_rx_ring {
* @tx_lso: Counter of LSO packets sent * @tx_lso: Counter of LSO packets sent
* @tx_errors: How many TX errors were encountered * @tx_errors: How many TX errors were encountered
* @tx_busy: How often was TX busy (no space)? * @tx_busy: How often was TX busy (no space)?
* @irq_vector: Interrupt vector number (use for talking to the OS)
* @handler: Interrupt handler for this ring vector * @handler: Interrupt handler for this ring vector
* @name: Name of the interrupt vector * @name: Name of the interrupt vector
* @affinity_mask: SMP affinity mask for this vector * @affinity_mask: SMP affinity mask for this vector
...@@ -378,7 +381,7 @@ struct nfp_net_r_vector { ...@@ -378,7 +381,7 @@ struct nfp_net_r_vector {
struct nfp_net_tx_ring *tx_ring; struct nfp_net_tx_ring *tx_ring;
struct nfp_net_rx_ring *rx_ring; struct nfp_net_rx_ring *rx_ring;
int irq_idx; u16 irq_entry;
struct u64_stats_sync rx_sync; struct u64_stats_sync rx_sync;
u64 rx_pkts; u64 rx_pkts;
...@@ -400,6 +403,7 @@ struct nfp_net_r_vector { ...@@ -400,6 +403,7 @@ struct nfp_net_r_vector {
u64 tx_errors; u64 tx_errors;
u64 tx_busy; u64 tx_busy;
u32 irq_vector;
irq_handler_t handler; irq_handler_t handler;
char name[IFNAMSIZ + 8]; char name[IFNAMSIZ + 8];
cpumask_t affinity_mask; cpumask_t affinity_mask;
...@@ -431,20 +435,13 @@ struct nfp_stat_pair { ...@@ -431,20 +435,13 @@ struct nfp_stat_pair {
* struct nfp_net - NFP network device structure * struct nfp_net - NFP network device structure
* @pdev: Backpointer to PCI device * @pdev: Backpointer to PCI device
* @netdev: Backpointer to net_device structure * @netdev: Backpointer to net_device structure
* @nfp_fallback: Is the driver used in fallback mode?
* @is_vf: Is the driver attached to a VF? * @is_vf: Is the driver attached to a VF?
* @fw_loaded: Is the firmware loaded?
* @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf * @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf
* @bpf_offload_xdp: Offloaded BPF program is XDP * @bpf_offload_xdp: Offloaded BPF program is XDP
* @ctrl: Local copy of the control register/word. * @ctrl: Local copy of the control register/word.
* @fl_bufsz: Currently configured size of the freelist buffers * @fl_bufsz: Currently configured size of the freelist buffers
* @rx_offset: Offset in the RX buffers where packet data starts * @rx_offset: Offset in the RX buffers where packet data starts
* @xdp_prog: Installed XDP program * @xdp_prog: Installed XDP program
* @cpp: Pointer to the CPP handle
* @nfp_dev_cpp: Pointer to the NFP Device handle
* @ctrl_area: Pointer to the CPP area for the control BAR
* @tx_area: Pointer to the CPP area for the TX queues
* @rx_area: Pointer to the CPP area for the FL/RX queues
* @fw_ver: Firmware version * @fw_ver: Firmware version
* @cap: Capabilities advertised by the Firmware * @cap: Capabilities advertised by the Firmware
* @max_mtu: Maximum support MTU advertised by the Firmware * @max_mtu: Maximum support MTU advertised by the Firmware
...@@ -494,14 +491,13 @@ struct nfp_stat_pair { ...@@ -494,14 +491,13 @@ struct nfp_stat_pair {
* @tx_bar: Pointer to mapped TX queues * @tx_bar: Pointer to mapped TX queues
* @rx_bar: Pointer to mapped FL/RX queues * @rx_bar: Pointer to mapped FL/RX queues
* @debugfs_dir: Device directory in debugfs * @debugfs_dir: Device directory in debugfs
* @port_list: Entry on device port list
*/ */
struct nfp_net { struct nfp_net {
struct pci_dev *pdev; struct pci_dev *pdev;
struct net_device *netdev; struct net_device *netdev;
unsigned nfp_fallback:1;
unsigned is_vf:1; unsigned is_vf:1;
unsigned fw_loaded:1;
unsigned bpf_offload_skip_sw:1; unsigned bpf_offload_skip_sw:1;
unsigned bpf_offload_xdp:1; unsigned bpf_offload_xdp:1;
...@@ -515,18 +511,6 @@ struct nfp_net { ...@@ -515,18 +511,6 @@ struct nfp_net {
struct nfp_net_tx_ring *tx_rings; struct nfp_net_tx_ring *tx_rings;
struct nfp_net_rx_ring *rx_rings; struct nfp_net_rx_ring *rx_rings;
#ifdef CONFIG_PCI_IOV
unsigned int num_vfs;
struct vf_data_storage *vfinfo;
int vf_rate_link_speed;
#endif
struct nfp_cpp *cpp;
struct platform_device *nfp_dev_cpp;
struct nfp_cpp_area *ctrl_area;
struct nfp_cpp_area *tx_area;
struct nfp_cpp_area *rx_area;
struct nfp_net_fw_version fw_ver; struct nfp_net_fw_version fw_ver;
u32 cap; u32 cap;
u32 max_mtu; u32 max_mtu;
...@@ -589,11 +573,12 @@ struct nfp_net { ...@@ -589,11 +573,12 @@ struct nfp_net {
u8 __iomem *qcp_cfg; u8 __iomem *qcp_cfg;
u8 __iomem *ctrl_bar; u8 __iomem *ctrl_bar;
u8 __iomem *q_bar;
u8 __iomem *tx_bar; u8 __iomem *tx_bar;
u8 __iomem *rx_bar; u8 __iomem *rx_bar;
struct dentry *debugfs_dir; struct dentry *debugfs_dir;
struct list_head port_list;
}; };
struct nfp_net_ring_set { struct nfp_net_ring_set {
...@@ -770,8 +755,7 @@ static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q) ...@@ -770,8 +755,7 @@ static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
} }
/* Globals */ /* Globals */
extern const char nfp_net_driver_name[]; extern const char nfp_driver_version[];
extern const char nfp_net_driver_version[];
/* Prototypes */ /* Prototypes */
void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver, void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
...@@ -789,17 +773,24 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update); ...@@ -789,17 +773,24 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update);
void nfp_net_rss_write_itbl(struct nfp_net *nn); void nfp_net_rss_write_itbl(struct nfp_net *nn);
void nfp_net_rss_write_key(struct nfp_net *nn); void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn); void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
int nfp_net_irqs_alloc(struct nfp_net *nn);
void nfp_net_irqs_disable(struct nfp_net *nn); unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
unsigned int min_irqs, unsigned int want_irqs);
void nfp_net_irqs_disable(struct pci_dev *pdev);
void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
unsigned int n);
int int
nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog, nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog,
struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx); struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx);
#ifdef CONFIG_NFP_NET_DEBUG #ifdef CONFIG_NFP_DEBUG
void nfp_net_debugfs_create(void); void nfp_net_debugfs_create(void);
void nfp_net_debugfs_destroy(void); void nfp_net_debugfs_destroy(void);
void nfp_net_debugfs_adapter_add(struct nfp_net *nn); struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev);
void nfp_net_debugfs_adapter_del(struct nfp_net *nn); void nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id);
void nfp_net_debugfs_dir_clean(struct dentry **dir);
#else #else
static inline void nfp_net_debugfs_create(void) static inline void nfp_net_debugfs_create(void)
{ {
...@@ -809,14 +800,20 @@ static inline void nfp_net_debugfs_destroy(void) ...@@ -809,14 +800,20 @@ static inline void nfp_net_debugfs_destroy(void)
{ {
} }
static inline void nfp_net_debugfs_adapter_add(struct nfp_net *nn) static inline struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev)
{
return NULL;
}
static inline void
nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id)
{ {
} }
static inline void nfp_net_debugfs_adapter_del(struct nfp_net *nn) static inline void nfp_net_debugfs_dir_clean(struct dentry **dir)
{ {
} }
#endif /* CONFIG_NFP_NET_DEBUG */ #endif /* CONFIG_NFP_DEBUG */
void nfp_net_filter_stats_timer(unsigned long data); void nfp_net_filter_stats_timer(unsigned long data);
int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf); int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf);
......
/* /*
* Copyright (C) 2015 Netronome Systems, Inc. * Copyright (C) 2015-2017 Netronome Systems, Inc.
* *
* This software is dual licensed under the GNU General License Version 2, * This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this * June 1991 as shown in the file COPYING in the top-level directory of this
...@@ -281,72 +281,76 @@ static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr) ...@@ -281,72 +281,76 @@ static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
} }
/** /**
* nfp_net_msix_alloc() - Try to allocate MSI-X irqs * nfp_net_irqs_alloc() - allocates MSI-X irqs
* @nn: NFP Network structure * @pdev: PCI device structure
* @nr_vecs: Number of MSI-X vectors to allocate * @irq_entries: Array to be initialized and used to hold the irq entries
* * @min_irqs: Minimal acceptable number of interrupts
* For MSI-X we want at least NFP_NET_NON_Q_VECTORS + 1 vectors. * @wanted_irqs: Target number of interrupts to allocate
* *
* Return: Number of MSI-X vectors obtained or 0 on error. * Return: Number of irqs obtained or 0 on error.
*/ */
static int nfp_net_msix_alloc(struct nfp_net *nn, int nr_vecs) unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
unsigned int min_irqs, unsigned int wanted_irqs)
{ {
struct pci_dev *pdev = nn->pdev; unsigned int i;
int nvecs; int got_irqs;
int i;
for (i = 0; i < nr_vecs; i++) for (i = 0; i < wanted_irqs; i++)
nn->irq_entries[i].entry = i; irq_entries[i].entry = i;
nvecs = pci_enable_msix_range(pdev, nn->irq_entries, got_irqs = pci_enable_msix_range(pdev, irq_entries,
NFP_NET_NON_Q_VECTORS + 1, nr_vecs); min_irqs, wanted_irqs);
if (nvecs < 0) { if (got_irqs < 0) {
nn_warn(nn, "Failed to enable MSI-X. Wanted %d-%d (err=%d)\n", dev_err(&pdev->dev, "Failed to enable %d-%d MSI-X (err=%d)\n",
NFP_NET_NON_Q_VECTORS + 1, nr_vecs, nvecs); min_irqs, wanted_irqs, got_irqs);
return 0; return 0;
} }
return nvecs; if (got_irqs < wanted_irqs)
dev_warn(&pdev->dev, "Unable to allocate %d IRQs got only %d\n",
wanted_irqs, got_irqs);
return got_irqs;
} }
/** /**
* nfp_net_irqs_alloc() - allocates MSI-X irqs * nfp_net_irqs_assign() - Assign interrupts allocated externally to netdev
* @nn: NFP Network structure * @nn: NFP Network structure
* @irq_entries: Table of allocated interrupts
* @n: Size of @irq_entries (number of entries to grab)
* *
* Return: Number of irqs obtained or 0 on error. * After interrupts are allocated with nfp_net_irqs_alloc() this function
* should be called to assign them to a specific netdev (port).
*/ */
int nfp_net_irqs_alloc(struct nfp_net *nn) void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
unsigned int n)
{ {
int wanted_irqs;
unsigned int n;
wanted_irqs = nn->num_r_vecs + NFP_NET_NON_Q_VECTORS;
n = nfp_net_msix_alloc(nn, wanted_irqs);
if (n == 0) {
nn_err(nn, "Failed to allocate MSI-X IRQs\n");
return 0;
}
nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS; nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
nn->num_r_vecs = nn->max_r_vecs; nn->num_r_vecs = nn->max_r_vecs;
if (n < wanted_irqs) memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n);
nn_warn(nn, "Unable to allocate %d vectors. Got %d instead\n",
wanted_irqs, n);
return n; if (nn->num_rx_rings > nn->num_r_vecs ||
nn->num_tx_rings > nn->num_r_vecs)
nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n",
nn->num_rx_rings, nn->num_tx_rings, nn->num_r_vecs);
nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings);
nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings);
nn->num_stack_tx_rings = nn->num_tx_rings;
} }
/** /**
* nfp_net_irqs_disable() - Disable interrupts * nfp_net_irqs_disable() - Disable interrupts
* @nn: NFP Network structure * @pdev: PCI device structure
* *
* Undoes what @nfp_net_irqs_alloc() does. * Undoes what @nfp_net_irqs_alloc() does.
*/ */
void nfp_net_irqs_disable(struct nfp_net *nn) void nfp_net_irqs_disable(struct pci_dev *pdev)
{ {
pci_disable_msix(nn->pdev); pci_disable_msix(pdev);
} }
/** /**
...@@ -410,10 +414,13 @@ static void nfp_net_read_link_status(struct nfp_net *nn) ...@@ -410,10 +414,13 @@ static void nfp_net_read_link_status(struct nfp_net *nn)
static irqreturn_t nfp_net_irq_lsc(int irq, void *data) static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
{ {
struct nfp_net *nn = data; struct nfp_net *nn = data;
struct msix_entry *entry;
entry = &nn->irq_entries[NFP_NET_IRQ_LSC_IDX];
nfp_net_read_link_status(nn); nfp_net_read_link_status(nn);
nfp_net_irq_unmask(nn, NFP_NET_IRQ_LSC_IDX); nfp_net_irq_unmask(nn, entry->entry);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
...@@ -476,32 +483,28 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring, ...@@ -476,32 +483,28 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
} }
/** /**
* nfp_net_irqs_assign() - Assign IRQs and setup rvecs. * nfp_net_vecs_init() - Assign IRQs and setup rvecs.
* @netdev: netdev structure * @netdev: netdev structure
*/ */
static void nfp_net_irqs_assign(struct net_device *netdev) static void nfp_net_vecs_init(struct net_device *netdev)
{ {
struct nfp_net *nn = netdev_priv(netdev); struct nfp_net *nn = netdev_priv(netdev);
struct nfp_net_r_vector *r_vec; struct nfp_net_r_vector *r_vec;
int r; int r;
if (nn->num_rx_rings > nn->num_r_vecs ||
nn->num_tx_rings > nn->num_r_vecs)
nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n",
nn->num_rx_rings, nn->num_tx_rings, nn->num_r_vecs);
nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings);
nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings);
nn->num_stack_tx_rings = nn->num_tx_rings;
nn->lsc_handler = nfp_net_irq_lsc; nn->lsc_handler = nfp_net_irq_lsc;
nn->exn_handler = nfp_net_irq_exn; nn->exn_handler = nfp_net_irq_exn;
for (r = 0; r < nn->max_r_vecs; r++) { for (r = 0; r < nn->max_r_vecs; r++) {
struct msix_entry *entry;
entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];
r_vec = &nn->r_vecs[r]; r_vec = &nn->r_vecs[r];
r_vec->nfp_net = nn; r_vec->nfp_net = nn;
r_vec->handler = nfp_net_irq_rxtx; r_vec->handler = nfp_net_irq_rxtx;
r_vec->irq_idx = NFP_NET_NON_Q_VECTORS + r; r_vec->irq_entry = entry->entry;
r_vec->irq_vector = entry->vector;
cpumask_set_cpu(r, &r_vec->affinity_mask); cpumask_set_cpu(r, &r_vec->affinity_mask);
} }
...@@ -534,7 +537,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset, ...@@ -534,7 +537,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
entry->vector, err); entry->vector, err);
return err; return err;
} }
nn_writeb(nn, ctrl_offset, vector_idx); nn_writeb(nn, ctrl_offset, entry->entry);
return 0; return 0;
} }
...@@ -1706,7 +1709,7 @@ static int nfp_net_poll(struct napi_struct *napi, int budget) ...@@ -1706,7 +1709,7 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
if (pkts_polled < budget) { if (pkts_polled < budget) {
napi_complete_done(napi, pkts_polled); napi_complete_done(napi, pkts_polled);
nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_idx); nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
} }
return pkts_polled; return pkts_polled;
...@@ -1988,7 +1991,6 @@ static int ...@@ -1988,7 +1991,6 @@ static int
nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
int idx) int idx)
{ {
struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
int err; int err;
/* Setup NAPI */ /* Setup NAPI */
...@@ -1997,17 +1999,19 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, ...@@ -1997,17 +1999,19 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
snprintf(r_vec->name, sizeof(r_vec->name), snprintf(r_vec->name, sizeof(r_vec->name),
"%s-rxtx-%d", nn->netdev->name, idx); "%s-rxtx-%d", nn->netdev->name, idx);
err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec); err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
r_vec);
if (err) { if (err) {
netif_napi_del(&r_vec->napi); netif_napi_del(&r_vec->napi);
nn_err(nn, "Error requesting IRQ %d\n", entry->vector); nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
return err; return err;
} }
disable_irq(entry->vector); disable_irq(r_vec->irq_vector);
irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask); irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);
nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, entry->vector, entry->entry); nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector,
r_vec->irq_entry);
return 0; return 0;
} }
...@@ -2015,11 +2019,9 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, ...@@ -2015,11 +2019,9 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
static void static void
nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec) nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
{ {
struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx]; irq_set_affinity_hint(r_vec->irq_vector, NULL);
irq_set_affinity_hint(entry->vector, NULL);
netif_napi_del(&r_vec->napi); netif_napi_del(&r_vec->napi);
free_irq(entry->vector, r_vec); free_irq(r_vec->irq_vector, r_vec);
} }
/** /**
...@@ -2148,7 +2150,7 @@ nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn, ...@@ -2148,7 +2150,7 @@ nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
/* Write the DMA address, size and MSI-X info to the device */ /* Write the DMA address, size and MSI-X info to the device */
nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma); nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt)); nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_idx); nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
} }
static void static void
...@@ -2157,7 +2159,7 @@ nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn, ...@@ -2157,7 +2159,7 @@ nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
{ {
nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma); nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt)); nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_idx); nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
} }
static int __nfp_net_set_config_and_enable(struct nfp_net *nn) static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
...@@ -2251,7 +2253,7 @@ static void nfp_net_open_stack(struct nfp_net *nn) ...@@ -2251,7 +2253,7 @@ static void nfp_net_open_stack(struct nfp_net *nn)
for (r = 0; r < nn->num_r_vecs; r++) { for (r = 0; r < nn->num_r_vecs; r++) {
napi_enable(&nn->r_vecs[r].napi); napi_enable(&nn->r_vecs[r].napi);
enable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector); enable_irq(nn->r_vecs[r].irq_vector);
} }
netif_tx_wake_all_queues(nn->netdev); netif_tx_wake_all_queues(nn->netdev);
...@@ -2375,7 +2377,7 @@ static void nfp_net_close_stack(struct nfp_net *nn) ...@@ -2375,7 +2377,7 @@ static void nfp_net_close_stack(struct nfp_net *nn)
nn->link_up = false; nn->link_up = false;
for (r = 0; r < nn->num_r_vecs; r++) { for (r = 0; r < nn->num_r_vecs; r++) {
disable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector); disable_irq(nn->r_vecs[r].irq_vector);
napi_disable(&nn->r_vecs[r].napi); napi_disable(&nn->r_vecs[r].napi);
} }
...@@ -3259,7 +3261,7 @@ int nfp_net_netdev_init(struct net_device *netdev) ...@@ -3259,7 +3261,7 @@ int nfp_net_netdev_init(struct net_device *netdev)
netif_carrier_off(netdev); netif_carrier_off(netdev);
nfp_net_set_ethtool_ops(netdev); nfp_net_set_ethtool_ops(netdev);
nfp_net_irqs_assign(netdev); nfp_net_vecs_init(netdev);
return register_netdev(netdev); return register_netdev(netdev);
} }
......
/* /*
* Copyright (C) 2015 Netronome Systems, Inc. * Copyright (C) 2015-2017 Netronome Systems, Inc.
* *
* This software is dual licensed under the GNU General License Version 2, * This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this * June 1991 as shown in the file COPYING in the top-level directory of this
...@@ -202,16 +202,17 @@ static const struct file_operations nfp_xdp_q_fops = { ...@@ -202,16 +202,17 @@ static const struct file_operations nfp_xdp_q_fops = {
.llseek = seq_lseek .llseek = seq_lseek
}; };
void nfp_net_debugfs_adapter_add(struct nfp_net *nn) void nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id)
{ {
struct dentry *queues, *tx, *rx, *xdp; struct dentry *queues, *tx, *rx, *xdp;
char int_name[16]; char name[20];
int i; int i;
if (IS_ERR_OR_NULL(nfp_dir)) if (IS_ERR_OR_NULL(nfp_dir))
return; return;
nn->debugfs_dir = debugfs_create_dir(pci_name(nn->pdev), nfp_dir); sprintf(name, "port%d", id);
nn->debugfs_dir = debugfs_create_dir(name, ddir);
if (IS_ERR_OR_NULL(nn->debugfs_dir)) if (IS_ERR_OR_NULL(nn->debugfs_dir))
return; return;
...@@ -227,24 +228,38 @@ void nfp_net_debugfs_adapter_add(struct nfp_net *nn) ...@@ -227,24 +228,38 @@ void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
return; return;
for (i = 0; i < min(nn->max_rx_rings, nn->max_r_vecs); i++) { for (i = 0; i < min(nn->max_rx_rings, nn->max_r_vecs); i++) {
sprintf(int_name, "%d", i); sprintf(name, "%d", i);
debugfs_create_file(int_name, S_IRUSR, rx, debugfs_create_file(name, S_IRUSR, rx,
&nn->r_vecs[i], &nfp_rx_q_fops); &nn->r_vecs[i], &nfp_rx_q_fops);
debugfs_create_file(int_name, S_IRUSR, xdp, debugfs_create_file(name, S_IRUSR, xdp,
&nn->r_vecs[i], &nfp_xdp_q_fops); &nn->r_vecs[i], &nfp_xdp_q_fops);
} }
for (i = 0; i < min(nn->max_tx_rings, nn->max_r_vecs); i++) { for (i = 0; i < min(nn->max_tx_rings, nn->max_r_vecs); i++) {
sprintf(int_name, "%d", i); sprintf(name, "%d", i);
debugfs_create_file(int_name, S_IRUSR, tx, debugfs_create_file(name, S_IRUSR, tx,
&nn->r_vecs[i], &nfp_tx_q_fops); &nn->r_vecs[i], &nfp_tx_q_fops);
} }
} }
void nfp_net_debugfs_adapter_del(struct nfp_net *nn) struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev)
{ {
debugfs_remove_recursive(nn->debugfs_dir); struct dentry *dev_dir;
nn->debugfs_dir = NULL;
if (IS_ERR_OR_NULL(nfp_dir))
return NULL;
dev_dir = debugfs_create_dir(pci_name(pdev), nfp_dir);
if (IS_ERR_OR_NULL(dev_dir))
return NULL;
return dev_dir;
}
void nfp_net_debugfs_dir_clean(struct dentry **dir)
{
debugfs_remove_recursive(*dir);
*dir = NULL;
} }
void nfp_net_debugfs_create(void) void nfp_net_debugfs_create(void)
......
/* /*
* Copyright (C) 2015 Netronome Systems, Inc. * Copyright (C) 2015-2017 Netronome Systems, Inc.
* *
* This software is dual licensed under the GNU General License Version 2, * This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this * June 1991 as shown in the file COPYING in the top-level directory of this
...@@ -132,9 +132,9 @@ static void nfp_net_get_drvinfo(struct net_device *netdev, ...@@ -132,9 +132,9 @@ static void nfp_net_get_drvinfo(struct net_device *netdev,
{ {
struct nfp_net *nn = netdev_priv(netdev); struct nfp_net *nn = netdev_priv(netdev);
strlcpy(drvinfo->driver, nfp_net_driver_name, sizeof(drvinfo->driver)); strlcpy(drvinfo->driver, nn->pdev->driver->name,
strlcpy(drvinfo->version, nfp_net_driver_version, sizeof(drvinfo->driver));
sizeof(drvinfo->version)); strlcpy(drvinfo->version, nfp_driver_version, sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d.%d", "%d.%d.%d.%d",
......
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* nfp_net_main.c
* Netronome network device driver: Main entry point
* Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
* Alejandro Lucero <alejandro.lucero@netronome.com>
* Jason McMullan <jason.mcmullan@netronome.com>
* Rolf Neugebauer <rolf.neugebauer@netronome.com>
*/
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/msi.h>
#include <linux/random.h>
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_nsp_eth.h"
#include "nfpcore/nfp6000_pcie.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_main.h"
#define NFP_PF_CSR_SLICE_SIZE (32 * 1024)
/* Check the "board.state" HWInfo key to decide whether the NFP has
 * completed its initialization sequence.  Only state 15 is treated as
 * ready (state encoding comes from board firmware -- NOTE(review):
 * confirm against the board state ABI documentation).
 */
static int nfp_is_ready(struct nfp_cpp *cpp)
{
	const char *value;
	long board_state;

	value = nfp_hwinfo_lookup(cpp, "board.state");
	if (!value)
		return 0;

	if (kstrtol(value, 0, &board_state) < 0)
		return 0;

	return board_state == 15;
}
/**
 * nfp_net_map_area() - Help function to map an area
 * @cpp:    NFP CPP handler
 * @name:   Name for the area
 * @isl:    Island ID to address the area on
 * @target: CPP target
 * @addr:   CPP address
 * @size:   Size of the area
 * @area:   Area handle (returned).
 *
 * This function is primarily to simplify the code in the main probe
 * function. To undo the effect of this functions call
 * @nfp_cpp_area_release_free(*area);
 *
 * Return: Pointer to memory mapped area or ERR_PTR
 */
static u8 __iomem *nfp_net_map_area(struct nfp_cpp *cpp,
				    const char *name, int isl, int target,
				    unsigned long long addr, unsigned long size,
				    struct nfp_cpp_area **area)
{
	u8 __iomem *res;
	u32 dest;
	int err;

	dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, isl);

	/* Allocate a named CPP area, acquire it and get its iomem mapping.
	 * Each step is unwound via the goto chain below on failure.
	 */
	*area = nfp_cpp_area_alloc_with_name(cpp, dest, name, addr, size);
	if (!*area) {
		err = -EIO;
		goto err_area;
	}

	err = nfp_cpp_area_acquire(*area);
	if (err < 0)
		goto err_acquire;

	res = nfp_cpp_area_iomem(*area);
	if (!res) {
		err = -EIO;
		goto err_map;
	}

	return res;

err_map:
	nfp_cpp_area_release(*area);
err_acquire:
	nfp_cpp_area_free(*area);
err_area:
	return (u8 __iomem *)ERR_PTR(err);
}
/* Look up "eth<id>.mac" in HWInfo and program it as the netdev's
 * permanent MAC address.  Falls back to a random address when the key
 * is missing or does not parse as six colon-separated hex octets.
 */
static void
nfp_net_get_mac_addr_hwinfo(struct nfp_net *nn, struct nfp_cpp *cpp,
			    unsigned int id)
{
	const char *hwinfo_val;
	u8 addr[ETH_ALEN];
	char key[32];

	snprintf(key, sizeof(key), "eth%d.mac", id);

	hwinfo_val = nfp_hwinfo_lookup(cpp, key);
	if (!hwinfo_val) {
		dev_warn(&nn->pdev->dev,
			 "Can't lookup MAC address. Generate\n");
		eth_hw_addr_random(nn->netdev);
		return;
	}

	if (sscanf(hwinfo_val, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
		   &addr[0], &addr[1], &addr[2],
		   &addr[3], &addr[4], &addr[5]) != 6) {
		dev_warn(&nn->pdev->dev,
			 "Can't parse MAC address (%s). Generate.\n",
			 hwinfo_val);
		eth_hw_addr_random(nn->netdev);
		return;
	}

	ether_addr_copy(nn->netdev->dev_addr, addr);
	ether_addr_copy(nn->netdev->perm_addr, addr);
}
/**
 * nfp_net_get_mac_addr() - Get the MAC address.
 * @nn:       NFP Network structure
 * @pf:       NFP PF device structure
 * @id:       NFP port id
 *
 * First try to get the MAC address from NSP ETH table. If that
 * fails try HWInfo. As a last resort generate a random address.
 */
static void
nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_pf *pf, unsigned int id)
{
	int idx;

	/* Prefer the NSP ETH table entry whose eth_index matches this port. */
	if (pf->eth_tbl) {
		for (idx = 0; idx < pf->eth_tbl->count; idx++) {
			const u8 *mac = pf->eth_tbl->ports[idx].mac_addr;

			if (pf->eth_tbl->ports[idx].eth_index != id)
				continue;

			ether_addr_copy(nn->netdev->dev_addr, mac);
			ether_addr_copy(nn->netdev->perm_addr, mac);
			return;
		}
	}

	/* No ETH table (or no matching entry) -- HWInfo or random. */
	nfp_net_get_mac_addr_hwinfo(nn, pf->cpp, id);
}
/* Read the per-PF port count from the firmware run-time symbol
 * "nfd_cfg_pf<N>_num_ports", where N is this PCIe PF's unit number.
 * Defaults to one port when the symbol is absent or unreadable.
 */
static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
{
	char sym_name[256];
	u64 num_ports;
	int err = 0;
	int unit;

	unit = NFP_CPP_INTERFACE_UNIT_of(nfp_cpp_interface(pf->cpp));
	snprintf(sym_name, sizeof(sym_name), "nfd_cfg_pf%d_num_ports", unit);

	num_ports = nfp_rtsym_read_le(pf->cpp, sym_name, &err);
	if (err) {
		/* -ENOENT simply means the FW doesn't publish the symbol;
		 * anything else deserves a log line.  Default to one port.
		 */
		if (err != -ENOENT)
			nfp_err(pf->cpp, "Unable to read adapter port count\n");
		num_ports = 1;
	}

	return num_ports;
}
/* Compute the total span of queue controller (QC) structs used by all
 * ports of this PF, walking one NFP_PF_CSR_SLICE_SIZE control-BAR slice
 * per port.  Returns 0 when the per-port queue ranges are not laid out
 * in strictly increasing order (configuration rejected by design).
 *
 * @ctrl_bar:  base of the mapped PF control BAR (port 0's slice)
 * @stride:    queue stride (QC entries per ring)
 * @start_off: offset of the "start queue" register within a slice
 * @num_off:   offset of the "number of rings" register within a slice
 */
static unsigned int
nfp_net_pf_total_qcs(struct nfp_pf *pf, void __iomem *ctrl_bar,
		     unsigned int stride, u32 start_off, u32 num_off)
{
	unsigned int i, min_qc, max_qc;

	/* Port 0's start queue is the lowest index of the whole range. */
	min_qc = readl(ctrl_bar + start_off);
	max_qc = min_qc;

	for (i = 0; i < pf->num_ports; i++) {
		/* To make our lives simpler only accept configuration where
		 * queues are allocated to PFs in order (queues of PFn all have
		 * indexes lower than PFn+1).
		 */
		if (max_qc > readl(ctrl_bar + start_off))
			return 0;

		/* End of this port's range = its start + rings * stride. */
		max_qc = readl(ctrl_bar + start_off);
		max_qc += readl(ctrl_bar + num_off) * stride;

		/* Advance to the next port's control-BAR slice. */
		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
	}

	return max_qc - min_qc;
}
/* Locate the firmware's "_pf<N>_net_bar0" run-time symbol, validate that
 * it is large enough for one control-BAR slice per port, and map it.
 * Returns the mapped BAR (also recorded in pf->ctrl_area) or NULL.
 */
static u8 __iomem *nfp_net_pf_map_ctrl_bar(struct nfp_pf *pf)
{
	const struct nfp_rtsym *sym;
	char sym_name[256];
	u8 __iomem *bar;
	int unit;

	unit = NFP_CPP_INTERFACE_UNIT_of(nfp_cpp_interface(pf->cpp));
	snprintf(sym_name, sizeof(sym_name), "_pf%d_net_bar0", unit);

	sym = nfp_rtsym_lookup(pf->cpp, sym_name);
	if (!sym) {
		dev_err(&pf->pdev->dev,
			"Failed to find PF BAR0 symbol %s\n", sym_name);
		return NULL;
	}

	/* Each port needs its own NFP_PF_CSR_SLICE_SIZE slice. */
	if (sym->size < pf->num_ports * NFP_PF_CSR_SLICE_SIZE) {
		dev_err(&pf->pdev->dev,
			"PF BAR0 too small to contain %d ports\n",
			pf->num_ports);
		return NULL;
	}

	bar = nfp_net_map_area(pf->cpp, "net.ctrl",
			       sym->domain, sym->target,
			       sym->addr, sym->size,
			       &pf->ctrl_area);
	if (IS_ERR(bar)) {
		dev_err(&pf->pdev->dev, "Failed to map PF BAR0: %ld\n",
			PTR_ERR(bar));
		return NULL;
	}

	return bar;
}
/* Unlink and free every port netdev still queued on the PF's port list. */
static void nfp_net_pf_free_netdevs(struct nfp_pf *pf)
{
	struct nfp_net *port;

	while (!list_empty(&pf->ports)) {
		port = list_first_entry(&pf->ports, struct nfp_net,
					port_list);
		list_del(&port->port_list);

		nfp_net_netdev_free(port);
	}
}
/* Allocate one per-port netdev sized from the control BAR's ring maxima
 * and seed it with the mapped BARs, firmware version and queue stride.
 *
 * Return: new nfp_net structure or ERR_PTR from nfp_net_netdev_alloc().
 */
static struct nfp_net *
nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
			     void __iomem *tx_bar, void __iomem *rx_bar,
			     int stride, struct nfp_net_fw_version *fw_ver)
{
	struct nfp_net *nn;
	u32 max_tx, max_rx;

	max_tx = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
	max_rx = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);

	/* Allocate and initialise the netdev */
	nn = nfp_net_netdev_alloc(pf->pdev, max_tx, max_rx);
	if (IS_ERR(nn))
		return nn;

	nn->fw_ver = *fw_ver;
	nn->ctrl_bar = ctrl_bar;
	nn->tx_bar = tx_bar;
	nn->rx_bar = rx_bar;
	nn->is_vf = 0;
	nn->stride_tx = stride;
	nn->stride_rx = stride;

	return nn;
}
/* Finish initialization of one port netdev: set its MAC address,
 * ME clock frequency, register it and hook up debugfs.
 */
static int
nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
			    unsigned int id)
{
	int ret;

	/* MAC from NSP ETH table, HWInfo, or random as a last resort. */
	nfp_net_get_mac_addr(nn, pf, id);

	/* Get ME clock frequency from ctrl BAR
	 * XXX for now frequency is hardcoded until we figure out how
	 * to get the value from nfp-hwinfo into ctrl bar
	 */
	nn->me_freq_mhz = 1200;

	ret = nfp_net_netdev_init(nn->netdev);
	if (ret)
		return ret;

	nfp_net_debugfs_port_add(nn, pf->ddir, id);
	nfp_net_info(nn);

	return 0;
}
/* Allocate a netdev for every port of the PF and queue them on
 * pf->ports.  Each port's control-BAR slice and TX/RX queue windows are
 * derived by walking the BARs: TX/RX pointers advance by the delta of
 * consecutive ports' start-queue registers, the control BAR by one
 * NFP_PF_CSR_SLICE_SIZE slice per port.  On failure all netdevs
 * allocated so far are freed.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int
nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar,
			 void __iomem *tx_bar, void __iomem *rx_bar,
			 int stride, struct nfp_net_fw_version *fw_ver)
{
	u32 prev_tx_base, prev_rx_base, tgt_tx_base, tgt_rx_base;
	struct nfp_net *nn;
	unsigned int i;
	int err;

	/* Port 0's start queues anchor the relative BAR arithmetic below. */
	prev_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	prev_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);

	for (i = 0; i < pf->num_ports; i++) {
		/* Advance TX/RX windows by this port's offset from the
		 * previous port's start queue.
		 */
		tgt_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
		tgt_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
		tx_bar += (tgt_tx_base - prev_tx_base) * NFP_QCP_QUEUE_ADDR_SZ;
		rx_bar += (tgt_rx_base - prev_rx_base) * NFP_QCP_QUEUE_ADDR_SZ;
		prev_tx_base = tgt_tx_base;
		prev_rx_base = tgt_rx_base;

		nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar, rx_bar,
						  stride, fw_ver);
		if (IS_ERR(nn)) {
			err = PTR_ERR(nn);
			goto err_free_prev;
		}
		list_add_tail(&nn->port_list, &pf->ports);

		/* Next port's control-BAR slice. */
		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
	}

	return 0;

err_free_prev:
	/* Frees every netdev queued on pf->ports so far. */
	nfp_net_pf_free_netdevs(pf);
	return err;
}
/* Allocate all port netdevs, distribute MSI-X vectors among them and
 * register them.  On any failure everything done so far is unwound.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int
nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
			 void __iomem *ctrl_bar, void __iomem *tx_bar,
			 void __iomem *rx_bar, int stride,
			 struct nfp_net_fw_version *fw_ver)
{
	unsigned int id, wanted_irqs, num_irqs, ports_left, irqs_left;
	struct nfp_net *nn;
	int err;

	/* Allocate the netdevs and do basic init */
	err = nfp_net_pf_alloc_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
				       stride, fw_ver);
	if (err)
		return err;

	/* Get MSI-X vectors */
	wanted_irqs = 0;
	list_for_each_entry(nn, &pf->ports, port_list)
		wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->num_r_vecs;
	pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
				  GFP_KERNEL);
	if (!pf->irq_entries) {
		err = -ENOMEM;
		goto err_nn_free;
	}

	num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
				      NFP_NET_MIN_PORT_IRQS * pf->num_ports,
				      wanted_irqs);
	if (!num_irqs) {
		/* After list_for_each_entry() above terminates, @nn points
		 * at the list head container, not a valid port -- warning
		 * through it (nn_warn) would dereference invalid memory.
		 * Warn via the PF's CPP handle instead.
		 */
		nfp_warn(pf->cpp, "Unable to allocate MSI-X Vectors. Exiting\n");
		err = -ENOMEM;
		goto err_vec_free;
	}

	/* Distribute IRQs to ports */
	irqs_left = num_irqs;
	ports_left = pf->num_ports;
	list_for_each_entry(nn, &pf->ports, port_list) {
		unsigned int n;

		/* Spread remaining vectors evenly over remaining ports. */
		n = DIV_ROUND_UP(irqs_left, ports_left);
		nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
				    n);
		irqs_left -= n;
		ports_left--;
	}

	/* Finish netdev init and register */
	id = 0;
	list_for_each_entry(nn, &pf->ports, port_list) {
		err = nfp_net_pf_init_port_netdev(pf, nn, id);
		if (err)
			goto err_prev_deinit;

		id++;
	}

	return 0;

err_prev_deinit:
	/* Unwind only the ports registered before the failing one. */
	list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) {
		nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
		nfp_net_netdev_clean(nn->netdev);
	}
	nfp_net_irqs_disable(pf->pdev);
err_vec_free:
	kfree(pf->irq_entries);
err_nn_free:
	nfp_net_pf_free_netdevs(pf);
	return err;
}
/*
* PCI device functions
*/
/**
 * nfp_net_pci_probe() - PF netdev probe: map BARs and spawn port netdevs
 * @pf:	NFP PF device structure
 *
 * Checks board readiness and firmware ABI, maps the control BAR and the
 * TX/RX queue controller windows, then allocates and registers one
 * netdev per port.
 *
 * Return: 0 on success; -EPROBE_DEFER when the control BAR symbol is
 * missing and we did not load the firmware ourselves; negative errno
 * otherwise.
 */
int nfp_net_pci_probe(struct nfp_pf *pf)
{
	u8 __iomem *ctrl_bar, *tx_bar, *rx_bar;
	u32 total_tx_qcs, total_rx_qcs;
	struct nfp_net_fw_version fw_ver;
	u32 tx_area_sz, rx_area_sz;
	u32 start_q;
	int stride;
	int err;

	/* Verify that the board has completed initialization */
	if (!nfp_is_ready(pf->cpp)) {
		nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n");
		return -EINVAL;
	}

	pf->num_ports = nfp_net_pf_get_num_ports(pf);

	/* No BAR0 symbol: if we loaded FW it's broken (-EINVAL); otherwise
	 * defer in case another entity still brings the device up.
	 */
	ctrl_bar = nfp_net_pf_map_ctrl_bar(pf);
	if (!ctrl_bar)
		return pf->fw_loaded ? -EINVAL : -EPROBE_DEFER;

	nfp_net_get_fw_version(&fw_ver, ctrl_bar);
	if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
		nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
			fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
		err = -EINVAL;
		goto err_ctrl_unmap;
	}

	/* Determine stride */
	if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
		stride = 2;
		nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
	} else {
		switch (fw_ver.major) {
		case 1 ... 4:
			stride = 4;
			break;
		default:
			nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
				fw_ver.resv, fw_ver.class,
				fw_ver.major, fw_ver.minor);
			err = -EINVAL;
			goto err_ctrl_unmap;
		}
	}

	/* Find how many QC structs need to be mapped */
	total_tx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
					    NFP_NET_CFG_START_TXQ,
					    NFP_NET_CFG_MAX_TXRINGS);
	total_rx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
					    NFP_NET_CFG_START_RXQ,
					    NFP_NET_CFG_MAX_RXRINGS);
	if (!total_tx_qcs || !total_rx_qcs) {
		nfp_err(pf->cpp, "Invalid PF QC configuration [%d,%d]\n",
			total_tx_qcs, total_rx_qcs);
		err = -EINVAL;
		goto err_ctrl_unmap;
	}

	tx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_tx_qcs;
	rx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_rx_qcs;

	/* Map TX queues */
	start_q = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	tx_bar = nfp_net_map_area(pf->cpp, "net.tx", 0, 0,
				  NFP_PCIE_QUEUE(start_q),
				  tx_area_sz, &pf->tx_area);
	if (IS_ERR(tx_bar)) {
		nfp_err(pf->cpp, "Failed to map TX area.\n");
		err = PTR_ERR(tx_bar);
		goto err_ctrl_unmap;
	}

	/* Map RX queues */
	start_q = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
	rx_bar = nfp_net_map_area(pf->cpp, "net.rx", 0, 0,
				  NFP_PCIE_QUEUE(start_q),
				  rx_area_sz, &pf->rx_area);
	if (IS_ERR(rx_bar)) {
		nfp_err(pf->cpp, "Failed to map RX area.\n");
		err = PTR_ERR(rx_bar);
		goto err_unmap_tx;
	}

	pf->ddir = nfp_net_debugfs_device_add(pf->pdev);

	err = nfp_net_pf_spawn_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
				       stride, &fw_ver);
	if (err)
		goto err_clean_ddir;

	return 0;

	/* Unwind in reverse order of the setup steps above. */
err_clean_ddir:
	nfp_net_debugfs_dir_clean(&pf->ddir);
	nfp_cpp_area_release_free(pf->rx_area);
err_unmap_tx:
	nfp_cpp_area_release_free(pf->tx_area);
err_ctrl_unmap:
	nfp_cpp_area_release_free(pf->ctrl_area);
	return err;
}
/**
 * nfp_net_pci_remove() - Tear down everything set up by nfp_net_pci_probe()
 * @pf:	NFP PF device structure
 *
 * Unregisters and frees all port netdevs, then releases IRQs, debugfs
 * entries and the mapped CPP areas.  Teardown order mirrors probe in
 * reverse: netdevs must go before IRQs and BAR mappings they use.
 */
void nfp_net_pci_remove(struct nfp_pf *pf)
{
	struct nfp_net *nn;

	/* Unregister every port while its BARs/IRQs are still valid. */
	list_for_each_entry(nn, &pf->ports, port_list) {
		nfp_net_debugfs_dir_clean(&nn->debugfs_dir);

		nfp_net_netdev_clean(nn->netdev);
	}

	nfp_net_pf_free_netdevs(pf);

	nfp_net_debugfs_dir_clean(&pf->ddir);

	nfp_net_irqs_disable(pf->pdev);
	kfree(pf->irq_entries);

	nfp_cpp_area_release_free(pf->rx_area);
	nfp_cpp_area_release_free(pf->tx_area);
	nfp_cpp_area_release_free(pf->ctrl_area);
}
/* /*
* Copyright (C) 2015 Netronome Systems, Inc. * Copyright (C) 2015-2017 Netronome Systems, Inc.
* *
* This software is dual licensed under the GNU General License Version 2, * This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this * June 1991 as shown in the file COPYING in the top-level directory of this
...@@ -45,9 +45,27 @@ ...@@ -45,9 +45,27 @@
#include "nfp_net_ctrl.h" #include "nfp_net_ctrl.h"
#include "nfp_net.h" #include "nfp_net.h"
#include "nfp_main.h"
/**
* struct nfp_net_vf - NFP VF-specific device structure
* @nn: NFP Net structure for this device
* @irq_entries: Pre-allocated array of MSI-X entries
* @q_bar: Pointer to mapped QC memory (NULL if TX/RX mapped directly)
* @ddir: Per-device debugfs directory
*/
struct nfp_net_vf {
struct nfp_net *nn;
struct msix_entry irq_entries[NFP_NET_NON_Q_VECTORS +
NFP_NET_MAX_TX_RINGS];
u8 __iomem *q_bar;
struct dentry *ddir;
};
static const char nfp_net_driver_name[] = "nfp_netvf";
const char nfp_net_driver_name[] = "nfp_netvf";
const char nfp_net_driver_version[] = "0.1";
#define PCI_DEVICE_NFP6000VF 0x6003 #define PCI_DEVICE_NFP6000VF 0x6003
static const struct pci_device_id nfp_netvf_pci_device_ids[] = { static const struct pci_device_id nfp_netvf_pci_device_ids[] = {
{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP6000VF, { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP6000VF,
...@@ -82,15 +100,22 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, ...@@ -82,15 +100,22 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
u32 tx_bar_off, rx_bar_off; u32 tx_bar_off, rx_bar_off;
u32 tx_bar_sz, rx_bar_sz; u32 tx_bar_sz, rx_bar_sz;
int tx_bar_no, rx_bar_no; int tx_bar_no, rx_bar_no;
struct nfp_net_vf *vf;
unsigned int num_irqs;
u8 __iomem *ctrl_bar; u8 __iomem *ctrl_bar;
struct nfp_net *nn; struct nfp_net *nn;
u32 startq; u32 startq;
int stride; int stride;
int err; int err;
vf = kzalloc(sizeof(*vf), GFP_KERNEL);
if (!vf)
return -ENOMEM;
pci_set_drvdata(pdev, vf);
err = pci_enable_device_mem(pdev); err = pci_enable_device_mem(pdev);
if (err) if (err)
return err; goto err_free_vf;
err = pci_request_regions(pdev, nfp_net_driver_name); err = pci_request_regions(pdev, nfp_net_driver_name);
if (err) { if (err) {
...@@ -182,6 +207,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, ...@@ -182,6 +207,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
err = PTR_ERR(nn); err = PTR_ERR(nn);
goto err_ctrl_unmap; goto err_ctrl_unmap;
} }
vf->nn = nn;
nn->fw_ver = fw_ver; nn->fw_ver = fw_ver;
nn->ctrl_bar = ctrl_bar; nn->ctrl_bar = ctrl_bar;
...@@ -205,17 +231,17 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, ...@@ -205,17 +231,17 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
bar_sz = (rx_bar_off + rx_bar_sz) - bar_off; bar_sz = (rx_bar_off + rx_bar_sz) - bar_off;
map_addr = pci_resource_start(pdev, tx_bar_no) + bar_off; map_addr = pci_resource_start(pdev, tx_bar_no) + bar_off;
nn->q_bar = ioremap_nocache(map_addr, bar_sz); vf->q_bar = ioremap_nocache(map_addr, bar_sz);
if (!nn->q_bar) { if (!vf->q_bar) {
nn_err(nn, "Failed to map resource %d\n", tx_bar_no); nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
err = -EIO; err = -EIO;
goto err_netdev_free; goto err_netdev_free;
} }
/* TX queues */ /* TX queues */
nn->tx_bar = nn->q_bar + (tx_bar_off - bar_off); nn->tx_bar = vf->q_bar + (tx_bar_off - bar_off);
/* RX queues */ /* RX queues */
nn->rx_bar = nn->q_bar + (rx_bar_off - bar_off); nn->rx_bar = vf->q_bar + (rx_bar_off - bar_off);
} else { } else {
resource_size_t map_addr; resource_size_t map_addr;
...@@ -240,12 +266,15 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, ...@@ -240,12 +266,15 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
nfp_netvf_get_mac_addr(nn); nfp_netvf_get_mac_addr(nn);
err = nfp_net_irqs_alloc(nn); num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries,
if (!err) { NFP_NET_MIN_PORT_IRQS,
NFP_NET_NON_Q_VECTORS + nn->num_r_vecs);
if (!num_irqs) {
nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n"); nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
err = -EIO; err = -EIO;
goto err_unmap_rx; goto err_unmap_rx;
} }
nfp_net_irqs_assign(nn, vf->irq_entries, num_irqs);
/* Get ME clock frequency from ctrl BAR /* Get ME clock frequency from ctrl BAR
* XXX for now frequency is hardcoded until we figure out how * XXX for now frequency is hardcoded until we figure out how
...@@ -257,25 +286,23 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, ...@@ -257,25 +286,23 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
if (err) if (err)
goto err_irqs_disable; goto err_irqs_disable;
pci_set_drvdata(pdev, nn);
nfp_net_info(nn); nfp_net_info(nn);
nfp_net_debugfs_adapter_add(nn); vf->ddir = nfp_net_debugfs_device_add(pdev);
nfp_net_debugfs_port_add(nn, vf->ddir, 0);
return 0; return 0;
err_irqs_disable: err_irqs_disable:
nfp_net_irqs_disable(nn); nfp_net_irqs_disable(pdev);
err_unmap_rx: err_unmap_rx:
if (!nn->q_bar) if (!vf->q_bar)
iounmap(nn->rx_bar); iounmap(nn->rx_bar);
err_unmap_tx: err_unmap_tx:
if (!nn->q_bar) if (!vf->q_bar)
iounmap(nn->tx_bar); iounmap(nn->tx_bar);
else else
iounmap(nn->q_bar); iounmap(vf->q_bar);
err_netdev_free: err_netdev_free:
pci_set_drvdata(pdev, NULL);
nfp_net_netdev_free(nn); nfp_net_netdev_free(nn);
err_ctrl_unmap: err_ctrl_unmap:
iounmap(ctrl_bar); iounmap(ctrl_bar);
...@@ -283,71 +310,47 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, ...@@ -283,71 +310,47 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
pci_release_regions(pdev); pci_release_regions(pdev);
err_pci_disable: err_pci_disable:
pci_disable_device(pdev); pci_disable_device(pdev);
err_free_vf:
pci_set_drvdata(pdev, NULL);
kfree(vf);
return err; return err;
} }
static void nfp_netvf_pci_remove(struct pci_dev *pdev) static void nfp_netvf_pci_remove(struct pci_dev *pdev)
{ {
struct nfp_net *nn = pci_get_drvdata(pdev); struct nfp_net_vf *vf = pci_get_drvdata(pdev);
struct nfp_net *nn = vf->nn;
/* Note, the order is slightly different from above as we need /* Note, the order is slightly different from above as we need
* to keep the nn pointer around till we have freed everything. * to keep the nn pointer around till we have freed everything.
*/ */
nfp_net_debugfs_adapter_del(nn); nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
nfp_net_debugfs_dir_clean(&vf->ddir);
nfp_net_netdev_clean(nn->netdev); nfp_net_netdev_clean(nn->netdev);
nfp_net_irqs_disable(nn); nfp_net_irqs_disable(pdev);
if (!nn->q_bar) { if (!vf->q_bar) {
iounmap(nn->rx_bar); iounmap(nn->rx_bar);
iounmap(nn->tx_bar); iounmap(nn->tx_bar);
} else { } else {
iounmap(nn->q_bar); iounmap(vf->q_bar);
} }
iounmap(nn->ctrl_bar); iounmap(nn->ctrl_bar);
pci_set_drvdata(pdev, NULL);
nfp_net_netdev_free(nn); nfp_net_netdev_free(nn);
pci_release_regions(pdev); pci_release_regions(pdev);
pci_disable_device(pdev); pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
kfree(vf);
} }
static struct pci_driver nfp_netvf_pci_driver = { struct pci_driver nfp_netvf_pci_driver = {
.name = nfp_net_driver_name, .name = nfp_net_driver_name,
.id_table = nfp_netvf_pci_device_ids, .id_table = nfp_netvf_pci_device_ids,
.probe = nfp_netvf_pci_probe, .probe = nfp_netvf_pci_probe,
.remove = nfp_netvf_pci_remove, .remove = nfp_netvf_pci_remove,
}; };
static int __init nfp_netvf_init(void)
{
int err;
pr_info("%s: NFP VF Network driver, Copyright (C) 2014-2015 Netronome Systems\n",
nfp_net_driver_name);
nfp_net_debugfs_create();
err = pci_register_driver(&nfp_netvf_pci_driver);
if (err) {
nfp_net_debugfs_destroy();
return err;
}
return 0;
}
static void __exit nfp_netvf_exit(void)
{
pci_unregister_driver(&nfp_netvf_pci_driver);
nfp_net_debugfs_destroy();
}
module_init(nfp_netvf_init);
module_exit(nfp_netvf_exit);
MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("NFP VF network device driver");
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef NFP_CRC32_H
#define NFP_CRC32_H
#include <linux/kernel.h>
#include <linux/crc32.h>
/**
* crc32_posix_end() - Finalize POSIX CRC32 working state
* @crc: Current CRC32 working state
* @total_len: Total length of data that was CRC32'd
*
* Return: Final POSIX CRC32 value
*/
/**
 * crc32_posix_end() - Finalize POSIX CRC32 working state
 * @crc:	Current CRC32 working state
 * @total_len:	Total length of data that was CRC32'd
 *
 * Per the POSIX cksum algorithm the message length is appended to the
 * stream, least significant byte first, before the final inversion.
 *
 * Return: Final POSIX CRC32 value
 */
static inline u32 crc32_posix_end(u32 crc, size_t total_len)
{
	/* Feed the length bytes in, LSB first, until none remain. */
	for (; total_len != 0; total_len >>= 8) {
		u8 byte = total_len & 0xff;

		crc = crc32_be(crc, &byte, 1);
	}

	return ~crc;
}
/* Compute the POSIX (cksum-style) CRC32 of @len bytes at @buff. */
static inline u32 crc32_posix(const void *buff, size_t len)
{
	u32 state = crc32_be(0, buff, len);

	return crc32_posix_end(state, len);
}
#endif /* NFP_CRC32_H */
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* nfp.h
* Interface for NFP device access and query functions.
*/
#ifndef __NFP_H__
#define __NFP_H__
#include <linux/device.h>
#include "nfp_cpp.h"
/* Implemented in nfp_hwinfo.c */
const char *nfp_hwinfo_lookup(struct nfp_cpp *cpp, const char *lookup);
/* Implemented in nfp_nsp.c */
struct nfp_nsp;
struct firmware;
struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp);
void nfp_nsp_close(struct nfp_nsp *state);
int nfp_nsp_wait(struct nfp_nsp *state);
int nfp_nsp_device_soft_reset(struct nfp_nsp *state);
int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw);
int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size);
int nfp_nsp_write_eth_table(struct nfp_nsp *state,
const void *buf, unsigned int size);
/* Implemented in nfp_resource.c */
#define NFP_RESOURCE_TBL_TARGET NFP_CPP_TARGET_MU
#define NFP_RESOURCE_TBL_BASE 0x8100000000ULL
/* NFP Resource Table self-identifier */
#define NFP_RESOURCE_TBL_NAME "nfp.res"
#define NFP_RESOURCE_TBL_KEY 0x00000000 /* Special key for entry 0 */
/* All other keys are CRC32-POSIX of the 8-byte identification string */
/* ARM/PCI vNIC Interfaces 0..3 */
#define NFP_RESOURCE_VNIC_PCI_0 "vnic.p0"
#define NFP_RESOURCE_VNIC_PCI_1 "vnic.p1"
#define NFP_RESOURCE_VNIC_PCI_2 "vnic.p2"
#define NFP_RESOURCE_VNIC_PCI_3 "vnic.p3"
/* NFP Hardware Info Database */
#define NFP_RESOURCE_NFP_HWINFO "nfp.info"
/* Service Processor */
#define NFP_RESOURCE_NSP "nfp.sp"
/* Netronone Flow Firmware Table */
#define NFP_RESOURCE_NFP_NFFW "nfp.nffw"
/* MAC Statistics Accumulator */
#define NFP_RESOURCE_MAC_STATISTICS "mac.stat"
struct nfp_resource *
nfp_resource_acquire(struct nfp_cpp *cpp, const char *name);
void nfp_resource_release(struct nfp_resource *res);
u32 nfp_resource_cpp_id(struct nfp_resource *res);
const char *nfp_resource_name(struct nfp_resource *res);
u64 nfp_resource_address(struct nfp_resource *res);
u64 nfp_resource_size(struct nfp_resource *res);
#endif /* !__NFP_H__ */
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef NFP6000_NFP6000_H
#define NFP6000_NFP6000_H
#include <linux/errno.h>
#include <linux/types.h>
/* CPP Target IDs */
#define NFP_CPP_TARGET_INVALID 0
#define NFP_CPP_TARGET_NBI 1
#define NFP_CPP_TARGET_QDR 2
#define NFP_CPP_TARGET_ILA 6
#define NFP_CPP_TARGET_MU 7
#define NFP_CPP_TARGET_PCIE 9
#define NFP_CPP_TARGET_ARM 10
#define NFP_CPP_TARGET_CRYPTO 12
#define NFP_CPP_TARGET_ISLAND_XPB 14 /* Shared with CAP */
#define NFP_CPP_TARGET_ISLAND_CAP 14 /* Shared with XPB */
#define NFP_CPP_TARGET_CT_XPB 14
#define NFP_CPP_TARGET_LOCAL_SCRATCH 15
#define NFP_CPP_TARGET_CLS NFP_CPP_TARGET_LOCAL_SCRATCH
#define NFP_ISL_EMEM0 24
#define NFP_MU_ADDR_ACCESS_TYPE_MASK 3ULL
#define NFP_MU_ADDR_ACCESS_TYPE_DIRECT 2ULL
#define PUSHPULL(_pull, _push) ((_pull) << 4 | (_push) << 0)
#define PUSH_WIDTH(_pushpull) pushpull_width((_pushpull) >> 0)
#define PULL_WIDTH(_pushpull) pushpull_width((_pushpull) >> 4)
/* Decode one 4-bit push/pull width field into a byte count.
 *
 * The field stores log2(width) - 1, so width = 2 << field.  A zero field
 * means the access type is unsupported.
 *
 * Return: width in bytes, or -EINVAL when the field is zero.
 */
static inline int pushpull_width(int pp)
{
	int field = pp & 0xf;

	return field ? 2 << field : -EINVAL;
}
/* Return the bit position of the MU locality field for addressing mode
 * @mode: bit 38 for 40-bit addressing, bit 30 otherwise.  Modes outside
 * 0..3 are rejected with -EINVAL.
 */
static inline int nfp_cppat_mu_locality_lsb(int mode, bool addr40)
{
	if (mode < 0 || mode > 3)
		return -EINVAL;

	return addr40 ? 38 : 30;
}
int nfp_target_pushpull(u32 cpp_id, u64 address);
int nfp_target_cpp(u32 cpp_island_id, u64 cpp_island_address,
u32 *cpp_target_id, u64 *cpp_target_address,
const u32 *imb_table);
#endif /* NFP6000_NFP6000_H */
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* nfp_xpb.h
* Author: Jason McMullan <jason.mcmullan@netronome.com>
*/
#ifndef NFP6000_XPB_H
#define NFP6000_XPB_H
/* For use with NFP6000 Databook "XPB Addressing" section
*/
#define NFP_XPB_OVERLAY(island) (((island) & 0x3f) << 24)
#define NFP_XPB_ISLAND(island) (NFP_XPB_OVERLAY(island) + 0x60000)
#define NFP_XPB_ISLAND_of(offset) (((offset) >> 24) & 0x3F)
/* For use with NFP6000 Databook "XPB Island and Device IDs" chapter
*/
#define NFP_XPB_DEVICE(island, slave, device) \
(NFP_XPB_OVERLAY(island) | \
(((slave) & 3) << 22) | \
(((device) & 0x3f) << 16))
#endif /* NFP6000_XPB_H */
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* nfp6000_pcie.c
* Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
* Jason McMullan <jason.mcmullan@netronome.com>
* Rolf Neugebauer <rolf.neugebauer@netronome.com>
*
* Multiplexes the NFP BARs between NFP internal resources and
* implements the PCIe specific interface for generic CPP bus access.
*
* The BARs are managed with refcounts and are allocated/acquired
* using target, token and offset/size matching. The generic CPP bus
* abstraction builds upon this BAR interface.
*/
#include <asm/unaligned.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kref.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sort.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/pci.h>
#include "nfp_cpp.h"
#include "nfp6000/nfp6000.h"
#include "nfp6000_pcie.h"
#define NFP_PCIE_BAR(_pf) (0x30000 + ((_pf) & 7) * 0xc0)
#define NFP_PCIE_BAR_EXPLICIT_BAR0(_x, _y) \
(0x00000080 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3)))
#define NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType(_x) (((_x) & 0x3) << 30)
#define NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType_of(_x) (((_x) >> 30) & 0x3)
#define NFP_PCIE_BAR_EXPLICIT_BAR0_Token(_x) (((_x) & 0x3) << 28)
#define NFP_PCIE_BAR_EXPLICIT_BAR0_Token_of(_x) (((_x) >> 28) & 0x3)
#define NFP_PCIE_BAR_EXPLICIT_BAR0_Address(_x) (((_x) & 0xffffff) << 0)
#define NFP_PCIE_BAR_EXPLICIT_BAR0_Address_of(_x) (((_x) >> 0) & 0xffffff)
#define NFP_PCIE_BAR_EXPLICIT_BAR1(_x, _y) \
(0x00000084 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3)))
#define NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef(_x) (((_x) & 0x7f) << 24)
#define NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef_of(_x) (((_x) >> 24) & 0x7f)
#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster(_x) (((_x) & 0x3ff) << 14)
#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster_of(_x) (((_x) >> 14) & 0x3ff)
#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef(_x) (((_x) & 0x3fff) << 0)
#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef_of(_x) (((_x) >> 0) & 0x3fff)
#define NFP_PCIE_BAR_EXPLICIT_BAR2(_x, _y) \
(0x00000088 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3)))
#define NFP_PCIE_BAR_EXPLICIT_BAR2_Target(_x) (((_x) & 0xf) << 28)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_Target_of(_x) (((_x) >> 28) & 0xf)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_Action(_x) (((_x) & 0x1f) << 23)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_Action_of(_x) (((_x) >> 23) & 0x1f)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_Length(_x) (((_x) & 0x1f) << 18)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_Length_of(_x) (((_x) >> 18) & 0x1f)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask(_x) (((_x) & 0xff) << 10)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask_of(_x) (((_x) >> 10) & 0xff)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster(_x) (((_x) & 0x3ff) << 0)
#define NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster_of(_x) (((_x) >> 0) & 0x3ff)
#define NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress(_x) (((_x) & 0x1f) << 16)
#define NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress_of(_x) (((_x) >> 16) & 0x1f)
#define NFP_PCIE_BAR_PCIE2CPP_BaseAddress(_x) (((_x) & 0xffff) << 0)
#define NFP_PCIE_BAR_PCIE2CPP_BaseAddress_of(_x) (((_x) >> 0) & 0xffff)
#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect(_x) (((_x) & 0x3) << 27)
#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_of(_x) (((_x) >> 27) & 0x3)
#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT 0
#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT 1
#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE 3
#define NFP_PCIE_BAR_PCIE2CPP_MapType(_x) (((_x) & 0x7) << 29)
#define NFP_PCIE_BAR_PCIE2CPP_MapType_of(_x) (((_x) >> 29) & 0x7)
#define NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED 0
#define NFP_PCIE_BAR_PCIE2CPP_MapType_BULK 1
#define NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET 2
#define NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL 3
#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT0 4
#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT1 5
#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT2 6
#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3 7
#define NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(_x) (((_x) & 0xf) << 23)
#define NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress_of(_x) (((_x) >> 23) & 0xf)
#define NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(_x) (((_x) & 0x3) << 21)
#define NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress_of(_x) (((_x) >> 21) & 0x3)
#define NFP_PCIE_EM 0x020000
#define NFP_PCIE_SRAM 0x000000
#define NFP_PCIE_P2C_FIXED_SIZE(bar) (1 << (bar)->bitsize)
#define NFP_PCIE_P2C_BULK_SIZE(bar) (1 << (bar)->bitsize)
#define NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(bar, x) ((x) << ((bar)->bitsize - 2))
#define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4))
#define NFP_PCIE_P2C_GENERAL_SIZE(bar) (1 << ((bar)->bitsize - 4))
#define NFP_PCIE_CFG_BAR_PCIETOCPPEXPANSIONBAR(bar, slot) \
(0x400 + ((bar) * 8 + (slot)) * 4)
#define NFP_PCIE_CPP_BAR_PCIETOCPPEXPANSIONBAR(bar, slot) \
(((bar) * 8 + (slot)) * 4)
/* The number of explicit BARs to reserve.
* Minimum is 0, maximum is 4 on the NFP6000.
*/
#define NFP_PCIE_EXPLICIT_BARS 2
struct nfp6000_pcie;
struct nfp6000_area_priv;
/**
* struct nfp_bar - describes BAR configuration and usage
* @nfp: backlink to owner
* @barcfg: cached contents of BAR config CSR
* @base: the BAR's base CPP offset
* @mask: mask for the BAR aperture (read only)
* @bitsize: bitsize of BAR aperture (read only)
* @index: index of the BAR
* @refcnt: number of current users
* @iomem: mapped IO memory
* @resource: iomem resource window
*/
struct nfp_bar {
	struct nfp6000_pcie *nfp;	/* backlink to the owning device state */
	u32 barcfg;			/* cached BAR config CSR contents */
	u64 base; /* CPP address base of the currently programmed window */
	u64 mask; /* Bit mask of the bar (aperture size - 1) */
	u32 bitsize; /* Bit size of the bar aperture */
	int index;			/* BAR index: (PCI BAR * 8) + slot */
	atomic_t refcnt;		/* users; 0 means reclaimable */
	void __iomem *iomem;		/* mapping, NULL if not mapped */
	struct resource *resource;	/* backing PCI iomem resource */
};
#define NFP_PCI_BAR_MAX (PCI_64BIT_BAR_COUNT * 8)
/* Per-device state for the NFP6000 PCIe CPP transport. */
struct nfp6000_pcie {
	struct pci_dev *pdev;
	struct device *dev;
	/* PCI BAR management */
	spinlock_t bar_lock;		/* Protect the PCI2CPP BAR cache */
	int bars;			/* number of usable entries in bar[] */
	struct nfp_bar bar[NFP_PCI_BAR_MAX];
	wait_queue_head_t bar_waiters;	/* woken when a BAR refcnt hits 0 */
	/* Reserved BAR access */
	struct {
		void __iomem *csr;	/* expansion BAR CSRs (may be NULL;
					 * config space is used instead then)
					 */
		void __iomem *em;	/* event manager window */
		void __iomem *expl[4];	/* BAR0.4..0.7 explicit windows */
	} iomem;
	/* Explicit IO access */
	struct {
		struct mutex mutex; /* Lock access to this explicit group */
		u8 master_id;		/* signal/data master ID (0x40..0x70) */
		u8 signal_ref;
		void __iomem *data;	/* push/pull data area in PCIe SRAM */
		struct {
			void __iomem *addr;
			int bitsize;	/* 0 marks the group unusable */
			int free[4];	/* per-slot availability flags */
		} group[4];
	} expl;
};
/* Extract the MapType field from the BAR's cached configuration. */
static u32 nfp_bar_maptype(struct nfp_bar *bar)
{
	u32 cfg = bar->barcfg;

	return NFP_PCIE_BAR_PCIE2CPP_MapType_of(cfg);
}
/* Size of one NFP slot: each 64-bit PCI BAR is split into 8 equal slots. */
static resource_size_t nfp_bar_resource_len(struct nfp_bar *bar)
{
	int pci_bar = (bar->index / 8) * 2;
	resource_size_t whole = pci_resource_len(bar->nfp->pdev, pci_bar);

	return whole / 8;
}
/* Bus address of this slot: PCI BAR start plus slot index * slot size. */
static resource_size_t nfp_bar_resource_start(struct nfp_bar *bar)
{
	int pci_bar = (bar->index / 8) * 2;
	resource_size_t slot_len = nfp_bar_resource_len(bar);

	return pci_resource_start(bar->nfp->pdev, pci_bar)
		+ slot_len * (bar->index & 7);
}
#define TARGET_WIDTH_32 4
#define TARGET_WIDTH_64 8
/* Compute the BAR config word and CPP base for a requested window.
 *
 * @bar_config/@bar_base are optional outputs (NULL to only validate).
 * A non-zero, non-RW @act selects a FIXED mapping (16M aperture, action
 * encoded in the BAR); otherwise a BULK mapping (512K aperture) is used.
 *
 * Return: 0 on success, -EINVAL if the request cannot fit this BAR or
 * straddles an aperture boundary.
 */
static int
compute_bar(struct nfp6000_pcie *nfp, struct nfp_bar *bar,
	    u32 *bar_config, u64 *bar_base,
	    int tgt, int act, int tok, u64 offset, size_t size, int width)
{
	u64 mask;
	u32 cfg;
	int bits;

	if (tgt >= NFP_CPP_NUM_TARGETS)
		return -EINVAL;

	/* Encode the CPP access width. */
	if (width == 8)
		cfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
			NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT);
	else if (width == 4)
		cfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
			NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT);
	else if (width == 0)
		cfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
			NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE);
	else
		return -EINVAL;

	if (act != NFP_CPP_ACTION_RW && act != 0) {
		/* Fixed CPP mapping with specific action */
		mask = ~(NFP_PCIE_P2C_FIXED_SIZE(bar) - 1);

		cfg |= NFP_PCIE_BAR_PCIE2CPP_MapType(
			  NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED);
		cfg |= NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(tgt);
		cfg |= NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress(act);
		cfg |= NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(tok);
		bits = 40 - 16;
	} else {
		/* Bulk mapping */
		mask = ~(NFP_PCIE_P2C_BULK_SIZE(bar) - 1);

		cfg |= NFP_PCIE_BAR_PCIE2CPP_MapType(
			  NFP_PCIE_BAR_PCIE2CPP_MapType_BULK);
		cfg |= NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(tgt);
		cfg |= NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(tok);
		bits = 40 - 21;
	}

	/* The window must not straddle an aperture boundary. */
	if ((offset & mask) != ((offset + size - 1) & mask))
		return -EINVAL;
	offset &= mask;

	if (bar->bitsize < bits)
		return -EINVAL;

	cfg |= offset >> bits;

	if (bar_base)
		*bar_base = offset;
	if (bar_config)
		*bar_config = cfg;

	return 0;
}
/* Program a BAR's expansion register with @newcfg and cache the value.
 * Uses the memory-mapped CSR window when available, otherwise falls back
 * to PCI config space writes.
 */
static int
nfp6000_bar_write(struct nfp6000_pcie *nfp, struct nfp_bar *bar, u32 newcfg)
{
	int idx = bar->index >> 3;
	int slot = bar->index & 7;

	if (nfp->iomem.csr) {
		int off = NFP_PCIE_CPP_BAR_PCIETOCPPEXPANSIONBAR(idx, slot);

		writel(newcfg, nfp->iomem.csr + off);
		/* Readback to ensure BAR is flushed */
		readl(nfp->iomem.csr + off);
	} else {
		int off = NFP_PCIE_CFG_BAR_PCIETOCPPEXPANSIONBAR(idx, slot);

		pci_write_config_dword(nfp->pdev, off, newcfg);
	}

	bar->barcfg = newcfg;

	return 0;
}
/* Recompute and program @bar so it covers the requested CPP window.
 * On success bar->base is updated to the new window base.
 */
static int
reconfigure_bar(struct nfp6000_pcie *nfp, struct nfp_bar *bar,
		int tgt, int act, int tok, u64 offset, size_t size, int width)
{
	u64 base;
	u32 cfg;
	int err;

	err = compute_bar(nfp, bar, &cfg, &base,
			  tgt, act, tok, offset, size, width);
	if (err)
		return err;

	bar->base = base;

	return nfp6000_bar_write(nfp, bar, cfg);
}
/* Check if BAR can be used with the given parameters.
 *
 * Decodes the BAR's cached config word and returns 1 when the requested
 * (tgt, act, tok, offset..offset+size, width) window lies entirely inside
 * what the BAR is already programmed for, 0 otherwise.
 */
static int matching_bar(struct nfp_bar *bar, u32 tgt, u32 act, u32 tok,
			u64 offset, size_t size, int width)
{
	int bartgt, baract, bartok;
	int barwidth;
	u32 maptype;
	maptype = NFP_PCIE_BAR_PCIE2CPP_MapType_of(bar->barcfg);
	bartgt = NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress_of(bar->barcfg);
	bartok = NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress_of(bar->barcfg);
	baract = NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress_of(bar->barcfg);
	barwidth = NFP_PCIE_BAR_PCIE2CPP_LengthSelect_of(bar->barcfg);
	/* Translate the LengthSelect encoding back into an access width in
	 * bytes; -1 marks an encoding that can never match.
	 */
	switch (barwidth) {
	case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT:
		barwidth = 4;
		break;
	case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT:
		barwidth = 8;
		break;
	case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE:
		barwidth = 0;
		break;
	default:
		barwidth = -1;
		break;
	}
	/* Normalize wildcard fields per map type: TARGET windows ignore the
	 * token, and TARGET/BULK windows imply the generic R/W action.
	 * The fallthroughs below are intentional.
	 */
	switch (maptype) {
	case NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET:
		bartok = -1;
		/* FALLTHROUGH */
	case NFP_PCIE_BAR_PCIE2CPP_MapType_BULK:
		baract = NFP_CPP_ACTION_RW;
		if (act == 0)
			act = NFP_CPP_ACTION_RW;
		/* FALLTHROUGH */
	case NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED:
		break;
	default:
		/* We don't match explicit bars through the area interface */
		return 0;
	}
	/* Make sure to match up the width */
	if (barwidth != width)
		return 0;
	/* Target/token must match (or be wildcarded) and the requested
	 * range must fall inside the BAR's current aperture.
	 */
	if ((bartgt < 0 || bartgt == tgt) &&
	    (bartok < 0 || bartok == tok) &&
	    (baract == act) &&
	    bar->base <= offset &&
	    (bar->base + (1 << bar->bitsize)) >= (offset + size))
		return 1;
	/* No match */
	return 0;
}
/* Scan all BARs for one already configured to cover the requested window.
 * Return: BAR index, or -1 when none matches.
 */
static int
find_matching_bar(struct nfp6000_pcie *nfp,
		  u32 tgt, u32 act, u32 tok, u64 offset, size_t size, int width)
{
	int i;

	for (i = 0; i < nfp->bars; i++)
		if (matching_bar(&nfp->bar[i], tgt, act, tok,
				 offset, size, width))
			return i;

	return -1;
}
/* Find a free BAR that could be reprogrammed for the requested window.
 *
 * Return: BAR index on success; -EINVAL when no BAR could ever satisfy
 * the request (all candidates are invalid or too small); -EAGAIN when a
 * suitable BAR exists but is currently busy.
 */
static int
find_unused_bar_noblock(struct nfp6000_pcie *nfp,
			int tgt, int act, int tok,
			u64 offset, size_t size, int width)
{
	int unusable = 0;
	int i;

	for (i = 0; i < nfp->bars; i++) {
		struct nfp_bar *b = &nfp->bar[i];

		if (b->bitsize == 0) {
			unusable++;
			continue;
		}

		/* Busy BARs may free up later, so they don't count as
		 * unusable.
		 */
		if (atomic_read(&b->refcnt) != 0)
			continue;

		/* Just check to see if we can make it fit... */
		if (compute_bar(nfp, b, NULL, NULL,
				tgt, act, tok, offset, size, width) < 0)
			unusable++;
		else
			return i;
	}

	return (i == unusable) ? -EINVAL : -EAGAIN;
}
/* Like find_unused_bar_noblock(), but on success returns with bar_lock
 * HELD for the caller (the __release is a sparse annotation only; the
 * lock stays taken).  On failure the lock is dropped before returning.
 */
static int
find_unused_bar_and_lock(struct nfp6000_pcie *nfp,
			 int tgt, int act, int tok,
			 u64 offset, size_t size, int width)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nfp->bar_lock, flags);

	ret = find_unused_bar_noblock(nfp, tgt, act, tok, offset, size, width);
	if (ret < 0) {
		spin_unlock_irqrestore(&nfp->bar_lock, flags);
	} else {
		/* Caller inherits the lock. */
		__release(&nfp->bar_lock);
	}

	return ret;
}
/* Take a reference on @bar; a non-zero refcount keeps the BAR from being
 * reclaimed and reprogrammed by the allocator.
 */
static void nfp_bar_get(struct nfp6000_pcie *nfp, struct nfp_bar *bar)
{
	atomic_inc(&bar->refcnt);
}
/* Drop a reference on @bar; when the last user goes away, wake anyone
 * sleeping in nfp_wait_for_bar() so they can retry allocation.
 */
static void nfp_bar_put(struct nfp6000_pcie *nfp, struct nfp_bar *bar)
{
	if (atomic_dec_and_test(&bar->refcnt))
		wake_up_interruptible(&nfp->bar_waiters);
}
/* Sleep until find_unused_bar_and_lock() stops reporting -EAGAIN.
 *
 * On a 0 return, *barnum holds the result: a BAR index (with bar_lock
 * held by the caller) or a hard error such as -EINVAL (lock not held).
 * Returns -ERESTARTSYS if interrupted by a signal.
 */
static int
nfp_wait_for_bar(struct nfp6000_pcie *nfp, int *barnum,
		 u32 tgt, u32 act, u32 tok, u64 offset, size_t size, int width)
{
	return wait_event_interruptible(nfp->bar_waiters,
		(*barnum = find_unused_bar_and_lock(nfp, tgt, act, tok,
						    offset, size, width))
		!= -EAGAIN);
}
/* Allocate (or reuse) a BAR covering the CPP window described by
 * (tgt, act, tok, offset, size, width).
 *
 * Return: BAR index (>= 0) with its refcount raised, or a negative errno.
 * With @nonblocking set the function never sleeps and may return -EAGAIN;
 * otherwise it waits for another user to release a BAR.
 */
static int
nfp_alloc_bar(struct nfp6000_pcie *nfp,
	      u32 tgt, u32 act, u32 tok,
	      u64 offset, size_t size, int width, int nonblocking)
{
	unsigned long irqflags;
	int barnum, retval;

	/* No BAR aperture exceeds 16M; larger requests can never fit. */
	if (size > (1 << 24))
		return -EINVAL;

	spin_lock_irqsave(&nfp->bar_lock, irqflags);
	barnum = find_matching_bar(nfp, tgt, act, tok, offset, size, width);
	if (barnum >= 0) {
		/* Found a perfect match. */
		nfp_bar_get(nfp, &nfp->bar[barnum]);
		spin_unlock_irqrestore(&nfp->bar_lock, irqflags);
		return barnum;
	}

	barnum = find_unused_bar_noblock(nfp, tgt, act, tok,
					 offset, size, width);
	if (barnum < 0) {
		if (nonblocking)
			goto err_nobar;

		/* Wait until a BAR becomes available. The
		 * find_unused_bar function will reclaim the bar_lock
		 * if a free BAR is found.
		 */
		spin_unlock_irqrestore(&nfp->bar_lock, irqflags);
		retval = nfp_wait_for_bar(nfp, &barnum, tgt, act, tok,
					  offset, size, width);
		if (retval)
			return retval;
		/* The wait only filters out -EAGAIN.  A hard -EINVAL (no
		 * BAR can ever satisfy the request) must not be used as an
		 * array index below - and the lock was not re-taken for it.
		 */
		if (barnum < 0)
			return barnum;
		__acquire(&nfp->bar_lock);
	}

	nfp_bar_get(nfp, &nfp->bar[barnum]);
	retval = reconfigure_bar(nfp, &nfp->bar[barnum],
				 tgt, act, tok, offset, size, width);
	if (retval < 0) {
		nfp_bar_put(nfp, &nfp->bar[barnum]);
		barnum = retval;
	}

err_nobar:
	spin_unlock_irqrestore(&nfp->bar_lock, irqflags);
	return barnum;
}
static void disable_bars(struct nfp6000_pcie *nfp);
/* sort() comparator: order BARs by aperture size, then by index. */
static int bar_cmp(const void *aptr, const void *bptr)
{
	const struct nfp_bar *a = aptr, *b = bptr;

	if (a->bitsize != b->bitsize)
		return a->bitsize - b->bitsize;

	return a->index - b->index;
}
/* Map all PCI bars and fetch the actual BAR configurations from the
* board. We assume that the BAR with the PCIe config block is
* already mapped.
*
* BAR0.0: Reserved for General Mapping (for MSI-X access to PCIe SRAM)
* BAR0.1: Reserved for XPB access (for MSI-X access to PCIe PBA)
* BAR0.2: --
* BAR0.3: --
* BAR0.4: Reserved for Explicit 0.0-0.3 access
* BAR0.5: Reserved for Explicit 1.0-1.3 access
* BAR0.6: Reserved for Explicit 2.0-2.3 access
* BAR0.7: Reserved for Explicit 3.0-3.3 access
*
* BAR1.0-BAR1.7: --
* BAR2.0-BAR2.7: --
*/
/* Discover, initialize and reserve the fixed-purpose BARs (see the layout
 * comment above).  @interface is the CPP interface ID used to derive the
 * explicit-access master ID.  Returns 0 on success or -EINVAL when fewer
 * than 8 usable BAR slots are found.
 */
static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
{
	const u32 barcfg_msix_general =
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) |
		NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT;
	const u32 barcfg_msix_xpb =
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_BULK) |
		NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT |
		NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(
			NFP_CPP_TARGET_ISLAND_XPB);
	const u32 barcfg_explicit[4] = {
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT0),
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT1),
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT2),
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3),
	};
	struct nfp_bar *bar;
	int i, bars_free;
	int expl_groups;
	/* Walk all potential slots; each PCI memory BAR provides 8 of them.
	 * Slots whose backing PCI BAR is absent are compacted out (bar--).
	 */
	bar = &nfp->bar[0];
	for (i = 0; i < ARRAY_SIZE(nfp->bar); i++, bar++) {
		struct resource *res;
		res = &nfp->pdev->resource[(i >> 3) * 2];
		/* Skip over BARs that are not IORESOURCE_MEM */
		if (!(resource_type(res) & IORESOURCE_MEM)) {
			bar--;
			continue;
		}
		bar->resource = res;
		bar->barcfg = 0;
		bar->nfp = nfp;
		bar->index = i;
		bar->mask = nfp_bar_resource_len(bar) - 1;
		bar->bitsize = fls(bar->mask);
		bar->base = 0;
		bar->iomem = NULL;
	}
	nfp->bars = bar - &nfp->bar[0];
	if (nfp->bars < 8) {
		dev_err(nfp->dev, "No usable BARs found!\n");
		return -EINVAL;
	}
	bars_free = nfp->bars;
	/* Convert unit ID (0..3) to signal master/data master ID (0x40..0x70)
	 */
	mutex_init(&nfp->expl.mutex);
	nfp->expl.master_id = ((NFP_CPP_INTERFACE_UNIT_of(interface) & 3) + 4)
		<< 4;
	nfp->expl.signal_ref = 0x10;
	/* Configure, and lock, BAR0.0 for General Target use (MSI-X SRAM) */
	bar = &nfp->bar[0];
	bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
				     nfp_bar_resource_len(bar));
	if (bar->iomem) {
		dev_info(nfp->dev,
			 "BAR0.0 RESERVED: General Mapping/MSI-X SRAM\n");
		atomic_inc(&bar->refcnt);
		bars_free--;
		nfp6000_bar_write(nfp, bar, barcfg_msix_general);
		nfp->expl.data = bar->iomem + NFP_PCIE_SRAM + 0x1000;
	}
	/* PF devices expose the full 4-PF CSR block and all explicit
	 * groups; other (VF) devices see only their own PF's CSRs.
	 */
	if (nfp->pdev->device == PCI_DEVICE_NFP4000 ||
	    nfp->pdev->device == PCI_DEVICE_NFP6000) {
		nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0);
		expl_groups = 4;
	} else {
		int pf = nfp->pdev->devfn & 7;
		nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf);
		expl_groups = 1;
	}
	nfp->iomem.em = bar->iomem + NFP_PCIE_EM;
	/* Configure, and lock, BAR0.1 for PCIe XPB (MSI-X PBA) */
	bar = &nfp->bar[1];
	dev_info(nfp->dev, "BAR0.1 RESERVED: PCIe XPB/MSI-X PBA\n");
	atomic_inc(&bar->refcnt);
	bars_free--;
	nfp6000_bar_write(nfp, bar, barcfg_msix_xpb);
	/* Use BAR0.4..BAR0.7 for EXPL IO */
	for (i = 0; i < 4; i++) {
		int j;
		if (i >= NFP_PCIE_EXPLICIT_BARS || i >= expl_groups) {
			/* bitsize 0 marks the group unusable */
			nfp->expl.group[i].bitsize = 0;
			continue;
		}
		bar = &nfp->bar[4 + i];
		bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
					     nfp_bar_resource_len(bar));
		if (bar->iomem) {
			dev_info(nfp->dev,
				 "BAR0.%d RESERVED: Explicit%d Mapping\n",
				 4 + i, i);
			atomic_inc(&bar->refcnt);
			bars_free--;
			nfp->expl.group[i].bitsize = bar->bitsize;
			nfp->expl.group[i].addr = bar->iomem;
			nfp6000_bar_write(nfp, bar, barcfg_explicit[i]);
			for (j = 0; j < 4; j++)
				nfp->expl.group[i].free[j] = true;
		}
		nfp->iomem.expl[i] = bar->iomem;
	}
	/* Sort bars by bit size - use the smallest possible first. */
	sort(&nfp->bar[0], nfp->bars, sizeof(nfp->bar[0]),
	     bar_cmp, NULL);
	dev_info(nfp->dev, "%d NFP PCI2CPP BARs, %d free\n",
		 nfp->bars, bars_free);
	return 0;
}
/* Tear down every ioremap created for the BARs. */
static void disable_bars(struct nfp6000_pcie *nfp)
{
	int i;

	for (i = 0; i < nfp->bars; i++) {
		struct nfp_bar *bar = &nfp->bar[i];

		if (!bar->iomem)
			continue;
		iounmap(bar->iomem);
		bar->iomem = NULL;
	}
}
/*
* Generic CPP bus access interface.
*/
/* Per-area private data for the generic CPP area interface. */
struct nfp6000_area_priv {
	atomic_t refcnt;	/* number of outstanding acquisitions */
	struct nfp_bar *bar;	/* backing BAR; NULL until acquired */
	u32 bar_offset;		/* offset of this area within the BAR */
	u32 target;		/* CPP target ID */
	u32 action;		/* CPP action */
	u32 token;		/* CPP token */
	u64 offset;		/* CPP address of the area */
	struct {
		int read;	/* push (read) width in bytes, 0 if none */
		int write;	/* pull (write) width in bytes, 0 if none */
		int bar;	/* width the BAR must be configured for */
	} width;
	size_t size;		/* area size in bytes */
	void __iomem *iomem;	/* kernel mapping of the area */
	phys_addr_t phys;	/* bus address of the area */
	struct resource resource; /* describes the window (never requested) */
};
/* Initialize the private data of a freshly created CPP area.
 *
 * Decodes the target/action/token from @dest, derives the push/pull access
 * widths for the target and records the window parameters.  No BAR is
 * allocated here - that happens in nfp6000_area_acquire().
 *
 * Return: 0 on success, or a negative errno when the target/address pair
 * is unknown or read and write widths conflict.
 */
static int nfp6000_area_init(struct nfp_cpp_area *area, u32 dest,
			     unsigned long long address, unsigned long size)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
	u32 target = NFP_CPP_ID_TARGET_of(dest);
	u32 action = NFP_CPP_ID_ACTION_of(dest);
	u32 token = NFP_CPP_ID_TOKEN_of(dest);
	int pp;

	pp = nfp_target_pushpull(NFP_CPP_ID(target, action, token), address);
	if (pp < 0)
		return pp;

	priv->width.read = PUSH_WIDTH(pp);
	priv->width.write = PULL_WIDTH(pp);

	/* Mixed read/write widths are not supported. */
	if (priv->width.read > 0 && priv->width.write > 0 &&
	    priv->width.read != priv->width.write)
		return -EINVAL;

	priv->width.bar = priv->width.read > 0 ? priv->width.read
					       : priv->width.write;

	atomic_set(&priv->refcnt, 0);
	priv->bar = NULL;

	priv->target = target;
	priv->action = action;
	priv->token = token;
	priv->offset = address;
	priv->size = size;
	memset(&priv->resource, 0, sizeof(priv->resource));

	return 0;
}
/* No per-area teardown needed; BAR and mapping release happens in
 * nfp6000_area_release().
 */
static void nfp6000_area_cleanup(struct nfp_cpp_area *area)
{
}
/* Take a reference on the area's private state. */
static void priv_area_get(struct nfp_cpp_area *area)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
	atomic_inc(&priv->refcnt);
}
/* Drop a reference on the area's private state.
 *
 * Return: non-zero when the last reference was dropped (caller should
 * release resources), 0 otherwise.  Warns and bails out on underflow.
 */
static int priv_area_put(struct nfp_cpp_area *area)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
	if (WARN_ON(!atomic_read(&priv->refcnt)))
		return 0;
	return atomic_dec_and_test(&priv->refcnt);
}
/* Acquire a CPP area: allocate a BAR covering its window (non-blocking)
 * and establish a kernel mapping for it.
 *
 * Subsequent acquisitions of an already-backed area just bump the
 * refcount.  Return: 0 on success or a negative errno (-EAGAIN when no
 * BAR is currently free).
 */
static int nfp6000_area_acquire(struct nfp_cpp_area *area)
{
	struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area));
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
	int barnum, err;
	if (priv->bar) {
		/* Already allocated. */
		priv_area_get(area);
		return 0;
	}
	/* Non-blocking allocation: returns -EAGAIN rather than sleeping. */
	barnum = nfp_alloc_bar(nfp, priv->target, priv->action, priv->token,
			       priv->offset, priv->size, priv->width.bar, 1);
	if (barnum < 0) {
		err = barnum;
		goto err_alloc_bar;
	}
	priv->bar = &nfp->bar[barnum];
	/* Calculate offset into BAR. */
	if (nfp_bar_maptype(priv->bar) ==
	    NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) {
		/* GENERAL windows sub-divide the aperture by target and
		 * token, so both contribute to the offset.
		 */
		priv->bar_offset = priv->offset &
			(NFP_PCIE_P2C_GENERAL_SIZE(priv->bar) - 1);
		priv->bar_offset += NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(
			priv->bar, priv->target);
		priv->bar_offset += NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(
			priv->bar, priv->token);
	} else {
		priv->bar_offset = priv->offset & priv->bar->mask;
	}
	/* We don't actually try to acquire the resource area using
	 * request_resource. This would prevent sharing the mapped
	 * BAR between multiple CPP areas and prevent us from
	 * effectively utilizing the limited amount of BAR resources.
	 */
	priv->phys = nfp_bar_resource_start(priv->bar) + priv->bar_offset;
	priv->resource.name = nfp_cpp_area_name(area);
	priv->resource.start = priv->phys;
	priv->resource.end = priv->resource.start + priv->size - 1;
	priv->resource.flags = IORESOURCE_MEM;
	/* If the bar is already mapped in, use its mapping */
	if (priv->bar->iomem)
		priv->iomem = priv->bar->iomem + priv->bar_offset;
	else
		/* Must have been too big. Sub-allocate. */
		priv->iomem = ioremap_nocache(priv->phys, priv->size);
	if (IS_ERR_OR_NULL(priv->iomem)) {
		dev_err(nfp->dev, "Can't ioremap() a %d byte region of BAR %d\n",
			(int)priv->size, priv->bar->index);
		err = !priv->iomem ? -ENOMEM : PTR_ERR(priv->iomem);
		priv->iomem = NULL;
		goto err_iomem_remap;
	}
	priv_area_get(area);
	return 0;
err_iomem_remap:
	nfp_bar_put(nfp, priv->bar);
	priv->bar = NULL;
err_alloc_bar:
	return err;
}
static void nfp6000_area_release(struct nfp_cpp_area *area)
{
struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area));
struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
if (!priv_area_put(area))
return;
if (!priv->bar->iomem)
iounmap(priv->iomem);
nfp_bar_put(nfp, priv->bar);
priv->bar = NULL;
priv->iomem = NULL;
}
/* Return the physical address backing the area's mapping. */
static phys_addr_t nfp6000_area_phys(struct nfp_cpp_area *area)
{
	struct nfp6000_area_priv *ap = nfp_cpp_area_priv(area);

	return ap->phys;
}
/* Return the CPU (iomem) mapping of the area, NULL if not acquired. */
static void __iomem *nfp6000_area_iomem(struct nfp_cpp_area *area)
{
	struct nfp6000_area_priv *ap = nfp_cpp_area_priv(area);

	return ap->iomem;
}
static struct resource *nfp6000_area_resource(struct nfp_cpp_area *area)
{
	/* Use the BAR resource as the resource for the CPP area.
	 * This enables us to share the BAR among multiple CPP areas
	 * without resource conflicts.
	 */
	struct nfp6000_area_priv *ap = nfp_cpp_area_priv(area);

	return ap->bar->resource;
}
/* Read @length bytes from the area at @offset into @kernel_vaddr.
 *
 * Accesses aligned to the target's access width go straight through
 * the BAR mapping; unaligned accesses are translated into explicit
 * CPP transactions.
 *
 * Return: number of bytes read, or negative errno.
 */
static int nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr,
			     unsigned long offset, unsigned int length)
{
	u64 __maybe_unused *wrptr64 = kernel_vaddr;
	const u64 __iomem __maybe_unused *rdptr64;
	struct nfp6000_area_priv *priv;
	u32 *wrptr32 = kernel_vaddr;
	const u32 __iomem *rdptr32;
	int n, width;
	bool is_64;

	priv = nfp_cpp_area_priv(area);
	rdptr64 = priv->iomem + offset;
	rdptr32 = priv->iomem + offset;

	if (offset + length > priv->size)
		return -EFAULT;

	width = priv->width.read;

	if (width <= 0)
		return -EINVAL;

	/* Unaligned? Translate to an explicit access */
	if ((priv->offset + offset) & (width - 1))
		return nfp_cpp_explicit_read(nfp_cpp_area_cpp(area),
					     NFP_CPP_ID(priv->target,
							priv->action,
							priv->token),
					     priv->offset + offset,
					     kernel_vaddr, length, width);

	is_64 = width == TARGET_WIDTH_64;

	/* MU reads via a PCIe2CPP BAR supports 32bit (and other) lengths */
	if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
	    priv->action == NFP_CPP_ACTION_RW)
		is_64 = false;

	/* Offset and length must be multiples of the access width */
	if (is_64) {
		if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0)
			return -EINVAL;
	} else {
		if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0)
			return -EINVAL;
	}

	if (WARN_ON(!priv->bar))
		return -EFAULT;

	if (is_64)
#ifndef __raw_readq
		/* No 64-bit MMIO read primitive on this architecture */
		return -EINVAL;
#else
		for (n = 0; n < length; n += sizeof(u64))
			*wrptr64++ = __raw_readq(rdptr64++);
#endif
	else
		for (n = 0; n < length; n += sizeof(u32))
			*wrptr32++ = __raw_readl(rdptr32++);

	return n;
}
/* Write @length bytes from @kernel_vaddr to the area at @offset.
 *
 * Accesses aligned to the target's access width go straight through
 * the BAR mapping (with a write barrier after each word); unaligned
 * accesses are translated into explicit CPP transactions.
 *
 * Return: number of bytes written, or negative errno.
 */
static int
nfp6000_area_write(struct nfp_cpp_area *area,
		   const void *kernel_vaddr,
		   unsigned long offset, unsigned int length)
{
	const u64 __maybe_unused *rdptr64 = kernel_vaddr;
	u64 __iomem __maybe_unused *wrptr64;
	const u32 *rdptr32 = kernel_vaddr;
	struct nfp6000_area_priv *priv;
	u32 __iomem *wrptr32;
	int n, width;
	bool is_64;

	priv = nfp_cpp_area_priv(area);
	wrptr64 = priv->iomem + offset;
	wrptr32 = priv->iomem + offset;

	if (offset + length > priv->size)
		return -EFAULT;

	width = priv->width.write;

	if (width <= 0)
		return -EINVAL;

	/* Unaligned? Translate to an explicit access */
	if ((priv->offset + offset) & (width - 1))
		return nfp_cpp_explicit_write(nfp_cpp_area_cpp(area),
					      NFP_CPP_ID(priv->target,
							 priv->action,
							 priv->token),
					      priv->offset + offset,
					      kernel_vaddr, length, width);

	is_64 = width == TARGET_WIDTH_64;

	/* MU writes via a PCIe2CPP BAR supports 32bit (and other) lengths */
	if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
	    priv->action == NFP_CPP_ACTION_RW)
		is_64 = false;

	/* Offset and length must be multiples of the access width */
	if (is_64) {
		if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0)
			return -EINVAL;
	} else {
		if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0)
			return -EINVAL;
	}

	if (WARN_ON(!priv->bar))
		return -EFAULT;

	if (is_64)
#ifndef __raw_writeq
		/* No 64-bit MMIO write primitive on this architecture */
		return -EINVAL;
#else
		for (n = 0; n < length; n += sizeof(u64)) {
			__raw_writeq(*rdptr64++, wrptr64++);
			wmb();
		}
#endif
	else
		for (n = 0; n < length; n += sizeof(u32)) {
			__raw_writel(*rdptr32++, wrptr32++);
			wmb();
		}

	return n;
}
/* Per-handle state for explicit CPP transactions. */
struct nfp6000_explicit_priv {
	struct nfp6000_pcie *nfp;	/* owning PCIe transport */
	struct {
		int group;		/* explicit BAR group index */
		int area;		/* slot index within the group */
	} bar;
	int bitsize;			/* log2 of the slot's address window size */
	void __iomem *data;		/* mapping of the slot's data window */
	void __iomem *addr;		/* base used for the kickoff read */
};
/* Reserve a free explicit BAR slot for this handle and record its
 * data/address windows in the handle's private state.
 *
 * Return: 0 on success, -EAGAIN when all slots are currently in use.
 */
static int nfp6000_explicit_acquire(struct nfp_cpp_explicit *expl)
{
	struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_explicit_cpp(expl));
	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
	int ret = -EAGAIN;
	int grp, slot;

	mutex_lock(&nfp->expl.mutex);
	for (grp = 0; grp < ARRAY_SIZE(nfp->expl.group); grp++) {
		/* Skip groups that were never configured */
		if (!nfp->expl.group[grp].bitsize)
			continue;

		for (slot = 0; slot < ARRAY_SIZE(nfp->expl.group[grp].free);
		     slot++) {
			u16 data_offset;

			if (!nfp->expl.group[grp].free[slot])
				continue;

			priv->nfp = nfp;
			priv->bar.group = grp;
			priv->bar.area = slot;
			priv->bitsize = nfp->expl.group[grp].bitsize - 2;

			data_offset = (priv->bar.group << 9) +
				(priv->bar.area << 7);
			priv->data = nfp->expl.data + data_offset;
			priv->addr = nfp->expl.group[grp].addr +
				(priv->bar.area << priv->bitsize);
			nfp->expl.group[grp].free[slot] = false;

			ret = 0;
			goto out;
		}
	}
out:
	mutex_unlock(&nfp->expl.mutex);

	return ret;
}
/* Return the explicit BAR slot held by this handle to the free pool. */
static void nfp6000_explicit_release(struct nfp_cpp_explicit *expl)
{
	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
	struct nfp6000_pcie *nfp = priv->nfp;
	int grp = priv->bar.group;
	int slot = priv->bar.area;

	mutex_lock(&nfp->expl.mutex);
	nfp->expl.group[grp].free[slot] = true;
	mutex_unlock(&nfp->expl.mutex);
}
/* Copy transaction data from @buff into the slot's data window,
 * one 32-bit word at a time.
 *
 * Return: number of bytes copied.
 */
static int nfp6000_explicit_put(struct nfp_cpp_explicit *expl,
				const void *buff, size_t len)
{
	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
	const u32 *words = buff;
	size_t off;

	for (off = 0; off < len; off += sizeof(u32), words++)
		writel(*words, priv->data + off);

	return off;
}
/* Program the explicit BAR's three command CSRs for @cmd and kick off
 * the transaction with a read from the slot's address window.
 *
 * Return: mask of the signals (siga/sigb) the caller should wait for.
 */
static int
nfp6000_explicit_do(struct nfp_cpp_explicit *expl,
		    const struct nfp_cpp_explicit_command *cmd, u64 address)
{
	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
	u8 signal_master, signal_ref, data_master;
	struct nfp6000_pcie *nfp = priv->nfp;
	int sigmask = 0;
	u16 data_ref;
	u32 csr[3];

	if (cmd->siga_mode)
		sigmask |= 1 << cmd->siga;
	if (cmd->sigb_mode)
		sigmask |= 1 << cmd->sigb;

	/* Default to the host's master ID when the caller left the
	 * signal master unspecified.
	 */
	signal_master = cmd->signal_master;
	if (!signal_master)
		signal_master = nfp->expl.master_id;

	/* When signalling the host, derive the signal reference from
	 * the slot's (group, area) position.
	 */
	signal_ref = cmd->signal_ref;
	if (signal_master == nfp->expl.master_id)
		signal_ref = nfp->expl.signal_ref +
			((priv->bar.group * 4 + priv->bar.area) << 1);

	data_master = cmd->data_master;
	if (!data_master)
		data_master = nfp->expl.master_id;

	/* When the host is the data master, point the data reference
	 * at the slot's data window.
	 */
	data_ref = cmd->data_ref;
	if (data_master == nfp->expl.master_id)
		data_ref = 0x1000 +
			(priv->bar.group << 9) + (priv->bar.area << 7);

	csr[0] = NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType(sigmask) |
		NFP_PCIE_BAR_EXPLICIT_BAR0_Token(
			NFP_CPP_ID_TOKEN_of(cmd->cpp_id)) |
		NFP_PCIE_BAR_EXPLICIT_BAR0_Address(address >> 16);

	csr[1] = NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef(signal_ref) |
		NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster(data_master) |
		NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef(data_ref);

	csr[2] = NFP_PCIE_BAR_EXPLICIT_BAR2_Target(
			NFP_CPP_ID_TARGET_of(cmd->cpp_id)) |
		NFP_PCIE_BAR_EXPLICIT_BAR2_Action(
			NFP_CPP_ID_ACTION_of(cmd->cpp_id)) |
		NFP_PCIE_BAR_EXPLICIT_BAR2_Length(cmd->len) |
		NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask(cmd->byte_mask) |
		NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster(signal_master);

	/* Program the CSRs via MMIO when the CSR window is mapped,
	 * otherwise fall back to PCI config space at offset 0x400.
	 */
	if (nfp->iomem.csr) {
		writel(csr[0], nfp->iomem.csr +
		       NFP_PCIE_BAR_EXPLICIT_BAR0(priv->bar.group,
						  priv->bar.area));
		writel(csr[1], nfp->iomem.csr +
		       NFP_PCIE_BAR_EXPLICIT_BAR1(priv->bar.group,
						  priv->bar.area));
		writel(csr[2], nfp->iomem.csr +
		       NFP_PCIE_BAR_EXPLICIT_BAR2(priv->bar.group,
						  priv->bar.area));
		/* Readback to ensure BAR is flushed */
		readl(nfp->iomem.csr +
		      NFP_PCIE_BAR_EXPLICIT_BAR0(priv->bar.group,
						 priv->bar.area));
		readl(nfp->iomem.csr +
		      NFP_PCIE_BAR_EXPLICIT_BAR1(priv->bar.group,
						 priv->bar.area));
		readl(nfp->iomem.csr +
		      NFP_PCIE_BAR_EXPLICIT_BAR2(priv->bar.group,
						 priv->bar.area));
	} else {
		pci_write_config_dword(nfp->pdev, 0x400 +
				       NFP_PCIE_BAR_EXPLICIT_BAR0(
					       priv->bar.group, priv->bar.area),
				       csr[0]);
		pci_write_config_dword(nfp->pdev, 0x400 +
				       NFP_PCIE_BAR_EXPLICIT_BAR1(
					       priv->bar.group, priv->bar.area),
				       csr[1]);
		pci_write_config_dword(nfp->pdev, 0x400 +
				       NFP_PCIE_BAR_EXPLICIT_BAR2(
					       priv->bar.group, priv->bar.area),
				       csr[2]);
	}
	/* Issue the 'kickoff' transaction */
	readb(priv->addr + (address & ((1 << priv->bitsize) - 1)));

	return sigmask;
}
/* Copy transaction data from the slot's data window into @buff,
 * one 32-bit word at a time.
 *
 * Return: number of bytes copied.
 */
static int nfp6000_explicit_get(struct nfp_cpp_explicit *expl,
				void *buff, size_t len)
{
	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
	u32 *words = buff;
	size_t off;

	for (off = 0; off < len; off += sizeof(u32), words++)
		*words = readl(priv->data + off);

	return off;
}
static int nfp6000_init(struct nfp_cpp *cpp)
{
nfp_cpp_area_cache_add(cpp, SZ_64K);
nfp_cpp_area_cache_add(cpp, SZ_64K);
nfp_cpp_area_cache_add(cpp, SZ_256K);
return 0;
}
/* Tear down the transport when the CPP handle is destroyed: unmap all
 * BARs and free the device state allocated at probe time.
 */
static void nfp6000_free(struct nfp_cpp *cpp)
{
	struct nfp6000_pcie *pcie = nfp_cpp_priv(cpp);

	disable_bars(pcie);
	kfree(pcie);
}
/* Extract the NFP serial number from the PCIe Device Serial Number
 * extended capability; zero-fill @serial when the capability is absent.
 */
static void nfp6000_read_serial(struct device *dev, u8 *serial)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	u32 dword;
	int dsn;

	dsn = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
	if (!dsn) {
		memset(serial, 0, NFP_SERIAL_LEN);
		return;
	}

	/* Bytes 4-5 come from the top half of the first DSN dword,
	 * bytes 0-3 from the second dword, both stored big-endian.
	 */
	pci_read_config_dword(pdev, dsn + 4, &dword);
	put_unaligned_be16(dword >> 16, serial + 4);
	pci_read_config_dword(pdev, dsn + 8, &dword);
	put_unaligned_be32(dword, serial);
}
/* Derive the CPP interface ID from the low 16 bits of the PCIe Device
 * Serial Number; fall back to a PCI/0/0xff interface ID when the DSN
 * capability is absent.
 */
static u16 nfp6000_get_interface(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	u32 dword;
	int dsn;

	dsn = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
	if (!dsn)
		return NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_PCI, 0, 0xff);

	pci_read_config_dword(pdev, dsn + 4, &dword);

	return dword & 0xffff;
}
/* CPP bus transport operations for NFP4000/NFP6000 PCIe devices */
static const struct nfp_cpp_operations nfp6000_pcie_ops = {
	.owner = THIS_MODULE,
	.init = nfp6000_init,
	.free = nfp6000_free,
	.read_serial = nfp6000_read_serial,
	.get_interface = nfp6000_get_interface,
	/* BAR-mapped area access */
	.area_priv_size = sizeof(struct nfp6000_area_priv),
	.area_init = nfp6000_area_init,
	.area_cleanup = nfp6000_area_cleanup,
	.area_acquire = nfp6000_area_acquire,
	.area_release = nfp6000_area_release,
	.area_phys = nfp6000_area_phys,
	.area_iomem = nfp6000_area_iomem,
	.area_resource = nfp6000_area_resource,
	.area_read = nfp6000_area_read,
	.area_write = nfp6000_area_write,
	/* Explicit CPP transactions */
	.explicit_priv_size = sizeof(struct nfp6000_explicit_priv),
	.explicit_acquire = nfp6000_explicit_acquire,
	.explicit_release = nfp6000_explicit_release,
	.explicit_put = nfp6000_explicit_put,
	.explicit_do = nfp6000_explicit_do,
	.explicit_get = nfp6000_explicit_get,
};
/**
 * nfp_cpp_from_nfp6000_pcie() - Build a NFP CPP bus from a NFP6000 PCI device
 * @pdev: NFP6000 PCI device
 *
 * Validates the device's CPP interface ID, configures the PCIe-to-CPP
 * BARs and registers the transport operations with the CPP core.
 *
 * Return: NFP CPP handle, or ERR_PTR() on failure
 */
struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev)
{
	struct nfp6000_pcie *nfp;
	u16 interface;
	int err;

	dev_info(&pdev->dev,
		 "Netronome Flow Processor NFP4000/NFP6000 PCIe Card Probe\n");

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp) {
		err = -ENOMEM;
		goto err_ret;
	}

	nfp->dev = &pdev->dev;
	nfp->pdev = pdev;
	init_waitqueue_head(&nfp->bar_waiters);
	spin_lock_init(&nfp->bar_lock);

	/* The interface ID is derived from the PCIe Device Serial Number;
	 * it must identify a PCI endpoint with a per-opener channel.
	 */
	interface = nfp6000_get_interface(&pdev->dev);

	if (NFP_CPP_INTERFACE_TYPE_of(interface) !=
	    NFP_CPP_INTERFACE_TYPE_PCI) {
		dev_err(&pdev->dev,
			"Interface type %d is not the expected %d\n",
			NFP_CPP_INTERFACE_TYPE_of(interface),
			NFP_CPP_INTERFACE_TYPE_PCI);
		err = -ENODEV;
		goto err_free_nfp;
	}

	if (NFP_CPP_INTERFACE_CHANNEL_of(interface) !=
	    NFP_CPP_INTERFACE_CHANNEL_PEROPENER) {
		dev_err(&pdev->dev, "Interface channel %d is not the expected %d\n",
			NFP_CPP_INTERFACE_CHANNEL_of(interface),
			NFP_CPP_INTERFACE_CHANNEL_PEROPENER);
		err = -ENODEV;
		goto err_free_nfp;
	}

	err = enable_bars(nfp, interface);
	if (err)
		goto err_free_nfp;

	/* Probe for all the common NFP devices */
	return nfp_cpp_from_operations(&nfp6000_pcie_ops, &pdev->dev, nfp);

err_free_nfp:
	kfree(nfp);
err_ret:
	dev_err(&pdev->dev, "NFP6000 PCI setup failed\n");
	return ERR_PTR(err);
}
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
 * This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* nfp6000_pcie.h
* Author: Jason McMullan <jason.mcmullan@netronome.com>
*/
#ifndef NFP6000_PCIE_H
#define NFP6000_PCIE_H
#include "nfp_cpp.h"
struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev);
#endif /* NFP6000_PCIE_H */
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
 * This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* nfp_arm.h
* Definitions for ARM-based registers and memory spaces
*/
#ifndef NFP_ARM_H
#define NFP_ARM_H
#define NFP_ARM_QUEUE(_q) (0x100000 + (0x800 * ((_q) & 0xff)))
#define NFP_ARM_IM 0x200000
#define NFP_ARM_EM 0x300000
#define NFP_ARM_GCSR 0x400000
#define NFP_ARM_MPCORE 0x800000
#define NFP_ARM_PL310 0xa00000
/* Register Type: BulkBARConfig */
#define NFP_ARM_GCSR_BULK_BAR(_bar) (0x0 + (0x4 * ((_bar) & 0x7)))
#define NFP_ARM_GCSR_BULK_BAR_TYPE (0x1 << 31)
#define NFP_ARM_GCSR_BULK_BAR_TYPE_BULK (0x0)
#define NFP_ARM_GCSR_BULK_BAR_TYPE_EXPA (0x80000000)
#define NFP_ARM_GCSR_BULK_BAR_TGT(_x) (((_x) & 0xf) << 27)
#define NFP_ARM_GCSR_BULK_BAR_TGT_of(_x) (((_x) >> 27) & 0xf)
#define NFP_ARM_GCSR_BULK_BAR_TOK(_x) (((_x) & 0x3) << 25)
#define NFP_ARM_GCSR_BULK_BAR_TOK_of(_x) (((_x) >> 25) & 0x3)
#define NFP_ARM_GCSR_BULK_BAR_LEN (0x1 << 24)
#define NFP_ARM_GCSR_BULK_BAR_LEN_32BIT (0x0)
#define NFP_ARM_GCSR_BULK_BAR_LEN_64BIT (0x1000000)
#define NFP_ARM_GCSR_BULK_BAR_ADDR(_x) ((_x) & 0x7ff)
#define NFP_ARM_GCSR_BULK_BAR_ADDR_of(_x) ((_x) & 0x7ff)
/* Register Type: ExpansionBARConfig */
#define NFP_ARM_GCSR_EXPA_BAR(_bar) (0x20 + (0x4 * ((_bar) & 0xf)))
#define NFP_ARM_GCSR_EXPA_BAR_TYPE (0x1 << 31)
#define NFP_ARM_GCSR_EXPA_BAR_TYPE_EXPA (0x0)
#define NFP_ARM_GCSR_EXPA_BAR_TYPE_EXPL (0x80000000)
#define NFP_ARM_GCSR_EXPA_BAR_TGT(_x) (((_x) & 0xf) << 27)
#define NFP_ARM_GCSR_EXPA_BAR_TGT_of(_x) (((_x) >> 27) & 0xf)
#define NFP_ARM_GCSR_EXPA_BAR_TOK(_x) (((_x) & 0x3) << 25)
#define NFP_ARM_GCSR_EXPA_BAR_TOK_of(_x) (((_x) >> 25) & 0x3)
#define NFP_ARM_GCSR_EXPA_BAR_LEN (0x1 << 24)
#define NFP_ARM_GCSR_EXPA_BAR_LEN_32BIT (0x0)
#define NFP_ARM_GCSR_EXPA_BAR_LEN_64BIT (0x1000000)
#define NFP_ARM_GCSR_EXPA_BAR_ACT(_x) (((_x) & 0x1f) << 19)
#define NFP_ARM_GCSR_EXPA_BAR_ACT_of(_x) (((_x) >> 19) & 0x1f)
#define NFP_ARM_GCSR_EXPA_BAR_ACT_DERIVED (0)
#define NFP_ARM_GCSR_EXPA_BAR_ADDR(_x) ((_x) & 0x7fff)
#define NFP_ARM_GCSR_EXPA_BAR_ADDR_of(_x) ((_x) & 0x7fff)
/* Register Type: ExplicitBARConfig0_Reg */
#define NFP_ARM_GCSR_EXPL0_BAR(_bar) (0x60 + (0x4 * ((_bar) & 0x7)))
#define NFP_ARM_GCSR_EXPL0_BAR_ADDR(_x) ((_x) & 0x3ffff)
#define NFP_ARM_GCSR_EXPL0_BAR_ADDR_of(_x) ((_x) & 0x3ffff)
/* Register Type: ExplicitBARConfig1_Reg */
#define NFP_ARM_GCSR_EXPL1_BAR(_bar) (0x80 + (0x4 * ((_bar) & 0x7)))
#define NFP_ARM_GCSR_EXPL1_BAR_POSTED (0x1 << 31)
#define NFP_ARM_GCSR_EXPL1_BAR_SIGNAL_REF(_x) (((_x) & 0x7f) << 24)
#define NFP_ARM_GCSR_EXPL1_BAR_SIGNAL_REF_of(_x) (((_x) >> 24) & 0x7f)
#define NFP_ARM_GCSR_EXPL1_BAR_DATA_MASTER(_x) (((_x) & 0xff) << 16)
#define NFP_ARM_GCSR_EXPL1_BAR_DATA_MASTER_of(_x) (((_x) >> 16) & 0xff)
#define NFP_ARM_GCSR_EXPL1_BAR_DATA_REF(_x) ((_x) & 0x3fff)
#define NFP_ARM_GCSR_EXPL1_BAR_DATA_REF_of(_x) ((_x) & 0x3fff)
/* Register Type: ExplicitBARConfig2_Reg */
#define NFP_ARM_GCSR_EXPL2_BAR(_bar) (0xa0 + (0x4 * ((_bar) & 0x7)))
#define NFP_ARM_GCSR_EXPL2_BAR_TGT(_x) (((_x) & 0xf) << 28)
#define NFP_ARM_GCSR_EXPL2_BAR_TGT_of(_x) (((_x) >> 28) & 0xf)
#define NFP_ARM_GCSR_EXPL2_BAR_ACT(_x) (((_x) & 0x1f) << 23)
#define NFP_ARM_GCSR_EXPL2_BAR_ACT_of(_x) (((_x) >> 23) & 0x1f)
#define NFP_ARM_GCSR_EXPL2_BAR_LEN(_x) (((_x) & 0x1f) << 18)
#define NFP_ARM_GCSR_EXPL2_BAR_LEN_of(_x) (((_x) >> 18) & 0x1f)
#define NFP_ARM_GCSR_EXPL2_BAR_BYTE_MASK(_x) (((_x) & 0xff) << 10)
#define NFP_ARM_GCSR_EXPL2_BAR_BYTE_MASK_of(_x) (((_x) >> 10) & 0xff)
#define NFP_ARM_GCSR_EXPL2_BAR_TOK(_x) (((_x) & 0x3) << 8)
#define NFP_ARM_GCSR_EXPL2_BAR_TOK_of(_x) (((_x) >> 8) & 0x3)
#define NFP_ARM_GCSR_EXPL2_BAR_SIGNAL_MASTER(_x) ((_x) & 0xff)
#define NFP_ARM_GCSR_EXPL2_BAR_SIGNAL_MASTER_of(_x) ((_x) & 0xff)
/* Register Type: PostedCommandSignal */
#define NFP_ARM_GCSR_EXPL_POST(_bar) (0xc0 + (0x4 * ((_bar) & 0x7)))
#define NFP_ARM_GCSR_EXPL_POST_SIG_B(_x) (((_x) & 0x7f) << 25)
#define NFP_ARM_GCSR_EXPL_POST_SIG_B_of(_x) (((_x) >> 25) & 0x7f)
#define NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS (0x1 << 24)
#define NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS_PULL (0x0)
#define NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS_PUSH (0x1000000)
#define NFP_ARM_GCSR_EXPL_POST_SIG_A(_x) (((_x) & 0x7f) << 17)
#define NFP_ARM_GCSR_EXPL_POST_SIG_A_of(_x) (((_x) >> 17) & 0x7f)
#define NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS (0x1 << 16)
#define NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS_PULL (0x0)
#define NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS_PUSH (0x10000)
#define NFP_ARM_GCSR_EXPL_POST_SIG_B_RCVD (0x1 << 7)
#define NFP_ARM_GCSR_EXPL_POST_SIG_B_VALID (0x1 << 6)
#define NFP_ARM_GCSR_EXPL_POST_SIG_A_RCVD (0x1 << 5)
#define NFP_ARM_GCSR_EXPL_POST_SIG_A_VALID (0x1 << 4)
#define NFP_ARM_GCSR_EXPL_POST_CMD_COMPLETE (0x1)
/* Register Type: MPCoreBaseAddress */
#define NFP_ARM_GCSR_MPCORE_BASE 0x00e0
#define NFP_ARM_GCSR_MPCORE_BASE_ADDR(_x) (((_x) & 0x7ffff) << 13)
#define NFP_ARM_GCSR_MPCORE_BASE_ADDR_of(_x) (((_x) >> 13) & 0x7ffff)
/* Register Type: PL310BaseAddress */
#define NFP_ARM_GCSR_PL310_BASE 0x00e4
#define NFP_ARM_GCSR_PL310_BASE_ADDR(_x) (((_x) & 0xfffff) << 12)
#define NFP_ARM_GCSR_PL310_BASE_ADDR_of(_x) (((_x) >> 12) & 0xfffff)
/* Register Type: MPCoreConfig */
#define NFP_ARM_GCSR_MP0_CFG 0x00e8
#define NFP_ARM_GCSR_MP0_CFG_SPI_BOOT (0x1 << 14)
#define NFP_ARM_GCSR_MP0_CFG_ENDIAN(_x) (((_x) & 0x3) << 12)
#define NFP_ARM_GCSR_MP0_CFG_ENDIAN_of(_x) (((_x) >> 12) & 0x3)
#define NFP_ARM_GCSR_MP0_CFG_ENDIAN_LITTLE (0)
#define NFP_ARM_GCSR_MP0_CFG_ENDIAN_BIG (1)
#define NFP_ARM_GCSR_MP0_CFG_RESET_VECTOR (0x1 << 8)
#define NFP_ARM_GCSR_MP0_CFG_RESET_VECTOR_LO (0x0)
#define NFP_ARM_GCSR_MP0_CFG_RESET_VECTOR_HI (0x100)
#define NFP_ARM_GCSR_MP0_CFG_OUTCLK_EN(_x) (((_x) & 0xf) << 4)
#define NFP_ARM_GCSR_MP0_CFG_OUTCLK_EN_of(_x) (((_x) >> 4) & 0xf)
#define NFP_ARM_GCSR_MP0_CFG_ARMID(_x) ((_x) & 0xf)
#define NFP_ARM_GCSR_MP0_CFG_ARMID_of(_x) ((_x) & 0xf)
/* Register Type: MPCoreIDCacheDataError */
#define NFP_ARM_GCSR_MP0_CACHE_ERR 0x00ec
#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D7 (0x1 << 15)
#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D6 (0x1 << 14)
#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D5 (0x1 << 13)
#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D4 (0x1 << 12)
#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D3 (0x1 << 11)
#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D2 (0x1 << 10)
#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D1 (0x1 << 9)
#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D0 (0x1 << 8)
#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I7 (0x1 << 7)
#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I6 (0x1 << 6)
#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I5 (0x1 << 5)
#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I4 (0x1 << 4)
#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I3 (0x1 << 3)
#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I2 (0x1 << 2)
#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I1 (0x1 << 1)
#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I0 (0x1)
/* Register Type: ARMDFT */
#define NFP_ARM_GCSR_DFT 0x0100
#define NFP_ARM_GCSR_DFT_DBG_REQ (0x1 << 20)
#define NFP_ARM_GCSR_DFT_DBG_EN (0x1 << 19)
#define NFP_ARM_GCSR_DFT_WFE_EVT_TRG (0x1 << 18)
#define NFP_ARM_GCSR_DFT_ETM_WFI_RDY (0x1 << 17)
#define NFP_ARM_GCSR_DFT_ETM_PWR_ON (0x1 << 16)
#define NFP_ARM_GCSR_DFT_BIST_FAIL_of(_x) (((_x) >> 8) & 0xf)
#define NFP_ARM_GCSR_DFT_BIST_DONE_of(_x) (((_x) >> 4) & 0xf)
#define NFP_ARM_GCSR_DFT_BIST_RUN(_x) ((_x) & 0x7)
#define NFP_ARM_GCSR_DFT_BIST_RUN_of(_x) ((_x) & 0x7)
/* Gasket CSRs */
/* NOTE: These cannot be remapped, and are always at this location.
*/
#define NFP_ARM_GCSR_START (0xd6000000 + NFP_ARM_GCSR)
#define NFP_ARM_GCSR_SIZE SZ_64K
/* BAR CSRs
*/
#define NFP_ARM_GCSR_BULK_BITS 11
#define NFP_ARM_GCSR_EXPA_BITS 15
#define NFP_ARM_GCSR_EXPL_BITS 18
#define NFP_ARM_GCSR_BULK_SHIFT (40 - 11)
#define NFP_ARM_GCSR_EXPA_SHIFT (40 - 15)
#define NFP_ARM_GCSR_EXPL_SHIFT (40 - 18)
#define NFP_ARM_GCSR_BULK_SIZE (1 << NFP_ARM_GCSR_BULK_SHIFT)
#define NFP_ARM_GCSR_EXPA_SIZE (1 << NFP_ARM_GCSR_EXPA_SHIFT)
#define NFP_ARM_GCSR_EXPL_SIZE (1 << NFP_ARM_GCSR_EXPL_SHIFT)
#define NFP_ARM_GCSR_EXPL2_CSR(target, action, length, \
byte_mask, token, signal_master) \
(NFP_ARM_GCSR_EXPL2_BAR_TGT(target) | \
NFP_ARM_GCSR_EXPL2_BAR_ACT(action) | \
NFP_ARM_GCSR_EXPL2_BAR_LEN(length) | \
NFP_ARM_GCSR_EXPL2_BAR_BYTE_MASK(byte_mask) | \
NFP_ARM_GCSR_EXPL2_BAR_TOK(token) | \
NFP_ARM_GCSR_EXPL2_BAR_SIGNAL_MASTER(signal_master))
#define NFP_ARM_GCSR_EXPL1_CSR(posted, signal_ref, data_master, data_ref) \
(((posted) ? NFP_ARM_GCSR_EXPL1_BAR_POSTED : 0) | \
NFP_ARM_GCSR_EXPL1_BAR_SIGNAL_REF(signal_ref) | \
NFP_ARM_GCSR_EXPL1_BAR_DATA_MASTER(data_master) | \
NFP_ARM_GCSR_EXPL1_BAR_DATA_REF(data_ref))
#define NFP_ARM_GCSR_EXPL0_CSR(address) \
NFP_ARM_GCSR_EXPL0_BAR_ADDR((address) >> NFP_ARM_GCSR_EXPL_SHIFT)
#define NFP_ARM_GCSR_EXPL_POST_EXPECT_A(sig_ref, is_push, is_required) \
(NFP_ARM_GCSR_EXPL_POST_SIG_A(sig_ref) | \
((is_push) ? NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS_PUSH : \
NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS_PULL) | \
((is_required) ? NFP_ARM_GCSR_EXPL_POST_SIG_A_VALID : 0))
#define NFP_ARM_GCSR_EXPL_POST_EXPECT_B(sig_ref, is_push, is_required) \
(NFP_ARM_GCSR_EXPL_POST_SIG_B(sig_ref) | \
((is_push) ? NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS_PUSH : \
NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS_PULL) | \
((is_required) ? NFP_ARM_GCSR_EXPL_POST_SIG_B_VALID : 0))
#define NFP_ARM_GCSR_EXPA_CSR(mode, target, token, is_64, action, address) \
(((mode) ? NFP_ARM_GCSR_EXPA_BAR_TYPE_EXPL : \
NFP_ARM_GCSR_EXPA_BAR_TYPE_EXPA) | \
NFP_ARM_GCSR_EXPA_BAR_TGT(target) | \
NFP_ARM_GCSR_EXPA_BAR_TOK(token) | \
((is_64) ? NFP_ARM_GCSR_EXPA_BAR_LEN_64BIT : \
NFP_ARM_GCSR_EXPA_BAR_LEN_32BIT) | \
NFP_ARM_GCSR_EXPA_BAR_ACT(action) | \
NFP_ARM_GCSR_EXPA_BAR_ADDR((address) >> NFP_ARM_GCSR_EXPA_SHIFT))
#define NFP_ARM_GCSR_BULK_CSR(mode, target, token, is_64, address) \
(((mode) ? NFP_ARM_GCSR_BULK_BAR_TYPE_EXPA : \
NFP_ARM_GCSR_BULK_BAR_TYPE_BULK) | \
NFP_ARM_GCSR_BULK_BAR_TGT(target) | \
NFP_ARM_GCSR_BULK_BAR_TOK(token) | \
((is_64) ? NFP_ARM_GCSR_BULK_BAR_LEN_64BIT : \
NFP_ARM_GCSR_BULK_BAR_LEN_32BIT) | \
NFP_ARM_GCSR_BULK_BAR_ADDR((address) >> NFP_ARM_GCSR_BULK_SHIFT))
/* MP Core CSRs */
#define NFP_ARM_MPCORE_SIZE SZ_128K
/* PL320 CSRs */
#define NFP_ARM_PCSR_SIZE SZ_64K
#endif /* NFP_ARM_H */
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
 * This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* nfp_cpp.h
* Interface for low-level NFP CPP access.
* Authors: Jason McMullan <jason.mcmullan@netronome.com>
* Rolf Neugebauer <rolf.neugebauer@netronome.com>
*/
#ifndef __NFP_CPP_H__
#define __NFP_CPP_H__
#include <linux/ctype.h>
#include <linux/types.h>
#ifndef NFP_SUBSYS
#define NFP_SUBSYS "nfp"
#endif
#define nfp_err(cpp, fmt, args...) \
dev_err(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
#define nfp_warn(cpp, fmt, args...) \
dev_warn(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
#define nfp_info(cpp, fmt, args...) \
dev_info(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
#define nfp_dbg(cpp, fmt, args...) \
dev_dbg(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
#define PCI_64BIT_BAR_COUNT 3
/* NFP hardware vendor/device ids.
*/
#define PCI_DEVICE_NFP4000 0x4000
#define PCI_DEVICE_NFP6000 0x6000
#define NFP_CPP_NUM_TARGETS 16
struct device;
struct nfp_cpp_area;
struct nfp_cpp;
struct resource;
/* Wildcard indicating a CPP read or write action
*
* The action used will be either read or write depending on whether a
* read or write instruction/call is performed on the NFP_CPP_ID. It
 * is recommended that the RW action is used even if all actions to be
* performed on a NFP_CPP_ID are known to be only reads or writes.
* Doing so will in many cases save NFP CPP internal software
* resources.
*/
#define NFP_CPP_ACTION_RW 32
#define NFP_CPP_TARGET_ID_MASK 0x1f
/**
* NFP_CPP_ID() - pack target, token, and action into a CPP ID.
* @target: NFP CPP target id
* @action: NFP CPP action id
* @token: NFP CPP token id
*
* Create a 32-bit CPP identifier representing the access to be made.
* These identifiers are used as parameters to other NFP CPP
* functions. Some CPP devices may allow wildcard identifiers to be
* specified.
*
* Return: NFP CPP ID
*/
#define NFP_CPP_ID(target, action, token) \
((((target) & 0x7f) << 24) | (((token) & 0xff) << 16) | \
(((action) & 0xff) << 8))
/**
* NFP_CPP_ISLAND_ID() - pack target, token, action, and island into a CPP ID.
* @target: NFP CPP target id
* @action: NFP CPP action id
* @token: NFP CPP token id
* @island: NFP CPP island id
*
* Create a 32-bit CPP identifier representing the access to be made.
* These identifiers are used as parameters to other NFP CPP
* functions. Some CPP devices may allow wildcard identifiers to be
* specified.
*
* Return: NFP CPP ID
*/
#define NFP_CPP_ISLAND_ID(target, action, token, island) \
((((target) & 0x7f) << 24) | (((token) & 0xff) << 16) | \
(((action) & 0xff) << 8) | (((island) & 0xff) << 0))
/**
 * NFP_CPP_ID_TARGET_of() - Return the NFP CPP target of a NFP CPP ID
 * @id: NFP CPP ID
 *
 * Return: NFP CPP target
 */
static inline u8 NFP_CPP_ID_TARGET_of(u32 id)
{
	u32 tgt = id >> 24;

	return tgt & NFP_CPP_TARGET_ID_MASK;
}
/**
 * NFP_CPP_ID_TOKEN_of() - Return the NFP CPP token of a NFP CPP ID
 * @id: NFP CPP ID
 *
 * Return: NFP CPP token
 */
static inline u8 NFP_CPP_ID_TOKEN_of(u32 id)
{
	u32 tok = id >> 16;

	return tok & 0xff;
}
/**
 * NFP_CPP_ID_ACTION_of() - Return the NFP CPP action of a NFP CPP ID
 * @id: NFP CPP ID
 *
 * Return: NFP CPP action
 */
static inline u8 NFP_CPP_ID_ACTION_of(u32 id)
{
	u32 act = id >> 8;

	return act & 0xff;
}
/**
 * NFP_CPP_ID_ISLAND_of() - Return the NFP CPP island of a NFP CPP ID
 * @id: NFP CPP ID
 *
 * Return: NFP CPP island
 */
static inline u8 NFP_CPP_ID_ISLAND_of(u32 id)
{
	u32 isl = id;

	return isl & 0xff;
}
/* NFP Interface types - logical interface for this CPP connection
* 4 bits are reserved for interface type.
*/
#define NFP_CPP_INTERFACE_TYPE_INVALID 0x0
#define NFP_CPP_INTERFACE_TYPE_PCI 0x1
#define NFP_CPP_INTERFACE_TYPE_ARM 0x2
#define NFP_CPP_INTERFACE_TYPE_RPC 0x3
#define NFP_CPP_INTERFACE_TYPE_ILA 0x4
/**
* NFP_CPP_INTERFACE() - Construct a 16-bit NFP Interface ID
* @type: NFP Interface Type
* @unit: Unit identifier for the interface type
* @channel: Channel identifier for the interface unit
*
* Interface IDs consists of 4 bits of interface type,
* 4 bits of unit identifier, and 8 bits of channel identifier.
*
* The NFP Interface ID is used in the implementation of
* NFP CPP API mutexes, which use the MU Atomic CompareAndWrite
* operation - hence the limit to 16 bits to be able to
* use the NFP Interface ID as a lock owner.
*
* Return: Interface ID
*/
#define NFP_CPP_INTERFACE(type, unit, channel) \
((((type) & 0xf) << 12) | \
(((unit) & 0xf) << 8) | \
(((channel) & 0xff) << 0))
/**
* NFP_CPP_INTERFACE_TYPE_of() - Get the interface type
* @interface: NFP Interface ID
* Return: NFP Interface ID's type
*/
#define NFP_CPP_INTERFACE_TYPE_of(interface) (((interface) >> 12) & 0xf)
/**
* NFP_CPP_INTERFACE_UNIT_of() - Get the interface unit
* @interface: NFP Interface ID
* Return: NFP Interface ID's unit
*/
#define NFP_CPP_INTERFACE_UNIT_of(interface) (((interface) >> 8) & 0xf)
/**
* NFP_CPP_INTERFACE_CHANNEL_of() - Get the interface channel
* @interface: NFP Interface ID
* Return: NFP Interface ID's channel
*/
#define NFP_CPP_INTERFACE_CHANNEL_of(interface) (((interface) >> 0) & 0xff)
/* Implemented in nfp_cppcore.c */
void nfp_cpp_free(struct nfp_cpp *cpp);
u32 nfp_cpp_model(struct nfp_cpp *cpp);
u16 nfp_cpp_interface(struct nfp_cpp *cpp);
int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial);
void *nfp_hwinfo_cache(struct nfp_cpp *cpp);
void nfp_hwinfo_cache_set(struct nfp_cpp *cpp, void *val);
void *nfp_rtsym_cache(struct nfp_cpp *cpp);
void nfp_rtsym_cache_set(struct nfp_cpp *cpp, void *val);
void nfp_nffw_cache_flush(struct nfp_cpp *cpp);
struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp,
u32 cpp_id,
const char *name,
unsigned long long address,
unsigned long size);
struct nfp_cpp_area *nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address,
unsigned long size);
void nfp_cpp_area_free(struct nfp_cpp_area *area);
int nfp_cpp_area_acquire(struct nfp_cpp_area *area);
int nfp_cpp_area_acquire_nonblocking(struct nfp_cpp_area *area);
void nfp_cpp_area_release(struct nfp_cpp_area *area);
void nfp_cpp_area_release_free(struct nfp_cpp_area *area);
int nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset,
void *buffer, size_t length);
int nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset,
const void *buffer, size_t length);
int nfp_cpp_area_check_range(struct nfp_cpp_area *area,
unsigned long long offset, unsigned long size);
const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area);
void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area);
struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area);
struct resource *nfp_cpp_area_resource(struct nfp_cpp_area *area);
phys_addr_t nfp_cpp_area_phys(struct nfp_cpp_area *area);
void __iomem *nfp_cpp_area_iomem(struct nfp_cpp_area *area);
int nfp_cpp_area_readl(struct nfp_cpp_area *area, unsigned long offset,
u32 *value);
int nfp_cpp_area_writel(struct nfp_cpp_area *area, unsigned long offset,
u32 value);
int nfp_cpp_area_readq(struct nfp_cpp_area *area, unsigned long offset,
u64 *value);
int nfp_cpp_area_writeq(struct nfp_cpp_area *area, unsigned long offset,
u64 value);
int nfp_cpp_area_fill(struct nfp_cpp_area *area, unsigned long offset,
u32 value, size_t length);
int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_tgt, u32 *value);
int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_tgt, u32 value);
int nfp_xpb_writelm(struct nfp_cpp *cpp, u32 xpb_tgt, u32 mask, u32 value);
/* Implemented in nfp_cpplib.c */
int nfp_cpp_read(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, void *kernel_vaddr, size_t length);
int nfp_cpp_write(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, const void *kernel_vaddr,
size_t length);
int nfp_cpp_readl(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, u32 *value);
int nfp_cpp_writel(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, u32 value);
int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, u64 *value);
int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, u64 value);
struct nfp_cpp_mutex;
int nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target,
unsigned long long address, u32 key_id);
struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target,
unsigned long long address,
u32 key_id);
void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex);
int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex);
int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex);
int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex);
struct nfp_cpp_explicit;
/* Parameters of an explicit CPP transaction, consumed by the low-level
 * implementation's explicit_do() op (see nfp_cpp_explicit_set_*()).
 * NOTE(review): field meanings presumably mirror the NFP CPP command
 * bus fields of the same names - confirm against the NFP databook.
 */
struct nfp_cpp_explicit_command {
	u32 cpp_id;
	u16 data_ref;
	u8 data_master;
	u8 len;
	u8 byte_mask;
	u8 signal_master;
	u8 signal_ref;
	u8 posted;
	u8 siga;
	u8 sigb;
	/* Signed: enum nfp_cpp_explicit_signal_mode uses negative values
	 * for the "optional" variants.
	 */
	s8 siga_mode;
	s8 sigb_mode;
};
#define NFP_SERIAL_LEN 6
/**
 * struct nfp_cpp_operations - NFP CPP operations structure
 * @area_priv_size:     Size of the nfp_cpp_area private data
 * @owner:              Owner module
 * @init:               Initialize the NFP CPP bus
 * @free:               Free the bus
 * @read_serial:	Read serial number to memory provided
 * @get_interface:	Return CPP interface
 * @area_init:          Initialize a new NFP CPP area (not serialized)
 * @area_cleanup:       Clean up a NFP CPP area (not serialized)
 * @area_acquire:       Acquire the NFP CPP area (serialized)
 * @area_release:       Release area (serialized)
 * @area_resource:      Get resource range of area (not serialized)
 * @area_phys:          Get physical address of area (not serialized)
 * @area_iomem:         Get iomem of area (not serialized)
 * @area_read:          Perform a read from a NFP CPP area (serialized)
 * @area_write:         Perform a write to a NFP CPP area (serialized)
 * @explicit_priv_size: Size of an explicit's private area
 * @explicit_acquire:   Acquire an explicit area
 * @explicit_release:   Release an explicit area
 * @explicit_put:       Write data to send
 * @explicit_get:       Read data received
 * @explicit_do:        Perform the transaction
 */
struct nfp_cpp_operations {
	size_t area_priv_size;
	struct module *owner;

	int (*init)(struct nfp_cpp *cpp);
	void (*free)(struct nfp_cpp *cpp);

	/* NOTE(review): @serial is assumed to receive NFP_SERIAL_LEN
	 * bytes - confirm against the PCIe implementation.
	 */
	void (*read_serial)(struct device *dev, u8 *serial);
	u16 (*get_interface)(struct device *dev);

	int (*area_init)(struct nfp_cpp_area *area,
			 u32 dest, unsigned long long address,
			 unsigned long size);
	void (*area_cleanup)(struct nfp_cpp_area *area);
	int (*area_acquire)(struct nfp_cpp_area *area);
	void (*area_release)(struct nfp_cpp_area *area);
	struct resource *(*area_resource)(struct nfp_cpp_area *area);
	phys_addr_t (*area_phys)(struct nfp_cpp_area *area);
	void __iomem *(*area_iomem)(struct nfp_cpp_area *area);
	/* Note: buffer comes before offset here, unlike the public
	 * nfp_cpp_area_read()/nfp_cpp_area_write() wrappers.
	 */
	int (*area_read)(struct nfp_cpp_area *area, void *kernel_vaddr,
			 unsigned long offset, unsigned int length);
	int (*area_write)(struct nfp_cpp_area *area, const void *kernel_vaddr,
			  unsigned long offset, unsigned int length);

	size_t explicit_priv_size;
	int (*explicit_acquire)(struct nfp_cpp_explicit *expl);
	void (*explicit_release)(struct nfp_cpp_explicit *expl);
	int (*explicit_put)(struct nfp_cpp_explicit *expl,
			    const void *buff, size_t len);
	int (*explicit_get)(struct nfp_cpp_explicit *expl,
			    void *buff, size_t len);
	int (*explicit_do)(struct nfp_cpp_explicit *expl,
			   const struct nfp_cpp_explicit_command *cmd,
			   u64 address);
};
struct nfp_cpp *
nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
struct device *parent, void *priv);
void *nfp_cpp_priv(struct nfp_cpp *priv);
int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size);
/* The following section contains extensions to the
* NFP CPP API, to be used in a Linux kernel-space context.
*/
/* Use this channel ID for multiple virtual channel interfaces
* (ie ARM and PCIe) when setting up the interface field.
*/
#define NFP_CPP_INTERFACE_CHANNEL_PEROPENER 255
struct device *nfp_cpp_device(struct nfp_cpp *cpp);
/* Return code masks for nfp_cpp_explicit_do()
*/
#define NFP_SIGNAL_MASK_A BIT(0) /* Signal A fired */
#define NFP_SIGNAL_MASK_B BIT(1) /* Signal B fired */
/* Signalling modes for explicit transactions.  Negative values mark the
 * signal as optional (stored in the signed siga_mode/sigb_mode fields of
 * struct nfp_cpp_explicit_command).
 */
enum nfp_cpp_explicit_signal_mode {
	NFP_SIGNAL_NONE = 0,
	NFP_SIGNAL_PUSH = 1,
	NFP_SIGNAL_PUSH_OPTIONAL = -1,
	NFP_SIGNAL_PULL = 2,
	NFP_SIGNAL_PULL_OPTIONAL = -2,
};
struct nfp_cpp_explicit *nfp_cpp_explicit_acquire(struct nfp_cpp *cpp);
int nfp_cpp_explicit_set_target(struct nfp_cpp_explicit *expl, u32 cpp_id,
u8 len, u8 mask);
int nfp_cpp_explicit_set_data(struct nfp_cpp_explicit *expl,
u8 data_master, u16 data_ref);
int nfp_cpp_explicit_set_signal(struct nfp_cpp_explicit *expl,
u8 signal_master, u8 signal_ref);
int nfp_cpp_explicit_set_posted(struct nfp_cpp_explicit *expl, int posted,
u8 siga,
enum nfp_cpp_explicit_signal_mode siga_mode,
u8 sigb,
enum nfp_cpp_explicit_signal_mode sigb_mode);
int nfp_cpp_explicit_put(struct nfp_cpp_explicit *expl,
const void *buff, size_t len);
int nfp_cpp_explicit_do(struct nfp_cpp_explicit *expl, u64 address);
int nfp_cpp_explicit_get(struct nfp_cpp_explicit *expl, void *buff, size_t len);
void nfp_cpp_explicit_release(struct nfp_cpp_explicit *expl);
struct nfp_cpp *nfp_cpp_explicit_cpp(struct nfp_cpp_explicit *expl);
void *nfp_cpp_explicit_priv(struct nfp_cpp_explicit *cpp_explicit);
/* Implemented in nfp_cpplib.c */
int nfp_cpp_model_autodetect(struct nfp_cpp *cpp, u32 *model);
int nfp_cpp_explicit_read(struct nfp_cpp *cpp, u32 cpp_id,
u64 addr, void *buff, size_t len,
int width_read);
int nfp_cpp_explicit_write(struct nfp_cpp *cpp, u32 cpp_id,
u64 addr, const void *buff, size_t len,
int width_write);
#endif /* !__NFP_CPP_H__ */
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* nfp_cppcore.c
* Provides low-level access to the NFP's internal CPP bus
* Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
* Jason McMullan <jason.mcmullan@netronome.com>
* Rolf Neugebauer <rolf.neugebauer@netronome.com>
*/
#include <asm/unaligned.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "nfp_arm.h"
#include "nfp_cpp.h"
#include "nfp6000/nfp6000.h"
#define NFP_ARM_GCSR_SOFTMODEL2 0x0000014c
#define NFP_ARM_GCSR_SOFTMODEL3 0x00000150
/* A named CPP address range.  Entries are kept sorted by cpp_id, then
 * start address, on the per-device resource_list (see __resource_add()).
 */
struct nfp_cpp_resource {
	struct list_head list;
	const char *name;
	u32 cpp_id;
	u64 start;
	u64 end;		/* inclusive: start + size - 1 */
};
/* Host-side state of a device-backed mutex, cached on cpp->mutex_cache. */
struct nfp_cpp_mutex {
	struct list_head list;
	struct nfp_cpp *cpp;
	int target;
	u16 usage;		/* number of owners (allocations) */
	u16 depth;		/* number of locks currently held */
	unsigned long long address;
	u32 key;
};
/* Per-device CPP handle; created by nfp_cpp_from_operations() and
 * destroyed by nfp_cpp_free().
 */
struct nfp_cpp {
	struct device dev;

	void *priv; /* Private data of the low-level implementation */

	/* Identity, cached in the handle (see the nfp_cpp_model() etc.
	 * accessors below).
	 */
	u32 model;
	u16 interface;
	u8 serial[NFP_SERIAL_LEN];

	const struct nfp_cpp_operations *op;
	struct list_head resource_list;	/* NFP CPP resource list */
	struct list_head mutex_cache;	/* Mutex cache */
	rwlock_t resource_lock;
	wait_queue_head_t waitq;	/* woken when a BAR is released */

	/* NFP6000 CPP Mapping Table */
	u32 imb_cat_table[16];

	/* Cached areas for cpp/xpb readl/writel speedups */
	struct mutex area_cache_mutex;  /* Lock for the area cache */
	struct list_head area_cache_list;

	/* Cached information */
	void *hwinfo;
	void *rtsym;
};
/* Element of the area_cache_list */
struct nfp_cpp_area_cache {
struct list_head entry;
u32 id;
u64 addr;
u32 size;
struct nfp_cpp_area *area;
};
struct nfp_cpp_area {
	struct nfp_cpp *cpp;
	struct kref kref;	/* lifetime of the structure itself */
	atomic_t refcount;	/* number of outstanding acquisitions */
	struct mutex mutex;	/* Lock for the area's refcount */
	unsigned long long offset;
	unsigned long size;
	struct nfp_cpp_resource resource;
	void __iomem *iomem;
	/* Here follows the 'priv' part of nfp_cpp_area. */
};
struct nfp_cpp_explicit {
	struct nfp_cpp *cpp;
	struct nfp_cpp_explicit_command cmd;
	/* Here follows the 'priv' part of nfp_cpp_explicit. */
};
/* Insert @res into @head, keeping the list sorted by cpp_id and then by
 * start address.  Caller must hold cpp->resource_lock for writing.
 */
static void __resource_add(struct list_head *head, struct nfp_cpp_resource *res)
{
	struct list_head *insert_before;
	struct nfp_cpp_resource *cur;

	list_for_each(insert_before, head) {
		cur = container_of(insert_before, struct nfp_cpp_resource,
				   list);

		if (cur->cpp_id > res->cpp_id)
			break;
		if (cur->cpp_id == res->cpp_id && cur->start > res->start)
			break;
	}

	/* If no break occurred, insert_before == head: append at tail. */
	list_add_tail(&res->list, insert_before);
}
/* Unlink @res from its resource list.  Caller must hold
 * cpp->resource_lock for writing.
 */
static void __resource_del(struct nfp_cpp_resource *res)
{
	list_del_init(&res->list);
}
/* kref release callback: run the implementation's cleanup op, unlink
 * the area's resource entry and free the structure.  Invoked when the
 * last reference is dropped via nfp_cpp_area_put().
 */
static void __release_cpp_area(struct kref *kref)
{
	struct nfp_cpp_area *area =
		container_of(kref, struct nfp_cpp_area, kref);
	struct nfp_cpp *cpp = nfp_cpp_area_cpp(area);

	if (area->cpp->op->area_cleanup)
		area->cpp->op->area_cleanup(area);

	write_lock(&cpp->resource_lock);
	__resource_del(&area->resource);
	write_unlock(&cpp->resource_lock);
	kfree(area);
}
/* Drop a reference to @area; frees it via __release_cpp_area() when the
 * last reference goes away.
 */
static void nfp_cpp_area_put(struct nfp_cpp_area *area)
{
	kref_put(&area->kref, __release_cpp_area);
}

/* Take an additional reference to @area and return it. */
static struct nfp_cpp_area *nfp_cpp_area_get(struct nfp_cpp_area *area)
{
	kref_get(&area->kref);

	return area;
}
/**
 * nfp_cpp_free() - free the CPP handle
 * @cpp:	CPP handle
 *
 * Forcibly cleans up any dangling mutexes and areas (complaining about
 * them), releases the area caches, calls the implementation's free op
 * and unregisters the device.  @cpp must not be used afterwards.
 */
void nfp_cpp_free(struct nfp_cpp *cpp)
{
	struct nfp_cpp_area_cache *cache, *ctmp;
	struct nfp_cpp_resource *res, *rtmp;
	struct nfp_cpp_mutex *mutex, *mtmp;

	/* There should be no mutexes in the cache at this point. */
	WARN_ON(!list_empty(&cpp->mutex_cache));
	/* .. but if there are, unlock them and complain. */
	list_for_each_entry_safe(mutex, mtmp, &cpp->mutex_cache, list) {
		dev_err(cpp->dev.parent, "Dangling mutex: @%d::0x%llx, %d locks held by %d owners\n",
			mutex->target, (unsigned long long)mutex->address,
			mutex->depth, mutex->usage);

		/* Forcing an unlock */
		mutex->depth = 1;
		nfp_cpp_mutex_unlock(mutex);

		/* Forcing a free */
		mutex->usage = 1;
		nfp_cpp_mutex_free(mutex);
	}

	/* Remove all caches */
	list_for_each_entry_safe(cache, ctmp, &cpp->area_cache_list, entry) {
		list_del(&cache->entry);
		/* id != 0 means the cached area is still acquired */
		if (cache->id)
			nfp_cpp_area_release(cache->area);
		nfp_cpp_area_free(cache->area);
		kfree(cache);
	}

	/* There should be no dangling areas at this point */
	WARN_ON(!list_empty(&cpp->resource_list));

	/* .. but if they weren't, try to clean up. */
	list_for_each_entry_safe(res, rtmp, &cpp->resource_list, list) {
		struct nfp_cpp_area *area = container_of(res,
							 struct nfp_cpp_area,
							 resource);

		dev_err(cpp->dev.parent, "Dangling area: %d:%d:%d:0x%0llx-0x%0llx%s%s\n",
			NFP_CPP_ID_TARGET_of(res->cpp_id),
			NFP_CPP_ID_ACTION_of(res->cpp_id),
			NFP_CPP_ID_TOKEN_of(res->cpp_id),
			res->start, res->end,
			res->name ? " " : "",
			res->name ? res->name : "");

		if (area->cpp->op->area_release)
			area->cpp->op->area_release(area);

		__release_cpp_area(&area->kref);
	}

	if (cpp->op->free)
		cpp->op->free(cpp);

	kfree(cpp->hwinfo);
	kfree(cpp->rtsym);

	device_unregister(&cpp->dev);

	kfree(cpp);
}
/**
 * nfp_cpp_model() - Retrieve the Model ID of the NFP
 * @cpp:	NFP CPP handle
 *
 * Return: NFP CPP Model ID (as cached in the handle)
 */
u32 nfp_cpp_model(struct nfp_cpp *cpp)
{
	return cpp->model;
}

/**
 * nfp_cpp_interface() - Retrieve the Interface ID of the NFP
 * @cpp:	NFP CPP handle
 *
 * Return: NFP CPP Interface ID (as cached in the handle)
 */
u16 nfp_cpp_interface(struct nfp_cpp *cpp)
{
	return cpp->interface;
}

/**
 * nfp_cpp_serial() - Retrieve the Serial ID of the NFP
 * @cpp:	NFP CPP handle
 * @serial:	Pointer to NFP serial number
 *
 * The returned pointer refers to storage owned by @cpp; it stays valid
 * until nfp_cpp_free().
 *
 * Return: Length of NFP serial number (NFP_SERIAL_LEN)
 */
int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial)
{
	*serial = &cpp->serial[0];
	return sizeof(cpp->serial);
}
/* Get the cached HWInfo table (NULL until a caller sets it). */
void *nfp_hwinfo_cache(struct nfp_cpp *cpp)
{
	return cpp->hwinfo;
}

/* Cache the HWInfo table; ownership passes to @cpp, which kfree()s it
 * in nfp_cpp_free().
 */
void nfp_hwinfo_cache_set(struct nfp_cpp *cpp, void *val)
{
	cpp->hwinfo = val;
}

/* Get the cached run-time symbol table (NULL until a caller sets it). */
void *nfp_rtsym_cache(struct nfp_cpp *cpp)
{
	return cpp->rtsym;
}

/* Cache the run-time symbol table; ownership passes to @cpp, which
 * kfree()s it in nfp_cpp_free() or nfp_nffw_cache_flush().
 */
void nfp_rtsym_cache_set(struct nfp_cpp *cpp, void *val)
{
	cpp->rtsym = val;
}
/**
 * nfp_nffw_cache_flush() - Flush cached firmware information
 * @cpp:	NFP CPP handle
 *
 * Flush cached firmware information.  This function should be called
 * every time firmware is loaded or unloaded.
 */
void nfp_nffw_cache_flush(struct nfp_cpp *cpp)
{
	/* The rtsym table describes the loaded firmware, so it becomes
	 * stale whenever the firmware changes.
	 */
	kfree(nfp_rtsym_cache(cpp));
	nfp_rtsym_cache_set(cpp, NULL);
}
/**
 * nfp_cpp_area_alloc_with_name() - allocate a new CPP area
 * @cpp:	CPP device handle
 * @dest:	NFP CPP ID
 * @name:	Name of region
 * @address:	Address of region
 * @size:	Size of region
 *
 * Allocate and initialize a CPP area structure.  The area must later
 * be locked down with an 'acquire' before it can be safely accessed.
 *
 * NOTE: @address and @size must be 32-bit aligned values.
 *
 * Return: NFP CPP area handle, or NULL
 */
struct nfp_cpp_area *
nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, u32 dest, const char *name,
			     unsigned long long address, unsigned long size)
{
	struct nfp_cpp_area *area;
	u64 tmp64 = address;
	int err, name_len;

	/* Remap from cpp_island to cpp_target */
	err = nfp_target_cpp(dest, tmp64, &dest, &tmp64, cpp->imb_cat_table);
	if (err < 0)
		return NULL;
	address = tmp64;

	if (!name)
		name = "(reserved)";

	/* Single allocation: area struct, implementation priv area and
	 * the name copy live in one block.
	 */
	name_len = strlen(name) + 1;
	area = kzalloc(sizeof(*area) + cpp->op->area_priv_size + name_len,
		       GFP_KERNEL);
	if (!area)
		return NULL;

	area->cpp = cpp;
	area->resource.name = (void *)area + sizeof(*area) +
		cpp->op->area_priv_size;
	memcpy((char *)area->resource.name, name, name_len);

	area->resource.cpp_id = dest;
	area->resource.start = address;
	area->resource.end = area->resource.start + size - 1;
	INIT_LIST_HEAD(&area->resource.list);

	atomic_set(&area->refcount, 0);
	kref_init(&area->kref);
	mutex_init(&area->mutex);

	if (cpp->op->area_init) {
		/* Reuse the outer err - the previous inner declaration
		 * shadowed it.
		 */
		err = cpp->op->area_init(area, dest, address, size);
		if (err < 0) {
			kfree(area);
			return NULL;
		}
	}

	write_lock(&cpp->resource_lock);
	__resource_add(&cpp->resource_list, &area->resource);
	write_unlock(&cpp->resource_lock);

	area->offset = address;
	area->size = size;

	return area;
}
/**
 * nfp_cpp_area_alloc() - allocate a new CPP area
 * @cpp:	CPP handle
 * @dest:	CPP id
 * @address:	Start address on CPP target
 * @size:	Size of area in bytes
 *
 * Allocate and initialize a CPP area structure.  The area must later
 * be locked down with an 'acquire' before it can be safely accessed.
 * The caller owns the returned handle and must dispose of it with
 * nfp_cpp_area_free().
 *
 * NOTE: @address and @size must be 32-bit aligned values.
 *
 * Return: NFP CPP Area handle, or NULL
 */
struct nfp_cpp_area *
nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest,
		   unsigned long long address, unsigned long size)
{
	/* Just the named variant with an auto-generated placeholder name */
	return nfp_cpp_area_alloc_with_name(cpp, dest, NULL, address, size);
}
/**
 * nfp_cpp_area_free() - free up the CPP area
 * @area:	CPP area handle
 *
 * Frees up memory resources held by the CPP area.  This only drops a
 * reference; the area is destroyed once the last reference is gone.
 */
void nfp_cpp_area_free(struct nfp_cpp_area *area)
{
	nfp_cpp_area_put(area);
}
/**
 * nfp_cpp_area_acquire() - lock down a CPP area for access
 * @area:	CPP area handle
 *
 * Locks down the CPP area for a potential long term activity.  Area
 * must always be locked down before being accessed.
 *
 * Return: 0, or -ERRNO
 */
int nfp_cpp_area_acquire(struct nfp_cpp_area *area)
{
	mutex_lock(&area->mutex);
	/* Only the first acquirer performs the real BAR acquisition */
	if (atomic_inc_return(&area->refcount) == 1) {
		int (*a_a)(struct nfp_cpp_area *);

		a_a = area->cpp->op->area_acquire;
		if (a_a) {
			int err, ret;

			/* Wait until a BAR can be acquired.  If the wait
			 * is interrupted by a signal, report that instead
			 * of the stale -EAGAIN left in @err by the last
			 * condition check.
			 */
			ret = wait_event_interruptible(area->cpp->waitq,
						       (err = a_a(area)) !=
						       -EAGAIN);
			if (ret < 0)
				err = ret;
			if (err < 0) {
				atomic_dec(&area->refcount);
				mutex_unlock(&area->mutex);
				return err;
			}
		}
	}
	mutex_unlock(&area->mutex);

	nfp_cpp_area_get(area);

	return 0;
}
/**
 * nfp_cpp_area_acquire_nonblocking() - lock down a CPP area for access
 * @area:	CPP area handle
 *
 * Locks down the CPP area for a potential long term activity.  Area
 * must always be locked down before being accessed.
 *
 * NOTE: Returns -EAGAIN if no area is available
 *
 * Return: 0, or -ERRNO
 */
int nfp_cpp_area_acquire_nonblocking(struct nfp_cpp_area *area)
{
	int err;

	mutex_lock(&area->mutex);
	/* Only the first acquirer performs the real BAR acquisition;
	 * unlike nfp_cpp_area_acquire() we do not wait for a free BAR.
	 */
	if (atomic_inc_return(&area->refcount) == 1 &&
	    area->cpp->op->area_acquire) {
		err = area->cpp->op->area_acquire(area);
		if (err < 0) {
			atomic_dec(&area->refcount);
			mutex_unlock(&area->mutex);
			return err;
		}
	}
	mutex_unlock(&area->mutex);

	nfp_cpp_area_get(area);

	return 0;
}
/**
 * nfp_cpp_area_release() - release a locked down CPP area
 * @area:	CPP area handle
 *
 * Releases a previously locked down CPP area.
 */
void nfp_cpp_area_release(struct nfp_cpp_area *area)
{
	mutex_lock(&area->mutex);
	/* Only call the release on refcount == 0 */
	if (atomic_dec_and_test(&area->refcount)) {
		if (area->cpp->op->area_release) {
			area->cpp->op->area_release(area);
			/* Let anyone waiting for a BAR try to get one.. */
			wake_up_interruptible_all(&area->cpp->waitq);
		}
	}
	mutex_unlock(&area->mutex);

	/* Drop the reference taken by nfp_cpp_area_acquire*() */
	nfp_cpp_area_put(area);
}
/**
 * nfp_cpp_area_release_free() - release CPP area and free it
 * @area:	CPP area handle
 *
 * Releases CPP area and frees up memory resources held by it.
 */
void nfp_cpp_area_release_free(struct nfp_cpp_area *area)
{
	nfp_cpp_area_release(area);
	nfp_cpp_area_free(area);
}
/**
 * nfp_cpp_area_read() - read data from CPP area
 * @area:	  CPP area handle
 * @offset:	  offset into CPP area
 * @kernel_vaddr: kernel address to put data into
 * @length:	  number of bytes to read
 *
 * Read data from indicated CPP region.
 *
 * NOTE: @offset and @length must be 32-bit aligned values.
 *
 * NOTE: Area must have been locked down with an 'acquire'.
 *
 * Return: length of io, or -ERRNO
 */
int nfp_cpp_area_read(struct nfp_cpp_area *area,
		      unsigned long offset, void *kernel_vaddr,
		      size_t length)
{
	/* Note: the op takes (area, buffer, offset, length) - the buffer
	 * and offset arguments are in the opposite order to this wrapper.
	 */
	return area->cpp->op->area_read(area, kernel_vaddr, offset, length);
}

/**
 * nfp_cpp_area_write() - write data to CPP area
 * @area:	  CPP area handle
 * @offset:	  offset into CPP area
 * @kernel_vaddr: kernel address to read data from
 * @length:	  number of bytes to write
 *
 * Write data to indicated CPP region.
 *
 * NOTE: @offset and @length must be 32-bit aligned values.
 *
 * NOTE: Area must have been locked down with an 'acquire'.
 *
 * Return: length of io, or -ERRNO
 */
int nfp_cpp_area_write(struct nfp_cpp_area *area,
		       unsigned long offset, const void *kernel_vaddr,
		       size_t length)
{
	/* Note: the op takes (area, buffer, offset, length) - the buffer
	 * and offset arguments are in the opposite order to this wrapper.
	 */
	return area->cpp->op->area_write(area, kernel_vaddr, offset, length);
}
/**
 * nfp_cpp_area_check_range() - check if address range fits in CPP area
 * @area:	CPP area handle
 * @offset:	offset into CPP target
 * @length:	size of address range in bytes
 *
 * Check if address range fits within CPP area.
 *
 * Return: 0 if the range fits, or -EFAULT
 */
int nfp_cpp_area_check_range(struct nfp_cpp_area *area,
			     unsigned long long offset, unsigned long length)
{
	/* Range must neither start before the area ... */
	if (offset < area->offset)
		return -EFAULT;
	/* ... nor extend past its end */
	if (offset + length > area->offset + area->size)
		return -EFAULT;

	return 0;
}
/**
 * nfp_cpp_area_name() - return name of a CPP area
 * @cpp_area:	CPP area handle
 *
 * Return: Name of the area, or NULL
 */
const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area)
{
	return cpp_area->resource.name;
}

/**
 * nfp_cpp_area_priv() - return private struct for CPP area
 * @cpp_area:	CPP area handle
 *
 * Return: Private data for the CPP area
 */
void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area)
{
	/* The priv region is allocated directly after the structure
	 * (see nfp_cpp_area_alloc_with_name()).
	 */
	return &cpp_area[1];
}

/**
 * nfp_cpp_area_cpp() - return CPP handle for CPP area
 * @cpp_area:	CPP area handle
 *
 * Return: NFP CPP handle
 */
struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area)
{
	return cpp_area->cpp;
}
/**
 * nfp_cpp_area_resource() - get resource
 * @area:	CPP area handle
 *
 * NOTE: Area must have been locked down with an 'acquire'.
 *
 * Return: struct resource pointer, or NULL if the implementation does
 *	   not provide one
 */
struct resource *nfp_cpp_area_resource(struct nfp_cpp_area *area)
{
	if (!area->cpp->op->area_resource)
		return NULL;

	return area->cpp->op->area_resource(area);
}
/**
* nfp_cpp_area_phys() - get physical address of CPP area
* @area: CPP area handle
*
* NOTE: Area must have been locked down with an 'acquire'.
*
* Return: phy_addr_t of the area, or NULL
*/
phys_addr_t nfp_cpp_area_phys(struct nfp_cpp_area *area)
{
phys_addr_t addr = ~0;
if (area->cpp->op->area_phys)
addr = area->cpp->op->area_phys(area);
return addr;
}
/**
 * nfp_cpp_area_iomem() - get IOMEM region for CPP area
 * @area:	CPP area handle
 *
 * Returns an iomem pointer for use with readl()/writel() style
 * operations.
 *
 * NOTE: Area must have been locked down with an 'acquire'.
 *
 * Return: __iomem pointer to the area, or NULL
 */
void __iomem *nfp_cpp_area_iomem(struct nfp_cpp_area *area)
{
	if (!area->cpp->op->area_iomem)
		return NULL;

	return area->cpp->op->area_iomem(area);
}
/**
 * nfp_cpp_area_readl() - Read a u32 word from an area
 * @area:	CPP Area handle
 * @offset:	Offset into area
 * @value:	Pointer to read buffer
 *
 * The device data is little-endian; @value is converted to host order.
 *
 * Return: length of the io, or -ERRNO
 */
int nfp_cpp_area_readl(struct nfp_cpp_area *area,
		       unsigned long offset, u32 *value)
{
	u8 buf[sizeof(u32)];
	int n;

	n = nfp_cpp_area_read(area, offset, buf, sizeof(buf));
	*value = get_unaligned_le32(buf);

	return n;
}

/**
 * nfp_cpp_area_writel() - Write a u32 word to an area
 * @area:	CPP Area handle
 * @offset:	Offset into area
 * @value:	Value to write
 *
 * @value is converted to little-endian device order before writing.
 *
 * Return: length of the io, or -ERRNO
 */
int nfp_cpp_area_writel(struct nfp_cpp_area *area,
			unsigned long offset, u32 value)
{
	u8 buf[sizeof(u32)];

	put_unaligned_le32(value, buf);

	return nfp_cpp_area_write(area, offset, buf, sizeof(buf));
}

/**
 * nfp_cpp_area_readq() - Read a u64 word from an area
 * @area:	CPP Area handle
 * @offset:	Offset into area
 * @value:	Pointer to read buffer
 *
 * The device data is little-endian; @value is converted to host order.
 *
 * Return: length of the io, or -ERRNO
 */
int nfp_cpp_area_readq(struct nfp_cpp_area *area,
		       unsigned long offset, u64 *value)
{
	u8 buf[sizeof(u64)];
	int n;

	n = nfp_cpp_area_read(area, offset, buf, sizeof(buf));
	*value = get_unaligned_le64(buf);

	return n;
}

/**
 * nfp_cpp_area_writeq() - Write a u64 word to an area
 * @area:	CPP Area handle
 * @offset:	Offset into area
 * @value:	Value to write
 *
 * @value is converted to little-endian device order before writing.
 *
 * Return: length of the io, or -ERRNO
 */
int nfp_cpp_area_writeq(struct nfp_cpp_area *area,
			unsigned long offset, u64 value)
{
	u8 buf[sizeof(u64)];

	put_unaligned_le64(value, buf);

	return nfp_cpp_area_write(area, offset, buf, sizeof(buf));
}
/**
 * nfp_cpp_area_fill() - fill a CPP area with a value
 * @area:	CPP area
 * @offset:	offset into CPP area
 * @value:	value to fill with
 * @length:	length of area to fill
 *
 * Fill indicated area with given 32-bit value, repeated word by word.
 * Both @offset and @length must be multiples of 4 bytes.
 *
 * Return: length of io, or -ERRNO
 */
int nfp_cpp_area_fill(struct nfp_cpp_area *area,
		      unsigned long offset, u32 value, size_t length)
{
	u8 word[4];
	size_t written;
	int ret;

	put_unaligned_le32(value, word);

	if (offset % sizeof(word) || length % sizeof(word))
		return -EINVAL;

	for (written = 0; written < length; written += sizeof(word)) {
		ret = nfp_cpp_area_write(area, offset + written,
					 word, sizeof(word));
		if (ret < 0)
			return ret;
	}

	return written;
}
/**
 * nfp_cpp_area_cache_add() - Permanently reserve an area for the hot cache
 * @cpp:	NFP CPP handle
 * @size:	Size of the area - MUST BE A POWER OF 2.
 *
 * Return: 0, or -ERRNO
 */
int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size)
{
	struct nfp_cpp_area_cache *cache;
	struct nfp_cpp_area *area;

	/* Allocate an area - we use the MU target's base as a placeholder,
	 * as all supported chips have a MU.
	 */
	area = nfp_cpp_area_alloc(cpp, NFP_CPP_ID(7, NFP_CPP_ACTION_RW, 0),
				  0, size);
	if (!area)
		return -ENOMEM;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (!cache) {
		/* Don't leak the area allocated above */
		nfp_cpp_area_free(area);
		return -ENOMEM;
	}

	/* id == 0 marks the entry as unused until area_cache_get()
	 * binds it to a real target/address.
	 */
	cache->id = 0;
	cache->addr = 0;
	cache->size = size;
	cache->area = area;
	mutex_lock(&cpp->area_cache_mutex);
	list_add_tail(&cache->entry, &cpp->area_cache_list);
	mutex_unlock(&cpp->area_cache_mutex);

	return 0;
}
/* Find (or repurpose the LRU tail for) a cached, acquired area covering
 * @addr + *@offset/@length on CPP target @id.
 *
 * On success the area_cache_mutex is held, *@offset is rewritten to be
 * relative to the start of the cached area, and the caller must return
 * the entry via area_cache_put().  Returns NULL - with the mutex NOT
 * held - when the request cannot be served from the cache.
 */
static struct nfp_cpp_area_cache *
area_cache_get(struct nfp_cpp *cpp, u32 id,
	       u64 addr, unsigned long *offset, size_t length)
{
	struct nfp_cpp_area_cache *cache;
	int err;

	/* Early exit when length == 0, which prevents
	 * the need for special case code below when
	 * checking against available cache size.
	 */
	if (length == 0)
		return NULL;

	if (list_empty(&cpp->area_cache_list) || id == 0)
		return NULL;

	/* Remap from cpp_island to cpp_target */
	err = nfp_target_cpp(id, addr, &id, &addr, cpp->imb_cat_table);
	if (err < 0)
		return NULL;

	addr += *offset;

	mutex_lock(&cpp->area_cache_mutex);

	/* See if we have a match */
	list_for_each_entry(cache, &cpp->area_cache_list, entry) {
		if (id == cache->id &&
		    addr >= cache->addr &&
		    addr + length <= cache->addr + cache->size)
			goto exit;
	}

	/* No matches - inspect the tail of the LRU */
	cache = list_entry(cpp->area_cache_list.prev,
			   struct nfp_cpp_area_cache, entry);

	/* Can we fit in the cache entry? (cache->size is a power of 2,
	 * so this checks the range doesn't straddle an alignment window)
	 */
	if (round_down(addr + length - 1, cache->size) !=
	    round_down(addr, cache->size)) {
		mutex_unlock(&cpp->area_cache_mutex);
		return NULL;
	}

	/* If id != 0, we will need to release it */
	if (cache->id) {
		nfp_cpp_area_release(cache->area);
		cache->id = 0;
		cache->addr = 0;
	}

	/* Adjust the start address to be cache size aligned */
	cache->id = id;
	cache->addr = addr & ~(u64)(cache->size - 1);

	/* Re-init to the new ID and address */
	if (cpp->op->area_init) {
		err = cpp->op->area_init(cache->area,
					 id, cache->addr, cache->size);
		if (err < 0) {
			mutex_unlock(&cpp->area_cache_mutex);
			return NULL;
		}
	}

	/* Attempt to acquire */
	err = nfp_cpp_area_acquire(cache->area);
	if (err < 0) {
		mutex_unlock(&cpp->area_cache_mutex);
		return NULL;
	}

exit:
	/* Adjust offset */
	*offset = addr - cache->addr;
	return cache;
}
/* Return a cache entry obtained from area_cache_get(): promote it to
 * the front of the LRU list and drop the area_cache_mutex that the
 * get acquired.
 */
static void
area_cache_put(struct nfp_cpp *cpp, struct nfp_cpp_area_cache *cache)
{
	if (!cache)
		return;

	/* Move to front of LRU */
	list_del(&cache->entry);
	list_add(&cache->entry, &cpp->area_cache_list);

	mutex_unlock(&cpp->area_cache_mutex);
}
/**
 * nfp_cpp_read() - read from CPP target
 * @cpp:	  CPP handle
 * @destination:  CPP id
 * @address:	  offset into CPP target
 * @kernel_vaddr: kernel buffer for result
 * @length:	  number of bytes to read
 *
 * Return: length of io, or -ERRNO
 */
int nfp_cpp_read(struct nfp_cpp *cpp, u32 destination,
		 unsigned long long address, void *kernel_vaddr, size_t length)
{
	struct nfp_cpp_area_cache *cache;
	struct nfp_cpp_area *area;
	unsigned long offset = 0;
	int err;

	/* Fast path: use a pre-acquired cached area if one covers the
	 * requested range.
	 */
	cache = area_cache_get(cpp, destination, address, &offset, length);
	if (cache) {
		area = cache->area;
	} else {
		area = nfp_cpp_area_alloc(cpp, destination, address, length);
		if (!area)
			return -ENOMEM;

		err = nfp_cpp_area_acquire(area);
		if (err) {
			/* The area was never acquired, so it must only be
			 * freed, not released.
			 */
			nfp_cpp_area_free(area);
			return err;
		}
	}

	err = nfp_cpp_area_read(area, offset, kernel_vaddr, length);

	if (cache)
		area_cache_put(cpp, cache);
	else
		nfp_cpp_area_release_free(area);

	return err;
}
/**
 * nfp_cpp_write() - write to CPP target
 * @cpp:	  CPP handle
 * @destination:  CPP id
 * @address:	  offset into CPP target
 * @kernel_vaddr: kernel buffer to read from
 * @length:	  number of bytes to write
 *
 * Return: length of io, or -ERRNO
 */
int nfp_cpp_write(struct nfp_cpp *cpp, u32 destination,
		  unsigned long long address,
		  const void *kernel_vaddr, size_t length)
{
	struct nfp_cpp_area_cache *cache;
	struct nfp_cpp_area *area;
	unsigned long offset = 0;
	int err;

	/* Fast path: use a pre-acquired cached area if one covers the
	 * requested range.
	 */
	cache = area_cache_get(cpp, destination, address, &offset, length);
	if (cache) {
		area = cache->area;
	} else {
		area = nfp_cpp_area_alloc(cpp, destination, address, length);
		if (!area)
			return -ENOMEM;

		err = nfp_cpp_area_acquire(area);
		if (err) {
			/* The area was never acquired, so it must only be
			 * freed, not released.
			 */
			nfp_cpp_area_free(area);
			return err;
		}
	}

	err = nfp_cpp_area_write(area, offset, kernel_vaddr, length);

	if (cache)
		area_cache_put(cpp, cache);
	else
		nfp_cpp_area_release_free(area);

	return err;
}
/* Return the correct CPP address, and fixup xpb_addr as needed.
 * NOTE(review): target 14 is assumed to be the XPB CPP target -
 * confirm against the NFP target definitions.
 */
static u32 nfp_xpb_to_cpp(struct nfp_cpp *cpp, u32 *xpb_addr)
{
	int island;
	u32 xpb;

	xpb = NFP_CPP_ID(14, NFP_CPP_ACTION_RW, 0);
	/* Ensure that non-local XPB accesses go
	 * out through the global XPBM bus.
	 */
	island = (*xpb_addr >> 24) & 0x3f;
	if (!island)
		return xpb;

	if (island != 1) {
		/* Set the global XPBM bit */
		*xpb_addr |= 1 << 30;
		return xpb;
	}

	/* Accesses to the ARM Island overlay uses Island 0 / Global Bit */
	*xpb_addr &= ~0x7f000000;
	if (*xpb_addr < 0x60000) {
		*xpb_addr |= 1 << 30;
	} else {
		/* And only non-ARM interfaces use the island id = 1 */
		if (NFP_CPP_INTERFACE_TYPE_of(nfp_cpp_interface(cpp))
		    != NFP_CPP_INTERFACE_TYPE_ARM)
			*xpb_addr |= 1 << 24;
	}

	return xpb;
}
/**
 * nfp_xpb_readl() - Read a u32 word from a XPB location
 * @cpp:	CPP device handle
 * @xpb_addr:	Address for operation
 * @value:	Pointer to read buffer
 *
 * Return: length of the io, or -ERRNO
 */
int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_addr, u32 *value)
{
	/* nfp_xpb_to_cpp() may modify our local copy of xpb_addr */
	u32 cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr);

	return nfp_cpp_readl(cpp, cpp_dest, xpb_addr, value);
}

/**
 * nfp_xpb_writel() - Write a u32 word to a XPB location
 * @cpp:	CPP device handle
 * @xpb_addr:	Address for operation
 * @value:	Value to write
 *
 * Return: length of the io, or -ERRNO
 */
int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_addr, u32 value)
{
	/* nfp_xpb_to_cpp() may modify our local copy of xpb_addr */
	u32 cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr);

	return nfp_cpp_writel(cpp, cpp_dest, xpb_addr, value);
}
/**
 * nfp_xpb_writelm() - Modify bits of a 32-bit value from the XPB bus
 * @cpp: NFP CPP device handle
 * @xpb_tgt: XPB target and address
 * @mask: mask of bits to alter
 * @value: value to modify
 *
 * KERNEL: This operation is safe to call in interrupt or softirq context.
 *
 * Return: length of the io, or -ERRNO
 */
int nfp_xpb_writelm(struct nfp_cpp *cpp, u32 xpb_tgt,
		    u32 mask, u32 value)
{
	u32 cur;
	int err;

	err = nfp_xpb_readl(cpp, xpb_tgt, &cur);
	if (err < 0)
		return err;

	/* Clear the masked bits, then merge in the new ones */
	cur = (cur & ~mask) | (value & mask);

	return nfp_xpb_writel(cpp, xpb_tgt, cur);
}
/* Lockdep markers */
static struct lock_class_key nfp_cpp_resource_lock_key;
/* Release callback for the "cpp" child device.  The nfp_cpp structure
 * is freed by its owner (see the error paths in
 * nfp_cpp_from_operations()); an empty callback is provided only to
 * satisfy the driver core, which warns on devices without one.
 */
static void nfp_cpp_dev_release(struct device *dev)
{
	/* Nothing to do here - it just makes the kernel happy */
}
/**
 * nfp_cpp_from_operations() - Create a NFP CPP handle
 *			from an operations structure
 * @ops:	NFP CPP operations structure
 * @parent:	Parent device
 * @priv:	Private data of low-level implementation
 *
 * NOTE: On failure, cpp_ops->free will be called!
 *
 * Return: NFP CPP handle on success, ERR_PTR on failure
 */
struct nfp_cpp *
nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
			struct device *parent, void *priv)
{
	const u32 arm = NFP_CPP_ID(NFP_CPP_TARGET_ARM, NFP_CPP_ACTION_RW, 0);
	struct nfp_cpp *cpp;
	u32 mask[2];
	u32 xpbaddr;
	size_t tgt;
	int err;
	cpp = kzalloc(sizeof(*cpp), GFP_KERNEL);
	if (!cpp) {
		err = -ENOMEM;
		goto err_malloc;
	}
	cpp->op = ops;
	cpp->priv = priv;
	cpp->interface = ops->get_interface(parent);
	/* Serial number is optional in the transport operations */
	if (ops->read_serial)
		ops->read_serial(parent, cpp->serial);
	rwlock_init(&cpp->resource_lock);
	init_waitqueue_head(&cpp->waitq);
	lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key);
	INIT_LIST_HEAD(&cpp->mutex_cache);
	INIT_LIST_HEAD(&cpp->resource_list);
	INIT_LIST_HEAD(&cpp->area_cache_list);
	mutex_init(&cpp->area_cache_mutex);
	cpp->dev.init_name = "cpp";
	cpp->dev.parent = parent;
	cpp->dev.release = nfp_cpp_dev_release;
	err = device_register(&cpp->dev);
	if (err < 0) {
		/* device_register() failure requires put_device(); the
		 * release callback is empty, so kfree() below is still
		 * needed.
		 */
		put_device(&cpp->dev);
		goto err_dev;
	}
	dev_set_drvdata(&cpp->dev, cpp);
	/* NOTE: cpp_lock is NOT locked for op->init,
	 * since it may call NFP CPP API operations
	 */
	if (cpp->op->init) {
		err = cpp->op->init(cpp);
		if (err < 0) {
			dev_err(parent,
				"NFP interface initialization failed\n");
			goto err_out;
		}
	}
	err = nfp_cpp_model_autodetect(cpp, &cpp->model);
	if (err < 0) {
		dev_err(parent, "NFP model detection failed\n");
		goto err_out;
	}
	/* Cache the per-target IMB lookup table from the chip */
	for (tgt = 0; tgt < ARRAY_SIZE(cpp->imb_cat_table); tgt++) {
		/* Hardcoded XPB IMB Base, island 0 */
		xpbaddr = 0x000a0000 + (tgt * 4);
		err = nfp_xpb_readl(cpp, xpbaddr,
				    &cpp->imb_cat_table[tgt]);
		if (err < 0) {
			dev_err(parent,
				"Can't read CPP mapping from device\n");
			goto err_out;
		}
	}
	/* NOTE(review): mask[] values are read but never used below --
	 * confirm whether the SOFTMODEL2/3 reads are still required.
	 */
	nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL2,
		      &mask[0]);
	nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL3,
		      &mask[1]);
	dev_info(cpp->dev.parent, "Model: 0x%08x, SN: %pM, Ifc: 0x%04x\n",
		 nfp_cpp_model(cpp), cpp->serial, nfp_cpp_interface(cpp));
	return cpp;
err_out:
	device_unregister(&cpp->dev);
err_dev:
	kfree(cpp);
err_malloc:
	return ERR_PTR(err);
}
/**
 * nfp_cpp_priv() - Get the operations private data of a CPP handle
 * @cpp: CPP handle
 *
 * Return: Private data for the NFP CPP handle
 */
void *nfp_cpp_priv(struct nfp_cpp *cpp)
{
	/* Set from the @priv argument of nfp_cpp_from_operations() */
	return cpp->priv;
}
/**
 * nfp_cpp_device() - Get the Linux device handle of a CPP handle
 * @cpp: CPP handle
 *
 * Return: Device for the NFP CPP bus
 */
struct device *nfp_cpp_device(struct nfp_cpp *cpp)
{
	return &cpp->dev;
}
/* Invoke an optional explicit-access callback from the transport's
 * operations vector.  Evaluates to the callback's return value, or
 * -ENODEV when the transport does not implement the callback.
 */
#define NFP_EXPL_OP(func, expl, args...)			\
	({							\
		struct nfp_cpp *cpp = nfp_cpp_explicit_cpp(expl); \
		int err = -ENODEV;				\
								\
		if (cpp->op->func)				\
			err = cpp->op->func(expl, ##args);	\
		err;						\
	})
/* As NFP_EXPL_OP(), but for callbacks with no return value;
 * silently does nothing if the callback is not implemented.
 */
#define NFP_EXPL_OP_NR(func, expl, args...)			\
	({							\
		struct nfp_cpp *cpp = nfp_cpp_explicit_cpp(expl); \
								\
		if (cpp->op->func)				\
			cpp->op->func(expl, ##args);		\
								\
	})
/**
 * nfp_cpp_explicit_acquire() - Acquire explicit access handle
 * @cpp:	NFP CPP handle
 *
 * The 'data_ref' and 'signal_ref' values are useful when
 * constructing the NFP_EXPL_CSR1 and NFP_EXPL_POST values.
 *
 * Return: NFP CPP explicit handle
 */
struct nfp_cpp_explicit *nfp_cpp_explicit_acquire(struct nfp_cpp *cpp)
{
	struct nfp_cpp_explicit *expl;
	int err;
	/* Transport private data lives directly after the handle;
	 * see nfp_cpp_explicit_priv().
	 */
	expl = kzalloc(sizeof(*expl) + cpp->op->explicit_priv_size, GFP_KERNEL);
	if (!expl)
		return NULL;
	expl->cpp = cpp;
	err = NFP_EXPL_OP(explicit_acquire, expl);
	if (err < 0) {
		kfree(expl);
		return NULL;
	}
	return expl;
}
/* The following setters only stage fields in the handle's command
 * descriptor; nothing is sent to the chip until nfp_cpp_explicit_do().
 */
/**
 * nfp_cpp_explicit_set_target() - Set target fields for explicit
 * @expl:	Explicit handle
 * @cpp_id:	CPP ID field
 * @len:	CPP Length field
 * @mask:	CPP Mask field
 *
 * Return: 0, or -ERRNO
 */
int nfp_cpp_explicit_set_target(struct nfp_cpp_explicit *expl,
				u32 cpp_id, u8 len, u8 mask)
{
	expl->cmd.cpp_id = cpp_id;
	expl->cmd.len = len;
	expl->cmd.byte_mask = mask;
	return 0;
}
/**
 * nfp_cpp_explicit_set_data() - Set data fields for explicit
 * @expl:	Explicit handle
 * @data_master: CPP Data Master field
 * @data_ref:	CPP Data Ref field
 *
 * Return: 0, or -ERRNO
 */
int nfp_cpp_explicit_set_data(struct nfp_cpp_explicit *expl,
			      u8 data_master, u16 data_ref)
{
	expl->cmd.data_master = data_master;
	expl->cmd.data_ref = data_ref;
	return 0;
}
/**
 * nfp_cpp_explicit_set_signal() - Set signal fields for explicit
 * @expl:	Explicit handle
 * @signal_master: CPP Signal Master field
 * @signal_ref:	CPP Signal Ref field
 *
 * Return: 0, or -ERRNO
 */
int nfp_cpp_explicit_set_signal(struct nfp_cpp_explicit *expl,
				u8 signal_master, u8 signal_ref)
{
	expl->cmd.signal_master = signal_master;
	expl->cmd.signal_ref = signal_ref;
	return 0;
}
/**
 * nfp_cpp_explicit_set_posted() - Set completion fields for explicit
 * @expl:	Explicit handle
 * @posted:	True for signaled completion, false otherwise
 * @siga:	CPP Signal A field
 * @siga_mode:	CPP Signal A Mode field
 * @sigb:	CPP Signal B field
 * @sigb_mode:	CPP Signal B Mode field
 *
 * Return: 0, or -ERRNO
 */
int nfp_cpp_explicit_set_posted(struct nfp_cpp_explicit *expl, int posted,
				u8 siga,
				enum nfp_cpp_explicit_signal_mode siga_mode,
				u8 sigb,
				enum nfp_cpp_explicit_signal_mode sigb_mode)
{
	expl->cmd.posted = posted;
	expl->cmd.siga = siga;
	expl->cmd.sigb = sigb;
	expl->cmd.siga_mode = siga_mode;
	expl->cmd.sigb_mode = sigb_mode;
	return 0;
}
/**
 * nfp_cpp_explicit_put() - Set up the write (pull) data for a explicit access
 * @expl:	NFP CPP Explicit handle
 * @buff:	Data to have the target pull in the transaction
 * @len:	Length of data, in bytes
 *
 * The 'len' parameter must be less than or equal to 128 bytes.
 *
 * If this function is called before the configuration
 * registers are set, it will return -EINVAL.
 *
 * Return: 0, or -ERRNO
 */
int nfp_cpp_explicit_put(struct nfp_cpp_explicit *expl,
			 const void *buff, size_t len)
{
	/* Delegates to the transport; -ENODEV if not implemented */
	return NFP_EXPL_OP(explicit_put, expl, buff, len);
}
/**
 * nfp_cpp_explicit_do() - Execute a transaction, and wait for it to complete
 * @expl:	NFP CPP Explicit handle
 * @address:	Address to send in the explicit transaction
 *
 * If this function is called before the configuration
 * registers are set, it will return -1, with an errno of EINVAL.
 *
 * Return: 0, or -ERRNO
 */
int nfp_cpp_explicit_do(struct nfp_cpp_explicit *expl, u64 address)
{
	/* Issues the command staged via the set_* helpers above */
	return NFP_EXPL_OP(explicit_do, expl, &expl->cmd, address);
}
/**
 * nfp_cpp_explicit_get() - Get the 'push' (read) data from a explicit access
 * @expl:	NFP CPP Explicit handle
 * @buff:	Data that the target pushed in the transaction
 * @len:	Length of data, in bytes
 *
 * The 'len' parameter must be less than or equal to 128 bytes.
 *
 * If this function is called before all three configuration
 * registers are set, it will return -1, with an errno of EINVAL.
 *
 * If this function is called before nfp_cpp_explicit_do()
 * has completed, it will return -1, with an errno of EBUSY.
 *
 * Return: 0, or -ERRNO
 */
int nfp_cpp_explicit_get(struct nfp_cpp_explicit *expl, void *buff, size_t len)
{
	return NFP_EXPL_OP(explicit_get, expl, buff, len);
}
/**
 * nfp_cpp_explicit_release() - Release explicit access handle
 * @expl:	NFP CPP Explicit handle
 *
 */
void nfp_cpp_explicit_release(struct nfp_cpp_explicit *expl)
{
	/* Let the transport tear down first, then free the handle
	 * (and the trailing private area allocated with it).
	 */
	NFP_EXPL_OP_NR(explicit_release, expl);
	kfree(expl);
}
/**
 * nfp_cpp_explicit_cpp() - return CPP handle for CPP explicit
 * @cpp_explicit:	CPP explicit handle
 *
 * Return: NFP CPP handle of the explicit
 */
struct nfp_cpp *nfp_cpp_explicit_cpp(struct nfp_cpp_explicit *cpp_explicit)
{
	return cpp_explicit->cpp;
}
/**
 * nfp_cpp_explicit_priv() - return private struct for CPP explicit
 * @cpp_explicit:	CPP explicit handle
 *
 * Return: private data of the explicit, or NULL
 */
void *nfp_cpp_explicit_priv(struct nfp_cpp_explicit *cpp_explicit)
{
	/* Private area is allocated immediately after the handle in
	 * nfp_cpp_explicit_acquire() (explicit_priv_size bytes).
	 */
	return &cpp_explicit[1];
}
/* THIS FUNCTION IS NOT EXPORTED */
/* Encode the "locked by @interface" value stored in the mutex word:
 * owner interface in the top 16 bits, 0x000f marker in the low bits.
 */
static u32 nfp_mutex_locked(u16 interface)
{
	return (u32)interface << 16 | 0x000f;
}
/* Encode the "unlocked, last held by @interface" value */
static u32 nfp_mutex_unlocked(u16 interface)
{
	return (u32)interface << 16 | 0x0000;
}
/* Test only the low 16 marker bits; the owner id is ignored */
static bool nfp_mutex_is_locked(u32 val)
{
	return (val & 0xffff) == 0x000f;
}
static bool nfp_mutex_is_unlocked(u32 val)
{
	/* Was "== 0000": an octal literal.  Same value as zero, but
	 * misleading next to the hex constants above, so use hex.
	 */
	return (val & 0xffff) == 0x0000;
}
/* If you need more than 65536 recursive locks, please rethink your code. */
#define MUTEX_DEPTH_MAX         0xffff
/* Sanity-check the interface/target/address triple used by the mutex
 * API.  Returns 0 if usable, -EINVAL otherwise.  Note @target is taken
 * by pointer but is not modified here -- presumably reserved for a
 * future target translation; TODO confirm.
 */
static int
nfp_cpp_mutex_validate(u16 interface, int *target, unsigned long long address)
{
	/* Not permitted on invalid interfaces */
	if (NFP_CPP_INTERFACE_TYPE_of(interface) ==
	    NFP_CPP_INTERFACE_TYPE_INVALID)
		return -EINVAL;
	/* Address must be 64-bit aligned */
	if (address & 7)
		return -EINVAL;
	/* Only the Memory Unit target supports the required atomics */
	if (*target != NFP_CPP_TARGET_MU)
		return -EINVAL;
	return 0;
}
/**
 * nfp_cpp_mutex_init() - Initialize a mutex location
 * @cpp:	NFP CPP handle
 * @target:	NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
 * @address:	Offset into the address space of the NFP CPP target ID
 * @key:	Unique 32-bit value for this mutex
 *
 * The CPP target:address must point to a 64-bit aligned location, and
 * will initialize 64 bits of data at the location.
 *
 * This creates the initial mutex state, as locked by this
 * nfp_cpp_interface().
 *
 * This function should only be called when setting up
 * the initial lock state upon boot-up of the system.
 *
 * Return: 0 on success, or -errno on failure
 */
int nfp_cpp_mutex_init(struct nfp_cpp *cpp,
		       int target, unsigned long long address, u32 key)
{
	const u32 muw = NFP_CPP_ID(target, 4, 0);	/* atomic_write */
	u16 interface = nfp_cpp_interface(cpp);
	int err;
	err = nfp_cpp_mutex_validate(interface, &target, address);
	if (err)
		return err;
	/* Layout: [address] = lock word, [address + 4] = key.
	 * Any non-zero return from the writes is treated as failure.
	 */
	err = nfp_cpp_writel(cpp, muw, address + 4, key);
	if (err)
		return err;
	err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_locked(interface));
	if (err)
		return err;
	return 0;
}
/**
 * nfp_cpp_mutex_alloc() - Create a mutex handle
 * @cpp:	NFP CPP handle
 * @target:	NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
 * @address:	Offset into the address space of the NFP CPP target ID
 * @key:	32-bit unique key (must match the key at this location)
 *
 * The CPP target:address must point to a 64-bit aligned location, and
 * reserve 64 bits of data at the location for use by the handle.
 *
 * Only target/address pairs that point to entities that support the
 * MU Atomic Engine's CmpAndSwap32 command are supported.
 *
 * Return: A non-NULL struct nfp_cpp_mutex * on success, NULL on failure.
 */
struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target,
					  unsigned long long address, u32 key)
{
	const u32 mur = NFP_CPP_ID(target, 3, 0);	/* atomic_read */
	u16 interface = nfp_cpp_interface(cpp);
	struct nfp_cpp_mutex *mutex;
	int err;
	u32 tmp;
	err = nfp_cpp_mutex_validate(interface, &target, address);
	if (err)
		return NULL;
	/* Look for mutex on cache list */
	list_for_each_entry(mutex, &cpp->mutex_cache, list) {
		if (mutex->target == target && mutex->address == address) {
			/* Already known: bump refcount and reuse */
			mutex->usage++;
			return mutex;
		}
	}
	/* Verify the key stored next to the lock word matches */
	err = nfp_cpp_readl(cpp, mur, address + 4, &tmp);
	if (err < 0)
		return NULL;
	if (tmp != key)
		return NULL;
	mutex = kzalloc(sizeof(*mutex), GFP_KERNEL);
	if (!mutex)
		return NULL;
	mutex->cpp = cpp;
	mutex->target = target;
	mutex->address = address;
	mutex->key = key;
	mutex->depth = 0;
	mutex->usage = 1;
	/* Add mutex to cache list */
	list_add(&mutex->list, &cpp->mutex_cache);
	return mutex;
}
/**
 * nfp_cpp_mutex_free() - Free a mutex handle - does not alter the lock state
 * @mutex:	NFP CPP Mutex handle
 */
void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex)
{
	/* Handle is refcounted via nfp_cpp_mutex_alloc(); only the
	 * last put actually frees it.
	 */
	if (--mutex->usage)
		return;
	/* Remove mutex from cache */
	list_del(&mutex->list);
	kfree(mutex);
}
/**
 * nfp_cpp_mutex_lock() - Lock a mutex handle, using the NFP MU Atomic Engine
 * @mutex:	NFP CPP Mutex handle
 *
 * Return: 0 on success, or -errno on failure
 */
int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
{
	unsigned long warn_at = jiffies + 15 * HZ;
	unsigned int timeout_ms = 1;
	int err;
	/* We can't use a waitqueue here, because the unlocker
	 * might be on a separate CPU.
	 *
	 * So just wait for now.
	 */
	for (;;) {
		err = nfp_cpp_mutex_trylock(mutex);
		/* Any result other than "busy" (success or hard error)
		 * ends the wait loop.
		 */
		if (err != -EBUSY)
			break;
		err = msleep_interruptible(timeout_ms);
		if (err != 0)
			return -ERESTARTSYS;
		/* Warn every 60s after an initial 15s grace period */
		if (time_is_before_eq_jiffies(warn_at)) {
			warn_at = jiffies + 60 * HZ;
			/* NOTE(review): %hd assumes usage/depth are
			 * 16-bit fields -- confirm against the
			 * struct nfp_cpp_mutex definition.
			 */
			dev_warn(mutex->cpp->dev.parent,
				 "Warning: waiting for NFP mutex [usage:%hd depth:%hd target:%d addr:%llx key:%08x]\n",
				 mutex->usage, mutex->depth,
				 mutex->target, mutex->address, mutex->key);
		}
	}
	return err;
}
/**
 * nfp_cpp_mutex_unlock() - Unlock a mutex handle, using the MU Atomic Engine
 * @mutex:	NFP CPP Mutex handle
 *
 * Return: 0 on success, or -errno on failure
 */
int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex)
{
	const u32 muw = NFP_CPP_ID(mutex->target, 4, 0);	/* atomic_write */
	const u32 mur = NFP_CPP_ID(mutex->target, 3, 0);	/* atomic_read */
	struct nfp_cpp *cpp = mutex->cpp;
	u32 key, value;
	u16 interface;
	int err;
	interface = nfp_cpp_interface(cpp);
	/* Recursive lock: just drop one level, keep the HW lock held */
	if (mutex->depth > 1) {
		mutex->depth--;
		return 0;
	}
	/* Verify the lock marker has not been damaged */
	err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key);
	if (err < 0)
		return err;
	if (key != mutex->key)
		return -EPERM;
	/* Only unlock if we are the current owner */
	err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value);
	if (err < 0)
		return err;
	if (value != nfp_mutex_locked(interface))
		return -EACCES;
	err = nfp_cpp_writel(cpp, muw, mutex->address,
			     nfp_mutex_unlocked(interface));
	if (err < 0)
		return err;
	mutex->depth = 0;
	return 0;
}
/**
 * nfp_cpp_mutex_trylock() - Attempt to lock a mutex handle
 * @mutex:	NFP CPP Mutex handle
 *
 * Return: 0 if the lock succeeded, -errno on failure
 */
int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex)
{
	const u32 muw = NFP_CPP_ID(mutex->target, 4, 0);	/* atomic_write */
	const u32 mus = NFP_CPP_ID(mutex->target, 5, 3);	/* test_set_imm */
	const u32 mur = NFP_CPP_ID(mutex->target, 3, 0);	/* atomic_read */
	struct nfp_cpp *cpp = mutex->cpp;
	u32 key, value, tmp;
	int err;
	/* Recursive lock: already held locally, just increase depth */
	if (mutex->depth > 0) {
		if (mutex->depth == MUTEX_DEPTH_MAX)
			return -E2BIG;
		mutex->depth++;
		return 0;
	}
	/* Verify that the lock marker is not damaged */
	err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key);
	if (err < 0)
		return err;
	if (key != mutex->key)
		return -EPERM;
	/* Compare against the unlocked state, and if true,
	 * write the interface id into the top 16 bits, and
	 * mark as locked.
	 */
	value = nfp_mutex_locked(nfp_cpp_interface(cpp));
	/* We use test_set_imm here, as it implies a read
	 * of the current state, and sets the bits in the
	 * bytemask of the command to 1s. Since the mutex
	 * is guaranteed to be 64-bit aligned, the bytemask
	 * of this 32-bit command is ensured to be 8'b00001111,
	 * which implies that the lower 4 bits will be set to
	 * ones regardless of the initial state.
	 *
	 * Since this is a 'Readback' operation, with no Pull
	 * data, we can treat this as a normal Push (read)
	 * atomic, which returns the original value.
	 */
	err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp);
	if (err < 0)
		return err;
	/* Was it unlocked? */
	if (nfp_mutex_is_unlocked(tmp)) {
		/* The read value can only be 0x....0000 in the unlocked state.
		 * If there was another contending for this lock, then
		 * the lock state would be 0x....000f
		 */
		/* Write our owner ID into the lock
		 * While not strictly necessary, this helps with
		 * debug and bookkeeping.
		 */
		err = nfp_cpp_writel(cpp, muw, mutex->address, value);
		if (err < 0)
			return err;
		mutex->depth = 1;
		return 0;
	}
	/* Already locked by us? Success! */
	if (tmp == value) {
		mutex->depth = 1;
		return 0;
	}
	/* Locked by someone else -> busy; anything else is corruption */
	return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL;
}
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
 * This software is dual licensed under the GNU General Public License
 * Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* nfp_cpplib.c
* Library of functions to access the NFP's CPP bus
* Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
* Jason McMullan <jason.mcmullan@netronome.com>
* Rolf Neugebauer <rolf.neugebauer@netronome.com>
*/
#include <asm/unaligned.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "nfp_cpp.h"
#include "nfp6000/nfp6000.h"
#include "nfp6000/nfp_xpb.h"
/* NFP6000 PL */
#define NFP_PL_DEVICE_ID 0x00000004
#define NFP_PL_DEVICE_ID_MASK GENMASK(7, 0)
#define NFP6000_ARM_GCSR_SOFTMODEL0 0x00400144
/**
 * nfp_cpp_readl() - Read a u32 word from a CPP location
 * @cpp:	CPP device handle
 * @cpp_id:	CPP ID for operation
 * @address:	Address for operation
 * @value:	Pointer to read buffer
 *
 * Return: 0 on success, or -ERRNO
 */
int nfp_cpp_readl(struct nfp_cpp *cpp, u32 cpp_id,
		  unsigned long long address, u32 *value)
{
	u8 tmp[4];
	int err;
	/* nfp_cpp_read() returns the number of bytes transferred, or
	 * -ERRNO.  Normalize to 0/-ERRNO so callers which test
	 * "if (err)" (e.g. nfp_cpp_mutex_init()) do not mistake a
	 * successful 4-byte transfer for a failure.  A short transfer
	 * is reported as -EIO.  Only assign @value on full success.
	 */
	err = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
	if (err != sizeof(tmp))
		return err < 0 ? err : -EIO;
	*value = get_unaligned_le32(tmp);
	return 0;
}
/**
 * nfp_cpp_writel() - Write a u32 word to a CPP location
 * @cpp:	CPP device handle
 * @cpp_id:	CPP ID for operation
 * @address:	Address for operation
 * @value:	Value to write
 *
 * Return: 0 on success, or -ERRNO
 */
int nfp_cpp_writel(struct nfp_cpp *cpp, u32 cpp_id,
		   unsigned long long address, u32 value)
{
	u8 tmp[4];
	int err;
	put_unaligned_le32(value, tmp);
	/* nfp_cpp_write() returns the number of bytes transferred, or
	 * -ERRNO.  Normalize to 0/-ERRNO so callers which test
	 * "if (err)" (e.g. nfp_cpp_mutex_init()) do not mistake a
	 * successful 4-byte transfer for a failure.  A short transfer
	 * is reported as -EIO.
	 */
	err = nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
	return err == sizeof(tmp) ? 0 : err < 0 ? err : -EIO;
}
/**
 * nfp_cpp_readq() - Read a u64 word from a CPP location
 * @cpp:	CPP device handle
 * @cpp_id:	CPP ID for operation
 * @address:	Address for operation
 * @value:	Pointer to read buffer
 *
 * Return: 0 on success, or -ERRNO
 */
int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id,
		  unsigned long long address, u64 *value)
{
	u8 tmp[8];
	int err;
	/* Normalize nfp_cpp_read()'s length-or-errno result to
	 * 0/-ERRNO, matching nfp_cpp_readl(); short transfer -> -EIO.
	 * Only assign @value on full success.
	 */
	err = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
	if (err != sizeof(tmp))
		return err < 0 ? err : -EIO;
	*value = get_unaligned_le64(tmp);
	return 0;
}
/**
 * nfp_cpp_writeq() - Write a u64 word to a CPP location
 * @cpp:	CPP device handle
 * @cpp_id:	CPP ID for operation
 * @address:	Address for operation
 * @value:	Value to write
 *
 * Return: 0 on success, or -ERRNO
 */
int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id,
		   unsigned long long address, u64 value)
{
	u8 tmp[8];
	int err;
	put_unaligned_le64(value, tmp);
	/* Normalize nfp_cpp_write()'s length-or-errno result to
	 * 0/-ERRNO, matching nfp_cpp_writel(); short transfer -> -EIO.
	 */
	err = nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
	return err == sizeof(tmp) ? 0 : err < 0 ? err : -EIO;
}
/* NOTE: This code should not use nfp_xpb_* functions,
 * as those are model-specific
 */
/* Detect the chip model: read the ARM SOFTMODEL0 register for the base
 * model number, then overwrite its revision byte with the PL device
 * revision.  Returns 0 on success, -ERRNO otherwise.
 */
int nfp_cpp_model_autodetect(struct nfp_cpp *cpp, u32 *model)
{
	const u32 arm_id = NFP_CPP_ID(NFP_CPP_TARGET_ARM, 0, 0);
	u32 reg;
	int err;
	err = nfp_cpp_readl(cpp, arm_id, NFP6000_ARM_GCSR_SOFTMODEL0, model);
	if (err < 0)
		return err;
	/* The PL's PluDeviceID revision code is authoritative */
	*model &= ~0xff;
	err = nfp_xpb_readl(cpp, NFP_XPB_DEVICE(1, 1, 16) + NFP_PL_DEVICE_ID,
			    &reg);
	if (err < 0)
		return err;
	/* Revision codes start at 0x10 in the PL register */
	*model |= (NFP_PL_DEVICE_ID_MASK & reg) - 0x10;
	return 0;
}
/* Build the CPP byte-enable mask for an access of @width bytes at
 * @addr within a 64-bit word; returns 0 for unsupported widths.
 */
static u8 nfp_bytemask(int width, u64 addr)
{
	switch (width) {
	case 8:
		return 0xff;
	case 4:
		return 0x0f << (addr & 4);
	case 2:
		return 0x03 << (addr & 6);
	case 1:
		return 0x01 << (addr & 7);
	default:
		return 0;
	}
}
/* Read @len bytes (a multiple of @width_read) from @cpp_id:@addr using
 * explicit transactions, in chunks of at most 128 bytes.
 * Returns @len on success, or -ERRNO.
 */
int nfp_cpp_explicit_read(struct nfp_cpp *cpp, u32 cpp_id,
			  u64 addr, void *buff, size_t len, int width_read)
{
	struct nfp_cpp_explicit *expl;
	char *tmp = buff;
	int err, i, incr;
	u8 byte_mask;
	/* Length must be a multiple of the access width */
	if (len & (width_read - 1))
		return -EINVAL;
	expl = nfp_cpp_explicit_acquire(cpp);
	if (!expl)
		return -EBUSY;
	/* Chunk size: up to 16 elements, capped at 128 bytes and @len */
	incr = min_t(int, 16 * width_read, 128);
	incr = min_t(int, incr, len);
	/* Translate a NFP_CPP_ACTION_RW to action 0 */
	if (NFP_CPP_ID_ACTION_of(cpp_id) == NFP_CPP_ACTION_RW)
		cpp_id = NFP_CPP_ID(NFP_CPP_ID_TARGET_of(cpp_id), 0,
				    NFP_CPP_ID_TOKEN_of(cpp_id));
	byte_mask = nfp_bytemask(width_read, addr);
	nfp_cpp_explicit_set_target(expl, cpp_id,
				    incr / width_read - 1, byte_mask);
	nfp_cpp_explicit_set_posted(expl, 1, 0, NFP_SIGNAL_PUSH,
				    0, NFP_SIGNAL_NONE);
	for (i = 0; i < len; i += incr, addr += incr, tmp += incr) {
		/* Shorter final chunk: reprogram length, full byte mask */
		if (i + incr > len) {
			incr = len - i;
			nfp_cpp_explicit_set_target(expl, cpp_id,
						    incr / width_read - 1,
						    0xff);
		}
		err = nfp_cpp_explicit_do(expl, addr);
		if (err < 0)
			goto exit_release;
		err = nfp_cpp_explicit_get(expl, tmp, incr);
		if (err < 0)
			goto exit_release;
	}
	err = len;
exit_release:
	nfp_cpp_explicit_release(expl);
	return err;
}
/* Write @len bytes (a multiple of @width_write) to @cpp_id:@addr using
 * explicit transactions, in chunks of at most 128 bytes.
 * Returns @len on success, or -ERRNO.
 */
int nfp_cpp_explicit_write(struct nfp_cpp *cpp, u32 cpp_id, u64 addr,
			   const void *buff, size_t len, int width_write)
{
	struct nfp_cpp_explicit *expl;
	const char *tmp = buff;
	int err, i, incr;
	u8 byte_mask;
	/* Length must be a multiple of the access width */
	if (len & (width_write - 1))
		return -EINVAL;
	expl = nfp_cpp_explicit_acquire(cpp);
	if (!expl)
		return -EBUSY;
	/* Chunk size: up to 16 elements, capped at 128 bytes and @len */
	incr = min_t(int, 16 * width_write, 128);
	incr = min_t(int, incr, len);
	/* Translate a NFP_CPP_ACTION_RW to action 1 */
	if (NFP_CPP_ID_ACTION_of(cpp_id) == NFP_CPP_ACTION_RW)
		cpp_id = NFP_CPP_ID(NFP_CPP_ID_TARGET_of(cpp_id), 1,
				    NFP_CPP_ID_TOKEN_of(cpp_id));
	byte_mask = nfp_bytemask(width_write, addr);
	nfp_cpp_explicit_set_target(expl, cpp_id,
				    incr / width_write - 1, byte_mask);
	nfp_cpp_explicit_set_posted(expl, 1, 0, NFP_SIGNAL_PULL,
				    0, NFP_SIGNAL_NONE);
	for (i = 0; i < len; i += incr, addr += incr, tmp += incr) {
		/* Shorter final chunk: reprogram length, full byte mask */
		if (i + incr > len) {
			incr = len - i;
			nfp_cpp_explicit_set_target(expl, cpp_id,
						    incr / width_write - 1,
						    0xff);
		}
		/* Stage the pull data, then issue the transaction */
		err = nfp_cpp_explicit_put(expl, tmp, incr);
		if (err < 0)
			goto exit_release;
		err = nfp_cpp_explicit_do(expl, addr);
		if (err < 0)
			goto exit_release;
	}
	err = len;
exit_release:
	nfp_cpp_explicit_release(expl);
	return err;
}
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
 * This software is dual licensed under the GNU General Public License
 * Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Parse the hwinfo table that the ARM firmware builds in the ARM scratch SRAM
* after chip reset.
*
* Examples of the fields:
* me.count = 40
* me.mask = 0x7f_ffff_ffff
*
* me.count is the total number of MEs on the system.
* me.mask is the bitmask of MEs that are available for application usage.
*
* (ie, in this example, ME 39 has been reserved by boardconfig.)
*/
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#define NFP_SUBSYS "nfp_hwinfo"
#include "crc32.h"
#include "nfp.h"
#include "nfp_cpp.h"
#include "nfp6000/nfp6000.h"
#define HWINFO_SIZE_MIN 0x100
#define HWINFO_WAIT 20 /* seconds */
/* The Hardware Info Table defines the properties of the system.
*
* HWInfo v1 Table (fixed size)
*
* 0x0000: u32 version Hardware Info Table version (1.0)
* 0x0004: u32 size Total size of the table, including
* the CRC32 (IEEE 802.3)
* 0x0008: u32 jumptab Offset of key/value table
* 0x000c: u32 keys Total number of keys in the key/value table
* NNNNNN: Key/value jump table and string data
* (size - 4): u32 crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc)
* CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE
*
* HWInfo v2 Table (variable size)
*
* 0x0000: u32 version Hardware Info Table version (2.0)
* 0x0004: u32 size Current size of the data area, excluding CRC32
* 0x0008: u32 limit Maximum size of the table
* 0x000c: u32 reserved Unused, set to zero
* NNNNNN: Key/value data
* (size - 4): u32 crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc)
* CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE
*
* If the HWInfo table is in the process of being updated, the low bit
* of version will be set.
*
* HWInfo v1 Key/Value Table
* -------------------------
*
* The key/value table is a set of offsets to ASCIIZ strings which have
* been strcmp(3) sorted (yes, please use bsearch(3) on the table).
*
* All keys are guaranteed to be unique.
*
* N+0: u32 key_1 Offset to the first key
* N+4: u32 val_1 Offset to the first value
* N+8: u32 key_2 Offset to the second key
* N+c: u32 val_2 Offset to the second value
* ...
*
* HWInfo v2 Key/Value Table
* -------------------------
*
* Packed UTF8Z strings, ie 'key1\000value1\000key2\000value2\000'
*
* Unsorted.
*/
#define NFP_HWINFO_VERSION_1 ('H' << 24 | 'I' << 16 | 1 << 8 | 0 << 1 | 0)
#define NFP_HWINFO_VERSION_2 ('H' << 24 | 'I' << 16 | 2 << 8 | 0 << 1 | 0)
#define NFP_HWINFO_VERSION_UPDATING BIT(0)
/* In-memory image of the HWInfo v2 table header (see layout comment
 * above); key/value string data follows in @data.
 */
struct nfp_hwinfo {
	u8 start[0];	/* byte-wise alias of the table, used for CRC */
	__le32 version;	/* NFP_HWINFO_VERSION_*, low bit = updating */
	__le32 size;	/* current data size, including trailing CRC32 */
	/* v2 specific fields */
	__le32 limit;	/* maximum size of the table */
	__le32 resv;	/* unused, set to zero */
	char data[];	/* packed 'key\0value\0' pairs */
};
/* True while the ARM firmware is rewriting the table (low version bit) */
static bool nfp_hwinfo_is_updating(struct nfp_hwinfo *hwinfo)
{
	return le32_to_cpu(hwinfo->version) & NFP_HWINFO_VERSION_UPDATING;
}
/* Walk the packed 'key\0value\0' pairs and verify that every key and
 * value string terminates within the first @size bytes of the data
 * area.  Returns 0 if well-formed, -EINVAL otherwise.
 */
static int
hwinfo_db_walk(struct nfp_cpp *cpp, struct nfp_hwinfo *hwinfo, u32 size)
{
	const char *key, *val, *end = hwinfo->data + size;
	/* Check the bound before dereferencing the key pointer; the
	 * original "*key && key < end" read one byte first.
	 */
	for (key = hwinfo->data; key < end && *key;
	     key = val + strlen(val) + 1) {
		val = key + strlen(key) + 1;
		if (val >= end) {
			nfp_warn(cpp, "Bad HWINFO - overflowing key\n");
			return -EINVAL;
		}
		if (val + strlen(val) + 1 > end) {
			nfp_warn(cpp, "Bad HWINFO - overflowing value\n");
			return -EINVAL;
		}
	}
	return 0;
}
/* Validate a fetched HWInfo image: size within the buffer, trailing
 * CRC32 correct, and key/value strings well-formed.
 * Returns 0 on success, -EINVAL otherwise.
 */
static int
hwinfo_db_validate(struct nfp_cpp *cpp, struct nfp_hwinfo *db, u32 len)
{
	u32 size, crc;
	size = le32_to_cpu(db->size);
	if (size > len) {
		nfp_err(cpp, "Unsupported hwinfo size %u > %u\n", size, len);
		return -EINVAL;
	}
	/* The CRC32 occupies the last 4 bytes of the table */
	size -= sizeof(u32);
	crc = crc32_posix(db, size);
	if (crc != get_unaligned_le32(db->start + size)) {
		nfp_err(cpp, "Corrupt hwinfo table (CRC mismatch), calculated 0x%x, expected 0x%x\n",
			crc, get_unaligned_le32(db->start + size));
		return -EINVAL;
	}
	return hwinfo_db_walk(cpp, db, size);
}
/* Single attempt to locate, read and cache the HWInfo table.
 * Location comes from the resource table, or a fixed fallback address
 * if the resource is absent.  Returns 0 on success (table cached via
 * nfp_hwinfo_cache_set()), -EBUSY if the table is mid-update, or
 * another -ERRNO on failure.
 */
static int hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size)
{
	struct nfp_hwinfo *header;
	struct nfp_resource *res;
	u64 cpp_addr;
	u32 cpp_id;
	int err;
	u8 *db;
	res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_HWINFO);
	if (!IS_ERR(res)) {
		cpp_id = nfp_resource_cpp_id(res);
		cpp_addr = nfp_resource_address(res);
		*cpp_size = nfp_resource_size(res);
		nfp_resource_release(res);
		if (*cpp_size < HWINFO_SIZE_MIN)
			return -ENOENT;
	} else if (PTR_ERR(res) == -ENOENT) {
		/* Try getting the HWInfo table from the 'classic' location */
		cpp_id = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU,
					   NFP_CPP_ACTION_RW, 0, 1);
		cpp_addr = 0x30000;
		*cpp_size = 0x0e000;
	} else {
		return PTR_ERR(res);
	}
	/* +1 byte for the NUL terminator appended below */
	db = kmalloc(*cpp_size + 1, GFP_KERNEL);
	if (!db)
		return -ENOMEM;
	/* nfp_cpp_read() returns number of bytes read, or -ERRNO */
	err = nfp_cpp_read(cpp, cpp_id, cpp_addr, db, *cpp_size);
	if (err != *cpp_size) {
		kfree(db);
		return err < 0 ? err : -EIO;
	}
	header = (void *)db;
	if (nfp_hwinfo_is_updating(header)) {
		kfree(db);
		return -EBUSY;
	}
	if (le32_to_cpu(header->version) != NFP_HWINFO_VERSION_2) {
		nfp_err(cpp, "Unknown HWInfo version: 0x%08x\n",
			le32_to_cpu(header->version));
		kfree(db);
		return -EINVAL;
	}
	/* NULL-terminate for safety */
	db[*cpp_size] = '\0';
	nfp_hwinfo_cache_set(cpp, db);
	return 0;
}
/* Retry hwinfo_try_fetch() (with 100ms sleeps) until it succeeds or
 * HWINFO_WAIT seconds have elapsed -- the ARM firmware may still be
 * building the table shortly after reset.
 * Returns 0 on success, -EIO on timeout or interruption.
 */
static int hwinfo_fetch(struct nfp_cpp *cpp, size_t *hwdb_size)
{
	const unsigned long wait_until = jiffies + HWINFO_WAIT * HZ;
	int err;
	for (;;) {
		const unsigned long start_time = jiffies;
		err = hwinfo_try_fetch(cpp, hwdb_size);
		if (!err)
			return 0;
		err = msleep_interruptible(100);
		/* Give up if interrupted or past the deadline */
		if (err || time_after(start_time, wait_until)) {
			nfp_err(cpp, "NFP access error\n");
			return -EIO;
		}
	}
}
/* Fetch the HWInfo table into the per-cpp cache and validate it;
 * drops the cache again on validation failure.
 * Returns 0 on success, -ERRNO otherwise.
 */
static int nfp_hwinfo_load(struct nfp_cpp *cpp)
{
	struct nfp_hwinfo *db;
	size_t hwdb_size = 0;
	int err;
	err = hwinfo_fetch(cpp, &hwdb_size);
	if (err)
		return err;
	/* hwinfo_try_fetch() stored the table in the cpp cache */
	db = nfp_hwinfo_cache(cpp);
	err = hwinfo_db_validate(cpp, db, hwdb_size);
	if (err) {
		kfree(db);
		nfp_hwinfo_cache_set(cpp, NULL);
		return err;
	}
	return 0;
}
/**
 * nfp_hwinfo_lookup() - Find a value in the HWInfo table by name
 * @cpp:	NFP CPP handle
 * @lookup:	HWInfo name to search for
 *
 * Loads and caches the HWInfo table on first use, then scans the
 * packed 'key\0value\0' pairs for @lookup.
 *
 * Return: Value of the HWInfo name, or NULL
 */
const char *nfp_hwinfo_lookup(struct nfp_cpp *cpp, const char *lookup)
{
	const char *key, *val, *end;
	struct nfp_hwinfo *hwinfo;
	int err;
	hwinfo = nfp_hwinfo_cache(cpp);
	if (!hwinfo) {
		err = nfp_hwinfo_load(cpp);
		if (err)
			return NULL;
		hwinfo = nfp_hwinfo_cache(cpp);
	}
	if (!hwinfo || !lookup)
		return NULL;
	/* Data area ends before the trailing CRC32 word */
	end = hwinfo->data + le32_to_cpu(hwinfo->size) - sizeof(u32);
	/* Check the bound before dereferencing the key pointer; the
	 * original "*key && key < end" read one byte first.
	 */
	for (key = hwinfo->data; key < end && *key;
	     key = val + strlen(val) + 1) {
		val = key + strlen(key) + 1;
		if (strcmp(key, lookup) == 0)
			return val;
	}
	return NULL;
}
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
 * This software is dual licensed under the GNU General Public License
 * Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* nfp_mip.c
* Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
* Jason McMullan <jason.mcmullan@netronome.com>
* Espen Skoglund <espen.skoglund@netronome.com>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include "nfp.h"
#include "nfp_cpp.h"
#include "nfp_nffw.h"
#define NFP_MIP_SIGNATURE cpu_to_le32(0x0050494d) /* "MIP\0" */
#define NFP_MIP_VERSION cpu_to_le32(1)
#define NFP_MIP_MAX_OFFSET (256 * 1024)
struct nfp_mip {
__le32 signature;
__le32 mip_version;
__le32 mip_size;
__le32 first_entry;
__le32 version;
__le32 buildnum;
__le32 buildtime;
__le32 loadtime;
__le32 symtab_addr;
__le32 symtab_size;
__le32 strtab_addr;
__le32 strtab_size;
char name[16];
char toolchain[32];
};
/* Read memory and check if it could be a valid MIP */
static int
nfp_mip_try_read(struct nfp_cpp *cpp, u32 cpp_id, u64 addr, struct nfp_mip *mip)
{
int ret;
ret = nfp_cpp_read(cpp, cpp_id, addr, mip, sizeof(*mip));
if (ret != sizeof(*mip)) {
nfp_err(cpp, "Failed to read MIP data (%d, %zu)\n",
ret, sizeof(*mip));
return -EIO;
}
if (mip->signature != NFP_MIP_SIGNATURE) {
nfp_warn(cpp, "Incorrect MIP signature (0x%08x)\n",
le32_to_cpu(mip->signature));
return -EINVAL;
}
if (mip->mip_version != NFP_MIP_VERSION) {
nfp_warn(cpp, "Unsupported MIP version (%d)\n",
le32_to_cpu(mip->mip_version));
return -EINVAL;
}
return 0;
}
/* Try to locate MIP using the resource table */
static int nfp_mip_read_resource(struct nfp_cpp *cpp, struct nfp_mip *mip)
{
struct nfp_nffw_info *nffw_info;
u32 cpp_id;
u64 addr;
int err;
nffw_info = nfp_nffw_info_open(cpp);
if (IS_ERR(nffw_info))
return PTR_ERR(nffw_info);
err = nfp_nffw_info_mip_first(nffw_info, &cpp_id, &addr);
if (err)
goto exit_close_nffw;
err = nfp_mip_try_read(cpp, cpp_id, addr, mip);
exit_close_nffw:
nfp_nffw_info_close(nffw_info);
return err;
}
/**
* nfp_mip_open() - Get device MIP structure
* @cpp: NFP CPP Handle
*
* Copy MIP structure from NFP device and return it. The returned
* structure is handled internally by the library and should be
* freed by calling nfp_mip_close().
*
* Return: pointer to mip, NULL on failure.
*/
const struct nfp_mip *nfp_mip_open(struct nfp_cpp *cpp)
{
struct nfp_mip *mip;
int err;
mip = kmalloc(sizeof(*mip), GFP_KERNEL);
if (!mip)
return NULL;
err = nfp_mip_read_resource(cpp, mip);
if (err) {
kfree(mip);
return NULL;
}
return mip;
}
void nfp_mip_close(const struct nfp_mip *mip)
{
kfree(mip);
}
/**
* nfp_mip_symtab() - Get the address and size of the MIP symbol table
* @mip: MIP handle
* @addr: Location for NFP DDR address of MIP symbol table
* @size: Location for size of MIP symbol table
*/
void nfp_mip_symtab(const struct nfp_mip *mip, u32 *addr, u32 *size)
{
*addr = le32_to_cpu(mip->symtab_addr);
*size = le32_to_cpu(mip->symtab_size);
}
/**
* nfp_mip_strtab() - Get the address and size of the MIP symbol name table
* @mip: MIP handle
* @addr: Location for NFP DDR address of MIP symbol name table
* @size: Location for size of MIP symbol name table
*/
void nfp_mip_strtab(const struct nfp_mip *mip, u32 *addr, u32 *size)
{
*addr = le32_to_cpu(mip->strtab_addr);
*size = le32_to_cpu(mip->strtab_size);
}
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
 * This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* nfp_nffw.c
* Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
* Jason McMullan <jason.mcmullan@netronome.com>
* Francois H. Theron <francois.theron@netronome.com>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include "nfp.h"
#include "nfp_cpp.h"
#include "nfp_nffw.h"
#include "nfp6000/nfp6000.h"
/* Init-CSR owner IDs for firmware map to firmware IDs which start at 4.
* Lower IDs are reserved for target and loader IDs.
*/
#define NFFW_FWID_EXT 3 /* For active MEs that we didn't load. */
#define NFFW_FWID_BASE 4
#define NFFW_FWID_ALL 255
/**
* NFFW_INFO_VERSION history:
* 0: This was never actually used (before versioning), but it refers to
* the previous struct which had FWINFO_CNT = MEINFO_CNT = 120 that later
* changed to 200.
* 1: First versioned struct, with
* FWINFO_CNT = 120
* MEINFO_CNT = 120
* 2: FWINFO_CNT = 200
* MEINFO_CNT = 200
*/
#define NFFW_INFO_VERSION_CURRENT 2
/* Enough for all current chip families */
#define NFFW_MEINFO_CNT_V1 120
#define NFFW_FWINFO_CNT_V1 120
#define NFFW_MEINFO_CNT_V2 200
#define NFFW_FWINFO_CNT_V2 200
/* Work in 32-bit words to make cross-platform endianness easier to handle */
/** nfp.nffw meinfo **/
struct nffw_meinfo {
__le32 ctxmask__fwid__meid;
};
struct nffw_fwinfo {
__le32 loaded__mu_da__mip_off_hi;
__le32 mip_cppid; /* 0 means no MIP */
__le32 mip_offset_lo;
};
struct nfp_nffw_info_v1 {
struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V1];
struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V1];
};
struct nfp_nffw_info_v2 {
struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V2];
struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V2];
};
/** Resource: nfp.nffw main **/
struct nfp_nffw_info_data {
__le32 flags[2];
union {
struct nfp_nffw_info_v1 v1;
struct nfp_nffw_info_v2 v2;
} info;
};
struct nfp_nffw_info {
struct nfp_cpp *cpp;
struct nfp_resource *res;
struct nfp_nffw_info_data fwinf;
};
/* flg_info_version = flags[0]<27:16>
* This is a small version counter intended only to detect if the current
* implementation can read the current struct. Struct changes should be very
* rare and as such a 12-bit counter should cover large spans of time. By the
* time it wraps around, we don't expect to have 4096 versions of this struct
* to be in use at the same time.
*/
static u32 nffw_res_info_version_get(const struct nfp_nffw_info_data *res)
{
return (le32_to_cpu(res->flags[0]) >> 16) & 0xfff;
}
/* flg_init = flags[0]<0> */
static u32 nffw_res_flg_init_get(const struct nfp_nffw_info_data *res)
{
return (le32_to_cpu(res->flags[0]) >> 0) & 1;
}
/* loaded = loaded__mu_da__mip_off_hi<31:31> */
static u32 nffw_fwinfo_loaded_get(const struct nffw_fwinfo *fi)
{
return (le32_to_cpu(fi->loaded__mu_da__mip_off_hi) >> 31) & 1;
}
/* mip_cppid = mip_cppid */
static u32 nffw_fwinfo_mip_cppid_get(const struct nffw_fwinfo *fi)
{
return le32_to_cpu(fi->mip_cppid);
}
/* loaded = loaded__mu_da__mip_off_hi<8:8> */
static u32 nffw_fwinfo_mip_mu_da_get(const struct nffw_fwinfo *fi)
{
return (le32_to_cpu(fi->loaded__mu_da__mip_off_hi) >> 8) & 1;
}
/* mip_offset = (loaded__mu_da__mip_off_hi<7:0> << 8) | mip_offset_lo */
static u64 nffw_fwinfo_mip_offset_get(const struct nffw_fwinfo *fi)
{
u64 mip_off_hi = le32_to_cpu(fi->loaded__mu_da__mip_off_hi);
return (mip_off_hi & 0xFF) << 32 | le32_to_cpu(fi->mip_offset_lo);
}
#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x) (((_x) >> 13) & 0x7)
#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE BIT(12)
#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_32_BIT 0
#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_40_BIT BIT(12)
static int nfp_mip_mu_locality_lsb(struct nfp_cpp *cpp)
{
unsigned int mode, addr40;
u32 xpbaddr, imbcppat;
int err;
/* Hardcoded XPB IMB Base, island 0 */
xpbaddr = 0x000a0000 + NFP_CPP_TARGET_MU * 4;
err = nfp_xpb_readl(cpp, xpbaddr, &imbcppat);
if (err < 0)
return err;
mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat);
addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE);
return nfp_cppat_mu_locality_lsb(mode, addr40);
}
static unsigned int
nffw_res_fwinfos(struct nfp_nffw_info_data *fwinf, struct nffw_fwinfo **arr)
{
/* For the this code, version 0 is most likely to be
* version 1 in this case. Since the kernel driver
* does not take responsibility for initialising the
* nfp.nffw resource, any previous code (CA firmware or
* userspace) that left the version 0 and did set
* the init flag is going to be version 1.
*/
switch (nffw_res_info_version_get(fwinf)) {
case 0:
case 1:
*arr = &fwinf->info.v1.fwinfo[0];
return NFFW_FWINFO_CNT_V1;
case 2:
*arr = &fwinf->info.v2.fwinfo[0];
return NFFW_FWINFO_CNT_V2;
default:
*arr = NULL;
return 0;
}
}
/**
* nfp_nffw_info_open() - Acquire the lock on the NFFW table
* @cpp: NFP CPP handle
*
* Return: 0, or -ERRNO
*/
struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp)
{
struct nfp_nffw_info_data *fwinf;
struct nfp_nffw_info *state;
u32 info_ver;
int err;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return ERR_PTR(-ENOMEM);
state->res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_NFFW);
if (IS_ERR(state->res))
goto err_free;
fwinf = &state->fwinf;
if (sizeof(*fwinf) > nfp_resource_size(state->res))
goto err_release;
err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res),
nfp_resource_address(state->res),
fwinf, sizeof(*fwinf));
if (err < sizeof(*fwinf))
goto err_release;
if (!nffw_res_flg_init_get(fwinf))
goto err_release;
info_ver = nffw_res_info_version_get(fwinf);
if (info_ver > NFFW_INFO_VERSION_CURRENT)
goto err_release;
state->cpp = cpp;
return state;
err_release:
nfp_resource_release(state->res);
err_free:
kfree(state);
return ERR_PTR(-EIO);
}
/**
* nfp_nffw_info_release() - Release the lock on the NFFW table
* @state: NFP FW info state
*
* Return: 0, or -ERRNO
*/
void nfp_nffw_info_close(struct nfp_nffw_info *state)
{
nfp_resource_release(state->res);
kfree(state);
}
/**
* nfp_nffw_info_fwid_first() - Return the first firmware ID in the NFFW
* @state: NFP FW info state
*
* Return: First NFFW firmware info, NULL on failure
*/
static struct nffw_fwinfo *nfp_nffw_info_fwid_first(struct nfp_nffw_info *state)
{
struct nffw_fwinfo *fwinfo;
unsigned int cnt, i;
cnt = nffw_res_fwinfos(&state->fwinf, &fwinfo);
if (!cnt)
return NULL;
for (i = 0; i < cnt; i++)
if (nffw_fwinfo_loaded_get(&fwinfo[i]))
return &fwinfo[i];
return NULL;
}
/**
* nfp_nffw_info_mip_first() - Retrieve the location of the first FW's MIP
* @state: NFP FW info state
* @cpp_id: Pointer to the CPP ID of the MIP
* @off: Pointer to the CPP Address of the MIP
*
* Return: 0, or -ERRNO
*/
int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, u32 *cpp_id, u64 *off)
{
struct nffw_fwinfo *fwinfo;
fwinfo = nfp_nffw_info_fwid_first(state);
if (!fwinfo)
return -EINVAL;
*cpp_id = nffw_fwinfo_mip_cppid_get(fwinfo);
*off = nffw_fwinfo_mip_offset_get(fwinfo);
if (nffw_fwinfo_mip_mu_da_get(fwinfo)) {
int locality_off;
if (NFP_CPP_ID_TARGET_of(*cpp_id) != NFP_CPP_TARGET_MU)
return 0;
locality_off = nfp_mip_mu_locality_lsb(state->cpp);
if (locality_off < 0)
return locality_off;
*off &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off);
*off |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off;
}
return 0;
}
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
 * This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* nfp_nffw.h
* Authors: Jason McMullan <jason.mcmullan@netronome.com>
* Francois H. Theron <francois.theron@netronome.com>
*/
#ifndef NFP_NFFW_H
#define NFP_NFFW_H
/* Implemented in nfp_nffw.c */
struct nfp_nffw_info;
struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp);
void nfp_nffw_info_close(struct nfp_nffw_info *state);
int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, u32 *cpp_id, u64 *off);
/* Implemented in nfp_mip.c */
struct nfp_mip;
const struct nfp_mip *nfp_mip_open(struct nfp_cpp *cpp);
void nfp_mip_close(const struct nfp_mip *mip);
void nfp_mip_symtab(const struct nfp_mip *mip, u32 *addr, u32 *size);
void nfp_mip_strtab(const struct nfp_mip *mip, u32 *addr, u32 *size);
/* Implemented in nfp_rtsym.c */
#define NFP_RTSYM_TYPE_NONE 0
#define NFP_RTSYM_TYPE_OBJECT 1
#define NFP_RTSYM_TYPE_FUNCTION 2
#define NFP_RTSYM_TYPE_ABS 3
#define NFP_RTSYM_TARGET_NONE 0
#define NFP_RTSYM_TARGET_LMEM -1
#define NFP_RTSYM_TARGET_EMU_CACHE -7
/**
* struct nfp_rtsym - RTSYM descriptor
* @name: Symbol name
* @addr: Address in the domain/target's address space
* @size: Size (in bytes) of the symbol
* @type: NFP_RTSYM_TYPE_* of the symbol
* @target: CPP Target identifier, or NFP_RTSYM_TARGET_*
* @domain: CPP Target Domain (island)
*/
struct nfp_rtsym {
const char *name;
u64 addr;
u64 size;
int type;
int target;
int domain;
};
int nfp_rtsym_count(struct nfp_cpp *cpp);
const struct nfp_rtsym *nfp_rtsym_get(struct nfp_cpp *cpp, int idx);
const struct nfp_rtsym *nfp_rtsym_lookup(struct nfp_cpp *cpp, const char *name);
u64 nfp_rtsym_read_le(struct nfp_cpp *cpp, const char *name, int *error);
#endif /* NFP_NFFW_H */
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
 * This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* nfp_nsp.c
* Author: Jakub Kicinski <jakub.kicinski@netronome.com>
* Jason McMullan <jason.mcmullan@netronome.com>
*/
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#define NFP_SUBSYS "nfp_nsp"
#include "nfp.h"
#include "nfp_cpp.h"
/* Offsets relative to the CSR base */
#define NSP_STATUS 0x00
#define NSP_STATUS_MAGIC GENMASK_ULL(63, 48)
#define NSP_STATUS_MAJOR GENMASK_ULL(47, 44)
#define NSP_STATUS_MINOR GENMASK_ULL(43, 32)
#define NSP_STATUS_CODE GENMASK_ULL(31, 16)
#define NSP_STATUS_RESULT GENMASK_ULL(15, 8)
#define NSP_STATUS_BUSY BIT_ULL(0)
#define NSP_COMMAND 0x08
#define NSP_COMMAND_OPTION GENMASK_ULL(63, 32)
#define NSP_COMMAND_CODE GENMASK_ULL(31, 16)
#define NSP_COMMAND_START BIT_ULL(0)
/* CPP address to retrieve the data from */
#define NSP_BUFFER 0x10
#define NSP_BUFFER_CPP GENMASK_ULL(63, 40)
#define NSP_BUFFER_PCIE GENMASK_ULL(39, 38)
#define NSP_BUFFER_ADDRESS GENMASK_ULL(37, 0)
#define NSP_DFLT_BUFFER 0x18
#define NSP_DFLT_BUFFER_CONFIG 0x20
#define NSP_DFLT_BUFFER_SIZE_MB GENMASK_ULL(7, 0)
#define NSP_MAGIC 0xab10
#define NSP_MAJOR 0
#define NSP_MINOR (__MAX_SPCODE - 1)
#define NSP_CODE_MAJOR GENMASK(15, 12)
#define NSP_CODE_MINOR GENMASK(11, 0)
enum nfp_nsp_cmd {
SPCODE_NOOP = 0, /* No operation */
SPCODE_SOFT_RESET = 1, /* Soft reset the NFP */
SPCODE_FW_DEFAULT = 2, /* Load default (UNDI) FW */
SPCODE_PHY_INIT = 3, /* Initialize the PHY */
SPCODE_MAC_INIT = 4, /* Initialize the MAC */
SPCODE_PHY_RXADAPT = 5, /* Re-run PHY RX Adaptation */
SPCODE_FW_LOAD = 6, /* Load fw from buffer, len in option */
SPCODE_ETH_RESCAN = 7, /* Rescan ETHs, write ETH_TABLE to buf */
SPCODE_ETH_CONTROL = 8, /* Update media config from buffer */
__MAX_SPCODE,
};
struct nfp_nsp {
struct nfp_cpp *cpp;
struct nfp_resource *res;
};
static int nfp_nsp_check(struct nfp_nsp *state)
{
struct nfp_cpp *cpp = state->cpp;
u64 nsp_status, reg;
u32 nsp_cpp;
int err;
nsp_cpp = nfp_resource_cpp_id(state->res);
nsp_status = nfp_resource_address(state->res) + NSP_STATUS;
err = nfp_cpp_readq(cpp, nsp_cpp, nsp_status, &reg);
if (err < 0)
return err;
if (FIELD_GET(NSP_STATUS_MAGIC, reg) != NSP_MAGIC) {
nfp_err(cpp, "Cannot detect NFP Service Processor\n");
return -ENODEV;
}
if (FIELD_GET(NSP_STATUS_MAJOR, reg) != NSP_MAJOR ||
FIELD_GET(NSP_STATUS_MINOR, reg) < NSP_MINOR) {
nfp_err(cpp, "Unsupported ABI %lld.%lld\n",
FIELD_GET(NSP_STATUS_MAJOR, reg),
FIELD_GET(NSP_STATUS_MINOR, reg));
return -EINVAL;
}
if (reg & NSP_STATUS_BUSY) {
nfp_err(cpp, "Service processor busy!\n");
return -EBUSY;
}
return 0;
}
/**
* nfp_nsp_open() - Prepare for communication and lock the NSP resource.
* @cpp: NFP CPP Handle
*/
struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp)
{
struct nfp_resource *res;
struct nfp_nsp *state;
int err;
res = nfp_resource_acquire(cpp, NFP_RESOURCE_NSP);
if (IS_ERR(res))
return (void *)res;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state) {
nfp_resource_release(res);
return ERR_PTR(-ENOMEM);
}
state->cpp = cpp;
state->res = res;
err = nfp_nsp_check(state);
if (err) {
nfp_nsp_close(state);
return ERR_PTR(err);
}
return state;
}
/**
* nfp_nsp_close() - Clean up and unlock the NSP resource.
* @state: NFP SP state
*/
void nfp_nsp_close(struct nfp_nsp *state)
{
nfp_resource_release(state->res);
kfree(state);
}
static int
nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
u32 nsp_cpp, u64 addr, u64 mask, u64 val)
{
const unsigned long wait_until = jiffies + 30 * HZ;
int err;
for (;;) {
const unsigned long start_time = jiffies;
err = nfp_cpp_readq(cpp, nsp_cpp, addr, reg);
if (err < 0)
return err;
if ((*reg & mask) == val)
return 0;
err = msleep_interruptible(100);
if (err)
return err;
if (time_after(start_time, wait_until))
return -ETIMEDOUT;
}
}
/**
* nfp_nsp_command() - Execute a command on the NFP Service Processor
* @state: NFP SP state
* @code: NFP SP Command Code
* @option: NFP SP Command Argument
* @buff_cpp: NFP SP Buffer CPP Address info
* @buff_addr: NFP SP Buffer Host address
*
* Return: 0 for success with no result
*
* 1..255 for NSP completion with a result code
*
* -EAGAIN if the NSP is not yet present
* -ENODEV if the NSP is not a supported model
* -EBUSY if the NSP is stuck
* -EINTR if interrupted while waiting for completion
* -ETIMEDOUT if the NSP took longer than 30 seconds to complete
*/
static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
u32 buff_cpp, u64 buff_addr)
{
u64 reg, nsp_base, nsp_buffer, nsp_status, nsp_command;
struct nfp_cpp *cpp = state->cpp;
u32 nsp_cpp;
int err;
nsp_cpp = nfp_resource_cpp_id(state->res);
nsp_base = nfp_resource_address(state->res);
nsp_status = nsp_base + NSP_STATUS;
nsp_command = nsp_base + NSP_COMMAND;
nsp_buffer = nsp_base + NSP_BUFFER;
err = nfp_nsp_check(state);
if (err)
return err;
if (!FIELD_FIT(NSP_BUFFER_CPP, buff_cpp >> 8) ||
!FIELD_FIT(NSP_BUFFER_ADDRESS, buff_addr)) {
nfp_err(cpp, "Host buffer out of reach %08x %016llx\n",
buff_cpp, buff_addr);
return -EINVAL;
}
err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_buffer,
FIELD_PREP(NSP_BUFFER_CPP, buff_cpp >> 8) |
FIELD_PREP(NSP_BUFFER_ADDRESS, buff_addr));
if (err < 0)
return err;
err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command,
FIELD_PREP(NSP_COMMAND_OPTION, option) |
FIELD_PREP(NSP_COMMAND_CODE, code) |
FIELD_PREP(NSP_COMMAND_START, 1));
if (err < 0)
return err;
/* Wait for NSP_COMMAND_START to go to 0 */
err = nfp_nsp_wait_reg(cpp, &reg,
nsp_cpp, nsp_command, NSP_COMMAND_START, 0);
if (err) {
nfp_err(cpp, "Error %d waiting for code 0x%04x to start\n",
err, code);
return err;
}
/* Wait for NSP_STATUS_BUSY to go to 0 */
err = nfp_nsp_wait_reg(cpp, &reg,
nsp_cpp, nsp_status, NSP_STATUS_BUSY, 0);
if (err) {
nfp_err(cpp, "Error %d waiting for code 0x%04x to complete\n",
err, code);
return err;
}
err = FIELD_GET(NSP_STATUS_RESULT, reg);
if (err) {
nfp_warn(cpp, "Result (error) code set: %d command: %d\n",
-err, code);
return -err;
}
err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &reg);
if (err < 0)
return err;
return FIELD_GET(NSP_COMMAND_OPTION, reg);
}
static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
const void *in_buf, unsigned int in_size,
void *out_buf, unsigned int out_size)
{
struct nfp_cpp *cpp = nsp->cpp;
unsigned int max_size;
u64 reg, cpp_buf;
int ret, err;
u32 cpp_id;
err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res),
nfp_resource_address(nsp->res) + NSP_STATUS, &reg);
if (err < 0)
return err;
if (FIELD_GET(NSP_STATUS_MINOR, reg) < 13) {
nfp_err(cpp, "NSP: Code 0x%04x with buffer not supported (ABI %lld.%lld)\n",
code, FIELD_GET(NSP_STATUS_MAJOR, reg),
FIELD_GET(NSP_STATUS_MINOR, reg));
return -EOPNOTSUPP;
}
err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res),
nfp_resource_address(nsp->res) +
NSP_DFLT_BUFFER_CONFIG,
&reg);
if (err < 0)
return err;
max_size = max(in_size, out_size);
if (FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M < max_size) {
nfp_err(cpp, "NSP: default buffer too small for command 0x%04x (%llu < %u)\n",
code, FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M,
max_size);
return -EINVAL;
}
err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res),
nfp_resource_address(nsp->res) +
NSP_DFLT_BUFFER,
&reg);
if (err < 0)
return err;
cpp_id = FIELD_GET(NSP_BUFFER_CPP, reg) << 8;
cpp_buf = FIELD_GET(NSP_BUFFER_ADDRESS, reg);
if (in_buf && in_size) {
err = nfp_cpp_write(cpp, cpp_id, cpp_buf, in_buf, in_size);
if (err < 0)
return err;
}
ret = nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf);
if (ret < 0)
return ret;
if (out_buf && out_size) {
err = nfp_cpp_read(cpp, cpp_id, cpp_buf, out_buf, out_size);
if (err < 0)
return err;
}
return ret;
}
int nfp_nsp_wait(struct nfp_nsp *state)
{
const unsigned long wait_until = jiffies + 30 * HZ;
int err;
nfp_dbg(state->cpp, "Waiting for NSP to respond (30 sec max).\n");
for (;;) {
const unsigned long start_time = jiffies;
err = nfp_nsp_command(state, SPCODE_NOOP, 0, 0, 0);
if (err != -EAGAIN)
break;
err = msleep_interruptible(100);
if (err)
break;
if (time_after(start_time, wait_until)) {
err = -ETIMEDOUT;
break;
}
}
if (err)
nfp_err(state->cpp, "NSP failed to respond %d\n", err);
return err;
}
int nfp_nsp_device_soft_reset(struct nfp_nsp *state)
{
int err;
err = nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0);
nfp_nffw_cache_flush(state->cpp);
return err;
}
int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw)
{
return nfp_nsp_command_buf(state, SPCODE_FW_LOAD, fw->size, fw->data,
fw->size, NULL, 0);
}
int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size)
{
return nfp_nsp_command_buf(state, SPCODE_ETH_RESCAN, size, NULL, 0,
buf, size);
}
int nfp_nsp_write_eth_table(struct nfp_nsp *state,
const void *buf, unsigned int size)
{
return nfp_nsp_command_buf(state, SPCODE_ETH_CONTROL, size, buf, size,
NULL, 0);
}
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
 * This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Authors: David Brunecz <david.brunecz@netronome.com>
* Jakub Kicinski <jakub.kicinski@netronome.com>
* Jason Mcmullan <jason.mcmullan@netronome.com>
*/
#include <linux/bitfield.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include "nfp.h"
#include "nfp_nsp_eth.h"
#include "nfp6000/nfp6000.h"
#define NSP_ETH_NBI_PORT_COUNT 24
#define NSP_ETH_MAX_COUNT (2 * NSP_ETH_NBI_PORT_COUNT)
#define NSP_ETH_TABLE_SIZE (NSP_ETH_MAX_COUNT * \
sizeof(struct eth_table_entry))
#define NSP_ETH_PORT_LANES GENMASK_ULL(3, 0)
#define NSP_ETH_PORT_INDEX GENMASK_ULL(15, 8)
#define NSP_ETH_PORT_LABEL GENMASK_ULL(53, 48)
#define NSP_ETH_PORT_PHYLABEL GENMASK_ULL(59, 54)
#define NSP_ETH_PORT_LANES_MASK cpu_to_le64(NSP_ETH_PORT_LANES)
#define NSP_ETH_STATE_ENABLED BIT_ULL(1)
#define NSP_ETH_STATE_TX_ENABLED BIT_ULL(2)
#define NSP_ETH_STATE_RX_ENABLED BIT_ULL(3)
#define NSP_ETH_STATE_RATE GENMASK_ULL(11, 8)
#define NSP_ETH_CTRL_ENABLED BIT_ULL(1)
#define NSP_ETH_CTRL_TX_ENABLED BIT_ULL(2)
#define NSP_ETH_CTRL_RX_ENABLED BIT_ULL(3)
enum nfp_eth_rate {
RATE_INVALID = 0,
RATE_10M,
RATE_100M,
RATE_1G,
RATE_10G,
RATE_25G,
};
struct eth_table_entry {
__le64 port;
__le64 state;
u8 mac_addr[6];
u8 resv[2];
__le64 control;
};
static unsigned int nfp_eth_rate(enum nfp_eth_rate rate)
{
unsigned int rate_xlate[] = {
[RATE_INVALID] = 0,
[RATE_10M] = SPEED_10,
[RATE_100M] = SPEED_100,
[RATE_1G] = SPEED_1000,
[RATE_10G] = SPEED_10000,
[RATE_25G] = SPEED_25000,
};
if (rate >= ARRAY_SIZE(rate_xlate))
return 0;
return rate_xlate[rate];
}
static void nfp_eth_copy_mac_reverse(u8 *dst, const u8 *src)
{
int i;
for (i = 0; i < ETH_ALEN; i++)
dst[ETH_ALEN - i - 1] = src[i];
}
static void
nfp_eth_port_translate(const struct eth_table_entry *src, unsigned int index,
struct nfp_eth_table_port *dst)
{
unsigned int rate;
u64 port, state;
port = le64_to_cpu(src->port);
state = le64_to_cpu(src->state);
dst->eth_index = FIELD_GET(NSP_ETH_PORT_INDEX, port);
dst->index = index;
dst->nbi = index / NSP_ETH_NBI_PORT_COUNT;
dst->base = index % NSP_ETH_NBI_PORT_COUNT;
dst->lanes = FIELD_GET(NSP_ETH_PORT_LANES, port);
dst->enabled = FIELD_GET(NSP_ETH_STATE_ENABLED, state);
dst->tx_enabled = FIELD_GET(NSP_ETH_STATE_TX_ENABLED, state);
dst->rx_enabled = FIELD_GET(NSP_ETH_STATE_RX_ENABLED, state);
rate = nfp_eth_rate(FIELD_GET(NSP_ETH_STATE_RATE, state));
dst->speed = dst->lanes * rate;
nfp_eth_copy_mac_reverse(dst->mac_addr, src->mac_addr);
snprintf(dst->label, sizeof(dst->label) - 1, "%llu.%llu",
FIELD_GET(NSP_ETH_PORT_PHYLABEL, port),
FIELD_GET(NSP_ETH_PORT_LABEL, port));
}
/**
* nfp_eth_read_ports() - retrieve port information
* @cpp: NFP CPP handle
*
* Read the port information from the device. Returned structure should
* be freed with kfree() once no longer needed.
*
* Return: populated ETH table or NULL on error.
*/
struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp)
{
struct nfp_eth_table *ret;
struct nfp_nsp *nsp;
nsp = nfp_nsp_open(cpp);
if (IS_ERR(nsp))
return NULL;
ret = __nfp_eth_read_ports(cpp, nsp);
nfp_nsp_close(nsp);
return ret;
}
struct nfp_eth_table *
__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp)
{
struct eth_table_entry *entries;
struct nfp_eth_table *table;
unsigned int cnt;
int i, j, ret;
entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL);
if (!entries)
return NULL;
ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
if (ret < 0) {
nfp_err(cpp, "reading port table failed %d\n", ret);
kfree(entries);
return NULL;
}
/* Some versions of flash will give us 0 instead of port count */
cnt = ret;
if (!cnt) {
for (i = 0; i < NSP_ETH_MAX_COUNT; i++)
if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
cnt++;
}
table = kzalloc(sizeof(*table) +
sizeof(struct nfp_eth_table_port) * cnt, GFP_KERNEL);
if (!table) {
kfree(entries);
return NULL;
}
table->count = cnt;
for (i = 0, j = 0; i < NSP_ETH_MAX_COUNT; i++)
if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
nfp_eth_port_translate(&entries[i], i,
&table->ports[j++]);
kfree(entries);
return table;
}
/**
* nfp_eth_set_mod_enable() - set PHY module enable control bit
* @cpp: NFP CPP handle
* @idx: NFP chip-wide port index
* @enable: Desired state
*
* Enable or disable PHY module (this usually means setting the TX lanes
* disable bits).
*
* Return: 0 or -ERRNO.
*/
int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable)
{
struct eth_table_entry *entries;
struct nfp_nsp *nsp;
u64 reg;
int ret;
entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL);
if (!entries)
return -ENOMEM;
nsp = nfp_nsp_open(cpp);
if (IS_ERR(nsp)) {
kfree(entries);
return PTR_ERR(nsp);
}
ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
if (ret < 0) {
nfp_err(cpp, "reading port table failed %d\n", ret);
goto exit_close_nsp;
}
if (!(entries[idx].port & NSP_ETH_PORT_LANES_MASK)) {
nfp_warn(cpp, "trying to set port state on disabled port %d\n",
idx);
ret = -EINVAL;
goto exit_close_nsp;
}
/* Check if we are already in requested state */
reg = le64_to_cpu(entries[idx].state);
if (enable == FIELD_GET(NSP_ETH_CTRL_ENABLED, reg)) {
ret = 0;
goto exit_close_nsp;
}
reg = le64_to_cpu(entries[idx].control);
reg &= ~NSP_ETH_CTRL_ENABLED;
reg |= FIELD_PREP(NSP_ETH_CTRL_ENABLED, enable);
entries[idx].control = cpu_to_le64(reg);
ret = nfp_nsp_write_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
exit_close_nsp:
nfp_nsp_close(nsp);
kfree(entries);
return ret < 0 ? ret : 0;
}
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
 * This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef NSP_NSP_ETH_H
#define NSP_NSP_ETH_H 1
#include <linux/types.h>
#include <linux/if_ether.h>
/**
 * struct nfp_eth_table - ETH table information
 * @count:	number of table entries
 * @ports:	table of ports
 *
 * @eth_index:	port index according to legacy ethX numbering
 * @index:	chip-wide first channel index
 * @nbi:	NBI index
 * @base:	first channel index (within NBI)
 * @lanes:	number of channels
 * @speed:	interface speed (in Mbps)
 * @mac_addr:	interface MAC address
 * @label:	interface id string
 * @enabled:	is enabled?
 * @tx_enabled:	is TX enabled?
 * @rx_enabled:	is RX enabled?
 */
struct nfp_eth_table {
	unsigned int count;
	struct nfp_eth_table_port {
		unsigned int eth_index;
		unsigned int index;
		unsigned int nbi;
		unsigned int base;
		unsigned int lanes;
		unsigned int speed;
		u8 mac_addr[ETH_ALEN];
		char label[8];
		bool enabled;
		bool tx_enabled;
		bool rx_enabled;
	} ports[];	/* C99 flexible array member instead of GNU ports[0] */
};
struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp);
struct nfp_eth_table *
__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp);
int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable);
#endif
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
 * This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* nfp_resource.c
* Author: Jakub Kicinski <jakub.kicinski@netronome.com>
* Jason McMullan <jason.mcmullan@netronome.com>
*/
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "crc32.h"
#include "nfp.h"
#include "nfp_cpp.h"
#include "nfp6000/nfp6000.h"
#define NFP_RESOURCE_ENTRY_NAME_SZ 8
/**
* struct nfp_resource_entry - Resource table entry
* @owner: NFP CPP Lock, interface owner
* @key: NFP CPP Lock, posix_crc32(name, 8)
* @region: Memory region descriptor
* @name: ASCII, zero padded name
 * @reserved:	reserved bytes, not interpreted by the driver
* @cpp_action: CPP Action
* @cpp_token: CPP Token
* @cpp_target: CPP Target ID
* @page_offset: 256-byte page offset into target's CPP address
* @page_size: size, in 256-byte pages
*/
/* Raw layout of one entry in the chip's resource table.  Entries are read
 * byte-for-byte from the device (see nfp_cpp_resource_find()), so the field
 * order and sizes must not be changed.
 */
struct nfp_resource_entry {
	struct nfp_resource_entry_mutex {
		u32 owner;	/* NFP CPP Lock, interface owner */
		u32 key;	/* NFP CPP Lock, posix_crc32(name, 8) */
	} mutex;
	struct nfp_resource_entry_region {
		u8  name[NFP_RESOURCE_ENTRY_NAME_SZ];	/* ASCII, zero padded */
		u8  reserved[5];
		u8  cpp_action;		/* CPP Action */
		u8  cpp_token;		/* CPP Token */
		u8  cpp_target;		/* CPP Target ID */
		u32 page_offset;	/* 256-byte page offset into target's CPP address */
		u32 page_size;		/* size, in 256-byte pages */
	} region;
};
#define NFP_RESOURCE_TBL_SIZE 4096
#define NFP_RESOURCE_TBL_ENTRIES (NFP_RESOURCE_TBL_SIZE / \
sizeof(struct nfp_resource_entry))
/* Host-side handle for an acquired resource table entry. */
struct nfp_resource {
	char name[NFP_RESOURCE_ENTRY_NAME_SZ + 1];	/* NUL-terminated copy of the entry name */
	u32 cpp_id;	/* CPP ID (target/action/token) of the region */
	u64 addr;	/* byte address of the region on the chip */
	u64 size;	/* byte size of the region */
	struct nfp_cpp_mutex *mutex;	/* per-resource lock, held while acquired */
};
static int nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res)
{
char name_pad[NFP_RESOURCE_ENTRY_NAME_SZ] = {};
struct nfp_resource_entry entry;
u32 cpp_id, key;
int ret, i;
cpp_id = NFP_CPP_ID(NFP_RESOURCE_TBL_TARGET, 3, 0); /* Atomic read */
strncpy(name_pad, res->name, sizeof(name_pad));
/* Search for a matching entry */
key = NFP_RESOURCE_TBL_KEY;
if (memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8))
key = crc32_posix(name_pad, sizeof(name_pad));
for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) {
u64 addr = NFP_RESOURCE_TBL_BASE +
sizeof(struct nfp_resource_entry) * i;
ret = nfp_cpp_read(cpp, cpp_id, addr, &entry, sizeof(entry));
if (ret != sizeof(entry))
return -EIO;
if (entry.mutex.key != key)
continue;
/* Found key! */
res->mutex =
nfp_cpp_mutex_alloc(cpp,
NFP_RESOURCE_TBL_TARGET, addr, key);
res->cpp_id = NFP_CPP_ID(entry.region.cpp_target,
entry.region.cpp_action,
entry.region.cpp_token);
res->addr = (u64)entry.region.page_offset << 8;
res->size = (u64)entry.region.page_size << 8;
return 0;
}
return -ENOENT;
}
/* Take the table-level lock, look the resource up and try to grab its
 * per-resource mutex.  The table lock is always dropped again before
 * returning, whether or not the resource was acquired; on failure the
 * freshly allocated resource mutex is freed.
 */
static int
nfp_resource_try_acquire(struct nfp_cpp *cpp, struct nfp_resource *res,
			 struct nfp_cpp_mutex *dev_mutex)
{
	int ret;

	if (nfp_cpp_mutex_lock(dev_mutex))
		return -EINVAL;

	ret = nfp_cpp_resource_find(cpp, res);
	if (!ret) {
		ret = nfp_cpp_mutex_trylock(res->mutex);
		if (ret)
			nfp_cpp_mutex_free(res->mutex);
	}

	nfp_cpp_mutex_unlock(dev_mutex);

	return ret;
}
/**
 * nfp_resource_acquire() - Acquire a resource handle
 * @cpp:	NFP CPP handle
 * @name:	Name of the resource
 *
 * NOTE: This function locks the acquired resource
 *
 * Return: NFP Resource handle, or ERR_PTR()
 */
struct nfp_resource *
nfp_resource_acquire(struct nfp_cpp *cpp, const char *name)
{
	unsigned long warn_at = jiffies + 15 * HZ;
	struct nfp_cpp_mutex *dev_mutex;
	struct nfp_resource *res;
	int err;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	strncpy(res->name, name, NFP_RESOURCE_ENTRY_NAME_SZ);

	dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET,
					NFP_RESOURCE_TBL_BASE,
					NFP_RESOURCE_TBL_KEY);
	if (!dev_mutex) {
		kfree(res);
		return ERR_PTR(-ENOMEM);
	}

	/* Spin (with a 1ms interruptible sleep) for as long as the
	 * resource mutex is contended; any other error is fatal.
	 */
	while ((err = nfp_resource_try_acquire(cpp, res, dev_mutex)) == -EBUSY) {
		if (msleep_interruptible(1)) {
			err = -ERESTARTSYS;
			break;
		}

		if (time_is_before_eq_jiffies(warn_at)) {
			warn_at = jiffies + 60 * HZ;
			nfp_warn(cpp, "Warning: waiting for NFP resource %s\n",
				 name);
		}
	}

	nfp_cpp_mutex_free(dev_mutex);

	if (err) {
		kfree(res);
		return ERR_PTR(err);
	}

	return res;
}
/**
 * nfp_resource_release() - Release a NFP Resource handle
 * @res:	NFP Resource handle
 *
 * NOTE: This function implicitly unlocks the resource handle
 */
void nfp_resource_release(struct nfp_resource *res)
{
	nfp_cpp_mutex_unlock(res->mutex);
	nfp_cpp_mutex_free(res->mutex);
	kfree(res);
}

/**
 * nfp_resource_cpp_id() - Return the cpp_id of a resource handle
 * @res:	NFP Resource handle
 *
 * Return: NFP CPP ID
 */
u32 nfp_resource_cpp_id(struct nfp_resource *res)
{
	return res->cpp_id;
}

/**
 * nfp_resource_name() - Return the name of a resource handle
 * @res:	NFP Resource handle
 *
 * Return: const char pointer to the name of the resource
 */
const char *nfp_resource_name(struct nfp_resource *res)
{
	return res->name;
}

/**
 * nfp_resource_address() - Return the address of a resource handle
 * @res:	NFP Resource handle
 *
 * Return: Address of the resource
 */
u64 nfp_resource_address(struct nfp_resource *res)
{
	return res->addr;
}

/**
 * nfp_resource_size() - Return the size in bytes of a resource handle
 * @res:	NFP Resource handle
 *
 * Return: Size of the resource in bytes
 */
u64 nfp_resource_size(struct nfp_resource *res)
{
	return res->size;
}
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
 * This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* nfp_rtsym.c
* Interface for accessing run-time symbol table
* Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
* Jason McMullan <jason.mcmullan@netronome.com>
* Espen Skoglund <espen.skoglund@netronome.com>
* Francois H. Theron <francois.theron@netronome.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include "nfp.h"
#include "nfp_cpp.h"
#include "nfp_nffw.h"
#include "nfp6000/nfp6000.h"
/* These need to match the linker */
#define SYM_TGT_LMEM 0
#define SYM_TGT_EMU_CACHE 0x17
/* Raw firmware layout of one run-time symbol table entry, read verbatim
 * from the chip in nfp_rtsymtab_probe() - keep the layout unchanged.
 */
struct nfp_rtsym_entry {
	u8	type;		/* symbol type */
	u8	target;		/* CPP target, SYM_TGT_* values remapped on decode */
	u8	island;		/* island ID, 0xff if not island-local */
	u8	addr_hi;	/* bits 39:32 of the symbol address */
	__le32	addr_lo;	/* bits 31:0 of the symbol address */
	__le16	name;		/* byte offset of the name in the string table */
	u8	menum;		/* ME number within the island, 0xff if none */
	u8	size_hi;	/* bits 39:32 of the symbol size */
	__le32	size_lo;	/* bits 31:0 of the symbol size */
};
/* Decoded symbol table, cached on the CPP handle via nfp_rtsym_cache_set().
 * The string table is carved out of the same allocation, right after the
 * symbol array (see nfp_rtsymtab_probe()).
 */
struct nfp_rtsym_cache {
	int num;		/* number of entries in @symtab */
	char *strtab;		/* symbol name strings, NUL-terminated */
	struct nfp_rtsym symtab[];	/* decoded symbol descriptors */
};
/* Combine an island ID and an ME number into a chip-wide ME ID.
 * Returns -1 when either value is out of range.
 */
static int nfp_meid(u8 island_id, u8 menum)
{
	if ((island_id & 0x3F) != island_id || menum >= 12)
		return -1;

	return (island_id << 4) | (menum + 4);
}
/* Decode one raw firmware symbol entry (@fw) into its software form (@sw).
 * The name pointer is resolved into @cache's string table; the offset is
 * taken modulo @strtab_size so it can never point past the table.
 */
static void
nfp_rtsym_sw_entry_init(struct nfp_rtsym_cache *cache, u32 strtab_size,
			struct nfp_rtsym *sw, struct nfp_rtsym_entry *fw)
{
	u64 addr_hi = fw->addr_hi;
	u64 size_hi = fw->size_hi;

	sw->type = fw->type;
	sw->name = cache->strtab + le16_to_cpu(fw->name) % strtab_size;
	sw->addr = addr_hi << 32 | le32_to_cpu(fw->addr_lo);
	sw->size = size_hi << 32 | le32_to_cpu(fw->size_lo);

	/* Remap the linker's target numbers to driver constants; any other
	 * target value passes through unchanged.
	 */
	if (fw->target == SYM_TGT_LMEM)
		sw->target = NFP_RTSYM_TARGET_LMEM;
	else if (fw->target == SYM_TGT_EMU_CACHE)
		sw->target = NFP_RTSYM_TARGET_EMU_CACHE;
	else
		sw->target = fw->target;

	if (fw->menum != 0xff)
		sw->domain = nfp_meid(fw->island, fw->menum);
	else if (fw->island != 0xff)
		sw->domain = fw->island;
	else
		sw->domain = -1;
}
/* Read the symbol and string tables from the chip, decode them into a
 * struct nfp_rtsym_cache and install it on @cpp.
 *
 * Return: 0 on success, negative errno on failure.  Note nfp_cpp_read()
 * returns the number of bytes read; short reads are converted to -EIO so
 * callers which only test for negative values never see a positive
 * "error" (and then dereference a cache that was never installed).
 */
static int nfp_rtsymtab_probe(struct nfp_cpp *cpp)
{
	const u32 dram = NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0) |
		NFP_ISL_EMEM0;
	u32 strtab_addr, symtab_addr, strtab_size, symtab_size;
	struct nfp_rtsym_entry *rtsymtab;
	struct nfp_rtsym_cache *cache;
	const struct nfp_mip *mip;
	int err, n, size;

	mip = nfp_mip_open(cpp);
	if (!mip)
		return -EIO;

	nfp_mip_strtab(mip, &strtab_addr, &strtab_size);
	nfp_mip_symtab(mip, &symtab_addr, &symtab_size);
	nfp_mip_close(mip);

	if (!symtab_size || !strtab_size || symtab_size % sizeof(*rtsymtab))
		return -ENXIO;

	/* Align to 64 bits */
	symtab_size = round_up(symtab_size, 8);
	strtab_size = round_up(strtab_size, 8);

	rtsymtab = kmalloc(symtab_size, GFP_KERNEL);
	if (!rtsymtab)
		return -ENOMEM;

	/* One allocation holds the cache header, the decoded symbol array
	 * and the string table (plus a forced terminating NUL).
	 */
	size = sizeof(*cache);
	size += symtab_size / sizeof(*rtsymtab) * sizeof(struct nfp_rtsym);
	size += strtab_size + 1;
	cache = kmalloc(size, GFP_KERNEL);
	if (!cache) {
		err = -ENOMEM;
		goto err_free_rtsym_raw;
	}

	cache->num = symtab_size / sizeof(*rtsymtab);
	cache->strtab = (void *)&cache->symtab[cache->num];

	err = nfp_cpp_read(cpp, dram, symtab_addr, rtsymtab, symtab_size);
	if (err != symtab_size) {
		err = err < 0 ? err : -EIO;
		goto err_free_cache;
	}

	err = nfp_cpp_read(cpp, dram, strtab_addr, cache->strtab, strtab_size);
	if (err != strtab_size) {
		err = err < 0 ? err : -EIO;
		goto err_free_cache;
	}
	cache->strtab[strtab_size] = '\0';

	for (n = 0; n < cache->num; n++)
		nfp_rtsym_sw_entry_init(cache, strtab_size,
					&cache->symtab[n], &rtsymtab[n]);

	kfree(rtsymtab);
	nfp_rtsym_cache_set(cpp, cache);
	return 0;

err_free_cache:
	kfree(cache);
err_free_rtsym_raw:
	kfree(rtsymtab);
	return err;
}
static struct nfp_rtsym_cache *nfp_rtsym(struct nfp_cpp *cpp)
{
struct nfp_rtsym_cache *cache;
int err;
cache = nfp_rtsym_cache(cpp);
if (cache)
return cache;
err = nfp_rtsymtab_probe(cpp);
if (err < 0)
return ERR_PTR(err);
return nfp_rtsym_cache(cpp);
}
/**
 * nfp_rtsym_count() - Get the number of RTSYM descriptors
 * @cpp:	NFP CPP handle
 *
 * Return: Number of RTSYM descriptors, or -ERRNO
 */
int nfp_rtsym_count(struct nfp_cpp *cpp)
{
	struct nfp_rtsym_cache *cache = nfp_rtsym(cpp);

	return IS_ERR(cache) ? PTR_ERR(cache) : cache->num;
}
/**
 * nfp_rtsym_get() - Get the Nth RTSYM descriptor
 * @cpp:	NFP CPP handle
 * @idx:	Index (0-based) of the RTSYM descriptor
 *
 * Return: const pointer to a struct nfp_rtsym descriptor, or NULL
 */
const struct nfp_rtsym *nfp_rtsym_get(struct nfp_cpp *cpp, int idx)
{
	struct nfp_rtsym_cache *cache;

	cache = nfp_rtsym(cpp);
	if (IS_ERR(cache))
		return NULL;

	/* Reject out-of-range indices, including negative ones which would
	 * otherwise index before the start of the table.
	 */
	if (idx < 0 || idx >= cache->num)
		return NULL;

	return &cache->symtab[idx];
}
/**
 * nfp_rtsym_lookup() - Return the RTSYM descriptor for a symbol name
 * @cpp:	NFP CPP handle
 * @name:	Symbol name
 *
 * Return: const pointer to a struct nfp_rtsym descriptor, or NULL
 */
const struct nfp_rtsym *nfp_rtsym_lookup(struct nfp_cpp *cpp, const char *name)
{
	struct nfp_rtsym_cache *cache;
	int i;

	cache = nfp_rtsym(cpp);
	if (IS_ERR(cache))
		return NULL;

	/* Linear scan over the decoded table. */
	for (i = 0; i < cache->num; i++)
		if (!strcmp(name, cache->symtab[i].name))
			return &cache->symtab[i];

	return NULL;
}
/**
 * nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol
 * @cpp:	NFP CPP handle
 * @name:	Symbol name
 * @error:	Pointer to error code (optional)
 *
 * Lookup a symbol, map, read it and return its value. Value of the symbol
 * will be interpreted as a simple little-endian unsigned value. Symbol can
 * be 4 or 8 bytes in size.
 *
 * Return: value read, on error sets the error and returns ~0ULL.
 */
u64 nfp_rtsym_read_le(struct nfp_cpp *cpp, const char *name, int *error)
{
	const struct nfp_rtsym *sym;
	u32 val32, id;
	u64 val;
	int err;

	sym = nfp_rtsym_lookup(cpp, name);
	if (!sym) {
		/* Go through the common exit path so @error is set and the
		 * documented ~0ULL is returned instead of (u64)-ENOENT.
		 */
		err = -ENOENT;
		goto exit;
	}

	id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain);

	switch (sym->size) {
	case 4:
		err = nfp_cpp_readl(cpp, id, sym->addr, &val32);
		val = val32;
		break;
	case 8:
		err = nfp_cpp_readq(cpp, id, sym->addr, &val);
		break;
	default:
		nfp_err(cpp,
			"rtsym '%s' unsupported or non-scalar size: %lld\n",
			name, sym->size);
		err = -EINVAL;
		break;
	}

	/* nfp_cpp_read*() return the number of bytes read on success. */
	if (err == sym->size)
		err = 0;
	else if (err >= 0)
		err = -EIO;
exit:
	if (error)
		*error = err;

	if (err)
		return ~0ULL;
	return val;
}
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
 * This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* nfp_target.c
* CPP Access Width Decoder
* Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
* Jason McMullan <jason.mcmullan@netronome.com>
* Francois H. Theron <francois.theron@netronome.com>
*/
#include <linux/bitops.h>
#include "nfp_cpp.h"
#include "nfp6000/nfp6000.h"
#define P32 1
#define P64 2
/* This structure ONLY includes items that can be done with a read or write of
* 32-bit or 64-bit words. All others are not listed.
*/
#define AT(_action, _token, _pull, _push) \
case NFP_CPP_ID(0, (_action), (_token)): \
return PUSHPULL((_pull), (_push))
/* Generic push/pull decode for targets with plain read (action 0) and
 * write (action 1) commands of width @pp (P32/P64).
 * NOTE(review): @start and @len are accepted but unused here - presumably
 * kept to mirror the SDK decoder's signature; confirm before removing.
 */
static int target_rw(u32 cpp_id, int pp, int start, int len)
{
	switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
	AT(0, 0, 0, pp);
	AT(1, 0, pp, 0);
	AT(NFP_CPP_ACTION_RW, 0, pp, pp);
	default:
		return -EINVAL;
	}
}
/* NBI DMA region: 64-bit read/write only. */
static int nfp6000_nbi_dma(u32 cpp_id)
{
	switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
	AT(0, 0, 0, P64);	/* ReadNbiDma */
	AT(1, 0, P64, 0);	/* WriteNbiDma */
	AT(NFP_CPP_ACTION_RW, 0, P64, P64);
	default:
		return -EINVAL;
	}
}

/* NBI statistics region: 32-bit read/write only. */
static int nfp6000_nbi_stats(u32 cpp_id)
{
	switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
	AT(0, 0, 0, P32);	/* ReadNbiStats */
	AT(1, 0, P32, 0);	/* WriteNbiStats */
	AT(NFP_CPP_ACTION_RW, 0, P32, P32);
	default:
		return -EINVAL;
	}
}

/* NBI traffic manager region: 64-bit read/write only. */
static int nfp6000_nbi_tm(u32 cpp_id)
{
	switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
	AT(0, 0, 0, P64);	/* ReadNbiTM */
	AT(1, 0, P64, 0);	/* WriteNbiTM */
	AT(NFP_CPP_ACTION_RW, 0, P64, P64);
	default:
		return -EINVAL;
	}
}

/* NBI preclassifier region: 64-bit read/write only. */
static int nfp6000_nbi_ppc(u32 cpp_id)
{
	switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
	AT(0, 0, 0, P64);	/* ReadNbiPreclassifier */
	AT(1, 0, P64, 0);	/* WriteNbiPreclassifier */
	AT(NFP_CPP_ACTION_RW, 0, P64, P64);
	default:
		return -EINVAL;
	}
}
/* Dispatch an NBI access to the decoder for its 1MB sub-region:
 * 0: DMA, 1: Stats, 2: TM, 3: Preclassifier.
 */
static int nfp6000_nbi(u32 cpp_id, u64 address)
{
	u64 rel_addr = address & 0x3fFFFF;

	switch (rel_addr >> 20) {
	case 0:
		return nfp6000_nbi_dma(cpp_id);
	case 1:
		return nfp6000_nbi_stats(cpp_id);
	case 2:
		return nfp6000_nbi_tm(cpp_id);
	default:
		return nfp6000_nbi_ppc(cpp_id);
	}
}
/* This structure ONLY includes items that can be done with a read or write of
* 32-bit or 64-bit words. All others are not listed.
*/
/* Push/pull widths for MU actions shared by CTM, EMU and IMU. */
static int nfp6000_mu_common(u32 cpp_id)
{
	switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
	AT(NFP_CPP_ACTION_RW, 0, P64, P64);	/* read_be/write_be */
	AT(NFP_CPP_ACTION_RW, 1, P64, P64);	/* read_le/write_le */
	AT(NFP_CPP_ACTION_RW, 2, P64, P64);	/* read_swap_be/write_swap_be */
	AT(NFP_CPP_ACTION_RW, 3, P64, P64);	/* read_swap_le/write_swap_le */
	AT(0, 0, 0, P64);	/* read_be */
	AT(0, 1, 0, P64);	/* read_le */
	AT(0, 2, 0, P64);	/* read_swap_be */
	AT(0, 3, 0, P64);	/* read_swap_le */
	AT(1, 0, P64, 0);	/* write_be */
	AT(1, 1, P64, 0);	/* write_le */
	AT(1, 2, P64, 0);	/* write_swap_be */
	AT(1, 3, P64, 0);	/* write_swap_le */
	AT(3, 0, 0, P32);	/* atomic_read */
	AT(3, 2, P32, 0);	/* mask_compare_write */
	AT(4, 0, P32, 0);	/* atomic_write */
	AT(4, 2, 0, 0);		/* atomic_write_imm */
	AT(4, 3, 0, P32);	/* swap_imm */
	AT(5, 0, P32, 0);	/* set */
	AT(5, 3, 0, P32);	/* test_set_imm */
	AT(6, 0, P32, 0);	/* clr */
	AT(6, 3, 0, P32);	/* test_clr_imm */
	AT(7, 0, P32, 0);	/* add */
	AT(7, 3, 0, P32);	/* test_add_imm */
	AT(8, 0, P32, 0);	/* addsat */
	AT(8, 3, 0, P32);	/* test_addsat_imm */
	AT(9, 0, P32, 0);	/* sub */
	AT(9, 3, 0, P32);	/* test_sub_imm */
	AT(10, 0, P32, 0);	/* subsat */
	AT(10, 3, 0, P32);	/* test_subsat_imm */
	AT(13, 0, 0, P32);	/* microq128_get */
	AT(13, 1, 0, P32);	/* microq128_pop */
	AT(13, 2, P32, 0);	/* microq128_put */
	AT(15, 0, P32, 0);	/* xor */
	AT(15, 3, 0, P32);	/* test_xor_imm */
	AT(28, 0, 0, P32);	/* read32_be */
	AT(28, 1, 0, P32);	/* read32_le */
	AT(28, 2, 0, P32);	/* read32_swap_be */
	AT(28, 3, 0, P32);	/* read32_swap_le */
	AT(31, 0, P32, 0);	/* write32_be */
	AT(31, 1, P32, 0);	/* write32_le */
	AT(31, 2, P32, 0);	/* write32_swap_be */
	AT(31, 3, P32, 0);	/* write32_swap_le */
	default:
		return -EINVAL;
	}
}
/* CTM-specific packet-engine actions, falling back to the common MU set. */
static int nfp6000_mu_ctm(u32 cpp_id)
{
	switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
	AT(16, 1, 0, P32);	/* packet_read_packet_status */
	AT(17, 1, 0, P32);	/* packet_credit_get */
	AT(17, 3, 0, P64);	/* packet_add_thread */
	AT(18, 2, 0, P64);	/* packet_free_and_return_pointer */
	AT(18, 3, 0, P64);	/* packet_return_pointer */
	AT(21, 0, 0, P64);	/* pe_dma_to_memory_indirect */
	AT(21, 1, 0, P64);	/* pe_dma_to_memory_indirect_swap */
	AT(21, 2, 0, P64);	/* pe_dma_to_memory_indirect_free */
	AT(21, 3, 0, P64);	/* pe_dma_to_memory_indirect_free_swap */
	default:
		return nfp6000_mu_common(cpp_id);
	}
}

/* EMU-specific queue/ring/journal actions, falling back to the common set. */
static int nfp6000_mu_emu(u32 cpp_id)
{
	switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
	AT(18, 0, 0, P32);	/* read_queue */
	AT(18, 1, 0, P32);	/* read_queue_ring */
	AT(18, 2, P32, 0);	/* write_queue */
	AT(18, 3, P32, 0);	/* write_queue_ring */
	AT(20, 2, P32, 0);	/* journal */
	AT(21, 0, 0, P32);	/* get */
	AT(21, 1, 0, P32);	/* get_eop */
	AT(21, 2, 0, P32);	/* get_freely */
	AT(22, 0, 0, P32);	/* pop */
	AT(22, 1, 0, P32);	/* pop_eop */
	AT(22, 2, 0, P32);	/* pop_freely */
	default:
		return nfp6000_mu_common(cpp_id);
	}
}

/* IMU supports only the common MU actions. */
static int nfp6000_mu_imu(u32 cpp_id)
{
	return nfp6000_mu_common(cpp_id);
}
/* Memory unit: pick the CTM/EMU/IMU decoder based on which part of the MU
 * address map @address falls into.
 */
static int nfp6000_mu(u32 cpp_id, u64 address)
{
	if (address < 0x2000000000ULL)
		return nfp6000_mu_ctm(cpp_id);
	if (address < 0x8000000000ULL)
		return nfp6000_mu_emu(cpp_id);
	if (address < 0x9800000000ULL)
		return nfp6000_mu_ctm(cpp_id);
	if (address < 0x9C00000000ULL)
		return nfp6000_mu_emu(cpp_id);
	if (address < 0xA000000000ULL)
		return nfp6000_mu_imu(cpp_id);

	return nfp6000_mu_ctm(cpp_id);
}
/* ILA target: special read/int actions plus generic 32-bit read/write. */
static int nfp6000_ila(u32 cpp_id)
{
	switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
	AT(0, 1, 0, P32);	/* read_check_error */
	AT(2, 0, 0, P32);	/* read_int */
	AT(3, 0, P32, 0);	/* write_int */
	default:
		return target_rw(cpp_id, P32, 48, 4);
	}
}

/* PCIe target: internal read/write actions plus generic 32-bit access. */
static int nfp6000_pci(u32 cpp_id)
{
	switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
	AT(2, 0, 0, P32);
	AT(3, 0, P32, 0);
	default:
		return target_rw(cpp_id, P32, 4, 4);
	}
}

/* Crypto target: 64-bit write action plus generic 64-bit read/write. */
static int nfp6000_crypto(u32 cpp_id)
{
	switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
	AT(2, 0, P64, 0);
	default:
		return target_rw(cpp_id, P64, 12, 4);
	}
}
/* Cluster target / XPB: ring, signal and reflect operations plus generic
 * 32-bit CSR access.
 */
static int nfp6000_cap_xpb(u32 cpp_id)
{
	switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
	AT(0, 1, 0, P32);	/* RingGet */
	AT(0, 2, P32, 0);	/* Interthread Signal */
	AT(1, 1, P32, 0);	/* RingPut */
	AT(1, 2, P32, 0);	/* CTNNWr */
	AT(2, 0, 0, P32);	/* ReflectRd, signal none */
	AT(2, 1, 0, P32);	/* ReflectRd, signal self */
	AT(2, 2, 0, P32);	/* ReflectRd, signal remote */
	AT(2, 3, 0, P32);	/* ReflectRd, signal both */
	AT(3, 0, P32, 0);	/* ReflectWr, signal none */
	AT(3, 1, P32, 0);	/* ReflectWr, signal self */
	AT(3, 2, P32, 0);	/* ReflectWr, signal remote */
	AT(3, 3, P32, 0);	/* ReflectWr, signal both */
	AT(NFP_CPP_ACTION_RW, 1, P32, P32);
	default:
		return target_rw(cpp_id, P32, 1, 63);
	}
}
/* Cluster local scratch: atomic, ring and reflect operations plus generic
 * 32-bit read/write.
 */
static int nfp6000_cls(u32 cpp_id)
{
	switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
	AT(0, 3, P32, 0);	/* xor */
	AT(2, 0, P32, 0);	/* set */
	AT(2, 1, P32, 0);	/* clr */
	AT(4, 0, P32, 0);	/* add */
	AT(4, 1, P32, 0);	/* add64 */
	AT(6, 0, P32, 0);	/* sub */
	AT(6, 1, P32, 0);	/* sub64 */
	AT(6, 2, P32, 0);	/* subsat */
	AT(8, 2, P32, 0);	/* hash_mask */
	AT(8, 3, P32, 0);	/* hash_clear */
	AT(9, 0, 0, P32);	/* ring_get */
	AT(9, 1, 0, P32);	/* ring_pop */
	AT(9, 2, 0, P32);	/* ring_get_freely */
	AT(9, 3, 0, P32);	/* ring_pop_freely */
	AT(10, 0, P32, 0);	/* ring_put */
	AT(10, 2, P32, 0);	/* ring_journal */
	AT(14, 0, P32, 0);	/* reflect_write_sig_local */
	AT(15, 1, 0, P32);	/* reflect_read_sig_local */
	AT(17, 2, P32, 0);	/* statistic */
	AT(24, 0, 0, P32);	/* ring_read */
	AT(24, 1, P32, 0);	/* ring_write */
	AT(25, 0, 0, P32);	/* ring_workq_add_thread */
	AT(25, 1, P32, 0);	/* ring_workq_add_work */
	default:
		return target_rw(cpp_id, P32, 0, 64);
	}
}
/**
 * nfp_target_pushpull() - Decode the access widths for a CPP transaction
 * @cpp_id:	CPP ID (target, action, token)
 * @address:	offset within the target; some targets decode per sub-region
 *
 * Return: PUSHPULL() encoding of the pull and push widths, or -EINVAL if
 *	   the combination is not a simple 32/64-bit read or write.
 */
int nfp_target_pushpull(u32 cpp_id, u64 address)
{
	switch (NFP_CPP_ID_TARGET_of(cpp_id)) {
	case NFP_CPP_TARGET_NBI:
		return nfp6000_nbi(cpp_id, address);
	case NFP_CPP_TARGET_QDR:
		return target_rw(cpp_id, P32, 24, 4);
	case NFP_CPP_TARGET_ILA:
		return nfp6000_ila(cpp_id);
	case NFP_CPP_TARGET_MU:
		return nfp6000_mu(cpp_id, address);
	case NFP_CPP_TARGET_PCIE:
		return nfp6000_pci(cpp_id);
	case NFP_CPP_TARGET_ARM:
		/* Low 64kB of the ARM target is 64-bit, the rest 32-bit. */
		if (address < 0x10000)
			return target_rw(cpp_id, P64, 1, 1);
		else
			return target_rw(cpp_id, P32, 1, 1);
	case NFP_CPP_TARGET_CRYPTO:
		return nfp6000_crypto(cpp_id);
	case NFP_CPP_TARGET_CT_XPB:
		return nfp6000_cap_xpb(cpp_id);
	case NFP_CPP_TARGET_CLS:
		return nfp6000_cls(cpp_id);
	case 0:
		return target_rw(cpp_id, P32, 4, 4);
	default:
		return -EINVAL;
	}
}
#undef AT
#undef P32
#undef P64
/* All magic NFP-6xxx IMB 'mode' numbers here are from:
* Databook (1 August 2013)
* - System Overview and Connectivity
* -- Internal Connectivity
* --- Distributed Switch Fabric - Command Push/Pull (DSF-CPP) Bus
* ---- CPP addressing
* ----- Table 3.6. CPP Address Translation Mode Commands
*/
#define _NIC_NFP6000_MU_LOCALITY_DIRECT 2
/* Decode the destination island out of a basic (non-MU, non-CT_XPB) CPP
 * address.
 * @addr:	CPP address to decode
 * @dest_island: output - decoded island ID (not validated, see mode 0/3)
 * @cpp_tgt:	CPP target; NFP_CPP_TARGET_MU and _CT_XPB are rejected
 * @mode:	IMB address-translation mode (0-3)
 * @addr40:	true when 40-bit addressing is in use
 * @isld1:	island selected when the Index bit is set (modes 1-3)
 * @isld0:	island selected when the Index bit is clear (modes 1-3)
 *
 * Return: 0 on success, -EINVAL for an unsupported target or mode.
 */
static int nfp_decode_basic(u64 addr, int *dest_island, int cpp_tgt,
			    int mode, bool addr40, int isld1, int isld0)
{
	int iid_lsb, idx_lsb;

	/* This function doesn't handle MU or CTXBP */
	if (cpp_tgt == NFP_CPP_TARGET_MU || cpp_tgt == NFP_CPP_TARGET_CT_XPB)
		return -EINVAL;

	switch (mode) {
	case 0:
		/* For VQDR, in this mode for 32-bit addressing
		 * it would be islands 0, 16, 32 and 48 depending on channel
		 * and upper address bits.
		 * Since those are not all valid islands, most decode
		 * cases would result in bad island IDs, but we do them
		 * anyway since this is decoding an address that is already
		 * assumed to be used as-is to get to sram.
		 */
		iid_lsb = addr40 ? 34 : 26;
		*dest_island = (addr >> iid_lsb) & 0x3F;
		return 0;
	case 1:
		/* For VQDR 32-bit, this would decode as:
		 *	Channel 0: island#0
		 *	Channel 1: island#0
		 *	Channel 2: island#1
		 *	Channel 3: island#1
		 * That would be valid as long as both islands
		 * have VQDR. Let's allow this.
		 */
		idx_lsb = addr40 ? 39 : 31;
		if (addr & BIT_ULL(idx_lsb))
			*dest_island = isld1;
		else
			*dest_island = isld0;

		return 0;
	case 2:
		/* For VQDR 32-bit:
		 *	Channel 0: (island#0 | 0)
		 *	Channel 1: (island#0 | 1)
		 *	Channel 2: (island#1 | 0)
		 *	Channel 3: (island#1 | 1)
		 *
		 * Make sure we compare against isldN values
		 * by clearing the LSB.
		 * This is what the silicon does.
		 */
		isld0 &= ~1;
		isld1 &= ~1;

		idx_lsb = addr40 ? 39 : 31;
		iid_lsb = idx_lsb - 1;

		if (addr & BIT_ULL(idx_lsb))
			*dest_island = isld1 | (int)((addr >> iid_lsb) & 1);
		else
			*dest_island = isld0 | (int)((addr >> iid_lsb) & 1);

		return 0;
	case 3:
		/* In this mode the data address starts to affect the island ID
		 * so rather not allow it. In some really specific case
		 * one could use this to send the upper half of the
		 * VQDR channel to another MU, but this is getting very
		 * specific.
		 * However, as above for mode 0, this is the decoder
		 * and the caller should validate the resulting IID.
		 * This blindly does what the silicon would do.
		 */
		isld0 &= ~3;
		isld1 &= ~3;

		idx_lsb = addr40 ? 39 : 31;
		iid_lsb = idx_lsb - 2;

		if (addr & BIT_ULL(idx_lsb))
			*dest_island = isld1 | (int)((addr >> iid_lsb) & 3);
		else
			*dest_island = isld0 | (int)((addr >> iid_lsb) & 3);

		return 0;
	default:
		return -EINVAL;
	}
}
/* Verify - without modifying @addr - that the channel/index bits already
 * present in @addr decode to @dest_island (or that @dest_island is -1,
 * meaning "don't care").  Used for VQDR modes where the channel bits may
 * not be rewritten.  Note the parameter order: @dest_island before
 * @cpp_tgt.
 */
static int nfp_encode_basic_qdr(u64 addr, int dest_island, int cpp_tgt,
				int mode, bool addr40, int isld1, int isld0)
{
	int v, ret;

	/* Full Island ID and channel bits overlap? */
	ret = nfp_decode_basic(addr, &v, cpp_tgt, mode, addr40, isld1, isld0);
	if (ret)
		return ret;

	/* The current address won't go where expected? */
	if (dest_island != -1 && dest_island != v)
		return -EINVAL;

	/* If dest_island was -1, we don't care where it goes. */
	return 0;
}
/* Try every (Index bit, island LSBs) combination and commit the first one
 * that routes to @dest_island.  No attempt is made to be clever about
 * preferring 0 vs non-0 island IDs - first match wins.
 */
static int nfp_encode_basic_search(u64 *addr, int dest_island, int *isld,
				   int iid_lsb, int idx_lsb, int v_max)
{
	int idx, v;

	for (idx = 0; idx < 2; idx++) {
		for (v = 0; v < v_max; v++) {
			if (dest_island != (isld[idx] | v))
				continue;

			*addr &= ~GENMASK_ULL(idx_lsb, iid_lsb);
			*addr |= (u64)idx << idx_lsb;
			*addr |= (u64)v << iid_lsb;
			return 0;
		}
	}

	return -ENODEV;
}
/* Encode @dest_island into a basic (non-MU, non-CT_XPB) CPP address.
 *
 * For VQDR, we may not modify the Channel bits, which might overlap
 * with the Index bit. When it does, we need to ensure that isld0 == isld1.
 *
 * Note: nfp_encode_basic_qdr() declares @dest_island BEFORE @cpp_tgt, so
 * the arguments must be passed in that order at every call site below
 * (they were previously transposed, which made the QDR verification run
 * against the wrong values).
 */
static int nfp_encode_basic(u64 *addr, int dest_island, int cpp_tgt,
			    int mode, bool addr40, int isld1, int isld0)
{
	int iid_lsb, idx_lsb;
	int isld[2];
	u64 v64;

	isld[0] = isld0;
	isld[1] = isld1;

	/* This function doesn't handle MU or CTXBP */
	if (cpp_tgt == NFP_CPP_TARGET_MU || cpp_tgt == NFP_CPP_TARGET_CT_XPB)
		return -EINVAL;

	switch (mode) {
	case 0:
		if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40)
			/* In this specific mode we'd rather not modify
			 * the address but we can verify if the existing
			 * contents will point to a valid island.
			 */
			return nfp_encode_basic_qdr(*addr, dest_island,
						    cpp_tgt, mode, addr40,
						    isld1, isld0);

		iid_lsb = addr40 ? 34 : 26;
		/* <39:34> or <31:26> */
		v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb);
		*addr &= ~v64;
		*addr |= ((u64)dest_island << iid_lsb) & v64;
		return 0;
	case 1:
		if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40)
			return nfp_encode_basic_qdr(*addr, dest_island,
						    cpp_tgt, mode, addr40,
						    isld1, isld0);

		idx_lsb = addr40 ? 39 : 31;
		if (dest_island == isld0) {
			/* Only need to clear the Index bit */
			*addr &= ~BIT_ULL(idx_lsb);
			return 0;
		}

		if (dest_island == isld1) {
			/* Only need to set the Index bit */
			*addr |= BIT_ULL(idx_lsb);
			return 0;
		}

		return -ENODEV;
	case 2:
		/* iid<0> = addr<30> = channel<0>
		 * channel<1> = addr<31> = Index
		 */
		if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40)
			/* Special case where we allow channel bits to
			 * be set before hand and with them select an island.
			 * So we need to confirm that it's at least plausible.
			 */
			return nfp_encode_basic_qdr(*addr, dest_island,
						    cpp_tgt, mode, addr40,
						    isld1, isld0);

		/* Make sure we compare against isldN values
		 * by clearing the LSB.
		 * This is what the silicon does.
		 */
		isld[0] &= ~1;
		isld[1] &= ~1;

		idx_lsb = addr40 ? 39 : 31;
		iid_lsb = idx_lsb - 1;

		return nfp_encode_basic_search(addr, dest_island, isld,
					       iid_lsb, idx_lsb, 2);
	case 3:
		if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40)
			/* iid<0> = addr<29> = data
			 * iid<1> = addr<30> = channel<0>
			 * channel<1> = addr<31> = Index
			 */
			return nfp_encode_basic_qdr(*addr, dest_island,
						    cpp_tgt, mode, addr40,
						    isld1, isld0);

		isld[0] &= ~3;
		isld[1] &= ~3;

		idx_lsb = addr40 ? 39 : 31;
		iid_lsb = idx_lsb - 2;

		return nfp_encode_basic_search(addr, dest_island, isld,
					       iid_lsb, idx_lsb, 4);
	default:
		return -EINVAL;
	}
}
/* Encode @dest_island into an MU CPP address for the given IMB @mode.
 * When the locality field already selects direct access, the island ID is
 * written into the address itself; otherwise the Index bit chooses between
 * @isld0 and @isld1.
 */
static int nfp_encode_mu(u64 *addr, int dest_island, int mode,
			 bool addr40, int isld1, int isld0)
{
	int iid_lsb, idx_lsb, locality_lsb;
	int isld[2];
	u64 v64;
	int da;

	isld[0] = isld0;
	isld[1] = isld1;
	locality_lsb = nfp_cppat_mu_locality_lsb(mode, addr40);

	if (((*addr >> locality_lsb) & 3) == _NIC_NFP6000_MU_LOCALITY_DIRECT)
		da = 1;
	else
		da = 0;

	switch (mode) {
	case 0:
		iid_lsb = addr40 ? 32 : 24;
		v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb);
		*addr &= ~v64;
		*addr |= (((u64)dest_island) << iid_lsb) & v64;
		return 0;
	case 1:
		if (da) {
			iid_lsb = addr40 ? 32 : 24;
			v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb);
			*addr &= ~v64;
			*addr |= (((u64)dest_island) << iid_lsb) & v64;
			return 0;
		}

		idx_lsb = addr40 ? 37 : 29;
		if (dest_island == isld0) {
			*addr &= ~BIT_ULL(idx_lsb);
			return 0;
		}

		if (dest_island == isld1) {
			*addr |= BIT_ULL(idx_lsb);
			return 0;
		}

		return -ENODEV;
	case 2:
		if (da) {
			iid_lsb = addr40 ? 32 : 24;
			v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb);
			*addr &= ~v64;
			*addr |= (((u64)dest_island) << iid_lsb) & v64;
			return 0;
		}

		/* Make sure we compare against isldN values
		 * by clearing the LSB.
		 * This is what the silicon does.
		 */
		isld[0] &= ~1;
		isld[1] &= ~1;

		idx_lsb = addr40 ? 37 : 29;
		iid_lsb = idx_lsb - 1;

		return nfp_encode_basic_search(addr, dest_island, isld,
					       iid_lsb, idx_lsb, 2);
	case 3:
		/* Only the EMU will use 40 bit addressing. Silently
		 * set the direct locality bit for everyone else.
		 * The SDK toolchain uses dest_island <= 0 to test
		 * for atypical address encodings to support access
		 * to local-island CTM with a 32-bit address (high-locality
		 * is effectively ignored and just used for
		 * routing to island #0).
		 */
		if (dest_island > 0 && (dest_island < 24 || dest_island > 26)) {
			*addr |= ((u64)_NIC_NFP6000_MU_LOCALITY_DIRECT)
				<< locality_lsb;
			da = 1;
		}

		if (da) {
			iid_lsb = addr40 ? 32 : 24;
			v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb);
			*addr &= ~v64;
			*addr |= (((u64)dest_island) << iid_lsb) & v64;
			return 0;
		}

		isld[0] &= ~3;
		isld[1] &= ~3;

		idx_lsb = addr40 ? 37 : 29;
		iid_lsb = idx_lsb - 2;

		return nfp_encode_basic_search(addr, dest_island, isld,
					       iid_lsb, idx_lsb, 4);
	default:
		return -EINVAL;
	}
}
/* nfp_cppat_addr_encode() - rewrite @addr for the given CPP target
 * @addr:	address to translate, updated in place
 * @dest_island:	destination island number
 * @cpp_tgt:	CPP target ID selecting the encoding scheme
 * @mode:	IMB translation mode
 * @addr40:	true when 40-bit addressing is in use
 * @isld1:	island tested when the index bit is set
 * @isld0:	island tested when the index bit is clear
 *
 * Return: 0 on success, or -EINVAL for an unsupported target/mode.
 */
static int nfp_cppat_addr_encode(u64 *addr, int dest_island, int cpp_tgt,
				 int mode, bool addr40, int isld1, int isld0)
{
	u64 iid_mask;

	switch (cpp_tgt) {
	case NFP_CPP_TARGET_MU:
		/* The MU target has its own, locality-aware encoding. */
		return nfp_encode_mu(addr, dest_island, mode, addr40,
				     isld1, isld0);
	case NFP_CPP_TARGET_CT_XPB:
		/* XPB is only addressable in mode 1 with 32-bit addresses;
		 * the island ID lives in bits 29:24.
		 */
		if (mode != 1 || addr40)
			return -EINVAL;
		iid_mask = GENMASK_ULL(29, 24);
		*addr = (*addr & ~iid_mask) |
			(((u64)dest_island << 24) & iid_mask);
		return 0;
	case NFP_CPP_TARGET_NBI:
	case NFP_CPP_TARGET_QDR:
	case NFP_CPP_TARGET_ILA:
	case NFP_CPP_TARGET_PCIE:
	case NFP_CPP_TARGET_ARM:
	case NFP_CPP_TARGET_CRYPTO:
	case NFP_CPP_TARGET_CLS:
		/* All remaining targets share the basic encoding scheme. */
		return nfp_encode_basic(addr, dest_island, cpp_tgt, mode,
					addr40, isld1, isld0);
	default:
		return -EINVAL;
	}
}
/* nfp_target_cpp() - translate an island-local CPP ID/address pair into a
 *		      fully-qualified target ID/address pair
 * @cpp_island_id:	CPP ID carrying target, action, token and island
 * @cpp_island_address:	address relative to the island
 * @cpp_target_id:	output: translated CPP ID (island cleared)
 * @cpp_target_address:	output: translated address
 * @imb_table:	per-target IMB configuration words, may be NULL
 *
 * Return: 0 on success, or -EINVAL on a bad target or missing IMB table.
 */
int nfp_target_cpp(u32 cpp_island_id, u64 cpp_island_address,
		   u32 *cpp_target_id, u64 *cpp_target_address,
		   const u32 *imb_table)
{
	const int island = NFP_CPP_ID_ISLAND_of(cpp_island_id);
	const int target = NFP_CPP_ID_TARGET_of(cpp_island_id);
	int mode, isld1, isld0;
	bool addr40;
	u32 imb;
	int ret;

	if (target < 0 || target >= 16)
		return -EINVAL;

	/* Island 0 means the ID is already fully translated. */
	if (!island) {
		*cpp_target_id = cpp_island_id;
		*cpp_target_address = cpp_island_address;
		return 0;
	}

	/* CPP + Island only allowed on systems with IMB tables */
	if (!imb_table)
		return -EINVAL;

	/* Unpack the per-target IMB word into its translation parameters. */
	imb = imb_table[target];
	mode = (imb >> 13) & 7;
	addr40 = (imb >> 12) & 1;
	isld1 = (imb >> 6) & 0x3f;
	isld0 = imb & 0x3f;

	*cpp_target_address = cpp_island_address;
	ret = nfp_cppat_addr_encode(cpp_target_address, island, target,
				    mode, addr40, isld1, isld0);
	if (ret)
		return ret;

	/* Rebuild the ID with the island field dropped. */
	*cpp_target_id = NFP_CPP_ID(target,
				    NFP_CPP_ID_ACTION_of(cpp_island_id),
				    NFP_CPP_ID_TOKEN_of(cpp_island_id));
	return 0;
}
...@@ -62,6 +62,19 @@ ...@@ -62,6 +62,19 @@
(1ULL << __bf_shf(_mask))); \ (1ULL << __bf_shf(_mask))); \
}) })
/**
 * FIELD_FIT() - check if value fits in the field
 * @_mask: shifted mask defining the field's length and position
 * @_val: value to test against the field
 *
 * Evaluates to true when @_val, shifted up to the field's position (the
 * lowest set bit of @_mask), sets no bits outside @_mask.
 * __BF_FIELD_CHECK presumably provides compile-time validation of the
 * arguments, as in the sibling FIELD_PREP/FIELD_GET macros.
 *
 * Return: true if @_val can fit inside @_mask, false if @_val is too big.
 */
#define FIELD_FIT(_mask, _val) \
	({ \
		__BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \
		!((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
	})
/** /**
* FIELD_PREP() - prepare a bitfield element * FIELD_PREP() - prepare a bitfield element
* @_mask: shifted mask defining the field's length and position * @_mask: shifted mask defining the field's length and position
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment