Commit 710d7fbe authored by Greg Kroah-Hartman

staging: octeon: delete driver

This driver has been in the tree since 2009 with no real movement to get
it out.  Now it is starting to cause build issues and other problems for
people who want to fix coding style problems, but cannot actually build
it.

As nothing is happening here, just delete the module entirely.
Reported-by: Guenter Roeck <linux@roeck-us.net>
Acked-by: Guenter Roeck <linux@roeck-us.net>
Cc: David Daney <ddaney@caviumnetworks.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Guenter Roeck <linux@roeck-us.net>
Cc: YueHaibing <yuehaibing@huawei.com>
Cc: Aaro Koskinen <aaro.koskinen@iki.fi>
Cc: Wambui Karuga <wambui.karugax@gmail.com>
Cc: Julia Lawall <julia.lawall@lip6.fr>
Cc: Florian Westphal <fw@strlen.de>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Branden Bonaby <brandonbonaby94@gmail.com>
Cc: "Petr Štetiar" <ynezz@true.cz>
Cc: Sandro Volery <sandro@volery.com>
Cc: Paul Burton <paulburton@kernel.org>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Giovanni Gherdovich <bobdc9664@seznam.cz>
Cc: Valery Ivanov <ivalery111@gmail.com>
Link: https://lore.kernel.org/r/20191210091509.3546251-1-gregkh@linuxfoundation.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent b33bdf80
@@ -42,8 +42,6 @@ source "drivers/staging/rtl8188eu/Kconfig"
source "drivers/staging/rts5208/Kconfig"
source "drivers/staging/octeon/Kconfig"
source "drivers/staging/octeon-usb/Kconfig"
source "drivers/staging/vt6655/Kconfig"
@@ -12,7 +12,6 @@ obj-$(CONFIG_R8712U) += rtl8712/
obj-$(CONFIG_R8188EU) += rtl8188eu/
obj-$(CONFIG_RTS5208) += rts5208/
obj-$(CONFIG_NETLOGIC_XLR_NET) += netlogic/
obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
obj-$(CONFIG_OCTEON_USB) += octeon-usb/
obj-$(CONFIG_VT6655) += vt6655/
obj-$(CONFIG_VT6656) += vt6656/
# SPDX-License-Identifier: GPL-2.0
config OCTEON_ETHERNET
tristate "Cavium Networks Octeon Ethernet support"
depends on CAVIUM_OCTEON_SOC || COMPILE_TEST
depends on NETDEVICES
depends on BROKEN
select PHYLIB
select MDIO_OCTEON
help
This driver supports the builtin ethernet ports on Cavium
Networks' products in the Octeon family. This driver supports the
CN3XXX and CN5XXX Octeon processors.
To compile this driver as a module, choose M here. The module
will be called octeon-ethernet.
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (C) 2005-2009 Cavium Networks
#
#
# Makefile for Cavium OCTEON on-board ethernet driver
#
obj-${CONFIG_OCTEON_ETHERNET} := octeon-ethernet.o
octeon-ethernet-y := ethernet.o
octeon-ethernet-y += ethernet-mdio.o
octeon-ethernet-y += ethernet-mem.o
octeon-ethernet-y += ethernet-rgmii.o
octeon-ethernet-y += ethernet-rx.o
octeon-ethernet-y += ethernet-sgmii.o
octeon-ethernet-y += ethernet-spi.o
octeon-ethernet-y += ethernet-tx.o
This driver is functional and supports Ethernet on OCTEON+/OCTEON2/OCTEON3
chips at least up to CN7030.
TODO:
- general code review and clean up
- make driver self-contained instead of being split between staging and
arch/mips/cavium-octeon.
Contact: Aaro Koskinen <aaro.koskinen@iki.fi>
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
*/
/*
* A few defines are used to control the operation of this driver:
* USE_ASYNC_IOBDMA
* Use asynchronous IO access to hardware. This uses Octeon's asynchronous
* IOBDMAs to issue IO accesses without stalling. Set this to zero
* to disable this. Note that IOBDMAs require CVMSEG.
* REUSE_SKBUFFS_WITHOUT_FREE
* Allows the TX path to free an skbuff into the FPA hardware pool. This
* can significantly improve performance for forwarding and bridging, but
* may be somewhat dangerous. Checks are made, but if any buffer is reused
* without the proper Linux cleanup, the networking stack may have very
* bizarre bugs.
*/
#ifndef __ETHERNET_DEFINES_H__
#define __ETHERNET_DEFINES_H__
#ifdef CONFIG_NETFILTER
#define REUSE_SKBUFFS_WITHOUT_FREE 0
#else
#define REUSE_SKBUFFS_WITHOUT_FREE 1
#endif
#define USE_ASYNC_IOBDMA (CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0)
/* Maximum number of SKBs to try to free per xmit packet. */
#define MAX_OUT_QUEUE_DEPTH 1000
#define FAU_TOTAL_TX_TO_CLEAN (CVMX_FAU_REG_END - sizeof(u32))
#define FAU_NUM_PACKET_BUFFERS_TO_FREE (FAU_TOTAL_TX_TO_CLEAN - sizeof(u32))
#define TOTAL_NUMBER_OF_PORTS (CVMX_PIP_NUM_INPUT_PORTS + 1)
#endif /* __ETHERNET_DEFINES_H__ */
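A side note on the two FAU_* addresses defined above: they carve two 32-bit counters off the top of the Octeon fetch-and-add unit's register space, working down from CVMX_FAU_REG_END. The arithmetic, with a purely illustrative value for the end of that space, is:
/* Illustrative only; the real CVMX_FAU_REG_END value comes from cvmx-fau.h.
 * If CVMX_FAU_REG_END were 0x800:
 *   FAU_TOTAL_TX_TO_CLEAN          = 0x800 - sizeof(u32) = 0x7fc
 *   FAU_NUM_PACKET_BUFFERS_TO_FREE = 0x7fc - sizeof(u32) = 0x7f8
 */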
// SPDX-License-Identifier: GPL-2.0
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
*/
#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/ratelimit.h>
#include <linux/of_mdio.h>
#include <generated/utsrelease.h>
#include <net/dst.h>
#include "octeon-ethernet.h"
#include "ethernet-defines.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"
static void cvm_oct_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->version, UTS_RELEASE, sizeof(info->version));
strlcpy(info->bus_info, "Builtin", sizeof(info->bus_info));
}
static int cvm_oct_nway_reset(struct net_device *dev)
{
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (dev->phydev)
return phy_start_aneg(dev->phydev);
return -EINVAL;
}
const struct ethtool_ops cvm_oct_ethtool_ops = {
.get_drvinfo = cvm_oct_get_drvinfo,
.nway_reset = cvm_oct_nway_reset,
.get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
/**
* cvm_oct_ioctl - IOCTL support for PHY control
* @dev: Device to change
* @rq: the request
* @cmd: the command
*
* Returns Zero on success
*/
int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
if (!netif_running(dev))
return -EINVAL;
if (!dev->phydev)
return -EINVAL;
return phy_mii_ioctl(dev->phydev, rq, cmd);
}
void cvm_oct_note_carrier(struct octeon_ethernet *priv,
union cvmx_helper_link_info li)
{
if (li.s.link_up) {
pr_notice_ratelimited("%s: %u Mbps %s duplex, port %d, queue %d\n",
netdev_name(priv->netdev), li.s.speed,
(li.s.full_duplex) ? "Full" : "Half",
priv->port, priv->queue);
} else {
pr_notice_ratelimited("%s: Link down\n",
netdev_name(priv->netdev));
}
}
void cvm_oct_adjust_link(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
union cvmx_helper_link_info link_info;
link_info.u64 = 0;
link_info.s.link_up = dev->phydev->link ? 1 : 0;
link_info.s.full_duplex = dev->phydev->duplex ? 1 : 0;
link_info.s.speed = dev->phydev->speed;
priv->link_info = link_info.u64;
/*
* The polling task need to know about link status changes.
*/
if (priv->poll)
priv->poll(dev);
if (priv->last_link != dev->phydev->link) {
priv->last_link = dev->phydev->link;
cvmx_helper_link_set(priv->port, link_info);
cvm_oct_note_carrier(priv, link_info);
}
}
int cvm_oct_common_stop(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
int interface = INTERFACE(priv->port);
union cvmx_helper_link_info link_info;
union cvmx_gmxx_prtx_cfg gmx_cfg;
int index = INDEX(priv->port);
gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
gmx_cfg.s.en = 0;
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
priv->poll = NULL;
if (dev->phydev)
phy_disconnect(dev->phydev);
if (priv->last_link) {
link_info.u64 = 0;
priv->last_link = 0;
cvmx_helper_link_set(priv->port, link_info);
cvm_oct_note_carrier(priv, link_info);
}
return 0;
}
/**
* cvm_oct_phy_setup_device - setup the PHY
*
* @dev: Device to setup
*
* Returns Zero on success, negative on failure
*/
int cvm_oct_phy_setup_device(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
struct device_node *phy_node;
struct phy_device *phydev = NULL;
if (!priv->of_node)
goto no_phy;
phy_node = of_parse_phandle(priv->of_node, "phy-handle", 0);
if (!phy_node && of_phy_is_fixed_link(priv->of_node)) {
int rc;
rc = of_phy_register_fixed_link(priv->of_node);
if (rc)
return rc;
phy_node = of_node_get(priv->of_node);
}
if (!phy_node)
goto no_phy;
phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0,
priv->phy_mode);
of_node_put(phy_node);
if (!phydev)
return -ENODEV;
priv->last_link = 0;
phy_start(phydev);
return 0;
no_phy:
/* If there is no phy, assume a direct MAC connection and that
* the link is up.
*/
netif_carrier_on(dev);
return 0;
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/ethtool.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */
extern const struct ethtool_ops cvm_oct_ethtool_ops;
void octeon_mdiobus_force_mod_depencency(void);
int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
int cvm_oct_phy_setup_device(struct net_device *dev);
// SPDX-License-Identifier: GPL-2.0
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2010 Cavium Networks
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-defines.h"
/**
* cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs
* @pool: Pool to allocate an skbuff for
* @size: Size of the buffer needed for the pool
* @elements: Number of buffers to allocate
*
* Returns the actual number of buffers allocated.
*/
static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
{
int freed = elements;
while (freed) {
struct sk_buff *skb = dev_alloc_skb(size + 256);
if (unlikely(!skb))
break;
skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f));
*(struct sk_buff **)(skb->data - sizeof(void *)) = skb;
cvmx_fpa_free(skb->data, pool, size / 128);
freed--;
}
return elements - freed;
}
/**
* cvm_oct_free_hw_skbuff- free hardware pool skbuffs
* @pool: Pool to allocate an skbuff for
* @size: Size of the buffer needed for the pool
* @elements: Number of buffers to allocate
*/
static void cvm_oct_free_hw_skbuff(int pool, int size, int elements)
{
char *memory;
do {
memory = cvmx_fpa_alloc(pool);
if (memory) {
struct sk_buff *skb =
*(struct sk_buff **)(memory - sizeof(void *));
elements--;
dev_kfree_skb(skb);
}
} while (memory);
if (elements < 0)
pr_warn("Freeing of pool %u had too many skbuffs (%d)\n",
pool, elements);
else if (elements > 0)
pr_warn("Freeing of pool %u is missing %d skbuffs\n",
pool, elements);
}
/**
* cvm_oct_fill_hw_memory - fill a hardware pool with memory.
* @pool: Pool to populate
* @size: Size of each buffer in the pool
* @elements: Number of buffers to allocate
*
* Returns the actual number of buffers allocated.
*/
static int cvm_oct_fill_hw_memory(int pool, int size, int elements)
{
char *memory;
char *fpa;
int freed = elements;
while (freed) {
/*
* FPA memory must be 128 byte aligned. Since we are
* aligning we need to save the original pointer so we
* can feed it to kfree when the memory is returned to
* the kernel.
*
* We allocate an extra 256 bytes to allow for
* alignment and space for the original pointer saved
* just before the block.
*/
memory = kmalloc(size + 256, GFP_ATOMIC);
if (unlikely(!memory)) {
pr_warn("Unable to allocate %u bytes for FPA pool %d\n",
elements * size, pool);
break;
}
fpa = (char *)(((unsigned long)memory + 256) & ~0x7fUL);
*((char **)fpa - 1) = memory;
cvmx_fpa_free(fpa, pool, 0);
freed--;
}
return elements - freed;
}
/**
* cvm_oct_free_hw_memory - Free memory allocated by cvm_oct_fill_hw_memory
* @pool: FPA pool to free
* @size: Size of each buffer in the pool
* @elements: Number of buffers that should be in the pool
*/
static void cvm_oct_free_hw_memory(int pool, int size, int elements)
{
char *memory;
char *fpa;
do {
fpa = cvmx_fpa_alloc(pool);
if (fpa) {
elements--;
fpa = (char *)phys_to_virt(cvmx_ptr_to_phys(fpa));
memory = *((char **)fpa - 1);
kfree(memory);
}
} while (fpa);
if (elements < 0)
pr_warn("Freeing of pool %u had too many buffers (%d)\n",
pool, elements);
else if (elements > 0)
pr_warn("Warning: Freeing of pool %u is missing %d buffers\n",
pool, elements);
}
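Read in isolation, the fill/free pair above uses a simple over-allocate-and-stash pattern: allocate 256 extra bytes, derive a 128-byte-aligned block inside the allocation (the FPA's alignment requirement), and keep the original kmalloc pointer in the word just below that block so it can be recovered at free time. A minimal user-space sketch of the same idea (the helper names are made up, and plain malloc/free stand in for kmalloc/kfree and the FPA):
#include <stdint.h>
#include <stdlib.h>

static void *alloc_fpa_style(size_t size)
{
	/* Over-allocate so there is room both to align to 128 bytes and to
	 * keep the original pointer in the word just below the aligned block.
	 */
	char *raw = malloc(size + 256);
	char *aligned;

	if (!raw)
		return NULL;
	aligned = (char *)(((uintptr_t)raw + 256) & ~(uintptr_t)0x7f);
	*((char **)aligned - 1) = raw;	/* saved for the matching free */
	return aligned;
}

static void free_fpa_style(void *aligned)
{
	free(*((char **)aligned - 1));	/* recover the original pointer */
}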
int cvm_oct_mem_fill_fpa(int pool, int size, int elements)
{
int freed;
if (pool == CVMX_FPA_PACKET_POOL)
freed = cvm_oct_fill_hw_skbuff(pool, size, elements);
else
freed = cvm_oct_fill_hw_memory(pool, size, elements);
return freed;
}
void cvm_oct_mem_empty_fpa(int pool, int size, int elements)
{
if (pool == CVMX_FPA_PACKET_POOL)
cvm_oct_free_hw_skbuff(pool, size, elements);
else
cvm_oct_free_hw_memory(pool, size, elements);
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
*/
int cvm_oct_mem_fill_fpa(int pool, int size, int elements);
void cvm_oct_mem_empty_fpa(int pool, int size, int elements);
// SPDX-License-Identifier: GPL-2.0
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/phy.h>
#include <linux/ratelimit.h>
#include <net/dst.h>
#include "octeon-ethernet.h"
#include "ethernet-defines.h"
#include "ethernet-util.h"
#include "ethernet-mdio.h"
static DEFINE_SPINLOCK(global_register_lock);
static void cvm_oct_set_hw_preamble(struct octeon_ethernet *priv, bool enable)
{
union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs;
union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;
int interface = INTERFACE(priv->port);
int index = INDEX(priv->port);
/* Set preamble checking. */
gmxx_rxx_frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index,
interface));
gmxx_rxx_frm_ctl.s.pre_chk = enable;
cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface),
gmxx_rxx_frm_ctl.u64);
/* Set FCS stripping. */
ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
if (enable)
ipd_sub_port_fcs.s.port_bit |= 1ull << priv->port;
else
ipd_sub_port_fcs.s.port_bit &=
0xffffffffull ^ (1ull << priv->port);
cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);
/* Clear any error bits. */
gmxx_rxx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index,
interface));
cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface),
gmxx_rxx_int_reg.u64);
}
static void cvm_oct_check_preamble_errors(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
union cvmx_helper_link_info link_info;
unsigned long flags;
link_info.u64 = priv->link_info;
/*
* Take the global register lock since we are going to
* touch registers that affect more than one port.
*/
spin_lock_irqsave(&global_register_lock, flags);
if (link_info.s.speed == 10 && priv->last_speed == 10) {
/*
* Read the GMXX_RXX_INT_REG[PCTERR] bit and see if we are
* getting preamble errors.
*/
int interface = INTERFACE(priv->port);
int index = INDEX(priv->port);
union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;
gmxx_rxx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
(index, interface));
if (gmxx_rxx_int_reg.s.pcterr) {
/*
* We are getting preamble errors at 10Mbps. Most
* likely the PHY is giving us packets with misaligned
* preambles. In order to get these packets we need to
* disable preamble checking and do it in software.
*/
cvm_oct_set_hw_preamble(priv, false);
printk_ratelimited("%s: Using 10Mbps with software preamble removal\n",
dev->name);
}
} else {
/*
* Since the 10Mbps preamble workaround is allowed we need to
* enable preamble checking, FCS stripping, and clear error
* bits on every speed change. If errors occur during 10Mbps
* operation the above code will change this stuff
*/
if (priv->last_speed != link_info.s.speed)
cvm_oct_set_hw_preamble(priv, true);
priv->last_speed = link_info.s.speed;
}
spin_unlock_irqrestore(&global_register_lock, flags);
}
static void cvm_oct_rgmii_poll(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
union cvmx_helper_link_info link_info;
bool status_change;
link_info = cvmx_helper_link_get(priv->port);
if (priv->link_info != link_info.u64 &&
cvmx_helper_link_set(priv->port, link_info))
link_info.u64 = priv->link_info;
status_change = priv->link_info != link_info.u64;
priv->link_info = link_info.u64;
cvm_oct_check_preamble_errors(dev);
if (likely(!status_change))
return;
/* Tell core. */
if (link_info.s.link_up) {
if (!netif_carrier_ok(dev))
netif_carrier_on(dev);
} else if (netif_carrier_ok(dev)) {
netif_carrier_off(dev);
}
cvm_oct_note_carrier(priv, link_info);
}
int cvm_oct_rgmii_open(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
int ret;
ret = cvm_oct_common_open(dev, cvm_oct_rgmii_poll);
if (ret)
return ret;
if (dev->phydev) {
/*
* In phydev mode, we need still periodic polling for the
* preamble error checking, and we also need to call this
* function on every link state change.
*
* Only true RGMII ports need to be polled. In GMII mode, port
* 0 is really a RGMII port.
*/
if ((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII &&
priv->port == 0) ||
(priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {
priv->poll = cvm_oct_check_preamble_errors;
cvm_oct_check_preamble_errors(dev);
}
}
return 0;
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
*/
void cvm_oct_poll_controller(struct net_device *dev);
void cvm_oct_rx_initialize(void);
void cvm_oct_rx_shutdown(void);
static inline void cvm_oct_rx_refill_pool(int fill_threshold)
{
int number_to_free;
int num_freed;
/* Refill the packet buffer pool */
number_to_free =
cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
if (number_to_free > fill_threshold) {
cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
-number_to_free);
num_freed = cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
CVMX_FPA_PACKET_POOL_SIZE,
number_to_free);
if (num_freed != number_to_free) {
cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
number_to_free - num_freed);
}
}
}
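The refill helper above keeps a running deficit in a fetch-and-add counter: other paths add to FAU_NUM_PACKET_BUFFERS_TO_FREE as packet buffers leave the pool, and the refill path claims the whole deficit, tries to allocate that many buffers, then returns any shortfall to the counter. A rough restatement with plain C11 atomics (the names and the trivial fill_pool() stub are made up for illustration; they are not part of the driver):
#include <stdatomic.h>

static atomic_int buffers_owed;	/* incremented elsewhere as buffers are consumed */

static int fill_pool(int count)
{
	return count;			/* stub: pretend every allocation succeeds */
}

static void refill_pool(int fill_threshold)
{
	int want = atomic_load(&buffers_owed);
	int got;

	if (want <= fill_threshold)
		return;
	atomic_fetch_sub(&buffers_owed, want);	/* claim the whole deficit */
	got = fill_pool(want);
	if (got != want)			/* hand back what could not be allocated */
		atomic_fetch_add(&buffers_owed, want - got);
}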
// SPDX-License-Identifier: GPL-2.0
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
*/
#include <linux/phy.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/ratelimit.h>
#include <net/dst.h>
#include "octeon-ethernet.h"
#include "ethernet-defines.h"
#include "ethernet-util.h"
#include "ethernet-mdio.h"
int cvm_oct_sgmii_open(struct net_device *dev)
{
return cvm_oct_common_open(dev, cvm_oct_link_poll);
}
int cvm_oct_sgmii_init(struct net_device *dev)
{
cvm_oct_common_init(dev);
/* FIXME: Need autoneg logic */
return 0;
}
// SPDX-License-Identifier: GPL-2.0
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <net/dst.h>
#include "octeon-ethernet.h"
#include "ethernet-defines.h"
#include "ethernet-util.h"
static int number_spi_ports;
static int need_retrain[2] = { 0, 0 };
static void cvm_oct_spxx_int_pr(union cvmx_spxx_int_reg spx_int_reg, int index)
{
if (spx_int_reg.s.spf)
pr_err("SPI%d: SRX Spi4 interface down\n", index);
if (spx_int_reg.s.calerr)
pr_err("SPI%d: SRX Spi4 Calendar table parity error\n", index);
if (spx_int_reg.s.syncerr)
pr_err("SPI%d: SRX Consecutive Spi4 DIP4 errors have exceeded SPX_ERR_CTL[ERRCNT]\n",
index);
if (spx_int_reg.s.diperr)
pr_err("SPI%d: SRX Spi4 DIP4 error\n", index);
if (spx_int_reg.s.tpaovr)
pr_err("SPI%d: SRX Selected port has hit TPA overflow\n",
index);
if (spx_int_reg.s.rsverr)
pr_err("SPI%d: SRX Spi4 reserved control word detected\n",
index);
if (spx_int_reg.s.drwnng)
pr_err("SPI%d: SRX Spi4 receive FIFO drowning/overflow\n",
index);
if (spx_int_reg.s.clserr)
pr_err("SPI%d: SRX Spi4 packet closed on non-16B alignment without EOP\n",
index);
if (spx_int_reg.s.spiovr)
pr_err("SPI%d: SRX Spi4 async FIFO overflow\n", index);
if (spx_int_reg.s.abnorm)
pr_err("SPI%d: SRX Abnormal packet termination (ERR bit)\n",
index);
if (spx_int_reg.s.prtnxa)
pr_err("SPI%d: SRX Port out of range\n", index);
}
static void cvm_oct_stxx_int_pr(union cvmx_stxx_int_reg stx_int_reg, int index)
{
if (stx_int_reg.s.syncerr)
pr_err("SPI%d: STX Interface encountered a fatal error\n",
index);
if (stx_int_reg.s.frmerr)
pr_err("SPI%d: STX FRMCNT has exceeded STX_DIP_CNT[MAXFRM]\n",
index);
if (stx_int_reg.s.unxfrm)
pr_err("SPI%d: STX Unexpected framing sequence\n", index);
if (stx_int_reg.s.nosync)
pr_err("SPI%d: STX ERRCNT has exceeded STX_DIP_CNT[MAXDIP]\n",
index);
if (stx_int_reg.s.diperr)
pr_err("SPI%d: STX DIP2 error on the Spi4 Status channel\n",
index);
if (stx_int_reg.s.datovr)
pr_err("SPI%d: STX Spi4 FIFO overflow error\n", index);
if (stx_int_reg.s.ovrbst)
pr_err("SPI%d: STX Transmit packet burst too big\n", index);
if (stx_int_reg.s.calpar1)
pr_err("SPI%d: STX Calendar Table Parity Error Bank%d\n",
index, 1);
if (stx_int_reg.s.calpar0)
pr_err("SPI%d: STX Calendar Table Parity Error Bank%d\n",
index, 0);
}
static irqreturn_t cvm_oct_spi_spx_int(int index)
{
union cvmx_spxx_int_reg spx_int_reg;
union cvmx_stxx_int_reg stx_int_reg;
spx_int_reg.u64 = cvmx_read_csr(CVMX_SPXX_INT_REG(index));
cvmx_write_csr(CVMX_SPXX_INT_REG(index), spx_int_reg.u64);
if (!need_retrain[index]) {
spx_int_reg.u64 &= cvmx_read_csr(CVMX_SPXX_INT_MSK(index));
cvm_oct_spxx_int_pr(spx_int_reg, index);
}
stx_int_reg.u64 = cvmx_read_csr(CVMX_STXX_INT_REG(index));
cvmx_write_csr(CVMX_STXX_INT_REG(index), stx_int_reg.u64);
if (!need_retrain[index]) {
stx_int_reg.u64 &= cvmx_read_csr(CVMX_STXX_INT_MSK(index));
cvm_oct_stxx_int_pr(stx_int_reg, index);
}
cvmx_write_csr(CVMX_SPXX_INT_MSK(index), 0);
cvmx_write_csr(CVMX_STXX_INT_MSK(index), 0);
need_retrain[index] = 1;
return IRQ_HANDLED;
}
static irqreturn_t cvm_oct_spi_rml_interrupt(int cpl, void *dev_id)
{
irqreturn_t return_status = IRQ_NONE;
union cvmx_npi_rsl_int_blocks rsl_int_blocks;
/* Check and see if this interrupt was caused by the GMX block */
rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS);
if (rsl_int_blocks.s.spx1) /* 19 - SPX1_INT_REG & STX1_INT_REG */
return_status = cvm_oct_spi_spx_int(1);
if (rsl_int_blocks.s.spx0) /* 18 - SPX0_INT_REG & STX0_INT_REG */
return_status = cvm_oct_spi_spx_int(0);
return return_status;
}
static void cvm_oct_spi_enable_error_reporting(int interface)
{
union cvmx_spxx_int_msk spxx_int_msk;
union cvmx_stxx_int_msk stxx_int_msk;
spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface));
spxx_int_msk.s.calerr = 1;
spxx_int_msk.s.syncerr = 1;
spxx_int_msk.s.diperr = 1;
spxx_int_msk.s.tpaovr = 1;
spxx_int_msk.s.rsverr = 1;
spxx_int_msk.s.drwnng = 1;
spxx_int_msk.s.clserr = 1;
spxx_int_msk.s.spiovr = 1;
spxx_int_msk.s.abnorm = 1;
spxx_int_msk.s.prtnxa = 1;
cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64);
stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface));
stxx_int_msk.s.frmerr = 1;
stxx_int_msk.s.unxfrm = 1;
stxx_int_msk.s.nosync = 1;
stxx_int_msk.s.diperr = 1;
stxx_int_msk.s.datovr = 1;
stxx_int_msk.s.ovrbst = 1;
stxx_int_msk.s.calpar1 = 1;
stxx_int_msk.s.calpar0 = 1;
cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64);
}
static void cvm_oct_spi_poll(struct net_device *dev)
{
static int spi4000_port;
struct octeon_ethernet *priv = netdev_priv(dev);
int interface;
for (interface = 0; interface < 2; interface++) {
if ((priv->port == interface * 16) && need_retrain[interface]) {
if (cvmx_spi_restart_interface
(interface, CVMX_SPI_MODE_DUPLEX, 10) == 0) {
need_retrain[interface] = 0;
cvm_oct_spi_enable_error_reporting(interface);
}
}
/*
* The SPI4000 TWSI interface is very slow. In order
* not to bring the system to a crawl, we only poll a
* single port every second. This means negotiation
* speed changes take up to 10 seconds, but at least
* we don't waste absurd amounts of time waiting for
* TWSI.
*/
if (priv->port == spi4000_port) {
/*
* This function does nothing if it is called on an
* interface without a SPI4000.
*/
cvmx_spi4000_check_speed(interface, priv->port);
/*
* Normal ordering increments. By decrementing
* we only match once per iteration.
*/
spi4000_port--;
if (spi4000_port < 0)
spi4000_port = 10;
}
}
}
int cvm_oct_spi_init(struct net_device *dev)
{
int r;
struct octeon_ethernet *priv = netdev_priv(dev);
if (number_spi_ports == 0) {
r = request_irq(OCTEON_IRQ_RML, cvm_oct_spi_rml_interrupt,
IRQF_SHARED, "SPI", &number_spi_ports);
if (r)
return r;
}
number_spi_ports++;
if ((priv->port == 0) || (priv->port == 16)) {
cvm_oct_spi_enable_error_reporting(INTERFACE(priv->port));
priv->poll = cvm_oct_spi_poll;
}
cvm_oct_common_init(dev);
return 0;
}
void cvm_oct_spi_uninit(struct net_device *dev)
{
int interface;
cvm_oct_common_uninit(dev);
number_spi_ports--;
if (number_spi_ports == 0) {
for (interface = 0; interface < 2; interface++) {
cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);
}
free_irq(OCTEON_IRQ_RML, &number_spi_ports);
}
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
*/
int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev);
int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev);
int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
int do_free, int qos);
void cvm_oct_tx_initialize(void);
void cvm_oct_tx_shutdown(void);
void cvm_oct_tx_shutdown_dev(struct net_device *dev);
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2007 Cavium Networks
*/
/**
* cvm_oct_get_buffer_ptr - convert packet data address to pointer
* @packet_ptr: Packet data hardware address
*
* Returns Packet buffer pointer
*/
static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr)
{
return cvmx_phys_to_ptr(((packet_ptr.s.addr >> 7) - packet_ptr.s.back)
<< 7);
}
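The shift-and-subtract above rounds the packet data address down to its 128-byte cache line and then steps back packet_ptr.s.back lines to reach the start of the hardware buffer. With made-up numbers (not from the driver):
/* Example: addr = 0x1234, back = 1
 *   0x1234 >> 7  = 0x24    (128-byte line holding the data)
 *   0x24 - 1     = 0x23    (step back one line)
 *   0x23 << 7    = 0x1180  (physical start of the buffer)
 */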
/**
* INTERFACE - convert IPD port to logical interface
* @ipd_port: Port to check
*
* Returns Logical interface
*/
static inline int INTERFACE(int ipd_port)
{
int interface;
if (ipd_port == CVMX_PIP_NUM_INPUT_PORTS)
return 10;
interface = cvmx_helper_get_interface_num(ipd_port);
if (interface >= 0)
return interface;
panic("Illegal ipd_port %d passed to %s\n", ipd_port, __func__);
}
/**
* INDEX - convert IPD/PKO port number to the port's interface index
* @ipd_port: Port to check
*
* Returns Index into interface port list
*/
static inline int INDEX(int ipd_port)
{
return cvmx_helper_get_interface_index_num(ipd_port);
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is based on code from OCTEON SDK by Cavium Networks.
*
* Copyright (c) 2003-2010 Cavium Networks
*/
/*
* External interface for the Cavium Octeon ethernet driver.
*/
#ifndef OCTEON_ETHERNET_H
#define OCTEON_ETHERNET_H
#include <linux/of.h>
#include <linux/phy.h>
#ifdef CONFIG_CAVIUM_OCTEON_SOC
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-asxx-defs.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-util.h>
#include <asm/octeon/cvmx-ipd.h>
#include <asm/octeon/cvmx-ipd-defs.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-pow.h>
#include <asm/octeon/cvmx-scratch.h>
#include <asm/octeon/cvmx-spi.h>
#include <asm/octeon/cvmx-spxx-defs.h>
#include <asm/octeon/cvmx-stxx-defs.h>
#include <asm/octeon/cvmx-wqe.h>
#else
#include "octeon-stubs.h"
#endif
/**
* This is the definition of the Ethernet driver's private
* driver state stored in netdev_priv(dev).
*/
struct octeon_ethernet {
/* PKO hardware output port */
int port;
/* PKO hardware queue for the port */
int queue;
/* Hardware fetch and add to count outstanding tx buffers */
int fau;
/* My netdev. */
struct net_device *netdev;
/*
* Type of port. This is one of the enums in
* cvmx_helper_interface_mode_t
*/
int imode;
/* PHY mode */
phy_interface_t phy_mode;
/* List of outstanding tx buffers per queue */
struct sk_buff_head tx_free_list[16];
unsigned int last_speed;
unsigned int last_link;
/* Last negotiated link state */
u64 link_info;
/* Called periodically to check link status */
void (*poll)(struct net_device *dev);
struct delayed_work port_periodic_work;
struct device_node *of_node;
};
int cvm_oct_free_work(void *work_queue_entry);
int cvm_oct_rgmii_open(struct net_device *dev);
int cvm_oct_sgmii_init(struct net_device *dev);
int cvm_oct_sgmii_open(struct net_device *dev);
int cvm_oct_spi_init(struct net_device *dev);
void cvm_oct_spi_uninit(struct net_device *dev);
int cvm_oct_common_init(struct net_device *dev);
void cvm_oct_common_uninit(struct net_device *dev);
void cvm_oct_adjust_link(struct net_device *dev);
int cvm_oct_common_stop(struct net_device *dev);
int cvm_oct_common_open(struct net_device *dev,
void (*link_poll)(struct net_device *));
void cvm_oct_note_carrier(struct octeon_ethernet *priv,
union cvmx_helper_link_info li);
void cvm_oct_link_poll(struct net_device *dev);
extern int always_use_pow;
extern int pow_send_group;
extern int pow_receive_groups;
extern char pow_send_list[];
extern struct net_device *cvm_oct_device[];
extern atomic_t cvm_oct_poll_queue_stopping;
extern u64 cvm_oct_tx_poll_interval;
extern int rx_napi_weight;
#endif